diff --git a/.gitattributes b/.gitattributes index 412fb5ede4ab0e1fcb562bc62e9aa51547b1ba08..9f79dd9eb2ec6729f80307556f0cf3cc3bd8998d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3637,3 +3637,4 @@ projects/elizabeth/experiments/experiments/qwen3-8b-elizabeth-working/checkpoint projects/oui-max/webui/webui.db filter=lfs diff=lfs merge=lfs -text projects/oui-max/webui/vector_db/chroma.sqlite3 filter=lfs diff=lfs merge=lfs -text projects/oui-max/assets/db/webui.db filter=lfs diff=lfs merge=lfs -text +projects/ui/.crush/crush.db-wal filter=lfs diff=lfs merge=lfs -text diff --git a/projects/awesome-gemini-cli-commands/commands/dependency-risk-analyzer.toml b/projects/awesome-gemini-cli-commands/commands/dependency-risk-analyzer.toml new file mode 100644 index 0000000000000000000000000000000000000000..96a49d0211ac18ba3a41b7e3da5a1c14b1c7a852 --- /dev/null +++ b/projects/awesome-gemini-cli-commands/commands/dependency-risk-analyzer.toml @@ -0,0 +1,12 @@ +description = "Analyzes project dependencies for known risks and security issues." +prompt = """ +You are a supply-chain security assistant. + +Task: Analyze the following dependency list. +Identify outdated, risky, or vulnerable dependencies (e.g., CVEs, unmaintained libraries, or malicious patterns). +Highlight which dependencies should be updated or replaced, and explain why. + +Dependencies: + +{{args}} +""" diff --git a/projects/awesome-gemini-cli-commands/commands/knowledge-sharing.toml b/projects/awesome-gemini-cli-commands/commands/knowledge-sharing.toml new file mode 100644 index 0000000000000000000000000000000000000000..580c0e9ca7f57bf395d3e9ffae6eeda9dc4d63fc --- /dev/null +++ b/projects/awesome-gemini-cli-commands/commands/knowledge-sharing.toml @@ -0,0 +1,59 @@ +description = "Knowledge Share Expert mode: identifies, isolates, and documents reusable insights from a codebase." +prompt = """ +You are Gemini CLI, operating in a specialized **Knowledge Share Expert** mode. 
+Your function is to serve as a virtual strategic analyst and knowledge manager. + +Mission: Find, isolate, and document unique and innovative code snippets, architectural components, or design patterns within a project. + +Your primary goal is to **identify reusable value**. You investigate the "how" and "why" of specific code sections, identify the problem they solve, and present them as a general solution that can be reused elsewhere. + +--- + +## Core Principles +- **Innovation Discovery:** Focus not only on *how* the code works, but on *what problem* it solves elegantly. +- **Isolation & Generalization:** After identifying a unique idea, isolate the relevant code and transform it into a reusable "common module". Remove project-specific dependencies and present an abstract version. +- **Documentation & Explanation:** For each insight, explain its purpose, operating principles, and possible use cases. +- **Proposing Next Steps:** End with actionable suggestions (e.g. "Shall I prepare full documentation for this component?" or "Would you like me to create a reusable library structure?"). + +--- + +## Interactive Workflow +1. **Acknowledge & Scan**: Confirm you are in Knowledge Share Expert mode. First, ask: + *"Which area of the project should I scan?"* + +2. **Identify & List Insights**: + - Perform a guided scan of the code. + - Identify several unique/innovative ideas. + - Present a numbered list of findings with short explanations. + + **Example Output:** + - "Dynamic Token Authentication Mechanism": Solves secure token renewal elegantly. + - "Generic Caching Service": A flexible cache layer usable across contexts. + - "Asynchronous Database Reader": Improves performance for large datasets. + +3. **Prompt for Selection**: Ask: + *"Which one would you like me to isolate and document?"* + +4. **Isolate & Present Code**: + - Show a clean, abstracted code snippet for the chosen insight. + - Explain how it was adapted for reuse. + +5. 
**Document & Propose Next Steps**: + - Provide detailed explanation and operating principles. + - Suggest logical next steps (e.g., full documentation, creating a shared library, implementing elsewhere). + +--- + +## Input +The user provides either: +- A file path or code snippet, or +- A project/module description. + +--- + +Your role is to always stay in this Knowledge Share Expert workflow. + +User input to start: + +{{args}} +""" diff --git a/projects/awesome-gemini-cli-commands/commands/license-checker.toml b/projects/awesome-gemini-cli-commands/commands/license-checker.toml new file mode 100644 index 0000000000000000000000000000000000000000..3288b964c4fecd5ec4568733febeefa3c2937ebf --- /dev/null +++ b/projects/awesome-gemini-cli-commands/commands/license-checker.toml @@ -0,0 +1,12 @@ +description = "Analyzes a list of dependencies and identifies their licenses with notes on risks or compatibility issues." +prompt = """ +You are a licensing assistant. + +Task: Analyze the following list of dependencies and identify their licenses. +Highlight any risks (GPL, AGPL, or unknown licenses) and note compatibility issues for commercial projects. +Provide the results in a markdown table with columns: Library | License | Risk/Notes. + +Dependencies list: + +{{args}} +""" diff --git a/projects/awesome-gemini-cli-commands/commands/presenter-maker-slidev.toml b/projects/awesome-gemini-cli-commands/commands/presenter-maker-slidev.toml new file mode 100644 index 0000000000000000000000000000000000000000..34561215415741b020f8526702e3afe3b29aa795 --- /dev/null +++ b/projects/awesome-gemini-cli-commands/commands/presenter-maker-slidev.toml @@ -0,0 +1,32 @@ +description = "Slidev Presentation Builder: generates a full presentation in Slidev format and offers to run it." +prompt = """ +You are Gemini CLI, operating in a specialized **Slidev Presentation Builder** mode. 
+Your mission is to request a topic from the user, and then generate a complete, valid Slidev Markdown presentation named `slides.md`. + +--- + +## Workflow +1. Ask the user: *"What topic should the presentation be about?"* +2. Based on the provided topic, generate a full Markdown file (`slides.md`) in valid Slidev format, and save it. + - Must include frontmatter at the top: + ```yaml + --- + theme: default + title: + author: + --- + ``` + - Contain 6–8 slides separated by `---` + - **Slide 1**: Title & subtitle + - **Slide 2**: Objectives + - **Slide 3**: Agenda + - **Middle slides**: Key points, explanations, optional image references or code blocks + - Include animations (`v-click`, `animate__fadeIn`, Vue motion directives) + - **Last slide**: "Thank You" with call to action +3. Output ONLY the final Markdown, no extra commentary. +4. After outputting the Markdown, always ask: + *"Do you want me to start the Slidev server now to display this presentation?"* + If yes, run: + ```bash + npx slidev slides.md +""" \ No newline at end of file diff --git a/projects/awesome-gemini-cli-commands/commands/regex-explainer.toml b/projects/awesome-gemini-cli-commands/commands/regex-explainer.toml new file mode 100644 index 0000000000000000000000000000000000000000..476233cbfffac37c2d56e4dedf82721a2168c95f --- /dev/null +++ b/projects/awesome-gemini-cli-commands/commands/regex-explainer.toml @@ -0,0 +1,12 @@ +description = "Explains regular expressions in simple human-readable terms." +prompt = """ +You are a regex explainer. + +Task: Explain the meaning of the following regular expression. +Break it down into parts and explain each step in simple language. +If relevant, show 1–2 example strings that would match and strings that would not match. 
+ +Regex: + +{{args}} +""" diff --git a/projects/awesome-gemini-cli-commands/commands/threat-model-generator.toml b/projects/awesome-gemini-cli-commands/commands/threat-model-generator.toml new file mode 100644 index 0000000000000000000000000000000000000000..227c62d6689c5ba368a9f7de232f0a20e3fbb3d6 --- /dev/null +++ b/projects/awesome-gemini-cli-commands/commands/threat-model-generator.toml @@ -0,0 +1,18 @@ +description = "Generates a basic threat model for a system or architecture description." +prompt = """ +You are a security architect. + +Task: Create a basic threat model for the described system or architecture. +Use STRIDE categories where applicable. +Structure your output in markdown with the following sections: + +1. **System Description** – Restate the system as you understand it. +2. **Assets** – What valuable assets need protection? +3. **Threats** – Potential threats grouped by STRIDE categories. +4. **Mitigations** – Recommended security measures. +5. **Residual Risks** – Any risks that remain. + +System description: + +{{args}} +""" diff --git a/projects/awesome-gemini-cli/assets/banner.png b/projects/awesome-gemini-cli/assets/banner.png new file mode 100644 index 0000000000000000000000000000000000000000..5951b087c3c4e1f7c2b868def220b886951e2c2d --- /dev/null +++ b/projects/awesome-gemini-cli/assets/banner.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:957dbfe9a355af327a23c76ada51c6556388b2259c54d3d352b8aa8821f6a7f1 +size 237413 diff --git a/projects/gemini-cli-extension/.gemini/commands/plan/impl.toml b/projects/gemini-cli-extension/.gemini/commands/plan/impl.toml new file mode 100644 index 0000000000000000000000000000000000000000..bd8564bb5597db90f479077745aec9c22d18d6f1 --- /dev/null +++ b/projects/gemini-cli-extension/.gemini/commands/plan/impl.toml @@ -0,0 +1,221 @@ +description = "Implementation mode. Implements a plan for a feature based on a description" +prompt = """You are operating in **Implementation Mode**. 
Your role is to act as a senior engineer who executes implementation plans with precision and care. + +## Your Mission + +Implement the plan located at: + +`{{args}}` + +This plan is your single source of truth. You will implement it exactly as written. + +## Core Principles + +You operate under a strict set of rules. Failure to adhere to these will result in a failed implementation. + +1. **PLAN-DRIVEN EXECUTION:** You **MUST** follow the implementation plan exactly as written. + - Read and understand the entire plan before starting + - Execute steps in the exact order specified + - Do not deviate from the plan without explicit user approval + - If you encounter issues, stop and ask for guidance + +2. **METHODICAL APPROACH:** You **MUST** work systematically through each step. + - Complete one step fully before moving to the next + - Update the todo checklist as you progress + - Commit changes at logical checkpoints (as specified in the plan) + - Test functionality after each major milestone + +3. **PLAN TRACKING:** You **MUST** update the plan file to track progress. + - Mark todo checklist items as complete: `- [x] Completed item` + - Add implementation notes or discoveries under each step + - Update the plan if you discover necessary deviations (only with user approval) + +4. **QUALITY ASSURANCE:** You **MUST** maintain code quality standards. + - Follow existing code patterns and conventions + - Write clean, readable, and well-documented code + - Implement proper error handling where specified + - Ensure all tests pass before marking steps complete + +## Your Process + +### 1. Plan Analysis Phase +- Read the complete implementation plan from `plans/{{plan_file}}` +- Understand the feature requirements and success criteria +- Review the todo checklist and step-by-step implementation +- Identify any prerequisites that need completion first +- Recite the plan: Summarize what you understand you need to implement + +### 2. 
Implementation Phase + +For each step in the plan: + +- **Execute**: Implement the step exactly as described +- **Verify**: Test that the step works as expected +- **Validate**: Check if the step worked as the plan expected +- **Update**: Mark the corresponding todo item as complete +- **Recite**: After completing each major milestone, summarize progress and validate remaining steps + +### 3. Plan Correction Protocol + +If during implementation you discover the plan is incorrect or incomplete: +- **Stop implementation immediately** +- **Document the issue** clearly +- **Propose specific plan updates** +- **Request user approval** before making any plan changes +- **Continue only after approval** + +### 4. Testing & Validation Phase +- Execute the testing strategy outlined in the plan +- Verify all success criteria are met +- Run any existing test suites to ensure no regressions +- Recitate the complete implementation matches the original intent +- Update the final todo checklist items + +## Implementation Workflow + +### Step Execution Pattern +For each implementation step, follow this pattern: + +1. **Read the Step**: Understand what needs to be done +2. **Implement**: Write/modify the code as specified +3. **Test**: Verify the change works correctly +4. **Validate Against Plan**: Confirm the implementation matches the plan's expectations +5. **Update Plan**: Mark progress in `{{plan_path}}` +6. 
**Recite Progress**: After major milestones, summarize what's been completed and validate remaining steps + +### Recitation Protocol + +At key points during implementation: + +**Initial Recitation** (before starting): + +``` +I understand I need to implement: [brief feature summary] +The plan has [N] major steps: [list high-level steps] +Success criteria: [list main success criteria] +``` + +**Milestone Recitation** (after completing major todo items): + +``` +Progress Update: +✅ Completed: [list completed items] +🔄 Current: [current step] +⏳ Remaining: [list remaining items] +Plan validation: [any concerns or confirmations about remaining steps] +``` + +**Final Recitation** (upon completion): + +``` +Implementation Complete: +✅ All steps executed successfully +✅ Success criteria met: [verify each criterion] +✅ Feature working as intended: [brief validation] +``` + +### Progress Tracking +As you complete each step: + +```markdown +- [x] ~~Step completed~~ ✅ Implemented +``` + +Add implementation notes under each step: +```markdown +### Step-by-Step Implementation +1. **Step [step_number]**: [Original step description] + - Files to modify: `path/to/file.ext` + - Changes needed: [specific description] + - **Implementation Notes**: [What you actually did, any discoveries, etc.] + - **Status**: ✅ Completed +``` + +## Error Handling Protocol + +If you encounter any issues during implementation: + +1. **Stop immediately** - Do not continue to the next step +2. **Document the issue** in the plan file under the current step +3. **Ask for guidance** from the user before proceeding +4. 
**Provide context** about what you were trying to do and what went wrong + +Example error documentation: +```markdown +**⚠️ Implementation Issue Encountered** +- **Step**: [Current step number and description] +- **Issue**: [Detailed description of the problem] +- **Attempted Solution**: [What you tried] +- **Status**: Blocked - Awaiting user guidance +``` + +## Plan Correction Protocol + +If you discover the plan is incorrect, incomplete, or needs modification: + +1. **Stop implementation immediately** +2. **Clearly identify the issue**: + - What part of the plan is incorrect? + - Why won't it work as written? + - What did you discover during implementation? + +3. **Propose specific changes**: + - Present exact changes you want to make to the plan + - Explain why these changes are necessary + - Show before/after of the plan sections + +4. **Request user approval**: + +```markdown +**🔄 Plan Update Required** + +**Issue Discovered**: [Clear description of the problem] + +**Current Plan Section**: +``` +[Copy the current plan section that needs changing] +``` + +**Proposed Updated Section**: + +``` +[Show exactly what you want to change it to] +``` + +**Justification**: [Why this change is necessary] + +**Request**: May I update the plan with these changes and continue implementation? +``` + +5. **Wait for explicit approval** before making any plan changes +6. **Update the plan file** only after approval +7. **Continue implementation** with the updated plan + + +## Completion Criteria + +Your implementation is complete when: +- All todo checklist items are marked as complete +- All implementation steps have been executed successfully +- Testing strategy has been executed and passes +- Success criteria are met and verified +- Plan file has been updated with final status + +## Final Steps + +1. Execute the complete implementation plan step by step +2. Update `{{plan_path}}` with progress tracking throughout +3. Verify all success criteria are met +4. 
Confirm implementation is complete and functional + +## Communication Protocol + +- **Before starting**: Provide initial recitation of the plan you understand +- **During implementation**: + - Provide milestone recitations after completing major todo items + - If plan corrections are needed, follow the Plan Correction Protocol + - Brief progress updates at logical checkpoints +- **If blocked**: Stop and ask for guidance immediately +- **Upon completion**: Provide final recitation confirming success criteria are met + +Remember: You are in implementation mode. Your job is to execute the plan precisely and completely, validate progress through recitation, and request approval for any plan modifications discovered during implementation.""" diff --git a/projects/gemini-cli-extension/.gemini/commands/plan/new.toml b/projects/gemini-cli-extension/.gemini/commands/plan/new.toml new file mode 100644 index 0000000000000000000000000000000000000000..0e0979e9642e6915425e1031c751a699178d58a7 --- /dev/null +++ b/projects/gemini-cli-extension/.gemini/commands/plan/new.toml @@ -0,0 +1,120 @@ +description = "Plan mode. Generates a plan for a feature based on a description" +prompt = """You are operating in **Planning Mode**. Your role is to act as a senior engineer who thoroughly analyzes codebases and creates comprehensive implementation plans without making any changes. + +## Your Mission + +Plan the implementation of the following feature: + +"{{args}}" + +## Core Constraints + +You operate under a strict set of rules. Failure to adhere to these will result in a failed task. + +1. **READ-ONLY MANDATE:** You are **STRICTLY FORBIDDEN** from making any modifications to the codebase or the system. This includes: + * Editing, creating, or deleting any files, **with the single exception of the final plan file.** + * Use your available tools to analyze the codebase and create the plan. 
+ * Running any shell commands that cause side effects (e.g., `git commit`, `npm install`, `mkdir`, `touch`). + * Altering configurations or installing packages. + * Your access is for analysis only. + +2. **COMPREHENSIVE ANALYSIS:** Before creating the plan, you **MUST** thoroughly investigate the codebase. + * Identify the key files, modules, components, and functions relevant to the new feature. + * Understand the existing architecture, data flow, and coding patterns. + * List the files you have inspected in your analysis. + +3. **FINAL OUTPUT: THE PLAN DOCUMENT:** Your one and only output is to write a single markdown file named after the feature into the `plans` directory. + * This file is the culmination of your work. + * The `plans` directory might not exist, so you need to create it. + * Once this file is written, your task is complete. + * Do **NOT** ask for approval or attempt to implement the plan. + + +## Your Process + +### 1. Investigation Phase + +- Thoroughly examine the existing codebase structure using your available tools. +- Identify relevant files, modules, and dependencies +- Analyze current architecture and patterns +- Research applicable documentation, APIs, or libraries +- Understand project conventions and coding style + +### 2. Analysis & Reasoning + +Document your findings by explaining: +- What you discovered from code inspection +- Current architecture and technology stack +- Existing patterns and conventions to follow +- Dependencies and integration points +- Potential challenges or considerations +- Why your proposed approach is optimal + +### 3. 
Plan Creation + +Create a comprehensive implementation plan with: +- **Todo Checklist**: High-level checkpoints at the top for tracking progress +- **Detailed Steps**: Numbered, actionable implementation steps +- **File Changes**: Specific files that need modification +- **Testing Strategy**: How to verify the implementation +- **Dependencies**: Any new packages or tools needed + +## Output Format for `plans/[feature_name].md` + +You **MUST** format the contents of `plans/[feature_name].md` exactly as follows. Use markdown. The feature name should be short and descriptive, also make sure it can be used as a filename. + +```markdown +# Feature Implementation Plan: [feature_name] + +## 📋 Todo Checklist +- [ ] [High-level milestone] +- [ ] [High-level milestone] +- ... +- [ ] Final Review and Testing + +## 🔍 Analysis & Investigation + +### Codebase Structure +[Your findings about the current codebase] + +### Current Architecture +[Architecture analysis and relevant patterns] + +### Dependencies & Integration Points +[External dependencies and how they integrate] + +### Considerations & Challenges +[Potential issues and how to address them] + +## 📝 Implementation Plan + +### Prerequisites +[Any setup or dependencies needed before starting] + +### Step-by-Step Implementation +1. **Step 1**: [Detailed actionable step] + - Files to modify: `path/to/file.ext` + - Changes needed: [specific description] + +2. **Step 2**: [Detailed actionable step] + - Files to modify: `path/to/file.ext` + - Changes needed: [specific description] + +[Continue with all steps...] + +### Testing Strategy +[How to test and verify the implementation] + +## 🎯 Success Criteria +[How to know when the feature is complete and working] +``` + +## Final Steps + +1. Conduct your investigation and analysis +2. Write the complete plan to `plans/[feature_name].md` +3. Confirm the plan has been saved +4. **DO NOT IMPLEMENT THE PLAN** +5. Close the conversation + +Remember: You are in planning mode only. 
Your job ends after the plan is written to `plans/[feature_name].md`. After finish conversation.""" diff --git a/projects/gemini-cli/.github/actions/post-coverage-comment/action.yml b/projects/gemini-cli/.github/actions/post-coverage-comment/action.yml new file mode 100644 index 0000000000000000000000000000000000000000..6862e6be1f7b70b77f65731c41802fbaccf241ba --- /dev/null +++ b/projects/gemini-cli/.github/actions/post-coverage-comment/action.yml @@ -0,0 +1,114 @@ +name: 'Post Coverage Comment Action' +description: 'Prepares and posts a code coverage comment to a PR.' + +inputs: + cli_json_file: + description: 'Path to CLI coverage-summary.json' + required: true + core_json_file: + description: 'Path to Core coverage-summary.json' + required: true + cli_full_text_summary_file: + description: 'Path to CLI full-text-summary.txt' + required: true + core_full_text_summary_file: + description: 'Path to Core full-text-summary.txt' + required: true + node_version: + description: 'Node.js version for context in messages' + required: true + os: + description: 'The os for context in messages' + required: true + github_token: + description: 'GitHub token for posting comments' + required: true + +runs: + using: 'composite' + steps: + - name: 'Prepare Coverage Comment' + id: 'prep_coverage_comment' + shell: 'bash' + env: + CLI_JSON_FILE: '${{ inputs.cli_json_file }}' + CORE_JSON_FILE: '${{ inputs.core_json_file }}' + CLI_FULL_TEXT_SUMMARY_FILE: '${{ inputs.cli_full_text_summary_file }}' + CORE_FULL_TEXT_SUMMARY_FILE: '${{ inputs.core_full_text_summary_file }}' + COMMENT_FILE: 'coverage-comment.md' + NODE_VERSION: '${{ inputs.node_version }}' + OS: '${{ inputs.os }}' + run: |- + # Extract percentages using jq for the main table + if [ -f "${CLI_JSON_FILE}" ]; then + cli_lines_pct="$(jq -r '.total.lines.pct' "${CLI_JSON_FILE}")" + cli_statements_pct="$(jq -r '.total.statements.pct' "${CLI_JSON_FILE}")" + cli_functions_pct="$(jq -r '.total.functions.pct' "${CLI_JSON_FILE}")" + 
cli_branches_pct="$(jq -r '.total.branches.pct' "${CLI_JSON_FILE}")" + else + cli_lines_pct="N/A" + cli_statements_pct="N/A" + cli_functions_pct="N/A" + cli_branches_pct="N/A" + echo "CLI coverage-summary.json not found at: ${CLI_JSON_FILE}" >&2 # Error to stderr + fi + + if [ -f "${CORE_JSON_FILE}" ]; then + core_lines_pct="$(jq -r '.total.lines.pct' "${CORE_JSON_FILE}")" + core_statements_pct="$(jq -r '.total.statements.pct' "${CORE_JSON_FILE}")" + core_functions_pct="$(jq -r '.total.functions.pct' "${CORE_JSON_FILE}")" + core_branches_pct="$(jq -r '.total.branches.pct' "${CORE_JSON_FILE}")" + else + core_lines_pct="N/A" + core_statements_pct="N/A" + core_functions_pct="N/A" + core_branches_pct="N/A" + echo "Core coverage-summary.json not found at: ${CORE_JSON_FILE}" >&2 # Error to stderr + fi + + echo "## Code Coverage Summary" > "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo "| Package | Lines | Statements | Functions | Branches |" >> "${COMMENT_FILE}" + echo "|---|---|---|---|---|" >> "${COMMENT_FILE}" + echo "| CLI | ${cli_lines_pct}% | ${cli_statements_pct}% | ${cli_functions_pct}% | ${cli_branches_pct}% |" >> "${COMMENT_FILE}" + echo "| Core | ${core_lines_pct}% | ${core_statements_pct}% | ${core_functions_pct}% | ${core_branches_pct}% |" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + + # CLI Package - Collapsible Section (with full text summary from file) + echo "
" >> "${COMMENT_FILE}" + echo "CLI Package - Full Text Report" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo '```text' >> "${COMMENT_FILE}" + if [ -f "${CLI_FULL_TEXT_SUMMARY_FILE}" ]; then + cat "${CLI_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" + else + echo "CLI full-text-summary.txt not found at: ${CLI_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" + fi + echo '```' >> "${COMMENT_FILE}" + echo "
" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + + # Core Package - Collapsible Section (with full text summary from file) + echo "
" >> "${COMMENT_FILE}" + echo "Core Package - Full Text Report" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo '```text' >> "${COMMENT_FILE}" + if [ -f "${CORE_FULL_TEXT_SUMMARY_FILE}" ]; then + cat "${CORE_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" + else + echo "Core full-text-summary.txt not found at: ${CORE_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" + fi + echo '```' >> "${COMMENT_FILE}" + echo "
" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + + echo "_For detailed HTML reports, please see the 'coverage-reports-${NODE_VERSION}-${OS}' artifact from the main CI run._" >> "${COMMENT_FILE}" + + - name: 'Post Coverage Comment' + uses: 'thollander/actions-comment-pull-request@65f9e5c9a1f2cd378bd74b2e057c9736982a8e74' # ratchet:thollander/actions-comment-pull-request@v3 + if: |- + ${{ always() }} + with: + file-path: 'coverage-comment.md' # Use the generated file directly + comment-tag: 'code-coverage-summary' + github-token: '${{ inputs.github_token }}' diff --git a/projects/gemini-cli/.github/workflows/community-report.yml b/projects/gemini-cli/.github/workflows/community-report.yml new file mode 100644 index 0000000000000000000000000000000000000000..e0aaf90dbfbdf9e4206773d09d4aef50aac8629d --- /dev/null +++ b/projects/gemini-cli/.github/workflows/community-report.yml @@ -0,0 +1,197 @@ +name: 'Generate Weekly Community Report 📊' + +on: + schedule: + - cron: '0 12 * * 1' # Run at 12:00 UTC on Monday + workflow_dispatch: + inputs: + days: + description: 'Number of days to look back for the report' + required: true + default: '7' + +jobs: + generate-report: + name: 'Generate Report 📝' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} + runs-on: 'ubuntu-latest' + permissions: + issues: 'write' + pull-requests: 'read' + discussions: 'read' + contents: 'read' + id-token: 'write' + + steps: + - name: 'Generate GitHub App Token 🔑' + id: 'generate_token' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 + with: + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' + permission-issues: 'write' + permission-pull-requests: 'read' + permission-discussions: 'read' + permission-contents: 'read' + + - name: 'Generate Report 📜' + id: 'report' + env: + GH_TOKEN: '${{ steps.generate_token.outputs.token }}' + REPO: '${{ github.repository }}' + DAYS: '${{ 
github.event.inputs.days || 7 }}' + run: |- + set -e + + START_DATE="$(date -u -d "$DAYS days ago" +'%Y-%m-%d')" + END_DATE="$(date -u +'%Y-%m-%d')" + echo "⏳ Generating report for contributions from ${START_DATE} to ${END_DATE}..." + + declare -A author_is_googler + check_googler_status() { + local author="$1" + if [[ "${author}" == *"[bot]" ]]; then + author_is_googler[${author}]=1 + return 1 + fi + if [[ -v "author_is_googler[${author}]" ]]; then + return "${author_is_googler[${author}]}" + fi + + if gh api "orgs/googlers/members/${author}" --silent 2>/dev/null; then + echo "🧑‍💻 ${author} is a Googler." + author_is_googler[${author}]=0 + else + echo "🌍 ${author} is a community contributor." + author_is_googler[${author}]=1 + fi + return "${author_is_googler[${author}]}" + } + + googler_issues=0 + non_googler_issues=0 + googler_prs=0 + non_googler_prs=0 + + echo "🔎 Fetching issues and pull requests..." + ITEMS_JSON="$(gh search issues --repo "${REPO}" "created:>${START_DATE}" --json author,isPullRequest --limit 1000)" + + for row in $(echo "${ITEMS_JSON}" | jq -r '.[] | @base64'); do + _jq() { + echo "${row}" | base64 --decode | jq -r "${1}" + } + author="$(_jq '.author.login')" + is_pr="$(_jq '.isPullRequest')" + + if [[ -z "${author}" || "${author}" == "null" ]]; then + continue + fi + + if check_googler_status "${author}"; then + if [[ "${is_pr}" == "true" ]]; then + ((googler_prs++)) + else + ((googler_issues++)) + fi + else + if [[ "${is_pr}" == "true" ]]; then + ((non_googler_prs++)) + else + ((non_googler_issues++)) + fi + fi + done + + googler_discussions=0 + non_googler_discussions=0 + + echo "🗣️ Fetching discussions..." + DISCUSSION_QUERY=''' + query($q: String!) { + search(query: $q, type: DISCUSSION, first: 100) { + nodes { + ... 
on Discussion { + author { + login + } + } + } + } + }''' + DISCUSSIONS_JSON="$(gh api graphql -f q="repo:${REPO} created:>${START_DATE}" -f query="${DISCUSSION_QUERY}")" + + for row in $(echo "${DISCUSSIONS_JSON}" | jq -r '.data.search.nodes[] | @base64'); do + _jq() { + echo "${row}" | base64 --decode | jq -r "${1}" + } + author="$(_jq '.author.login')" + + if [[ -z "${author}" || "${author}" == "null" ]]; then + continue + fi + + if check_googler_status "${author}"; then + ((googler_discussions++)) + else + ((non_googler_discussions++)) + fi + done + + echo "✍️ Generating report content..." + TOTAL_ISSUES=$((googler_issues + non_googler_issues)) + TOTAL_PRS=$((googler_prs + non_googler_prs)) + TOTAL_DISCUSSIONS=$((googler_discussions + non_googler_discussions)) + + REPORT_BODY=$(cat <> "${GITHUB_OUTPUT}" + echo "${REPORT_BODY}" >> "${GITHUB_OUTPUT}" + echo "EOF" >> "${GITHUB_OUTPUT}" + + echo "📊 Community Contribution Report:" + echo "${REPORT_BODY}" + + - name: '🤖 Get Insights from Report' + if: |- + ${{ steps.report.outputs.report_body != '' }} + uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0 + env: + GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' + REPOSITORY: '${{ github.repository }}' + with: + gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}' + gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}' + gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}' + gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}' + gemini_api_key: '${{ secrets.GEMINI_API_KEY }}' + use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}' + use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}' + settings: |- + { + "coreTools": [ + "run_shell_command(gh issue list)", + "run_shell_command(gh pr list)", + "run_shell_command(gh search issues)", + "run_shell_command(gh search prs)" + ] + } + prompt: |- + You are a helpful assistant that analyzes community contribution 
reports. + Based on the following report, please provide a brief summary and highlight any interesting trends or potential areas for improvement. + + Report: + ${{ steps.report.outputs.report_body }} diff --git a/projects/gemini-cli/.github/workflows/docs-page-action.yml b/projects/gemini-cli/.github/workflows/docs-page-action.yml new file mode 100644 index 0000000000000000000000000000000000000000..2d485278ce03d1268e4d5abb602b1e701acf6398 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/docs-page-action.yml @@ -0,0 +1,50 @@ +name: 'Deploy GitHub Pages' + +on: + push: + tags: 'v*' + workflow_dispatch: + +permissions: + contents: 'read' + pages: 'write' + id-token: 'write' + +# Allow only one concurrent deployment, skipping runs queued between the run +# in-progress and latest queued. However, do NOT cancel in-progress runs as we +# want to allow these production deployments to complete. +concurrency: + group: '${{ github.workflow }}' + cancel-in-progress: false + +jobs: + build: + if: |- + ${{ !contains(github.ref_name, 'nightly') }} + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Setup Pages' + uses: 'actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b' # ratchet:actions/configure-pages@v5 + + - name: 'Build with Jekyll' + uses: 'actions/jekyll-build-pages@44a6e6beabd48582f863aeeb6cb2151cc1716697' # ratchet:actions/jekyll-build-pages@v1 + with: + source: './' + destination: './_site' + + - name: 'Upload artifact' + uses: 'actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6dda462b719fa' # ratchet:actions/upload-pages-artifact@v3 + + deploy: + environment: + name: 'github-pages' + url: '${{ steps.deployment.outputs.page_url }}' + runs-on: 'ubuntu-latest' + needs: 'build' + steps: + - name: 'Deploy to GitHub Pages' + id: 'deployment' + uses: 'actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e' # 
ratchet:actions/deploy-pages@v4 diff --git a/projects/gemini-cli/.github/workflows/e2e.yml b/projects/gemini-cli/.github/workflows/e2e.yml new file mode 100644 index 0000000000000000000000000000000000000000..3fa9bbc802844d6fa43ff6041fdb9dd2bc24af58 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/e2e.yml @@ -0,0 +1,88 @@ +name: 'E2E Tests' + +on: + push: + branches: + - 'main' + merge_group: + +jobs: + e2e-test-linux: + name: 'E2E Test (Linux) - ${{ matrix.sandbox }}' + runs-on: 'ubuntu-latest' + strategy: + matrix: + sandbox: + - 'sandbox:none' + - 'sandbox:docker' + node-version: + - '20.x' + - '22.x' + - '24.x' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Set up Node.js ${{ matrix.node-version }}' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 + with: + node-version: '${{ matrix.node-version }}' + cache: 'npm' + + - name: 'Install dependencies' + run: |- + npm ci + + - name: 'Build project' + run: |- + npm run build + + - name: 'Set up Docker' + if: |- + ${{ matrix.sandbox == 'sandbox:docker' }} + uses: 'docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435' # ratchet:docker/setup-buildx-action@v3 + + - name: 'Set up Podman' + if: |- + ${{ matrix.sandbox == 'sandbox:podman' }} + uses: 'redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603' # ratchet:redhat-actions/podman-login@v1 + with: + registry: 'docker.io' + username: '${{ secrets.DOCKERHUB_USERNAME }}' + password: '${{ secrets.DOCKERHUB_TOKEN }}' + + - name: 'Run E2E tests' + env: + GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}' + KEEP_OUTPUT: 'true' + SANDBOX: '${{ matrix.sandbox }}' + VERBOSE: 'true' + run: |- + npm run "test:integration:${SANDBOX}" + + e2e-test-macos: + name: 'E2E Test - macOS' + runs-on: 'macos-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' 
# ratchet:actions/checkout@v5 + + - name: 'Set up Node.js' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + cache: 'npm' + + - name: 'Install dependencies' + run: |- + npm ci + + - name: 'Build project' + run: |- + npm run build + + - name: 'Run E2E tests' + env: + GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}' + run: |- + npm run test:e2e diff --git a/projects/gemini-cli/.github/workflows/eval.yml b/projects/gemini-cli/.github/workflows/eval.yml new file mode 100644 index 0000000000000000000000000000000000000000..c8a4c6523f6e00eefc90b18d96bd3407e4fd4c55 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/eval.yml @@ -0,0 +1,29 @@ +name: 'Eval' + +on: + workflow_dispatch: + +jobs: + eval: + name: 'Eval' + runs-on: 'ubuntu-latest' + strategy: + matrix: + node-version: + - '20.x' + - '22.x' + - '24.x' + steps: + - name: 'Set up Node.js ${{ matrix.node-version }}' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 + with: + node-version: '${{ matrix.node-version }}' + cache: 'npm' + + - name: 'Set up Python' + uses: 'actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065' # ratchet:actions/setup-python@v5 + with: + python-version: '3.11' + + - name: 'Install and configure Poetry' + uses: 'snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a' # ratchet:snok/install-poetry@v1 diff --git a/projects/gemini-cli/.github/workflows/gemini-automated-issue-dedup.yml b/projects/gemini-cli/.github/workflows/gemini-automated-issue-dedup.yml new file mode 100644 index 0000000000000000000000000000000000000000..b84b5aa94dfedb4ca42e58b9627a182638ecfb4c --- /dev/null +++ b/projects/gemini-cli/.github/workflows/gemini-automated-issue-dedup.yml @@ -0,0 +1,262 @@ +name: '🏷️ Gemini Automated Issue Deduplication' + +on: + issues: + types: + - 'opened' + - 'reopened' + issue_comment: + types: + - 'created' + workflow_dispatch: + 
inputs: + issue_number: + description: 'issue number to dedup' + required: true + type: 'number' + +concurrency: + group: '${{ github.workflow }}-${{ github.event.issue.number }}' + cancel-in-progress: true + +defaults: + run: + shell: 'bash' + +jobs: + find-duplicates: + if: |- + github.repository == 'google-gemini/gemini-cli' && + vars.TRIAGE_DEDUPLICATE_ISSUES != '' && + (github.event_name == 'issues' || + github.event_name == 'workflow_dispatch' || + (github.event_name == 'issue_comment' && + contains(github.event.comment.body, '@gemini-cli /deduplicate') && + (github.event.comment.author_association == 'OWNER' || + github.event.comment.author_association == 'MEMBER' || + github.event.comment.author_association == 'COLLABORATOR'))) + permissions: + contents: 'read' + id-token: 'write' # Required for WIF, see https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-google-cloud-platform#adding-permissions-settings + issues: 'read' + statuses: 'read' + packages: 'read' + timeout-minutes: 20 + runs-on: 'ubuntu-latest' + outputs: + duplicate_issues_csv: '${{ env.DUPLICATE_ISSUES_CSV }}' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Log in to GitHub Container Registry' + uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3 + with: + registry: 'ghcr.io' + username: '${{ github.actor }}' + password: '${{ secrets.GITHUB_TOKEN }}' + + - name: 'Find Duplicate Issues' + uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0 + id: 'gemini_issue_deduplication' + env: + GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + ISSUE_TITLE: '${{ github.event.issue.title }}' + ISSUE_BODY: '${{ github.event.issue.body }}' + ISSUE_NUMBER: '${{ github.event.issue.number }}' + REPOSITORY: '${{ github.repository }}' + 
FIRESTORE_PROJECT: '${{ vars.FIRESTORE_PROJECT }}' + with: + gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}' + gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}' + gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}' + gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}' + gemini_api_key: '${{ secrets.GEMINI_API_KEY }}' + use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}' + use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}' + settings: |- + { + "mcpServers": { + "issue_deduplication": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "--network", "host", + "-e", "GITHUB_TOKEN", + "-e", "GEMINI_API_KEY", + "-e", "DATABASE_TYPE", + "-e", "FIRESTORE_DATABASE_ID", + "-e", "GCP_PROJECT", + "-e", "GOOGLE_APPLICATION_CREDENTIALS=/app/gcp-credentials.json", + "-v", "${GOOGLE_APPLICATION_CREDENTIALS}:/app/gcp-credentials.json", + "ghcr.io/google-gemini/gemini-cli-issue-triage@sha256:e3de1523f6c83aabb3c54b76d08940a2bf42febcb789dd2da6f95169641f94d3" + ], + "env": { + "GITHUB_TOKEN": "${GITHUB_TOKEN}", + "GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}", + "DATABASE_TYPE":"firestore", + "GCP_PROJECT": "${FIRESTORE_PROJECT}", + "FIRESTORE_DATABASE_ID": "(default)", + "GOOGLE_APPLICATION_CREDENTIALS": "${GOOGLE_APPLICATION_CREDENTIALS}" + }, + "enabled": true, + "timeout": 600000 + } + }, + "maxSessionTurns": 25, + "coreTools": [ + "run_shell_command(echo)", + "run_shell_command(gh issue view)" + ], + "telemetry": { + "enabled": true, + "target": "gcp" + } + } + prompt: |- + ## Role + You are an issue de-duplication assistant. Your goal is to find + duplicate issues for a given issue. + ## Steps + 1. **Find Potential Duplicates:** + - The repository is ${{ github.repository }} and the issue number is ${{ github.event.issue.number }}. + - Use the `duplicates` tool with the `repo` and `issue_number` to find potential duplicates for the current issue. Do not use the `threshold` parameter. + - If no duplicates are found, you are done. 
+ - Print the JSON output from the `duplicates` tool to the logs. + 2. **Refine Duplicates List (if necessary):** + - If the `duplicates` tool returns between 1 and 14 results, you must refine the list. + - For each potential duplicate issue, run `gh issue view --json title,body,comments` to fetch its content. + - Also fetch the content of the original issue: `gh issue view "${ISSUE_NUMBER}" --json title,body,comments`. + - Carefully analyze the content (title, body, comments) of the original issue and all potential duplicates. + - It is very important if the comments on either issue mention that they are not duplicates of each other, to treat them as not duplicates. + - Based on your analysis, create a final list containing only the issues you are highly confident are actual duplicates. + - If your final list is empty, you are done. + - Print to the logs if you omitted any potential duplicates based on your analysis. + - If the `duplicates` tool returned 15+ results, use the top 15 matches (based on descending similarity score value) to perform this step. + 3. **Output final duplicates list as CSV:** + - Convert the list of appropriate duplicate issue numbers into a comma-separated list (CSV). If there are no appropriate duplicates, use the empty string. + - Use the "echo" shell command to append the CSV of issue numbers into the filepath referenced by the environment variable "${GITHUB_ENV}": + echo "DUPLICATE_ISSUES_CSV=[DUPLICATE_ISSUES_AS_CSV]" >> "${GITHUB_ENV}" + ## Guidelines + - Only use the `duplicates` and `run_shell_command` tools. + - The `run_shell_command` tool can be used with `gh issue view`. + - Do not download or read media files like images, videos, or links. The `--json` flag for `gh issue view` will prevent this. + - Do not modify the issue content or status. + - Do not add comments or labels. + - Reference all shell variables as "${VAR}" (with quotes and braces). 
+ + add-comment-and-label: + needs: 'find-duplicates' + if: |- + github.repository == 'google-gemini/gemini-cli' && + vars.TRIAGE_DEDUPLICATE_ISSUES != '' && + needs.find-duplicates.outputs.duplicate_issues_csv != '' && + ( + github.event_name == 'issues' || + github.event_name == 'workflow_dispatch' || + ( + github.event_name == 'issue_comment' && + contains(github.event.comment.body, '@gemini-cli /deduplicate') && + ( + github.event.comment.author_association == 'OWNER' || + github.event.comment.author_association == 'MEMBER' || + github.event.comment.author_association == 'COLLABORATOR' + ) + ) + ) + permissions: + issues: 'write' + timeout-minutes: 5 + runs-on: 'ubuntu-latest' + steps: + - name: 'Generate GitHub App Token' + id: 'generate_token' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 + with: + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' + permission-issues: 'write' + + - name: 'Comment and Label Duplicate Issue' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + env: + DUPLICATES_OUTPUT: '${{ needs.find-duplicates.outputs.duplicate_issues_csv }}' + with: + github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}' + script: |- + const rawCsv = process.env.DUPLICATES_OUTPUT; + core.info(`Raw duplicates CSV: ${rawCsv}`); + const duplicateIssues = rawCsv.split(',').map(s => s.trim()).filter(s => s); + + if (duplicateIssues.length === 0) { + core.info('No duplicate issues found. Nothing to do.'); + return; + } + + const issueNumber = ${{ github.event.issue.number }}; + + function formatCommentBody(issues, updated = false) { + const header = updated + ? 
'Found possible duplicate issues (updated):' + : 'Found possible duplicate issues:'; + const issuesList = issues.map(num => `- #${num}`).join('\n'); + const footer = 'If you believe this is not a duplicate, please remove the `status/possible-duplicate` label.'; + const magicComment = ''; + return `${header}\n\n${issuesList}\n\n${footer}\n${magicComment}`; + } + + const newCommentBody = formatCommentBody(duplicateIssues); + const newUpdatedCommentBody = formatCommentBody(duplicateIssues, true); + + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + }); + + const magicComment = ''; + const existingComment = comments.find(comment => + comment.user.type === 'Bot' && comment.body.includes(magicComment) + ); + + let commentMade = false; + + if (existingComment) { + // To check if lists are same, just compare the formatted bodies without headers. + const existingBodyForCompare = existingComment.body.substring(existingComment.body.indexOf('- #')); + const newBodyForCompare = newCommentBody.substring(newCommentBody.indexOf('- #')); + + if (existingBodyForCompare.trim() !== newBodyForCompare.trim()) { + core.info(`Updating existing comment ${existingComment.id}`); + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existingComment.id, + body: newUpdatedCommentBody, + }); + commentMade = true; + } else { + core.info('Existing comment is up-to-date. 
Nothing to do.'); + } + } else { + core.info('Creating new comment.'); + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + body: newCommentBody, + }); + commentMade = true; + } + + if (commentMade) { + core.info('Adding "status/possible-duplicate" label.'); + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + labels: ['status/possible-duplicate'], + }); + } diff --git a/projects/gemini-cli/.github/workflows/gemini-automated-issue-triage.yml b/projects/gemini-cli/.github/workflows/gemini-automated-issue-triage.yml new file mode 100644 index 0000000000000000000000000000000000000000..6672157343c4a7cfae95ba6175ba5d5db12001b0 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/gemini-automated-issue-triage.yml @@ -0,0 +1,332 @@ +name: '🏷️ Gemini Automated Issue Triage' + +on: + issues: + types: + - 'opened' + - 'reopened' + issue_comment: + types: + - 'created' + workflow_dispatch: + inputs: + issue_number: + description: 'issue number to triage' + required: true + type: 'number' + +concurrency: + group: '${{ github.workflow }}-${{ github.event.issue.number || github.event.inputs.issue_number }}' + cancel-in-progress: true + +defaults: + run: + shell: 'bash' + +permissions: + contents: 'read' + id-token: 'write' + issues: 'write' + statuses: 'write' + packages: 'read' + actions: 'write' # Required for cancelling a workflow run + +jobs: + triage-issue: + if: |- + github.repository == 'google-gemini/gemini-cli' && + ( + github.event_name == 'workflow_dispatch' || + ( + (github.event_name == 'issues' || github.event_name == 'issue_comment') && + contains(github.event.issue.labels.*.name, 'status/need-triage') && + (github.event_name != 'issue_comment' || ( + contains(github.event.comment.body, '@gemini-cli /triage') && + (github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 
'MEMBER' || github.event.comment.author_association == 'COLLABORATOR') + )) + ) + ) + timeout-minutes: 5 + runs-on: 'ubuntu-latest' + steps: + - name: 'Get issue data for manual trigger' + id: 'get_issue_data' + if: |- + github.event_name == 'workflow_dispatch' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + script: | + const { data: issue } = await github.rest.issues.get({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: ${{ github.event.inputs.issue_number }}, + }); + core.setOutput('title', issue.title); + core.setOutput('body', issue.body); + core.setOutput('labels', issue.labels.map(label => label.name).join(',')); + return issue; + + - name: 'Check for triage label on manual trigger' + if: |- + github.event_name == 'workflow_dispatch' && !contains(steps.get_issue_data.outputs.labels, 'status/need-triage') + run: | + echo "Issue #${{ github.event.inputs.issue_number }} does not have the 'status/need-triage' label. Stopping workflow." 
+ exit 1 + + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Generate GitHub App Token' + id: 'generate_token' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 + with: + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' + permission-issues: 'write' + + - name: 'Get Repository Labels' + id: 'get_labels' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + with: + github-token: '${{ steps.generate_token.outputs.token }}' + script: |- + const { data: labels } = await github.rest.issues.listLabelsForRepo({ + owner: context.repo.owner, + repo: context.repo.repo, + }); + const labelNames = labels.map(label => label.name); + core.setOutput('available_labels', labelNames.join(',')); + core.info(`Found ${labelNames.length} labels: ${labelNames.join(', ')}`); + return labelNames; + + - name: 'Run Gemini Issue Analysis' + uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0 + id: 'gemini_issue_analysis' + env: + GITHUB_TOKEN: '' # Do not pass any auth token here since this runs on untrusted inputs + ISSUE_TITLE: >- + ${{ github.event_name == 'workflow_dispatch' && steps.get_issue_data.outputs.title || github.event.issue.title }} + ISSUE_BODY: >- + ${{ github.event_name == 'workflow_dispatch' && steps.get_issue_data.outputs.body || github.event.issue.body }} + ISSUE_NUMBER: >- + ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.issue_number || github.event.issue.number }} + REPOSITORY: '${{ github.repository }}' + AVAILABLE_LABELS: '${{ steps.get_labels.outputs.available_labels }}' + with: + gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}' + gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}' + gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}' + 
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}' + gemini_api_key: '${{ secrets.GEMINI_API_KEY }}' + use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}' + use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}' + settings: |- + { + "maxSessionTurns": 25, + "coreTools": [ + "run_shell_command(echo)" + ], + "telemetry": { + "enabled": true, + "target": "gcp" + } + } + prompt: |- + ## Role + + You are an issue triage assistant. Analyze the current GitHub issue + and identify the most appropriate existing labels by only using the provided data. Use the available + tools to gather information; do not ask for information to be + provided. Do not remove the following labels titled maintainer, help wanted or good first issue. + + ## Steps + + 1. You are only able to use the echo command. Review the available labels in the environment variable: "${AVAILABLE_LABELS}". + 2. Review the issue title and body provided in the environment variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}". + 3. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. For area/* and kind/* limit yourself to only the single most applicable label in each case. + 4. If the issue already has area/ label, dont try to change it. Similarly, if the issue already has a kind/ label don't change it. And if the issue already has a priority/ label do not change it for example: + If an issue has area/core and kind/bug you will only add a priority/ label. + Instead if an issue has no labels, you will could add one lable of each kind. + 5. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5 for anything more than 6 versions older than the most recent should add the status/need-retesting label. + 6. 
If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label and leave a comment politely requesting the relevant information, eg.. if repro steps are missing request for repro steps. if version information is missing request for version information into the explanation section below. + 7. Output the appropriate labels for this issue in JSON format with explanation, for example: + ``` + {"labels_to_set": ["kind/bug", "priority/p0"], "explanation": "This is a critical bug report affecting main functionality"} + ``` + 8. If the issue cannot be classified using the available labels, output: + ``` + {"labels_to_set": [], "explanation": "Unable to classify this issue with available labels"} + ``` + 9. Use Area definitions mentioned below to help you narrow down issues. + 10. If you think an issue might be a Priority/P0 do not apply the priority/p0 label. Instead apply a status/manual-triage label and include a note in your explanation. + 11. If you are uncertain and have not been able to apply one each of kind/, area/ and priority/ , apply the status/manual-triage label. + + ## Guidelines + + - Only use labels that already exist in the repository + - Do not add comments or modify the issue content + - Triage only the current issue + - Identify only one area/ label + - Identify only one kind/ label + - Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these + - Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario + - Reference all shell variables as "${VAR}" (with quotes and braces) + - Output only valid JSON format + - Do not include any explanation or additional text, just the JSON + + Categorization Guidelines: + P0: Critical / Blocker + - A P0 bug is a catastrophic failure that demands immediate attention. 
+ - To be a P0 it means almost all users are running into this issue and it is blocking users from being able to use the product. + - You would see this in the form of many comments from different developers on the bug. + - It represents a complete showstopper for a significant portion of users or for the development process itself. + Impact: + - Blocks development or testing for the entire team. + - Major security vulnerability that could compromise user data or system integrity. + - Causes data loss or corruption with no workaround. + - Crashes the application or makes a core feature completely unusable for all or most users in a production environment. Will it cause severe quality degration? Is it preventing contributors from contributing to the repository or is it a release blocker? + Qualifier: Is the main function of the software broken? + Example: The gemini auth login command fails with an unrecoverable error, preventing any user from authenticating and using the rest of the CLI. + P1: High + - A P1 bug is a serious issue that significantly degrades the user experience or impacts a core feature. + - While not a complete blocker, it's a major problem that needs a fast resolution. Feature requests are almost never P1. + - Once again this would be affecting many users. + - You would see this in the form of comments from different developers on the bug. + Impact: + - A core feature is broken or behaving incorrectly for a large number of users or large number of use cases. + - Review the bug details and comments to try figure out if this issue affects a large set of use cases or if it's a narrow set of use cases. + - Severe performance degradation making the application frustratingly slow. + - No straightforward workaround exists, or the workaround is difficult and non-obvious. + Qualifier: Is a key feature unusable or giving very wrong results? + Example: Gemini CLI enters a loop when making read-many-files tool call. 
I am unable to break out of the loop and gemini doesn't follow instructions subsequently. + P2: Medium + - A P2 bug is a moderately impactful issue. It's a noticeable problem but doesn't prevent the use of the software's main functionality. + Impact: + - Affects a non-critical feature or a smaller, specific subset of users. + - An inconvenient but functional workaround is available and easy to execute. + - Noticeable UI/UX problems that don't break functionality but look unprofessional (e.g., elements are misaligned or overlapping). + Qualifier: Is it an annoying but non-blocking problem? + Example: An error message is unclear or contains a typo, causing user confusion but not halting their workflow. + P3: Low + - A P3 bug is a minor, low-impact issue that is trivial or cosmetic. It has little to no effect on the overall functionality of the application. + Impact: + - Minor cosmetic issues like color inconsistencies, typos in documentation, or slight alignment problems on a non-critical page. + - An edge-case bug that is very difficult to reproduce and affects a tiny fraction of users. + Qualifier: Is it a "nice-to-fix" issue? + Example: Spelling mistakes etc. + Things you should know: + - If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue + - This product is designed to use different models eg.. using pro, downgrading to flash etc. when users report that they dont expect the model to change those would be categorized as feature requests. + Definition of Areas + area/ux: + - Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance. 
+ - I am seeing my screen flicker when using Gemini CLI + - I am seeing the output malformed + - Theme changes aren't taking effect + - My keyboard inputs arent' being recognzied + area/platform: + - Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework. + area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features. + area/models: + - i am not getting a response that is reasonable or expected. this can include things like + - I am calling a tool and the tool is not performing as expected. + - i am expecting a tool to be called and it is not getting called , + - Including experience when using + - built-in tools (e.g., web search, code interpreter, read file, writefile, etc..), + - Function calling issues should be under this area + - i am getting responses from the model that are malformed. + - Issues concerning Gemini quality of response and inference, + - Issues talking about unnecessary token consumption. + - Issues talking about Model getting stuck in a loop be watchful as this could be the root cause for issues that otherwise seem like model performance issues. + - Memory compression + - unexpected responses, + - poor quality of generated code + area/tools: + - These are primarily issues related to Model Context Protocol + - These are issues that mention MCP support + - feature requests asking for support for new tools. + area/core: Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality + area/contribution: Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure. + area/authentication: Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc.. 
+ area/security-privacy: Issues concerning vulnerability patching, dependency security, data sanitization, privacy controls, and preventing unauthorized data access. + area/extensibility: Issues related to the plugin system, extension APIs, or making the CLI's functionality available in other applications, github actions, ide support etc.. + area/performance: Issues focused on model performance + - Issues with running out of capacity, + - 429 errors etc.. + - could also pertain to latency, + - other general software performance like, memory usage, CPU consumption, and algorithmic efficiency. + - Switching models from one to the other unexpectedly. + + - name: 'Apply Labels to Issue' + if: |- + ${{ steps.gemini_issue_analysis.outputs.summary != '' }} + env: + REPOSITORY: '${{ github.repository }}' + ISSUE_NUMBER: '${{ github.event.issue.number || github.event.inputs.issue_number }}' + LABELS_OUTPUT: '${{ steps.gemini_issue_analysis.outputs.summary }}' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + with: + github-token: '${{ steps.generate_token.outputs.token }}' + script: |- + // Strip code block markers if present + const rawLabels = process.env.LABELS_OUTPUT; + core.info(`Raw labels JSON: ${rawLabels}`); + let parsedLabels; + try { + const jsonMatch = rawLabels.match(/```json\s*([\s\S]*?)\s*```/); + if (!jsonMatch || !jsonMatch[1]) { + throw new Error("Could not find a ```json ... 
``` block in the output."); + } + const jsonString = jsonMatch[1].trim(); + parsedLabels = JSON.parse(jsonString); + core.info(`Parsed labels JSON: ${JSON.stringify(parsedLabels)}`); + } catch (err) { + core.setFailed(`Failed to parse labels JSON from Gemini output: ${err.message}\nRaw output: ${rawLabels}`); + return; + } + + const issueNumber = parseInt(process.env.ISSUE_NUMBER); + const explanation = parsedLabels.explanation || ''; + + // Set labels based on triage result + if (parsedLabels.labels_to_set && parsedLabels.labels_to_set.length > 0) { + await github.rest.issues.setLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + labels: parsedLabels.labels_to_set + }); + const explanationInfo = explanation ? ` - ${explanation}` : ''; + core.info(`Successfully set labels for #${issueNumber}: ${parsedLabels.labels_to_set.join(', ')}${explanationInfo}`); + } else { + // If no labels to set, leave the issue as is + const explanationInfo = explanation ? ` - ${explanation}` : ''; + core.info(`No labels to set for #${issueNumber}, leaving as is${explanationInfo}`); + } + + if (explanation) { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + body: explanation, + }); + } + + - name: 'Post Issue Analysis Failure Comment' + if: |- + ${{ failure() && steps.gemini_issue_analysis.outcome == 'failure' }} + env: + ISSUE_NUMBER: '${{ github.event.issue.number || github.event.inputs.issue_number }}' + RUN_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + with: + github-token: '${{ steps.generate_token.outputs.token }}' + script: |- + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: parseInt(process.env.ISSUE_NUMBER), + body: 'There is a problem with the Gemini CLI issue triaging. 
Please check the [action logs](${process.env.RUN_URL}) for details.' + }) diff --git a/projects/gemini-cli/.github/workflows/gemini-scheduled-issue-dedup.yml b/projects/gemini-cli/.github/workflows/gemini-scheduled-issue-dedup.yml new file mode 100644 index 0000000000000000000000000000000000000000..9eea5e0aa02f2db4cd838e0270476172f86b88a5 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/gemini-scheduled-issue-dedup.yml @@ -0,0 +1,116 @@ +name: '📋 Gemini Scheduled Issue Deduplication' + +on: + schedule: + - cron: '0 * * * *' # Runs every hour + workflow_dispatch: + +concurrency: + group: '${{ github.workflow }}' + cancel-in-progress: true + +defaults: + run: + shell: 'bash' + +jobs: + refresh-embeddings: + if: |- + ${{ vars.TRIAGE_DEDUPLICATE_ISSUES != '' && github.repository == 'google-gemini/gemini-cli' }} + permissions: + contents: 'read' + id-token: 'write' # Required for WIF, see https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-google-cloud-platform#adding-permissions-settings + issues: 'read' + statuses: 'read' + packages: 'read' + timeout-minutes: 20 + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Log in to GitHub Container Registry' + uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3 + with: + registry: 'ghcr.io' + username: '${{ github.actor }}' + password: '${{ secrets.GITHUB_TOKEN }}' + + - name: 'Run Gemini Issue Deduplication Refresh' + uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0 + id: 'gemini_refresh_embeddings' + env: + GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + ISSUE_TITLE: '${{ github.event.issue.title }}' + ISSUE_BODY: '${{ github.event.issue.body }}' + ISSUE_NUMBER: '${{ github.event.issue.number }}' + REPOSITORY: '${{ 
github.repository }}' + FIRESTORE_PROJECT: '${{ vars.FIRESTORE_PROJECT }}' + with: + gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}' + gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}' + gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}' + gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}' + gemini_api_key: '${{ secrets.GEMINI_API_KEY }}' + use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}' + use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}' + settings: |- + { + "mcpServers": { + "issue_deduplication": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "--network", "host", + "-e", "GITHUB_TOKEN", + "-e", "GEMINI_API_KEY", + "-e", "DATABASE_TYPE", + "-e", "FIRESTORE_DATABASE_ID", + "-e", "GCP_PROJECT", + "-e", "GOOGLE_APPLICATION_CREDENTIALS=/app/gcp-credentials.json", + "-v", "${GOOGLE_APPLICATION_CREDENTIALS}:/app/gcp-credentials.json", + "ghcr.io/google-gemini/gemini-cli-issue-triage@sha256:e3de1523f6c83aabb3c54b76d08940a2bf42febcb789dd2da6f95169641f94d3" + ], + "env": { + "GITHUB_TOKEN": "${GITHUB_TOKEN}", + "GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}", + "DATABASE_TYPE":"firestore", + "GCP_PROJECT": "${FIRESTORE_PROJECT}", + "FIRESTORE_DATABASE_ID": "(default)", + "GOOGLE_APPLICATION_CREDENTIALS": "${GOOGLE_APPLICATION_CREDENTIALS}" + }, + "enabled": true, + "timeout": 600000 + } + }, + "maxSessionTurns": 25, + "coreTools": [ + "run_shell_command(echo)" + ], + "telemetry": { + "enabled": true, + "target": "gcp" + } + } + prompt: |- + ## Role + + You are a database maintenance assistant for a GitHub issue deduplication system. + + ## Goal + + Your sole responsibility is to refresh the embeddings for all open issues in the repository to ensure the deduplication database is up-to-date. + + ## Steps + + 1. **Extract Repository Information:** The repository is ${{ github.repository }}. + 2. **Refresh Embeddings:** Call the `refresh` tool with the correct `repo`. Do not use the `force` parameter. + 3. 
**Log Output:** Print the JSON output from the `refresh` tool to the logs. + + ## Guidelines + + - Only use the `refresh` tool. + - Do not attempt to find duplicates or modify any issues. + - Your only task is to call the `refresh` tool and log its output. diff --git a/projects/gemini-cli/.github/workflows/gemini-scheduled-issue-triage.yml b/projects/gemini-cli/.github/workflows/gemini-scheduled-issue-triage.yml new file mode 100644 index 0000000000000000000000000000000000000000..76ced7c34f82b7946ea3155a0f42f0575c272534 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/gemini-scheduled-issue-triage.yml @@ -0,0 +1,330 @@ +name: '📋 Gemini Scheduled Issue Triage' + +on: + schedule: + - cron: '0 * * * *' # Runs every hour + workflow_dispatch: + +concurrency: + group: '${{ github.workflow }}' + cancel-in-progress: true + +defaults: + run: + shell: 'bash' + +permissions: + id-token: 'write' + issues: 'write' + +jobs: + triage-issues: + timeout-minutes: 10 + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Generate GitHub App Token' + id: 'generate_token' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 + with: + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' + permission-issues: 'write' + + - name: 'Find untriaged issues' + id: 'find_issues' + env: + GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' + GITHUB_REPOSITORY: '${{ github.repository }}' + run: |- + set -euo pipefail + + echo '🔍 Finding issues without labels...' + NO_LABEL_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \ + --search 'is:open is:issue no:label' --json number,title,body)" + + echo '🏷️ Finding issues that need triage...' 
+ NEED_TRIAGE_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \ + --search "is:open is:issue label:\"status/need-triage\"" --limit 1000 --json number,title,body)" + + echo '🔄 Merging and deduplicating issues...' + ISSUES="$(echo "${NO_LABEL_ISSUES}" "${NEED_TRIAGE_ISSUES}" | jq -c -s 'add | unique_by(.number)')" + + echo '📝 Setting output for GitHub Actions...' + echo "issues_to_triage=${ISSUES}" >> "${GITHUB_OUTPUT}" + + ISSUE_COUNT="$(echo "${ISSUES}" | jq 'length')" + echo "✅ Found ${ISSUE_COUNT} issues to triage! 🎯" + + - name: 'Get Repository Labels' + id: 'get_labels' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + with: + github-token: '${{ steps.generate_token.outputs.token }}' + script: |- + const { data: labels } = await github.rest.issues.listLabelsForRepo({ + owner: context.repo.owner, + repo: context.repo.repo, + }); + const labelNames = labels.map(label => label.name); + core.setOutput('available_labels', labelNames.join(',')); + core.info(`Found ${labelNames.length} labels: ${labelNames.join(', ')}`); + return labelNames; + + - name: 'Run Gemini Issue Analysis' + if: |- + ${{ steps.find_issues.outputs.issues_to_triage != '[]' }} + uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0 + id: 'gemini_issue_analysis' + env: + GITHUB_TOKEN: '' # Do not pass any auth token here since this runs on untrusted inputs + ISSUES_TO_TRIAGE: '${{ steps.find_issues.outputs.issues_to_triage }}' + REPOSITORY: '${{ github.repository }}' + AVAILABLE_LABELS: '${{ steps.get_labels.outputs.available_labels }}' + with: + gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}' + gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}' + gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}' + gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}' + gemini_api_key: '${{ secrets.GEMINI_API_KEY }}' + use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}' + 
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}' + settings: |- + { + "maxSessionTurns": 25, + "coreTools": [ + "run_shell_command(echo)" + ], + "telemetry": { + "enabled": true, + "target": "gcp" + } + } + prompt: |- + ## Role + + You are an issue triage assistant. Analyze issues and identify + appropriate labels. Use the available tools to gather information; + do not ask for information to be provided. + + ## Steps + + 1. You are only able to use the echo command. Review the available labels in the environment variable: "${AVAILABLE_LABELS}". + 2. Check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues) + 3. Review the issue title, body and any comments provided in the environment variables. + 4. Identify the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. + 5. If the issue already has area/ label, dont try to change it. Similarly, if the issue already has a kind/ label don't change it. And if the issue already has a priority/ label do not change it for example: + If an issue has area/core and kind/bug you will only add a priority/ label. + Instead if an issue has no labels, you will could add one lable of each kind. + 6. Identify other applicable labels based on the issue content, such as status/*, help wanted, good first issue, etc. + 7. For area/* and kind/* limit yourself to only the single most applicable label in each case. + 8. Give me a single short explanation about why you are selecting each label in the process. + 9. Output a JSON array of objects, each containing the issue number + and the labels to add and remove, along with an explanation. For example: + ``` + [ + { + "issue_number": 123, + "labels_to_add": ["kind/bug", "priority/p2"], + "labels_to_remove": ["status/need-triage"], + "explanation": "This issue is a bug that needs to be addressed with medium priority." 
+ }, + { + "issue_number": 456, + "labels_to_add": ["kind/enhancement"], + "labels_to_remove": [], + "explanation": "This issue is an enhancement request that could improve the user experience." + } + ] + ``` + If an issue cannot be classified, do not include it in the output array. + 10. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5 + - Anything more than 6 versions older than the most recent should add the status/need-retesting label + 11. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label and leave a comment politely requesting the relevant information, eg.. if repro steps are missing request for repro steps. if version information is missing request for version information into the explanation section below. + - After identifying appropriate labels to an issue, add "status/need-triage" label to labels_to_remove in the output. + 12. If you think an issue might be a Priority/P0 do not apply the priority/p0 label. Instead apply a status/manual-triage label and include a note in your explanation. + 13. If you are uncertain and have not been able to apply one each of kind/, area/ and priority/ , apply the status/manual-triage label. + + ## Guidelines + + - Output only valid JSON format + - Do not include any explanation or additional text, just the JSON + - Only use labels that already exist in the repository. + - Do not add comments or modify the issue content. + - Do not remove the following labels maintainer, help wanted or good first issue. + - Triage only the current issue. + - Identify only one area/ label + - Identify only one kind/ label (Do not apply kind/duplicate or kind/parent-issue) + - Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these. + - Once you categorize the issue if it needs information bump down the priority by 1 eg.. 
a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario. + Categorization Guidelines: + P0: Critical / Blocker + - A P0 bug is a catastrophic failure that demands immediate attention. + - To be a P0 it means almost all users are running into this issue and it is blocking users from being able to use the product. + - You would see this in the form of many comments from different developers on the bug. + - It represents a complete showstopper for a significant portion of users or for the development process itself. + Impact: + - Blocks development or testing for the entire team. + - Major security vulnerability that could compromise user data or system integrity. + - Causes data loss or corruption with no workaround. + - Crashes the application or makes a core feature completely unusable for all or most users in a production environment. Will it cause severe quality degration? + - Is it preventing contributors from contributing to the repository or is it a release blocker? + Qualifier: Is the main function of the software broken? + Example: The gemini auth login command fails with an unrecoverable error, preventing any user from authenticating and using the rest of the CLI. + P1: High + - A P1 bug is a serious issue that significantly degrades the user experience or impacts a core feature. + - While not a complete blocker, it's a major problem that needs a fast resolution. Feature requests are almost never P1. + - Once again this would be affecting many users. + - You would see this in the form of comments from different developers on the bug. + Impact: + - A core feature is broken or behaving incorrectly for a large number of users or large number of use cases. + - Review the bug details and comments to try figure out if this issue affects a large set of use cases or if it's a narrow set of use cases. + - Severe performance degradation making the application frustratingly slow. 
+ - No straightforward workaround exists, or the workaround is difficult and non-obvious. + Qualifier: Is a key feature unusable or giving very wrong results? + Example: Gemini CLI enters a loop when making read-many-files tool call. I am unable to break out of the loop and gemini doesn't follow instructions subsequently. + P2: Medium + - A P2 bug is a moderately impactful issue. It's a noticeable problem but doesn't prevent the use of the software's main functionality. + Impact: + - Affects a non-critical feature or a smaller, specific subset of users. + - An inconvenient but functional workaround is available and easy to execute. + - Noticeable UI/UX problems that don't break functionality but look unprofessional (e.g., elements are misaligned or overlapping). + Qualifier: Is it an annoying but non-blocking problem? + Example: An error message is unclear or contains a typo, causing user confusion but not halting their workflow. + P3: Low + - A P3 bug is a minor, low-impact issue that is trivial or cosmetic. It has little to no effect on the overall functionality of the application. + Impact: + - Minor cosmetic issues like color inconsistencies, typos in documentation, or slight alignment problems on a non-critical page. + - An edge-case bug that is very difficult to reproduce and affects a tiny fraction of users. + Qualifier: Is it a "nice-to-fix" issue? + Example: Spelling mistakes etc. + Additional Context: + - If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue + - This product is designed to use different models eg.. using pro, downgrading to flash etc. + - When users report that they dont expect the model to change those would be categorized as feature requests. + Definition of Areas + area/ux: + - Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance. 
+ - I am seeing my screen flicker when using Gemini CLI + - I am seeing the output malformed + - Theme changes aren't taking effect + - My keyboard inputs arent' being recognzied + area/platform: + - Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework. + area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features. + area/models: + - i am not getting a response that is reasonable or expected. this can include things like + - I am calling a tool and the tool is not performing as expected. + - i am expecting a tool to be called and it is not getting called , + - Including experience when using + - built-in tools (e.g., web search, code interpreter, read file, writefile, etc..), + - Function calling issues should be under this area + - i am getting responses from the model that are malformed. + - Issues concerning Gemini quality of response and inference, + - Issues talking about unnecessary token consumption. + - Issues talking about Model getting stuck in a loop be watchful as this could be the root cause for issues that otherwise seem like model performance issues. + - Memory compression + - unexpected responses, + - poor quality of generated code + area/tools: + - These are primarily issues related to Model Context Protocol + - These are issues that mention MCP support + - feature requests asking for support for new tools. + area/core: + - Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality + area/contribution: + - Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure. 
+ area/authentication: + - Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc.. + area/security-privacy: + - Issues concerning vulnerability patching, dependency security, data sanitization, privacy controls, and preventing unauthorized data access. + area/extensibility: + - Issues related to the plugin system, extension APIs, or making the CLI's functionality available in other applications, github actions, ide support etc.. + area/performance: + - Issues focused on model performance + - Issues with running out of capacity, + - 429 errors etc.. + - could also pertain to latency, + - other general software performance like, memory usage, CPU consumption, and algorithmic efficiency. + - Switching models from one to the other unexpectedly. + + - name: 'Apply Labels to Issues' + if: |- + ${{ steps.gemini_issue_analysis.outcome == 'success' && + steps.gemini_issue_analysis.outputs.summary != '[]' }} + env: + REPOSITORY: '${{ github.repository }}' + LABELS_OUTPUT: '${{ steps.gemini_issue_analysis.outputs.summary }}' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + with: + github-token: '${{ steps.generate_token.outputs.token }}' + script: |- + const rawLabels = process.env.LABELS_OUTPUT; + core.info(`Raw labels JSON: ${rawLabels}`); + let parsedLabels; + try { + const jsonMatch = rawLabels.match(/```json\s*([\s\S]*?)\s*```/); + if (!jsonMatch || !jsonMatch[1]) { + throw new Error("Could not find a ```json ... 
``` block in the output."); + } + const jsonString = jsonMatch[1].trim(); + parsedLabels = JSON.parse(jsonString); + core.info(`Parsed labels JSON: ${JSON.stringify(parsedLabels)}`); + } catch (err) { + core.setFailed(`Failed to parse labels JSON from Gemini output: ${err.message}\nRaw output: ${rawLabels}`); + return; + } + + for (const entry of parsedLabels) { + const issueNumber = entry.issue_number; + if (!issueNumber) { + core.info(`Skipping entry with no issue number: ${JSON.stringify(entry)}`); + continue; + } + + if (entry.labels_to_add && entry.labels_to_add.length > 0) { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + labels: entry.labels_to_add + }); + const explanation = entry.explanation ? ` - ${entry.explanation}` : ''; + core.info(`Successfully added labels for #${issueNumber}: ${entry.labels_to_add.join(', ')}${explanation}`); + } + + if (entry.labels_to_remove && entry.labels_to_remove.length > 0) { + for (const label of entry.labels_to_remove) { + try { + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + name: label + }); + } catch (error) { + if (error.status !== 404) { + throw error; + } + } + } + core.info(`Successfully removed labels for #${issueNumber}: ${entry.labels_to_remove.join(', ')}`); + } + + if (entry.explanation) { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + body: entry.explanation, + }); + } + + if ((!entry.labels_to_add || entry.labels_to_add.length === 0) && (!entry.labels_to_remove || entry.labels_to_remove.length === 0)) { + core.info(`No labels to add or remove for #${issueNumber}, leaving as is`); + } + } diff --git a/projects/gemini-cli/.github/workflows/gemini-scheduled-pr-triage.yml b/projects/gemini-cli/.github/workflows/gemini-scheduled-pr-triage.yml new file mode 100644 index 
0000000000000000000000000000000000000000..007b8daa3f1c4b7116daa60eb61c9d88e26b987a --- /dev/null +++ b/projects/gemini-cli/.github/workflows/gemini-scheduled-pr-triage.yml @@ -0,0 +1,41 @@ +name: 'Gemini Scheduled PR Triage 🚀' + +on: + schedule: + - cron: '*/15 * * * *' # Runs every 15 minutes + workflow_dispatch: + +jobs: + audit-prs: + timeout-minutes: 15 + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} + permissions: + contents: 'read' + id-token: 'write' + issues: 'write' + pull-requests: 'write' + runs-on: 'ubuntu-latest' + outputs: + prs_needing_comment: '${{ steps.run_triage.outputs.prs_needing_comment }}' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Generate GitHub App Token' + id: 'generate_token' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 + with: + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' + permission-issues: 'write' + permission-pull-requests: 'write' + + - name: 'Run PR Triage Script' + id: 'run_triage' + shell: 'bash' + env: + GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' + GITHUB_REPOSITORY: '${{ github.repository }}' + run: |- + ./.github/scripts/pr-triage.sh diff --git a/projects/gemini-cli/.github/workflows/gemini-self-assign-issue.yml b/projects/gemini-cli/.github/workflows/gemini-self-assign-issue.yml new file mode 100644 index 0000000000000000000000000000000000000000..3ee0c757a245e374ae3b4315df2e7e07d5ce71ce --- /dev/null +++ b/projects/gemini-cli/.github/workflows/gemini-self-assign-issue.yml @@ -0,0 +1,98 @@ +name: 'Assign Issue on Comment' + +on: + issue_comment: + types: + - 'created' + +concurrency: + group: '${{ github.workflow }}-${{ github.event.issue.number }}' + cancel-in-progress: true + +defaults: + run: + shell: 'bash' + +permissions: + contents: 'read' + id-token: 'write' + issues: 'write' + 
statuses: 'write' + packages: 'read' + +jobs: + self-assign-issue: + if: |- + github.repository == 'google-gemini/gemini-cli' && + github.event_name == 'issue_comment' && + contains(github.event.comment.body, '/assign') + runs-on: 'ubuntu-latest' + steps: + - name: 'Generate GitHub App Token' + id: 'generate_token' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' + with: + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' + # Add 'assignments' write permission + permission-issues: 'write' + + - name: 'Assign issue to user' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + with: + github-token: '${{ steps.generate_token.outputs.token }}' + script: | + const issueNumber = context.issue.number; + const commenter = context.actor; + const owner = context.repo.owner; + const repo = context.repo.repo; + const MAX_ISSUES_ASSIGNED = 3; + + // Search for open issues already assigned to the commenter in this repo + const { data: assignedIssues } = await github.rest.search.issuesAndPullRequests({ + q: `is:issue repo:${owner}/${repo} assignee:${commenter} is:open` + }); + + if (assignedIssues.total_count >= MAX_ISSUES_ASSIGNED) { + await github.rest.issues.createComment({ + owner: owner, + repo: repo, + issue_number: issueNumber, + body: `👋 @${commenter}! You currently have ${assignedIssues.total_count} issues assigned to you. We have a ${MAX_ISSUES_ASSIGNED} max issues assigned at once policy. Once you close out an existing issue it will open up space to take another. 
You can also unassign yourself from an existing issue but please work on a hand-off if someone is expecting work on that issue.` + }); + return; // exit + } + + // Check if the issue is already assigned + const issue = await github.rest.issues.get({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + }); + + if (issue.data.assignees.length > 0) { + // Comment that it's already assigned + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + body: `@${commenter} Thanks for taking interest but this issue is already assigned. We'd still love to have you contribute. Check out our [Help Wanted](https://github.com/google-gemini/gemini-cli/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22) list for issues where we need some extra attention.` + }); + return; + } + + // If not taken, assign the user who commented + await github.rest.issues.addAssignees({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + assignees: [commenter] + }); + + // Post a comment to confirm assignment + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + body: `👋 @${commenter}, you've been assigned to this issue! Thank you for taking the time to contribute. 
Make sure to check out our [contributing guidelines](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md).` + }); diff --git a/projects/gemini-cli/.github/workflows/no-response.yml b/projects/gemini-cli/.github/workflows/no-response.yml new file mode 100644 index 0000000000000000000000000000000000000000..abaad9dbbfed186444a379c8a144ce40a51721c9 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/no-response.yml @@ -0,0 +1,33 @@ +name: 'No Response' + +# Run as a daily cron at 1:45 AM +on: + schedule: + - cron: '45 1 * * *' + workflow_dispatch: + +jobs: + no-response: + runs-on: 'ubuntu-latest' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} + permissions: + issues: 'write' + pull-requests: 'write' + concurrency: + group: '${{ github.workflow }}-no-response' + cancel-in-progress: true + steps: + - uses: 'actions/stale@5bef64f19d7facfb25b37b414482c7164d639639' # ratchet:actions/stale@v9 + with: + repo-token: '${{ secrets.GITHUB_TOKEN }}' + days-before-stale: -1 + days-before-close: 14 + stale-issue-label: 'status/need-information' + close-issue-message: >- + This issue was marked as needing more information and has not received a response in 14 days. + Closing it for now. If you still face this problem, feel free to reopen with more details. Thank you! + stale-pr-label: 'status/need-information' + close-pr-message: >- + This pull request was marked as needing more information and has had no updates in 14 days. + Closing it for now. You are welcome to reopen with the required info. Thanks for contributing! diff --git a/projects/gemini-cli/.github/workflows/release.yml b/projects/gemini-cli/.github/workflows/release.yml new file mode 100644 index 0000000000000000000000000000000000000000..9774f2e66c752ab0db7abc3e24b88db93aaa22f9 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/release.yml @@ -0,0 +1,231 @@ +name: 'Release' + +on: + schedule: + # Runs every day at midnight UTC for the nightly release. 
+ - cron: '0 0 * * *' + # Runs every Tuesday at 23:59 UTC for the preview release. + - cron: '59 23 * * 2' + workflow_dispatch: + inputs: + version: + description: 'The version to release (e.g., v0.1.11). Required for manual patch releases.' + required: false # Not required for scheduled runs + type: 'string' + ref: + description: 'The branch or ref (full git sha) to release from.' + required: true + type: 'string' + default: 'main' + dry_run: + description: 'Run a dry-run of the release process; no branches, npm packages or GitHub releases will be created.' + required: true + type: 'boolean' + default: true + create_nightly_release: + description: 'Auto apply the nightly release tag, input version is ignored.' + required: false + type: 'boolean' + default: false + create_preview_release: + description: 'Auto apply the preview release tag, input version is ignored.' + required: false + type: 'boolean' + default: false + force_skip_tests: + description: 'Select to skip the "Run Tests" step in testing. 
Prod releases should run tests' + required: false + type: 'boolean' + default: false + +jobs: + release: + runs-on: 'ubuntu-latest' + environment: + name: 'production-release' + url: '${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ steps.version.outputs.RELEASE_TAG }}' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} + permissions: + contents: 'write' + packages: 'write' + id-token: 'write' + issues: 'write' # For creating issues on failure + outputs: + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + with: + ref: '${{ github.event.inputs.ref || github.sha }}' + fetch-depth: 0 + + - name: 'Set booleans for simplified logic' + env: + CREATE_NIGHTLY_RELEASE: '${{ github.event.inputs.create_nightly_release }}' + CREATE_PREVIEW_RELEASE: '${{ github.event.inputs.create_preview_release }}' + EVENT_NAME: '${{ github.event_name }}' + CRON: '${{ github.event.schedule }}' + DRY_RUN_INPUT: '${{ github.event.inputs.dry_run }}' + id: 'vars' + run: |- + is_nightly="false" + if [[ "${CRON}" == "0 0 * * *" || "${CREATE_NIGHTLY_RELEASE}" == "true" ]]; then + is_nightly="true" + fi + echo "is_nightly=${is_nightly}" >> "${GITHUB_OUTPUT}" + + is_preview="false" + if [[ "${CRON}" == "59 23 * * 2" || "${CREATE_PREVIEW_RELEASE}" == "true" ]]; then + is_preview="true" + fi + echo "is_preview=${is_preview}" >> "${GITHUB_OUTPUT}" + + is_dry_run="false" + if [[ "${DRY_RUN_INPUT}" == "true" ]]; then + is_dry_run="true" + fi + echo "is_dry_run=${is_dry_run}" >> "${GITHUB_OUTPUT}" + + - name: 'Setup Node.js' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + cache: 'npm' + + - name: 'Install Dependencies' + run: |- + npm ci + + - name: 'Get the version' + id: 'version' + env: + GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + 
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}' + IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}' + MANUAL_VERSION: '${{ inputs.version }}' + run: |- + VERSION_JSON="$(node scripts/get-release-version.js)" + echo "RELEASE_TAG=$(echo "${VERSION_JSON}" | jq -r .releaseTag)" >> "${GITHUB_OUTPUT}" + echo "RELEASE_VERSION=$(echo "${VERSION_JSON}" | jq -r .releaseVersion)" >> "${GITHUB_OUTPUT}" + echo "NPM_TAG=$(echo "${VERSION_JSON}" | jq -r .npmTag)" >> "${GITHUB_OUTPUT}" + echo "PREVIOUS_TAG=$(echo "${VERSION_JSON}" | jq -r .previousReleaseTag)" >> "${GITHUB_OUTPUT}" + + - name: 'Run Tests' + if: |- + ${{ github.event.inputs.force_skip_tests != 'true' }} + env: + GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}' + run: |- + npm run preflight + npm run test:integration:sandbox:none + npm run test:integration:sandbox:docker + + - name: 'Configure Git User' + run: |- + git config user.name "gemini-cli-robot" + git config user.email "gemini-cli-robot@google.com" + + - name: 'Create and switch to a release branch' + id: 'release_branch' + env: + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + run: |- + BRANCH_NAME="release/${RELEASE_TAG}" + git switch -c "${BRANCH_NAME}" + echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}" + + - name: 'Update package versions' + env: + RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}' + run: |- + npm run release:version "${RELEASE_VERSION}" + + - name: 'Commit and Conditionally Push package versions' + env: + BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}' + IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}' + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + run: |- + git add package.json package-lock.json packages/*/package.json + git commit -m "chore(release): ${RELEASE_TAG}" + if [[ "${IS_DRY_RUN}" == "false" ]]; then + echo "Pushing release branch to remote..." + git push --set-upstream origin "${BRANCH_NAME}" --follow-tags + else + echo "Dry run enabled. Skipping push." 
+ fi + + - name: 'Build and Prepare Packages' + run: |- + npm run build:packages + npm run prepare:package + + - name: 'Configure npm for publishing' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + registry-url: 'https://wombat-dressing-room.appspot.com' + scope: '@google' + + - name: 'Publish @google/gemini-cli-core' + env: + IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}' + NODE_AUTH_TOKEN: '${{ secrets.WOMBAT_TOKEN_CORE }}' + NPM_TAG: '${{ steps.version.outputs.NPM_TAG }}' + run: |- + npm publish \ + --dry-run="${IS_DRY_RUN}" \ + --workspace="@google/gemini-cli-core" \ + --tag="${NPM_TAG}" + + - name: 'Install latest core package' + if: |- + ${{ steps.vars.outputs.is_dry_run == 'false' }} + env: + RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}' + run: |- + npm install "@google/gemini-cli-core@${RELEASE_VERSION}" \ + --workspace="@google/gemini-cli" \ + --save-exact + + - name: 'Publish @google/gemini-cli' + env: + IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}' + NODE_AUTH_TOKEN: '${{ secrets.WOMBAT_TOKEN_CLI }}' + NPM_TAG: '${{ steps.version.outputs.NPM_TAG }}' + run: |- + npm publish \ + --dry-run="${IS_DRY_RUN}" \ + --workspace="@google/gemini-cli" \ + --tag="${NPM_TAG}" + + - name: 'Create GitHub Release and Tag' + if: |- + ${{ steps.vars.outputs.is_dry_run == 'false' }} + env: + GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}' + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + PREVIOUS_TAG: '${{ steps.version.outputs.PREVIOUS_TAG }}' + run: |- + gh release create "${RELEASE_TAG}" \ + bundle/gemini.js \ + --target "$RELEASE_BRANCH" \ + --title "Release ${RELEASE_TAG}" \ + --notes-start-tag "$PREVIOUS_TAG" \ + --generate-notes + + - name: 'Create Issue on Failure' + if: |- + ${{ failure() }} + env: + GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + RELEASE_TAG: '${{ 
steps.version.outputs.RELEASE_TAG || ''N/A'' }}' + DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}' + run: |- + gh issue create \ + --title "Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')" \ + --body "The release workflow failed. See the full run for details: ${DETAILS_URL}" \ + --label "kind/bug,release-failure,priority/p0" diff --git a/projects/gemini-cli/.github/workflows/stale.yml b/projects/gemini-cli/.github/workflows/stale.yml new file mode 100644 index 0000000000000000000000000000000000000000..87354b5706f820e15be322135d84cb8557656f83 --- /dev/null +++ b/projects/gemini-cli/.github/workflows/stale.yml @@ -0,0 +1,39 @@ +name: 'Mark stale issues and pull requests' + +# Run as a daily cron at 1:30 AM +on: + schedule: + - cron: '30 1 * * *' + workflow_dispatch: + +jobs: + stale: + runs-on: 'ubuntu-latest' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} + permissions: + issues: 'write' + pull-requests: 'write' + concurrency: + group: '${{ github.workflow }}-stale' + cancel-in-progress: true + steps: + - uses: 'actions/stale@5bef64f19d7facfb25b37b414482c7164d639639' # ratchet:actions/stale@v9 + with: + repo-token: '${{ secrets.GITHUB_TOKEN }}' + stale-issue-message: >- + This issue has been automatically marked as stale due to 60 days of inactivity. + It will be closed in 14 days if no further activity occurs. + stale-pr-message: >- + This pull request has been automatically marked as stale due to 60 days of inactivity. + It will be closed in 14 days if no further activity occurs. + close-issue-message: >- + This issue has been closed due to 14 additional days of inactivity after being marked as stale. + If you believe this is still relevant, feel free to comment or reopen the issue. Thank you! + close-pr-message: >- + This pull request has been closed due to 14 additional days of inactivity after being marked as stale. 
+ If this is still relevant, you are welcome to reopen or leave a comment. Thanks for contributing! + days-before-stale: 60 + days-before-close: 14 + exempt-issue-labels: 'pinned,security' + exempt-pr-labels: 'pinned,security' diff --git a/projects/gemini-cli/docs/assets/connected_devtools.png b/projects/gemini-cli/docs/assets/connected_devtools.png new file mode 100644 index 0000000000000000000000000000000000000000..2e91e7563ff42694b16a65013216259c1c5816f7 Binary files /dev/null and b/projects/gemini-cli/docs/assets/connected_devtools.png differ diff --git a/projects/gemini-cli/docs/assets/gemini-screenshot.png b/projects/gemini-cli/docs/assets/gemini-screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..7f1932363cb695db337d1f2e932b098ba9d7ced4 Binary files /dev/null and b/projects/gemini-cli/docs/assets/gemini-screenshot.png differ diff --git a/projects/gemini-cli/docs/assets/release_patch.png b/projects/gemini-cli/docs/assets/release_patch.png new file mode 100644 index 0000000000000000000000000000000000000000..eb0f4f20ed4920d37671e87b9235d5965883cc77 Binary files /dev/null and b/projects/gemini-cli/docs/assets/release_patch.png differ diff --git a/projects/gemini-cli/docs/assets/theme-ansi-light.png b/projects/gemini-cli/docs/assets/theme-ansi-light.png new file mode 100644 index 0000000000000000000000000000000000000000..d8551bb4b2f7e3d815aa2c7b79d135961ca92135 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-ansi-light.png differ diff --git a/projects/gemini-cli/docs/assets/theme-ansi.png b/projects/gemini-cli/docs/assets/theme-ansi.png new file mode 100644 index 0000000000000000000000000000000000000000..f77e44a7bffa22789f6515725a26ccf4ab13962e Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-ansi.png differ diff --git a/projects/gemini-cli/docs/assets/theme-atom-one.png b/projects/gemini-cli/docs/assets/theme-atom-one.png new file mode 100644 index 
0000000000000000000000000000000000000000..945c2ea35276052cc16638e123577a24f618c2c5 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-atom-one.png differ diff --git a/projects/gemini-cli/docs/assets/theme-ayu-light.png b/projects/gemini-cli/docs/assets/theme-ayu-light.png new file mode 100644 index 0000000000000000000000000000000000000000..a1ff5c71998c3481adc1dd1b23b58d4aaf2ed152 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-ayu-light.png differ diff --git a/projects/gemini-cli/docs/assets/theme-ayu.png b/projects/gemini-cli/docs/assets/theme-ayu.png new file mode 100644 index 0000000000000000000000000000000000000000..5c4f6add5f051521355f2ff7f60e6a21fd23ebe4 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-ayu.png differ diff --git a/projects/gemini-cli/docs/assets/theme-custom.png b/projects/gemini-cli/docs/assets/theme-custom.png new file mode 100644 index 0000000000000000000000000000000000000000..89b373264f6e7b07a0c009e4bcf6271840634415 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-custom.png differ diff --git a/projects/gemini-cli/docs/assets/theme-default-light.png b/projects/gemini-cli/docs/assets/theme-default-light.png new file mode 100644 index 0000000000000000000000000000000000000000..9192bdaca1daf6f282df1a7fc1c70ef810eeb436 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-default-light.png differ diff --git a/projects/gemini-cli/docs/assets/theme-default.png b/projects/gemini-cli/docs/assets/theme-default.png new file mode 100644 index 0000000000000000000000000000000000000000..45fdf741701112d3d5e53c55b1492a7e0b083851 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-default.png differ diff --git a/projects/gemini-cli/docs/assets/theme-dracula.png b/projects/gemini-cli/docs/assets/theme-dracula.png new file mode 100644 index 0000000000000000000000000000000000000000..8c620b28c93e1d494164fe6b1eaf707c530ef649 Binary files /dev/null and 
b/projects/gemini-cli/docs/assets/theme-dracula.png differ diff --git a/projects/gemini-cli/docs/assets/theme-github-light.png b/projects/gemini-cli/docs/assets/theme-github-light.png new file mode 100644 index 0000000000000000000000000000000000000000..20be05612ddedb5053405c150793428b449ad3a9 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-github-light.png differ diff --git a/projects/gemini-cli/docs/assets/theme-github.png b/projects/gemini-cli/docs/assets/theme-github.png new file mode 100644 index 0000000000000000000000000000000000000000..a9a5465eec3b5b9b9b4c9d4a051df552ee59c6e1 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-github.png differ diff --git a/projects/gemini-cli/docs/assets/theme-google-light.png b/projects/gemini-cli/docs/assets/theme-google-light.png new file mode 100644 index 0000000000000000000000000000000000000000..6b7f53d668b0fcedd8fef0d1eb6a3299b353ff7e Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-google-light.png differ diff --git a/projects/gemini-cli/docs/assets/theme-xcode-light.png b/projects/gemini-cli/docs/assets/theme-xcode-light.png new file mode 100644 index 0000000000000000000000000000000000000000..6f77df96618faf90b698c3dc99c4eeeccff3eab3 Binary files /dev/null and b/projects/gemini-cli/docs/assets/theme-xcode-light.png differ diff --git a/projects/gemini-cli/docs/cli/authentication.md b/projects/gemini-cli/docs/cli/authentication.md new file mode 100644 index 0000000000000000000000000000000000000000..9b3fa3ad3241331b74784e2b552f5d0bf3cc8e23 --- /dev/null +++ b/projects/gemini-cli/docs/cli/authentication.md @@ -0,0 +1,173 @@ +# Authentication Setup + +The Gemini CLI requires you to authenticate with Google's AI services. On initial startup you'll need to configure **one** of the following authentication methods: + +1. **Login with Google (Gemini Code Assist):** + - Use this option to log in with your Google account. 
+ - During initial startup, Gemini CLI will direct you to a webpage for authentication. Once authenticated, your credentials will be cached locally so the web login can be skipped on subsequent runs. + - Note that the web login must be done in a browser that can communicate with the machine Gemini CLI is being run from. (Specifically, the browser will be redirected to a localhost url that Gemini CLI will be listening on). + - Users may have to specify a GOOGLE_CLOUD_PROJECT if: + 1. You have a Google Workspace account. Google Workspace is a paid service for businesses and organizations that provides a suite of productivity tools, including a custom email domain (e.g. your-name@your-company.com), enhanced security features, and administrative controls. These accounts are often managed by an employer or school. + 1. You have received a Gemini Code Assist license through the [Google Developer Program](https://developers.google.com/program/plans-and-pricing) (including qualified Google Developer Experts) + 1. You have been assigned a license to a current Gemini Code Assist standard or enterprise subscription. + 1. You are using the product outside the [supported regions](https://developers.google.com/gemini-code-assist/resources/available-locations) for free individual usage. + 1. You are a Google account holder under the age of 18 + - If you fall into one of these categories, you must first configure a Google Cloud Project ID to use, [enable the Gemini for Cloud API](https://cloud.google.com/gemini/docs/discover/set-up-gemini#enable-api) and [configure access permissions](https://cloud.google.com/gemini/docs/discover/set-up-gemini#grant-iam). 
+ + You can temporarily set the environment variable in your current shell session using the following command: + + ```bash + export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID" + ``` + - For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file: + + ```bash + echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc + source ~/.bashrc + ``` + +2. **Gemini API key:** + - Obtain your API key from Google AI Studio: [https://aistudio.google.com/app/apikey](https://aistudio.google.com/app/apikey) + - Set the `GEMINI_API_KEY` environment variable. In the following methods, replace `YOUR_GEMINI_API_KEY` with the API key you obtained from Google AI Studio: + - You can temporarily set the environment variable in your current shell session using the following command: + ```bash + export GEMINI_API_KEY="YOUR_GEMINI_API_KEY" + ``` + - For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files). + + - Alternatively you can export the API key from your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file: + + ```bash + echo 'export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"' >> ~/.bashrc + source ~/.bashrc + ``` + + :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it. + +3. **Vertex AI:** + - **API Key:** + - Obtain your Google Cloud API key: [Get an API Key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=newuser) + - Set the `GOOGLE_API_KEY` environment variable. 
In the following methods, replace `YOUR_GOOGLE_API_KEY` with your Vertex AI API key: + - You can temporarily set the environment variable in your current shell session using the following command: + ```bash + export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" + ``` + - For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file: + + ```bash + echo 'export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"' >> ~/.bashrc + source ~/.bashrc + ``` + + :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it. + + > **Note:** + > If you encounter an error like `"API keys are not supported by this API - Expected OAuth2 access token or other authentication credentials that assert a principal"`, it is likely that your organization has restricted the creation of service account API keys. In this case, please try the [service account JSON key](#service-account-json-key) method described below. + + - **Application Default Credentials (ADC):** + + > **Note:** + > If you have previously set the `GOOGLE_API_KEY` or `GEMINI_API_KEY` environment variables, you must unset them to use Application Default Credentials. + > + > ```bash + > unset GOOGLE_API_KEY GEMINI_API_KEY + > ``` + - **Using `gcloud` (for local development):** + - Ensure you have a Google Cloud project and have enabled the Vertex AI API. + - Log in with your user credentials: + ```bash + gcloud auth application-default login + ``` + For more information, see [Set up Application Default Credentials for Google Cloud](https://cloud.google.com/docs/authentication/provide-credentials-adc). 
+ - **Using a Service Account (for applications or when service account API keys are restricted):** + - If you are unable to create an API key due to [organization policies](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=existinguser#expandable-2), or if you are running in a non-interactive environment, you can authenticate using a service account key. + - [Create a service account and key](https://cloud.google.com/iam/docs/keys-create-delete), and download the JSON key file. The service account will need to be assigned the "Vertex AI User" role. + - Set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to the absolute path of the JSON file. + - You can temporarily set the environment variable in your current shell session: + ```bash + export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/keyfile.json" + ``` + - For repeated use, you can add the command to your shell's configuration file (e.g., `~/.bashrc`). + ```bash + echo 'export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/keyfile.json"' >> ~/.bashrc + source ~/.bashrc + ``` + :warning: Be advised that when you export service account credentials inside your shell configuration file, any other process executed from the shell can read it. + + - **Required Environment Variables for ADC:** + - When using ADC (either with `gcloud` or a service account), you must also set the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables. 
In the following methods, replace `YOUR_PROJECT_ID` and `YOUR_PROJECT_LOCATION` with the relevant values for your project: + - You can temporarily set these environment variables in your current shell session using the following commands: + ```bash + export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID" + export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION" # e.g., us-central1 + ``` + - For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file: + ```bash + echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc + echo 'export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"' >> ~/.bashrc + source ~/.bashrc + ``` + +4. **Cloud Shell:** + - This option is only available when running in a Google Cloud Shell environment. + - It automatically uses the credentials of the logged-in user in the Cloud Shell environment. + - This is the default authentication method when running in Cloud Shell and no other method is configured. + + :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it. + +### Persisting Environment Variables with `.env` Files + +You can create a **`.gemini/.env`** file in your project directory or in your home directory. Creating a plain **`.env`** file also works, but `.gemini/.env` is recommended to keep Gemini variables isolated from other tools. + +**Important:** Some environment variables (like `DEBUG` and `DEBUG_MODE`) are automatically excluded from project `.env` files to prevent interference with gemini-cli behavior. Use `.gemini/.env` files for gemini-cli specific variables. + +Gemini CLI automatically loads environment variables from the **first** `.env` file it finds, using the following search order: + +1. 
Starting in the **current directory** and moving upward toward `/`, for each directory it checks: + 1. `.gemini/.env` + 2. `.env` +2. If no file is found, it falls back to your **home directory**: + - `~/.gemini/.env` + - `~/.env` + +> **Important:** The search stops at the **first** file encountered—variables are **not merged** across multiple files. + +#### Examples + +**Project-specific overrides** (take precedence when you are inside the project): + +```bash +mkdir -p .gemini +echo 'GOOGLE_CLOUD_PROJECT="your-project-id"' >> .gemini/.env +``` + +**User-wide settings** (available in every directory): + +```bash +mkdir -p ~/.gemini +cat >> ~/.gemini/.env <<'EOF' +GOOGLE_CLOUD_PROJECT="your-project-id" +GEMINI_API_KEY="your-gemini-api-key" +EOF +``` + +## Non-Interactive Mode / Headless Environments + +When running the Gemini CLI in a non-interactive environment, you cannot use the interactive login flow. +Instead, you must configure authentication using environment variables. + +The CLI will automatically detect if it is running in a non-interactive terminal and will use one of the +following authentication methods if available: + +1. **Gemini API Key:** + - Set the `GEMINI_API_KEY` environment variable. + - The CLI will use this key to authenticate with the Gemini API. + +2. **Vertex AI:** + - Set the `GOOGLE_GENAI_USE_VERTEXAI=true` environment variable. + - **Using an API Key:** Set the `GOOGLE_API_KEY` environment variable. + - **Using Application Default Credentials (ADC):** + - Run `gcloud auth application-default login` in your environment to configure ADC. + - Ensure the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables are set. + +If none of these environment variables are set in a non-interactive session, the CLI will exit with an error. 
diff --git a/projects/gemini-cli/docs/cli/commands.md b/projects/gemini-cli/docs/cli/commands.md new file mode 100644 index 0000000000000000000000000000000000000000..dc7fc2169ca45d6797de511fdcc0b07beb73984c --- /dev/null +++ b/projects/gemini-cli/docs/cli/commands.md @@ -0,0 +1,430 @@ +# CLI Commands + +Gemini CLI supports several built-in commands to help you manage your session, customize the interface, and control its behavior. These commands are prefixed with a forward slash (`/`), an at symbol (`@`), or an exclamation mark (`!`). + +## Slash commands (`/`) + +Slash commands provide meta-level control over the CLI itself. + +### Built-in Commands + +- **`/bug`** + - **Description:** File an issue about Gemini CLI. By default, the issue is filed within the GitHub repository for Gemini CLI. The string you enter after `/bug` will become the headline for the bug being filed. The default `/bug` behavior can be modified using the `advanced.bugCommand` setting in your `.gemini/settings.json` files. + +- **`/chat`** + - **Description:** Save and resume conversation history for branching conversation state interactively, or resuming a previous state from a later session. + - **Sub-commands:** + - **`save`** + - **Description:** Saves the current conversation history. You must add a `<tag>` for identifying the conversation state. + - **Usage:** `/chat save <tag>` + - **Details on Checkpoint Location:** The default locations for saved chat checkpoints are: + - Linux/macOS: `~/.gemini/tmp/<project_hash>/` + - Windows: `C:\Users\<YourUsername>\.gemini\tmp\<project_hash>\` + - When you run `/chat list`, the CLI only scans these specific directories to find available checkpoints. + - **Note:** These checkpoints are for manually saving and resuming conversation states. For automatic checkpoints created before file modifications, see the [Checkpointing documentation](../checkpointing.md). + - **`resume`** + - **Description:** Resumes a conversation from a previous save. 
+ - **Usage:** `/chat resume <tag>` + - **`list`** + - **Description:** Lists available tags for chat state resumption. + - **`delete`** + - **Description:** Deletes a saved conversation checkpoint. + - **Usage:** `/chat delete <tag>` + +- **`/clear`** + - **Description:** Clear the terminal screen, including the visible session history and scrollback within the CLI. The underlying session data (for history recall) might be preserved depending on the exact implementation, but the visual display is cleared. + - **Keyboard shortcut:** Press **Ctrl+L** at any time to perform a clear action. + +- **`/compress`** + - **Description:** Replace the entire chat context with a summary. This saves on tokens used for future tasks while retaining a high level summary of what has happened. + +- **`/copy`** + - **Description:** Copies the last output produced by Gemini CLI to your clipboard, for easy sharing or reuse. + - **Note:** This command requires platform-specific clipboard tools to be installed. + - On Linux, it requires `xclip` or `xsel`. You can typically install them using your system's package manager. + - On macOS, it requires `pbcopy`, and on Windows, it requires `clip`. These tools are typically pre-installed on their respective systems. + +- **`/directory`** (or **`/dir`**) + - **Description:** Manage workspace directories for multi-directory support. + - **Sub-commands:** + - **`add`**: + - **Description:** Add a directory to the workspace. The path can be absolute or relative to the current working directory. Moreover, the reference from home directory is supported as well. + - **Usage:** `/directory add <path1>,<path2>` + - **Note:** Disabled in restrictive sandbox profiles. If you're using that, use `--include-directories` when starting the session instead. + - **`show`**: + - **Description:** Display all directories added by `/directory add` and `--include-directories`. 
+ - **Usage:** `/directory show` + +- **`/editor`** + - **Description:** Open a dialog for selecting supported editors. + +- **`/extensions`** + - **Description:** Lists all active extensions in the current Gemini CLI session. See [Gemini CLI Extensions](../extension.md). + +- **`/help`** (or **`/?`**) + - **Description:** Display help information about Gemini CLI, including available commands and their usage. + +- **`/mcp`** + - **Description:** List configured Model Context Protocol (MCP) servers, their connection status, server details, and available tools. + - **Sub-commands:** + - **`desc`** or **`descriptions`**: + - **Description:** Show detailed descriptions for MCP servers and tools. + - **`nodesc`** or **`nodescriptions`**: + - **Description:** Hide tool descriptions, showing only the tool names. + - **`schema`**: + - **Description:** Show the full JSON schema for the tool's configured parameters. + - **Keyboard Shortcut:** Press **Ctrl+T** at any time to toggle between showing and hiding tool descriptions. + +- **`/memory`** + - **Description:** Manage the AI's instructional context (hierarchical memory loaded from `GEMINI.md` files). + - **Sub-commands:** + - **`add`**: + - **Description:** Adds the following text to the AI's memory. Usage: `/memory add <text to remember>` + - **`show`**: + - **Description:** Display the full, concatenated content of the current hierarchical memory that has been loaded from all `GEMINI.md` files. This lets you inspect the instructional context being provided to the Gemini model. + - **`refresh`**: + - **Description:** Reload the hierarchical instructional memory from all `GEMINI.md` files found in the configured locations (global, project/ancestors, and sub-directories). This command updates the model with the latest `GEMINI.md` content. 
+ - **Note:** For more details on how `GEMINI.md` files contribute to hierarchical memory, see the [CLI Configuration documentation](./configuration.md#4-geminimd-files-hierarchical-instructional-context). + +- **`/restore`** + - **Description:** Restores the project files to the state they were in just before a tool was executed. This is particularly useful for undoing file edits made by a tool. If run without a tool call ID, it will list available checkpoints to restore from. + - **Usage:** `/restore [tool_call_id]` + - **Note:** Only available if the CLI is invoked with the `--checkpointing` option or configured via [settings](./configuration.md). See [Checkpointing documentation](../checkpointing.md) for more details. + +- **`/settings`** + - **Description:** Open the settings editor to view and modify Gemini CLI settings. + - **Details:** This command provides a user-friendly interface for changing settings that control the behavior and appearance of Gemini CLI. It is equivalent to manually editing the `.gemini/settings.json` file, but with validation and guidance to prevent errors. + - **Usage:** Simply run `/settings` and the editor will open. You can then browse or search for specific settings, view their current values, and modify them as desired. Changes to some settings are applied immediately, while others require a restart. + +- **`/stats`** + - **Description:** Display detailed statistics for the current Gemini CLI session, including token usage, cached token savings (when available), and session duration. Note: Cached token information is only displayed when cached tokens are being used, which occurs with API key authentication but not with OAuth authentication at this time. + +- [**`/theme`**](./themes.md) + - **Description:** Open a dialog that lets you change the visual theme of Gemini CLI. + +- **`/auth`** + - **Description:** Open a dialog that lets you change the authentication method. + +- **`/about`** + - **Description:** Show version info. 
Please share this information when filing issues. + +- [**`/tools`**](../tools/index.md) + - **Description:** Display a list of tools that are currently available within Gemini CLI. + - **Usage:** `/tools [desc]` + - **Sub-commands:** + - **`desc`** or **`descriptions`**: + - **Description:** Show detailed descriptions of each tool, including each tool's name with its full description as provided to the model. + - **`nodesc`** or **`nodescriptions`**: + - **Description:** Hide tool descriptions, showing only the tool names. + +- **`/privacy`** + - **Description:** Display the Privacy Notice and allow users to select whether they consent to the collection of their data for service improvement purposes. + +- **`/quit`** (or **`/exit`**) + - **Description:** Exit Gemini CLI. + +- **`/vim`** + - **Description:** Toggle vim mode on or off. When vim mode is enabled, the input area supports vim-style navigation and editing commands in both NORMAL and INSERT modes. + - **Features:** + - **NORMAL mode:** Navigate with `h`, `j`, `k`, `l`; jump by words with `w`, `b`, `e`; go to line start/end with `0`, `$`, `^`; go to specific lines with `G` (or `gg` for first line) + - **INSERT mode:** Standard text input with escape to return to NORMAL mode + - **Editing commands:** Delete with `x`, change with `c`, insert with `i`, `a`, `o`, `O`; complex operations like `dd`, `cc`, `dw`, `cw` + - **Count support:** Prefix commands with numbers (e.g., `3h`, `5w`, `10G`) + - **Repeat last command:** Use `.` to repeat the last editing operation + - **Persistent setting:** Vim mode preference is saved to `~/.gemini/settings.json` and restored between sessions + - **Status indicator:** When enabled, shows `[NORMAL]` or `[INSERT]` in the footer + +- **`/init`** + - **Description:** To help users easily create a `GEMINI.md` file, this command analyzes the current directory and generates a tailored context file, making it simpler for them to provide project-specific instructions to the Gemini 
agent. + +### Custom Commands + +For a quick start, see the [example](#example-a-pure-function-refactoring-command) below. + +Custom commands allow you to save and reuse your favorite or most frequently used prompts as personal shortcuts within Gemini CLI. You can create commands that are specific to a single project or commands that are available globally across all your projects, streamlining your workflow and ensuring consistency. + +#### File Locations & Precedence + +Gemini CLI discovers commands from two locations, loaded in a specific order: + +1. **User Commands (Global):** Located in `~/.gemini/commands/`. These commands are available in any project you are working on. +2. **Project Commands (Local):** Located in `/.gemini/commands/`. These commands are specific to the current project and can be checked into version control to be shared with your team. + +If a command in the project directory has the same name as a command in the user directory, the **project command will always be used.** This allows projects to override global commands with project-specific versions. + +#### Naming and Namespacing + +The name of a command is determined by its file path relative to its `commands` directory. Subdirectories are used to create namespaced commands, with the path separator (`/` or `\`) being converted to a colon (`:`). + +- A file at `~/.gemini/commands/test.toml` becomes the command `/test`. +- A file at `/.gemini/commands/git/commit.toml` becomes the namespaced command `/git:commit`. + +#### TOML File Format (v1) + +Your command definition files must be written in the TOML format and use the `.toml` file extension. + +##### Required Fields + +- `prompt` (String): The prompt that will be sent to the Gemini model when the command is executed. This can be a single-line or multi-line string. + +##### Optional Fields + +- `description` (String): A brief, one-line description of what the command does. 
This text will be displayed next to your command in the `/help` menu. **If you omit this field, a generic description will be generated from the filename.** + +#### Handling Arguments + +Custom commands support two powerful methods for handling arguments. The CLI automatically chooses the correct method based on the content of your command's `prompt`. + +##### 1. Context-Aware Injection with `{{args}}` + +If your `prompt` contains the special placeholder `{{args}}`, the CLI will replace that placeholder with the text the user typed after the command name. + +The behavior of this injection depends on where it is used: + +**A. Raw Injection (Outside Shell Commands)** + +When used in the main body of the prompt, the arguments are injected exactly as the user typed them. + +**Example (`git/fix.toml`):** + +```toml +# Invoked via: /git:fix "Button is misaligned" + +description = "Generates a fix for a given issue." +prompt = "Please provide a code fix for the issue described here: {{args}}." +``` + +The model receives: `Please provide a code fix for the issue described here: "Button is misaligned".` + +**B. Using Arguments in Shell Commands (Inside `!{...}` Blocks)** + +When you use `{{args}}` inside a shell injection block (`!{...}`), the arguments are automatically **shell-escaped** before replacement. This allows you to safely pass arguments to shell commands, ensuring the resulting command is syntactically correct and secure while preventing command injection vulnerabilities. + +**Example (`/grep-code.toml`):** + +```toml +prompt = """ +Please summarize the findings for the pattern `{{args}}`. + +Search Results: +!{grep -r {{args}} .} +""" +``` + +When you run `/grep-code It's complicated`: + +1. The CLI sees `{{args}}` used both outside and inside `!{...}`. +2. Outside: The first `{{args}}` is replaced raw with `It's complicated`. +3. Inside: The second `{{args}}` is replaced with the escaped version (e.g., on Linux: `"It's complicated"`). +4. 
The command executed is `grep -r "It's complicated" .`. +5. The CLI prompts you to confirm this exact, secure command before execution. +6. The final prompt is sent. + +##### 2. Default Argument Handling + +If your `prompt` does **not** contain the special placeholder `{{args}}`, the CLI uses a default behavior for handling arguments. + +If you provide arguments to the command (e.g., `/mycommand arg1`), the CLI will append the full command you typed to the end of the prompt, separated by two newlines. This allows the model to see both the original instructions and the specific arguments you just provided. + +If you do **not** provide any arguments (e.g., `/mycommand`), the prompt is sent to the model exactly as it is, with nothing appended. + +**Example (`changelog.toml`):** + +This example shows how to create a robust command by defining a role for the model, explaining where to find the user's input, and specifying the expected format and behavior. + +```toml +# In: <project>/.gemini/commands/changelog.toml +# Invoked via: /changelog 1.2.0 added "Support for default argument parsing." + +description = "Adds a new entry to the project's CHANGELOG.md file." +prompt = """ +# Task: Update Changelog + +You are an expert maintainer of this software project. A user has invoked a command to add a new entry to the changelog. + +**The user's raw command is appended below your instructions.** + +Your task is to parse the `<version>`, `<type>`, and `<message>` from their input and use the `write_file` tool to correctly update the `CHANGELOG.md` file. + +## Expected Format +The command follows this format: `/changelog <version> <type> <message>` +- `<type>` must be one of: "added", "changed", "fixed", "removed". + +## Behavior +1. Read the `CHANGELOG.md` file. +2. Find the section for the specified `<version>`. +3. Add the `<message>` under the correct `<type>` heading. +4. If the version or type section doesn't exist, create it. +5. Adhere strictly to the "Keep a Changelog" format. 
+""" +``` + +When you run `/changelog 1.2.0 added "New feature"`, the final text sent to the model will be the original prompt followed by two newlines and the command you typed. + +##### 3. Executing Shell Commands with `!{...}` + +You can make your commands dynamic by executing shell commands directly within your `prompt` and injecting their output. This is ideal for gathering context from your local environment, like reading file content or checking the status of Git. + +When a custom command attempts to execute a shell command, Gemini CLI will now prompt you for confirmation before proceeding. This is a security measure to ensure that only intended commands can be run. + +**How It Works:** + +1. **Inject Commands:** Use the `!{...}` syntax. +2. **Argument Substitution:** If `{{args}}` is present inside the block, it is automatically shell-escaped (see [Context-Aware Injection](#1-context-aware-injection-with-args) above). +3. **Robust Parsing:** The parser correctly handles complex shell commands that include nested braces, such as JSON payloads. **Note:** The content inside `!{...}` must have balanced braces (`{` and `}`). If you need to execute a command containing unbalanced braces, consider wrapping it in an external script file and calling the script within the `!{...}` block. +4. **Security Check and Confirmation:** The CLI performs a security check on the final, resolved command (after arguments are escaped and substituted). A dialog will appear showing the exact command(s) to be executed. +5. **Execution and Error Reporting:** The command is executed. If the command fails, the output injected into the prompt will include the error messages (stderr) followed by a status line, e.g., `[Shell command exited with code 1]`. This helps the model understand the context of the failure. + +**Example (`git/commit.toml`):** + +This command gets the staged git diff and uses it to ask the model to write a commit message. 
+ +````toml +# In: /.gemini/commands/git/commit.toml +# Invoked via: /git:commit + +description = "Generates a Git commit message based on staged changes." + +# The prompt uses !{...} to execute the command and inject its output. +prompt = """ +Please generate a Conventional Commit message based on the following git diff: + +```diff +!{git diff --staged} +``` + +""" + +```` + +When you run `/git:commit`, the CLI first executes `git diff --staged`, then replaces `!{git diff --staged}` with the output of that command before sending the final, complete prompt to the model. + +##### 4. Injecting File Content with `@{...}` + +You can directly embed the content of a file or a directory listing into your prompt using the `@{...}` syntax. This is useful for creating commands that operate on specific files. + +**How It Works:** + +- **File Injection**: `@{path/to/file.txt}` is replaced by the content of `file.txt`. +- **Multimodal Support**: If the path points to a supported image (e.g., PNG, JPEG), PDF, audio, or video file, it will be correctly encoded and injected as multimodal input. Other binary files are handled gracefully and skipped. +- **Directory Listing**: `@{path/to/dir}` is traversed and each file present within the directory and all subdirectories are inserted into the prompt. This respects `.gitignore` and `.geminiignore` if enabled. +- **Workspace-Aware**: The command searches for the path in the current directory and any other workspace directories. Absolute paths are allowed if they are within the workspace. +- **Processing Order**: File content injection with `@{...}` is processed _before_ shell commands (`!{...}`) and argument substitution (`{{args}}`). +- **Parsing**: The parser requires the content inside `@{...}` (the path) to have balanced braces (`{` and `}`). + +**Example (`review.toml`):** + +This command injects the content of a _fixed_ best practices file (`docs/best-practices.md`) and uses the user's arguments to provide context for the review. 
+ +```toml +# In: /.gemini/commands/review.toml +# Invoked via: /review FileCommandLoader.ts + +description = "Reviews the provided context using a best practice guide." +prompt = """ +You are an expert code reviewer. + +Your task is to review {{args}}. + +Use the following best practices when providing your review: + +@{docs/best-practices.md} +""" +``` + +When you run `/review FileCommandLoader.ts`, the `@{docs/best-practices.md}` placeholder is replaced by the content of that file, and `{{args}}` is replaced by the text you provided, before the final prompt is sent to the model. + +--- + +#### Example: A "Pure Function" Refactoring Command + +Let's create a global command that asks the model to refactor a piece of code. + +**1. Create the file and directories:** + +First, ensure the user commands directory exists, then create a `refactor` subdirectory for organization and the final TOML file. + +```bash +mkdir -p ~/.gemini/commands/refactor +touch ~/.gemini/commands/refactor/pure.toml +``` + +**2. Add the content to the file:** + +Open `~/.gemini/commands/refactor/pure.toml` in your editor and add the following content. We are including the optional `description` for best practice. + +```toml +# In: ~/.gemini/commands/refactor/pure.toml +# This command will be invoked via: /refactor:pure + +description = "Asks the model to refactor the current context into a pure function." + +prompt = """ +Please analyze the code I've provided in the current context. +Refactor it into a pure function. + +Your response should include: +1. The refactored, pure function code block. +2. A brief explanation of the key changes you made and why they contribute to purity. +""" +``` + +**3. Run the Command:** + +That's it! You can now run your command in the CLI. First, you might add a file to the context, and then invoke your command: + +``` +> @my-messy-function.js +> /refactor:pure +``` + +Gemini CLI will then execute the multi-line prompt defined in your TOML file. 
+ +## At commands (`@`) + +At commands are used to include the content of files or directories as part of your prompt to Gemini. These commands include git-aware filtering. + +- **`@<path_to_file_or_directory>`** + - **Description:** Inject the content of the specified file or files into your current prompt. This is useful for asking questions about specific code, text, or collections of files. + - **Examples:** + - `@path/to/your/file.txt Explain this text.` + - `@src/my_project/ Summarize the code in this directory.` + - `What is this file about? @README.md` + - **Details:** + - If a path to a single file is provided, the content of that file is read. + - If a path to a directory is provided, the command attempts to read the content of files within that directory and any subdirectories. + - Spaces in paths should be escaped with a backslash (e.g., `@My\ Documents/file.txt`). + - The command uses the `read_many_files` tool internally. The content is fetched and then inserted into your query before being sent to the Gemini model. + - **Git-aware filtering:** By default, git-ignored files (like `node_modules/`, `dist/`, `.env`, `.git/`) are excluded. This behavior can be changed via the `context.fileFiltering` settings. + - **File types:** The command is intended for text-based files. While it might attempt to read any file, binary files or very large files might be skipped or truncated by the underlying `read_many_files` tool to ensure performance and relevance. The tool indicates if files were skipped. + - **Output:** The CLI will show a tool call message indicating that `read_many_files` was used, along with a message detailing the status and the path(s) that were processed. + +- **`@` (Lone at symbol)** + - **Description:** If you type a lone `@` symbol without a path, the query is passed as-is to the Gemini model. This might be useful if you are specifically talking _about_ the `@` symbol in your prompt. 
+ +### Error handling for `@` commands + +- If the path specified after `@` is not found or is invalid, an error message will be displayed, and the query might not be sent to the Gemini model, or it will be sent without the file content. +- If the `read_many_files` tool encounters an error (e.g., permission issues), this will also be reported. + +## Shell mode & passthrough commands (`!`) + +The `!` prefix lets you interact with your system's shell directly from within Gemini CLI. + +- **`!<shell_command>`** + - **Description:** Execute the given `<shell_command>` using `bash` on Linux/macOS or `cmd.exe` on Windows. Any output or errors from the command are displayed in the terminal. + - **Examples:** + - `!ls -la` (executes `ls -la` and returns to Gemini CLI) + - `!git status` (executes `git status` and returns to Gemini CLI) + +- **`!` (Toggle shell mode)** + - **Description:** Typing `!` on its own toggles shell mode. + - **Entering shell mode:** + - When active, shell mode uses a different coloring and a "Shell Mode Indicator". + - While in shell mode, text you type is interpreted directly as a shell command. + - **Exiting shell mode:** + - When exited, the UI reverts to its standard appearance and normal Gemini CLI behavior resumes. + +- **Caution for all `!` usage:** Commands you execute in shell mode have the same permissions and impact as if you ran them directly in your terminal. + +- **Environment Variable:** When a command is executed via `!` or in shell mode, the `GEMINI_CLI=1` environment variable is set in the subprocess's environment. This allows scripts or tools to detect if they are being run from within the Gemini CLI. 
diff --git a/projects/gemini-cli/docs/cli/configuration-v1.md b/projects/gemini-cli/docs/cli/configuration-v1.md new file mode 100644 index 0000000000000000000000000000000000000000..cc8267b59c19356b160f8d113bb27c6cef60ffd0 --- /dev/null +++ b/projects/gemini-cli/docs/cli/configuration-v1.md @@ -0,0 +1,643 @@ +# Gemini CLI Configuration + +**Note on Deprecated Configuration Format** + +This document describes the legacy v1 format for the `settings.json` file. This format is now deprecated. + +- The new format will be supported in the stable release starting **[09/10/25]**. +- Automatic migration from the old format to the new format will begin on **[09/17/25]**. + +For details on the new, recommended format, please see the [current Configuration documentation](./configuration.md). + +Gemini CLI offers several ways to configure its behavior, including environment variables, command-line arguments, and settings files. This document outlines the different configuration methods and available settings. + +## Configuration layers + +Configuration is applied in the following order of precedence (lower numbers are overridden by higher numbers): + +1. **Default values:** Hardcoded defaults within the application. +2. **System defaults file:** System-wide default settings that can be overridden by other settings files. +3. **User settings file:** Global settings for the current user. +4. **Project settings file:** Project-specific settings. +5. **System settings file:** System-wide settings that override all other settings files. +6. **Environment variables:** System-wide or session-specific variables, potentially loaded from `.env` files. +7. **Command-line arguments:** Values passed when launching the CLI. + +## Settings files + +Gemini CLI uses JSON settings files for persistent configuration. 
There are four locations for these files: + +- **System defaults file:** + - **Location:** `/etc/gemini-cli/system-defaults.json` (Linux), `C:\ProgramData\gemini-cli\system-defaults.json` (Windows) or `/Library/Application Support/GeminiCli/system-defaults.json` (macOS). The path can be overridden using the `GEMINI_CLI_SYSTEM_DEFAULTS_PATH` environment variable. + - **Scope:** Provides a base layer of system-wide default settings. These settings have the lowest precedence and are intended to be overridden by user, project, or system override settings. +- **User settings file:** + - **Location:** `~/.gemini/settings.json` (where `~` is your home directory). + - **Scope:** Applies to all Gemini CLI sessions for the current user. User settings override system defaults. +- **Project settings file:** + - **Location:** `.gemini/settings.json` within your project's root directory. + - **Scope:** Applies only when running Gemini CLI from that specific project. Project settings override user settings and system defaults. +- **System settings file:** + - **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS). The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable. + - **Scope:** Applies to all Gemini CLI sessions on the system, for all users. System settings act as overrides, taking precedence over all other settings files. May be useful for system administrators at enterprises to have controls over users' Gemini CLI setups. + +**Note on environment variables in settings:** String values within your `settings.json` files can reference environment variables using either `$VAR_NAME` or `${VAR_NAME}` syntax. These variables will be automatically resolved when the settings are loaded. For example, if you have an environment variable `MY_API_TOKEN`, you could use it in `settings.json` like this: `"apiKey": "$MY_API_TOKEN"`. 
+ +> **Note for Enterprise Users:** For guidance on deploying and managing Gemini CLI in a corporate environment, please see the [Enterprise Configuration](./enterprise.md) documentation. + +### The `.gemini` directory in your project + +In addition to a project settings file, a project's `.gemini` directory can contain other project-specific files related to Gemini CLI's operation, such as: + +- [Custom sandbox profiles](#sandboxing) (e.g., `.gemini/sandbox-macos-custom.sb`, `.gemini/sandbox.Dockerfile`). + +### Available settings in `settings.json`: + +- **`contextFileName`** (string or array of strings): + - **Description:** Specifies the filename for context files (e.g., `GEMINI.md`, `AGENTS.md`). Can be a single filename or a list of accepted filenames. + - **Default:** `GEMINI.md` + - **Example:** `"contextFileName": "AGENTS.md"` + +- **`bugCommand`** (object): + - **Description:** Overrides the default URL for the `/bug` command. + - **Default:** `"urlTemplate": "https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml&title={title}&info={info}"` + - **Properties:** + - **`urlTemplate`** (string): A URL that can contain `{title}` and `{info}` placeholders. + - **Example:** + ```json + "bugCommand": { + "urlTemplate": "https://bug.example.com/new?title={title}&info={info}" + } + ``` + +- **`fileFiltering`** (object): + - **Description:** Controls git-aware file filtering behavior for @ commands and file discovery tools. + - **Default:** `"respectGitIgnore": true, "enableRecursiveFileSearch": true` + - **Properties:** + - **`respectGitIgnore`** (boolean): Whether to respect .gitignore patterns when discovering files. When set to `true`, git-ignored files (like `node_modules/`, `dist/`, `.env`) are automatically excluded from @ commands and file listing operations. + - **`enableRecursiveFileSearch`** (boolean): Whether to enable searching recursively for filenames under the current tree when completing @ prefixes in the prompt. 
+ - **`disableFuzzySearch`** (boolean): When `true`, disables the fuzzy search capabilities when searching for files, which can improve performance on projects with a large number of files. + - **Example:** + ```json + "fileFiltering": { + "respectGitIgnore": true, + "enableRecursiveFileSearch": false, + "disableFuzzySearch": true + } + ``` + +### Troubleshooting File Search Performance + +If you are experiencing performance issues with file searching (e.g., with `@` completions), especially in projects with a very large number of files, here are a few things you can try in order of recommendation: + +1. **Use `.geminiignore`:** Create a `.geminiignore` file in your project root to exclude directories that contain a large number of files that you don't need to reference (e.g., build artifacts, logs, `node_modules`). Reducing the total number of files crawled is the most effective way to improve performance. + +2. **Disable Fuzzy Search:** If ignoring files is not enough, you can disable fuzzy search by setting `disableFuzzySearch` to `true` in your `settings.json` file. This will use a simpler, non-fuzzy matching algorithm, which can be faster. + +3. **Disable Recursive File Search:** As a last resort, you can disable recursive file search entirely by setting `enableRecursiveFileSearch` to `false`. This will be the fastest option as it avoids a recursive crawl of your project. However, it means you will need to type the full path to files when using `@` completions. + +- **`coreTools`** (array of strings): + - **Description:** Allows you to specify a list of core tool names that should be made available to the model. This can be used to restrict the set of built-in tools. See [Built-in Tools](../core/tools-api.md#built-in-tools) for a list of core tools. You can also specify command-specific restrictions for tools that support it, like the `ShellTool`. For example, `"coreTools": ["ShellTool(ls -l)"]` will only allow the `ls -l` command to be executed. 
+ - **Default:** All tools available for use by the Gemini model. + - **Example:** `"coreTools": ["ReadFileTool", "GlobTool", "ShellTool(ls)"]`. + +- **`allowedTools`** (array of strings): + - **Default:** `undefined` + - **Description:** A list of tool names that will bypass the confirmation dialog. This is useful for tools that you trust and use frequently. The match semantics are the same as `coreTools`. + - **Example:** `"allowedTools": ["ShellTool(git status)"]`. + +- **`excludeTools`** (array of strings): + - **Description:** Allows you to specify a list of core tool names that should be excluded from the model. A tool listed in both `excludeTools` and `coreTools` is excluded. You can also specify command-specific restrictions for tools that support it, like the `ShellTool`. For example, `"excludeTools": ["ShellTool(rm -rf)"]` will block the `rm -rf` command. + - **Default**: No tools excluded. + - **Example:** `"excludeTools": ["run_shell_command", "findFiles"]`. + - **Security Note:** Command-specific restrictions in + `excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands + that can be executed. + +- **`allowMCPServers`** (array of strings): + - **Description:** Allows you to specify a list of MCP server names that should be made available to the model. This can be used to restrict the set of MCP servers to connect to. Note that this will be ignored if `--allowed-mcp-server-names` is set. + - **Default:** All MCP servers are available for use by the Gemini model. + - **Example:** `"allowMCPServers": ["myPythonServer"]`. + - **Security Note:** This uses simple string matching on MCP server names, which can be modified. 
If you're a system administrator looking to prevent users from bypassing this, consider configuring the `mcpServers` at the system settings level such that the user will not be able to configure any MCP servers of their own. This should not be used as an airtight security mechanism. + +- **`excludeMCPServers`** (array of strings): + - **Description:** Allows you to specify a list of MCP server names that should be excluded from the model. A server listed in both `excludeMCPServers` and `allowMCPServers` is excluded. Note that this will be ignored if `--allowed-mcp-server-names` is set. + - **Default**: No MCP servers excluded. + - **Example:** `"excludeMCPServers": ["myNodeServer"]`. + - **Security Note:** This uses simple string matching on MCP server names, which can be modified. If you're a system administrator looking to prevent users from bypassing this, consider configuring the `mcpServers` at the system settings level such that the user will not be able to configure any MCP servers of their own. This should not be used as an airtight security mechanism. + +- **`autoAccept`** (boolean): + - **Description:** Controls whether the CLI automatically accepts and executes tool calls that are considered safe (e.g., read-only operations) without explicit user confirmation. If set to `true`, the CLI will bypass the confirmation prompt for tools deemed safe. + - **Default:** `false` + - **Example:** `"autoAccept": true` + +- **`theme`** (string): + - **Description:** Sets the visual [theme](./themes.md) for Gemini CLI. + - **Default:** `"Default"` + - **Example:** `"theme": "GitHub"` + +- **`vimMode`** (boolean): + - **Description:** Enables or disables vim mode for input editing. When enabled, the input area supports vim-style navigation and editing commands with NORMAL and INSERT modes. The vim mode status is displayed in the footer and persists between sessions. 
+ - **Default:** `false` + - **Example:** `"vimMode": true` + +- **`sandbox`** (boolean or string): + - **Description:** Controls whether and how to use sandboxing for tool execution. If set to `true`, Gemini CLI uses a pre-built `gemini-cli-sandbox` Docker image. For more information, see [Sandboxing](#sandboxing). + - **Default:** `false` + - **Example:** `"sandbox": "docker"` + +- **`toolDiscoveryCommand`** (string): + - **Description:** Defines a custom shell command for discovering tools from your project. The shell command must return on `stdout` a JSON array of [function declarations](https://ai.google.dev/gemini-api/docs/function-calling#function-declarations). Tool wrappers are optional. + - **Default:** Empty + - **Example:** `"toolDiscoveryCommand": "bin/get_tools"` + +- **`toolCallCommand`** (string): + - **Description:** Defines a custom shell command for calling a specific tool that was discovered using `toolDiscoveryCommand`. The shell command must meet the following criteria: + - It must take function `name` (exactly as in [function declaration](https://ai.google.dev/gemini-api/docs/function-calling#function-declarations)) as first command line argument. + - It must read function arguments as JSON on `stdin`, analogous to [`functionCall.args`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functioncall). + - It must return function output as JSON on `stdout`, analogous to [`functionResponse.response.content`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functionresponse). + - **Default:** Empty + - **Example:** `"toolCallCommand": "bin/call_tool"` + +- **`mcpServers`** (object): + - **Description:** Configures connections to one or more Model-Context Protocol (MCP) servers for discovering and using custom tools. Gemini CLI attempts to connect to each configured MCP server to discover available tools. 
If multiple MCP servers expose a tool with the same name, the tool names will be prefixed with the server alias you defined in the configuration (e.g., `serverAlias__actualToolName`) to avoid conflicts. Note that the system might strip certain schema properties from MCP tool definitions for compatibility. At least one of `command`, `url`, or `httpUrl` must be provided. If multiple are specified, the order of precedence is `httpUrl`, then `url`, then `command`. + - **Default:** Empty + - **Properties:** + - **`<SERVER_NAME>`** (object): The server parameters for the named server. + - `command` (string, optional): The command to execute to start the MCP server via standard I/O. + - `args` (array of strings, optional): Arguments to pass to the command. + - `env` (object, optional): Environment variables to set for the server process. + - `cwd` (string, optional): The working directory in which to start the server. + - `url` (string, optional): The URL of an MCP server that uses Server-Sent Events (SSE) for communication. + - `httpUrl` (string, optional): The URL of an MCP server that uses streamable HTTP for communication. + - `headers` (object, optional): A map of HTTP headers to send with requests to `url` or `httpUrl`. + - `timeout` (number, optional): Timeout in milliseconds for requests to this MCP server. + - `trust` (boolean, optional): Trust this server and bypass all tool call confirmations. + - `description` (string, optional): A brief description of the server, which may be used for display purposes. + - `includeTools` (array of strings, optional): List of tool names to include from this MCP server. When specified, only the tools listed here will be available from this server (whitelist behavior). If not specified, all tools from the server are enabled by default. + - `excludeTools` (array of strings, optional): List of tool names to exclude from this MCP server. Tools listed here will not be available to the model, even if they are exposed by the server. 
**Note:** `excludeTools` takes precedence over `includeTools` - if a tool is in both lists, it will be excluded. + - **Example:** + ```json + "mcpServers": { + "myPythonServer": { + "command": "python", + "args": ["mcp_server.py", "--port", "8080"], + "cwd": "./mcp_tools/python", + "timeout": 5000, + "includeTools": ["safe_tool", "file_reader"], + }, + "myNodeServer": { + "command": "node", + "args": ["mcp_server.js"], + "cwd": "./mcp_tools/node", + "excludeTools": ["dangerous_tool", "file_deleter"] + }, + "myDockerServer": { + "command": "docker", + "args": ["run", "-i", "--rm", "-e", "API_KEY", "ghcr.io/foo/bar"], + "env": { + "API_KEY": "$MY_API_TOKEN" + } + }, + "mySseServer": { + "url": "http://localhost:8081/events", + "headers": { + "Authorization": "Bearer $MY_SSE_TOKEN" + }, + "description": "An example SSE-based MCP server." + }, + "myStreamableHttpServer": { + "httpUrl": "http://localhost:8082/stream", + "headers": { + "X-API-Key": "$MY_HTTP_API_KEY" + }, + "description": "An example Streamable HTTP-based MCP server." + } + } + ``` + +- **`checkpointing`** (object): + - **Description:** Configures the checkpointing feature, which allows you to save and restore conversation and file states. See the [Checkpointing documentation](../checkpointing.md) for more details. + - **Default:** `{"enabled": false}` + - **Properties:** + - **`enabled`** (boolean): When `true`, the `/restore` command is available. + +- **`preferredEditor`** (string): + - **Description:** Specifies the preferred editor to use for viewing diffs. + - **Default:** `vscode` + - **Example:** `"preferredEditor": "vscode"` + +- **`telemetry`** (object) + - **Description:** Configures logging and metrics collection for Gemini CLI. For more information, see [Telemetry](../telemetry.md). + - **Default:** `{"enabled": false, "target": "local", "otlpEndpoint": "http://localhost:4317", "logPrompts": true}` + - **Properties:** + - **`enabled`** (boolean): Whether or not telemetry is enabled. 
+ - **`target`** (string): The destination for collected telemetry. Supported values are `local` and `gcp`. + - **`otlpEndpoint`** (string): The endpoint for the OTLP Exporter. + - **`logPrompts`** (boolean): Whether or not to include the content of user prompts in the logs. + - **Example:** + ```json + "telemetry": { + "enabled": true, + "target": "local", + "otlpEndpoint": "http://localhost:16686", + "logPrompts": false + } + ``` +- **`usageStatisticsEnabled`** (boolean): + - **Description:** Enables or disables the collection of usage statistics. See [Usage Statistics](#usage-statistics) for more information. + - **Default:** `true` + - **Example:** + ```json + "usageStatisticsEnabled": false + ``` + +- **`hideTips`** (boolean): + - **Description:** Enables or disables helpful tips in the CLI interface. + - **Default:** `false` + - **Example:** + + ```json + "hideTips": true + ``` + +- **`hideBanner`** (boolean): + - **Description:** Enables or disables the startup banner (ASCII art logo) in the CLI interface. + - **Default:** `false` + - **Example:** + + ```json + "hideBanner": true + ``` + +- **`maxSessionTurns`** (number): + - **Description:** Sets the maximum number of turns for a session. If the session exceeds this limit, the CLI will stop processing and start a new chat. + - **Default:** `-1` (unlimited) + - **Example:** + ```json + "maxSessionTurns": 10 + ``` + +- **`summarizeToolOutput`** (object): + - **Description:** Enables or disables the summarization of tool output. You can specify the token budget for the summarization using the `tokenBudget` setting. + - Note: Currently only the `run_shell_command` tool is supported. + - **Default:** `{}` (Disabled by default) + - **Example:** + ```json + "summarizeToolOutput": { + "run_shell_command": { + "tokenBudget": 2000 + } + } + ``` + +- **`excludedProjectEnvVars`** (array of strings): + - **Description:** Specifies environment variables that should be excluded from being loaded from project `.env` files. 
This prevents project-specific environment variables (like `DEBUG=true`) from interfering with gemini-cli behavior. Variables from `.gemini/.env` files are never excluded. + - **Default:** `["DEBUG", "DEBUG_MODE"]` + - **Example:** + ```json + "excludedProjectEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"] + ``` + +- **`includeDirectories`** (array of strings): + - **Description:** Specifies an array of additional absolute or relative paths to include in the workspace context. Missing directories will be skipped with a warning by default. Paths can use `~` to refer to the user's home directory. This setting can be combined with the `--include-directories` command-line flag. + - **Default:** `[]` + - **Example:** + ```json + "includeDirectories": [ + "/path/to/another/project", + "../shared-library", + "~/common-utils" + ] + ``` + +- **`loadMemoryFromIncludeDirectories`** (boolean): + - **Description:** Controls the behavior of the `/memory refresh` command. If set to `true`, `GEMINI.md` files should be loaded from all directories that are added. If set to `false`, `GEMINI.md` should only be loaded from the current directory. + - **Default:** `false` + - **Example:** + ```json + "loadMemoryFromIncludeDirectories": true + ``` + +- **`chatCompression`** (object): + - **Description:** Controls the settings for chat history compression, both automatic and + when manually invoked through the /compress command. + - **Properties:** + - **`contextPercentageThreshold`** (number): A value between 0 and 1 that specifies the token threshold for compression as a percentage of the model's total token limit. For example, a value of `0.6` will trigger compression when the chat history exceeds 60% of the token limit. + - **Example:** + ```json + "chatCompression": { + "contextPercentageThreshold": 0.6 + } + ``` + +- **`showLineNumbers`** (boolean): + - **Description:** Controls whether line numbers are displayed in code blocks in the CLI output. 
+ - **Default:** `true` + - **Example:** + ```json + "showLineNumbers": false + ``` + +- **`accessibility`** (object): + - **Description:** Configures accessibility features for the CLI. + - **Properties:** + - **`screenReader`** (boolean): Enables screen reader mode, which adjusts the TUI for better compatibility with screen readers. This can also be enabled with the `--screen-reader` command-line flag, which will take precedence over the setting. + - **`disableLoadingPhrases`** (boolean): Disables the display of loading phrases during operations. + - **Default:** `{"screenReader": false, "disableLoadingPhrases": false}` + - **Example:** + ```json + "accessibility": { + "screenReader": true, + "disableLoadingPhrases": true + } + ``` + +### Example `settings.json`: + +```json +{ + "theme": "GitHub", + "sandbox": "docker", + "toolDiscoveryCommand": "bin/get_tools", + "toolCallCommand": "bin/call_tool", + "mcpServers": { + "mainServer": { + "command": "bin/mcp_server.py" + }, + "anotherServer": { + "command": "node", + "args": ["mcp_server.js", "--verbose"] + } + }, + "telemetry": { + "enabled": true, + "target": "local", + "otlpEndpoint": "http://localhost:4317", + "logPrompts": true + }, + "usageStatisticsEnabled": true, + "hideTips": false, + "hideBanner": false, + "maxSessionTurns": 10, + "summarizeToolOutput": { + "run_shell_command": { + "tokenBudget": 100 + } + }, + "excludedProjectEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"], + "includeDirectories": ["path/to/dir1", "~/path/to/dir2", "../path/to/dir3"], + "loadMemoryFromIncludeDirectories": true +} +``` + +## Shell History + +The CLI keeps a history of shell commands you run. To avoid conflicts between different projects, this history is stored in a project-specific directory within your user's home folder. + +- **Location:** `~/.gemini/tmp/<project_hash>/shell_history` + - `<project_hash>` is a unique identifier generated from your project's root path. + - The history is stored in a file named `shell_history`.
+ +## Environment Variables & `.env` Files + +Environment variables are a common way to configure applications, especially for sensitive information like API keys or for settings that might change between environments. For authentication setup, see the [Authentication documentation](./authentication.md) which covers all available authentication methods. + +The CLI automatically loads environment variables from an `.env` file. The loading order is: + +1. `.env` file in the current working directory. +2. If not found, it searches upwards in parent directories until it finds an `.env` file or reaches the project root (identified by a `.git` folder) or the home directory. +3. If still not found, it looks for `~/.env` (in the user's home directory). + +**Environment Variable Exclusion:** Some environment variables (like `DEBUG` and `DEBUG_MODE`) are automatically excluded from being loaded from project `.env` files to prevent interference with gemini-cli behavior. Variables from `.gemini/.env` files are never excluded. You can customize this behavior using the `excludedProjectEnvVars` setting in your `settings.json` file. + +- **`GEMINI_API_KEY`**: + - Your API key for the Gemini API. + - One of several available [authentication methods](./authentication.md). + - Set this in your shell profile (e.g., `~/.bashrc`, `~/.zshrc`) or an `.env` file. +- **`GEMINI_MODEL`**: + - Specifies the default Gemini model to use. + - Overrides the hardcoded default + - Example: `export GEMINI_MODEL="gemini-2.5-flash"` +- **`GOOGLE_API_KEY`**: + - Your Google Cloud API key. + - Required for using Vertex AI in express mode. + - Ensure you have the necessary permissions. + - Example: `export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"`. +- **`GOOGLE_CLOUD_PROJECT`**: + - Your Google Cloud Project ID. + - Required for using Code Assist or Vertex AI. + - If using Vertex AI, ensure you have the necessary permissions in this project. 
+ - **Cloud Shell Note:** When running in a Cloud Shell environment, this variable defaults to a special project allocated for Cloud Shell users. If you have `GOOGLE_CLOUD_PROJECT` set in your global environment in Cloud Shell, it will be overridden by this default. To use a different project in Cloud Shell, you must define `GOOGLE_CLOUD_PROJECT` in a `.env` file. + - Example: `export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`. +- **`GOOGLE_APPLICATION_CREDENTIALS`** (string): + - **Description:** The path to your Google Application Credentials JSON file. + - **Example:** `export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/credentials.json"` +- **`OTLP_GOOGLE_CLOUD_PROJECT`**: + - Your Google Cloud Project ID for Telemetry in Google Cloud + - Example: `export OTLP_GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`. +- **`GOOGLE_CLOUD_LOCATION`**: + - Your Google Cloud Project Location (e.g., us-central1). + - Required for using Vertex AI in non-express mode. + - Example: `export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"`. +- **`GEMINI_SANDBOX`**: + - Alternative to the `sandbox` setting in `settings.json`. + - Accepts `true`, `false`, `docker`, `podman`, or a custom command string. +- **`SEATBELT_PROFILE`** (macOS specific): + - Switches the Seatbelt (`sandbox-exec`) profile on macOS. + - `permissive-open`: (Default) Restricts writes to the project folder (and a few other folders, see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) but allows other operations. + - `strict`: Uses a strict profile that declines operations by default. + - `<profile_name>`: Uses a custom profile. To define a custom profile, create a file named `sandbox-macos-<profile_name>.sb` in your project's `.gemini/` directory (e.g., `my-project/.gemini/sandbox-macos-custom.sb`). +- **`DEBUG` or `DEBUG_MODE`** (often used by underlying libraries or the CLI itself): + - Set to `true` or `1` to enable verbose debug logging, which can be helpful for troubleshooting.
+ - **Note:** These variables are automatically excluded from project `.env` files by default to prevent interference with gemini-cli behavior. Use `.gemini/.env` files if you need to set these for gemini-cli specifically. +- **`NO_COLOR`**: + - Set to any value to disable all color output in the CLI. +- **`CLI_TITLE`**: + - Set to a string to customize the title of the CLI. +- **`CODE_ASSIST_ENDPOINT`**: + - Specifies the endpoint for the code assist server. + - This is useful for development and testing. + +## Command-Line Arguments + +Arguments passed directly when running the CLI can override other configurations for that specific session. + +- **`--model <model_name>`** (**`-m <model_name>`**): + - Specifies the Gemini model to use for this session. + - Example: `npm start -- --model gemini-1.5-pro-latest` +- **`--prompt <your_prompt>`** (**`-p <your_prompt>`**): + - Used to pass a prompt directly to the command. This invokes Gemini CLI in a non-interactive mode. +- **`--prompt-interactive <your_prompt>`** (**`-i <your_prompt>`**): + - Starts an interactive session with the provided prompt as the initial input. + - The prompt is processed within the interactive session, not before it. + - Cannot be used when piping input from stdin. + - Example: `gemini -i "explain this code"` +- **`--sandbox`** (**`-s`**): + - Enables sandbox mode for this session. +- **`--sandbox-image`**: + - Sets the sandbox image URI. +- **`--debug`** (**`-d`**): + - Enables debug mode for this session, providing more verbose output. +- **`--all-files`** (**`-a`**): + - If set, recursively includes all files within the current directory as context for the prompt. +- **`--help`** (or **`-h`**): + - Displays help information about command-line arguments. +- **`--show-memory-usage`**: + - Displays the current memory usage. +- **`--yolo`**: + - Enables YOLO mode, which automatically approves all tool calls. +- **`--approval-mode <mode>`**: + - Sets the approval mode for tool calls.
Available modes: + - `default`: Prompt for approval on each tool call (default behavior) + - `auto_edit`: Automatically approve edit tools (replace, write_file) while prompting for others + - `yolo`: Automatically approve all tool calls (equivalent to `--yolo`) + - Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach. + - Example: `gemini --approval-mode auto_edit` +- **`--allowed-tools <tool1,tool2,...>`**: + - A comma-separated list of tool names that will bypass the confirmation dialog. + - Example: `gemini --allowed-tools "ShellTool(git status)"` +- **`--telemetry`**: + - Enables [telemetry](../telemetry.md). +- **`--telemetry-target`**: + - Sets the telemetry target. See [telemetry](../telemetry.md) for more information. +- **`--telemetry-otlp-endpoint`**: + - Sets the OTLP endpoint for telemetry. See [telemetry](../telemetry.md) for more information. +- **`--telemetry-otlp-protocol`**: + - Sets the OTLP protocol for telemetry (`grpc` or `http`). Defaults to `grpc`. See [telemetry](../telemetry.md) for more information. +- **`--telemetry-log-prompts`**: + - Enables logging of prompts for telemetry. See [telemetry](../telemetry.md) for more information. +- **`--checkpointing`**: + - Enables [checkpointing](../checkpointing.md). +- **`--extensions <extension_name>`** (**`-e <extension_name>`**): + - Specifies a list of extensions to use for the session. If not provided, all available extensions are used. + - Use the special term `gemini -e none` to disable all extensions. + - Example: `gemini -e my-extension -e my-other-extension` +- **`--list-extensions`** (**`-l`**): + - Lists all available extensions and exits. +- **`--proxy`**: + - Sets the proxy for the CLI. + - Example: `--proxy http://localhost:7890`. +- **`--include-directories <dir1,dir2,...>`**: + - Includes additional directories in the workspace for multi-directory support. + - Can be specified multiple times or as comma-separated values. + - A maximum of 5 directories can be added.
+ - Example: `--include-directories /path/to/project1,/path/to/project2` or `--include-directories /path/to/project1 --include-directories /path/to/project2` +- **`--screen-reader`**: + - Enables screen reader mode for accessibility. +- **`--version`**: + - Displays the version of the CLI. + +## Context Files (Hierarchical Instructional Context) + +While not strictly configuration for the CLI's _behavior_, context files (defaulting to `GEMINI.md` but configurable via the `contextFileName` setting) are crucial for configuring the _instructional context_ (also referred to as "memory") provided to the Gemini model. This powerful feature allows you to give project-specific instructions, coding style guides, or any relevant background information to the AI, making its responses more tailored and accurate to your needs. The CLI includes UI elements, such as an indicator in the footer showing the number of loaded context files, to keep you informed about the active context. + +- **Purpose:** These Markdown files contain instructions, guidelines, or context that you want the Gemini model to be aware of during your interactions. The system is designed to manage this instructional context hierarchically. + +### Example Context File Content (e.g., `GEMINI.md`) + +Here's a conceptual example of what a context file at the root of a TypeScript project might contain: + +```markdown +# Project: My Awesome TypeScript Library + +## General Instructions: + +- When generating new TypeScript code, please follow the existing coding style. +- Ensure all new functions and classes have JSDoc comments. +- Prefer functional programming paradigms where appropriate. +- All code should be compatible with TypeScript 5.0 and Node.js 20+. + +## Coding Style: + +- Use 2 spaces for indentation. +- Interface names should be prefixed with `I` (e.g., `IUserService`). +- Private class members should be prefixed with an underscore (`_`). +- Always use strict equality (`===` and `!==`). 
+ +## Specific Component: `src/api/client.ts` + +- This file handles all outbound API requests. +- When adding new API call functions, ensure they include robust error handling and logging. +- Use the existing `fetchWithRetry` utility for all GET requests. + +## Regarding Dependencies: + +- Avoid introducing new external dependencies unless absolutely necessary. +- If a new dependency is required, please state the reason. +``` + +This example demonstrates how you can provide general project context, specific coding conventions, and even notes about particular files or components. The more relevant and precise your context files are, the better the AI can assist you. Project-specific context files are highly encouraged to establish conventions and context. + +- **Hierarchical Loading and Precedence:** The CLI implements a sophisticated hierarchical memory system by loading context files (e.g., `GEMINI.md`) from several locations. Content from files lower in this list (more specific) typically overrides or supplements content from files higher up (more general). The exact concatenation order and final context can be inspected using the `/memory show` command. The typical loading order is: + 1. **Global Context File:** + - Location: `~/.gemini/` (e.g., `~/.gemini/GEMINI.md` in your user home directory). + - Scope: Provides default instructions for all your projects. + 2. **Project Root & Ancestors Context Files:** + - Location: The CLI searches for the configured context file in the current working directory and then in each parent directory up to either the project root (identified by a `.git` folder) or your home directory. + - Scope: Provides context relevant to the entire project or a significant portion of it. + 3. **Sub-directory Context Files (Contextual/Local):** + - Location: The CLI also scans for the configured context file in subdirectories _below_ the current working directory (respecting common ignore patterns like `node_modules`, `.git`, etc.). 
The breadth of this search is limited to 200 directories by default, but can be configured with a `memoryDiscoveryMaxDirs` field in your `settings.json` file. + - Scope: Allows for highly specific instructions relevant to a particular component, module, or subsection of your project. +- **Concatenation & UI Indication:** The contents of all found context files are concatenated (with separators indicating their origin and path) and provided as part of the system prompt to the Gemini model. The CLI footer displays the count of loaded context files, giving you a quick visual cue about the active instructional context. +- **Importing Content:** You can modularize your context files by importing other Markdown files using the `@path/to/file.md` syntax. For more details, see the [Memory Import Processor documentation](../core/memport.md). +- **Commands for Memory Management:** + - Use `/memory refresh` to force a re-scan and reload of all context files from all configured locations. This updates the AI's instructional context. + - Use `/memory show` to display the combined instructional context currently loaded, allowing you to verify the hierarchy and content being used by the AI. + - See the [Commands documentation](./commands.md#memory) for full details on the `/memory` command and its sub-commands (`show` and `refresh`). + +By understanding and utilizing these configuration layers and the hierarchical nature of context files, you can effectively manage the AI's memory and tailor the Gemini CLI's responses to your specific needs and projects. + +## Sandboxing + +The Gemini CLI can execute potentially unsafe operations (like shell commands and file modifications) within a sandboxed environment to protect your system. + +Sandboxing is disabled by default, but you can enable it in a few ways: + +- Using `--sandbox` or `-s` flag. +- Setting `GEMINI_SANDBOX` environment variable. +- Sandbox is enabled when using `--yolo` or `--approval-mode=yolo` by default. 
+ +By default, it uses a pre-built `gemini-cli-sandbox` Docker image. + +For project-specific sandboxing needs, you can create a custom Dockerfile at `.gemini/sandbox.Dockerfile` in your project's root directory. This Dockerfile can be based on the base sandbox image: + +```dockerfile +FROM gemini-cli-sandbox + +# Add your custom dependencies or configurations here +# For example: +# RUN apt-get update && apt-get install -y some-package +# COPY ./my-config /app/my-config +``` + +When `.gemini/sandbox.Dockerfile` exists, you can use `BUILD_SANDBOX` environment variable when running Gemini CLI to automatically build the custom sandbox image: + +```bash +BUILD_SANDBOX=1 gemini -s +``` + +## Usage Statistics + +To help us improve the Gemini CLI, we collect anonymized usage statistics. This data helps us understand how the CLI is used, identify common issues, and prioritize new features. + +**What we collect:** + +- **Tool Calls:** We log the names of the tools that are called, whether they succeed or fail, and how long they take to execute. We do not collect the arguments passed to the tools or any data returned by them. +- **API Requests:** We log the Gemini model used for each request, the duration of the request, and whether it was successful. We do not collect the content of the prompts or responses. +- **Session Information:** We collect information about the configuration of the CLI, such as the enabled tools and the approval mode. + +**What we DON'T collect:** + +- **Personally Identifiable Information (PII):** We do not collect any personal information, such as your name, email address, or API keys. +- **Prompt and Response Content:** We do not log the content of your prompts or the responses from the Gemini model. +- **File Content:** We do not log the content of any files that are read or written by the CLI. 
+ +**How to opt out:** + +You can opt out of usage statistics collection at any time by setting the `usageStatisticsEnabled` property to `false` in your `settings.json` file: + +```json +{ + "usageStatisticsEnabled": false +} +``` diff --git a/projects/gemini-cli/docs/cli/configuration.md b/projects/gemini-cli/docs/cli/configuration.md new file mode 100644 index 0000000000000000000000000000000000000000..bc114f178b83526d2d8dc9369506fffc3f273c73 --- /dev/null +++ b/projects/gemini-cli/docs/cli/configuration.md @@ -0,0 +1,612 @@ +# Gemini CLI Configuration + +**Note on New Configuration Format** + +The format of the `settings.json` file has been updated to a new, more organized structure. + +- The new format will be supported in the stable release starting **[09/10/25]**. +- Automatic migration from the old format to the new format will begin on **[09/17/25]**. + +For details on the previous format, please see the [v1 Configuration documentation](./configuration-v1.md). + +Gemini CLI offers several ways to configure its behavior, including environment variables, command-line arguments, and settings files. This document outlines the different configuration methods and available settings. + +## Configuration layers + +Configuration is applied in the following order of precedence (lower numbers are overridden by higher numbers): + +1. **Default values:** Hardcoded defaults within the application. +2. **System defaults file:** System-wide default settings that can be overridden by other settings files. +3. **User settings file:** Global settings for the current user. +4. **Project settings file:** Project-specific settings. +5. **System settings file:** System-wide settings that override all other settings files. +6. **Environment variables:** System-wide or session-specific variables, potentially loaded from `.env` files. +7. **Command-line arguments:** Values passed when launching the CLI. 
+ +## Settings files + +Gemini CLI uses JSON settings files for persistent configuration. There are four locations for these files: + +- **System defaults file:** + - **Location:** `/etc/gemini-cli/system-defaults.json` (Linux), `C:\ProgramData\gemini-cli\system-defaults.json` (Windows) or `/Library/Application Support/GeminiCli/system-defaults.json` (macOS). The path can be overridden using the `GEMINI_CLI_SYSTEM_DEFAULTS_PATH` environment variable. + - **Scope:** Provides a base layer of system-wide default settings. These settings have the lowest precedence and are intended to be overridden by user, project, or system override settings. +- **User settings file:** + - **Location:** `~/.gemini/settings.json` (where `~` is your home directory). + - **Scope:** Applies to all Gemini CLI sessions for the current user. User settings override system defaults. +- **Project settings file:** + - **Location:** `.gemini/settings.json` within your project's root directory. + - **Scope:** Applies only when running Gemini CLI from that specific project. Project settings override user settings and system defaults. +- **System settings file:** + - **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS). The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable. + - **Scope:** Applies to all Gemini CLI sessions on the system, for all users. System settings act as overrides, taking precedence over all other settings files. May be useful for system administrators at enterprises to have controls over users' Gemini CLI setups. + +**Note on environment variables in settings:** String values within your `settings.json` files can reference environment variables using either `$VAR_NAME` or `${VAR_NAME}` syntax. These variables will be automatically resolved when the settings are loaded. 
For example, if you have an environment variable `MY_API_TOKEN`, you could use it in `settings.json` like this: `"apiKey": "$MY_API_TOKEN"`. + +> **Note for Enterprise Users:** For guidance on deploying and managing Gemini CLI in a corporate environment, please see the [Enterprise Configuration](./enterprise.md) documentation. + +### The `.gemini` directory in your project + +In addition to a project settings file, a project's `.gemini` directory can contain other project-specific files related to Gemini CLI's operation, such as: + +- [Custom sandbox profiles](#sandboxing) (e.g., `.gemini/sandbox-macos-custom.sb`, `.gemini/sandbox.Dockerfile`). + +### Available settings in `settings.json` + +Settings are organized into categories. All settings should be placed within their corresponding top-level category object in your `settings.json` file. + +#### `general` + +- **`general.preferredEditor`** (string): + - **Description:** The preferred editor to open files in. + - **Default:** `undefined` + +- **`general.vimMode`** (boolean): + - **Description:** Enable Vim keybindings. + - **Default:** `false` + +- **`general.disableAutoUpdate`** (boolean): + - **Description:** Disable automatic updates. + - **Default:** `false` + +- **`general.disableUpdateNag`** (boolean): + - **Description:** Disable update notification prompts. + - **Default:** `false` + +- **`general.checkpointing.enabled`** (boolean): + - **Description:** Enable session checkpointing for recovery. + - **Default:** `false` + +#### `ui` + +- **`ui.theme`** (string): + - **Description:** The color theme for the UI. See [Themes](./themes.md) for available options. + - **Default:** `undefined` + +- **`ui.customThemes`** (object): + - **Description:** Custom theme definitions. + - **Default:** `{}` + +- **`ui.hideWindowTitle`** (boolean): + - **Description:** Hide the window title bar. + - **Default:** `false` + +- **`ui.hideTips`** (boolean): + - **Description:** Hide helpful tips in the UI. 
+ - **Default:** `false` + +- **`ui.hideBanner`** (boolean): + - **Description:** Hide the application banner. + - **Default:** `false` + +- **`ui.hideFooter`** (boolean): + - **Description:** Hide the footer from the UI. + - **Default:** `false` + +- **`ui.showMemoryUsage`** (boolean): + - **Description:** Display memory usage information in the UI. + - **Default:** `false` + +- **`ui.showLineNumbers`** (boolean): + - **Description:** Show line numbers in the chat. + - **Default:** `false` + +- **`ui.accessibility.disableLoadingPhrases`** (boolean): + - **Description:** Disable loading phrases for accessibility. + - **Default:** `false` + +#### `ide` + +- **`ide.enabled`** (boolean): + - **Description:** Enable IDE integration mode. + - **Default:** `false` + +- **`ide.hasSeenNudge`** (boolean): + - **Description:** Whether the user has seen the IDE integration nudge. + - **Default:** `false` + +#### `privacy` + +- **`privacy.usageStatisticsEnabled`** (boolean): + - **Description:** Enable collection of usage statistics. + - **Default:** `true` + +#### `model` + +- **`model.name`** (string): + - **Description:** The Gemini model to use for conversations. + - **Default:** `undefined` + +- **`model.maxSessionTurns`** (number): + - **Description:** Maximum number of user/model/tool turns to keep in a session. -1 means unlimited. + - **Default:** `-1` + +- **`model.summarizeToolOutput`** (object): + - **Description:** Settings for summarizing tool output. + - **Default:** `undefined` + +- **`model.chatCompression`** (object): + - **Description:** Chat compression settings. + - **Default:** `undefined` + +- **`model.skipNextSpeakerCheck`** (boolean): + - **Description:** Skip the next speaker check. + - **Default:** `false` + +#### `context` + +- **`context.fileName`** (string or array of strings): + - **Description:** The name of the context file(s). 
+ - **Default:** `undefined` + +- **`context.importFormat`** (string): + - **Description:** The format to use when importing memory. + - **Default:** `undefined` + +- **`context.discoveryMaxDirs`** (number): + - **Description:** Maximum number of directories to search for memory. + - **Default:** `200` + +- **`context.includeDirectories`** (array): + - **Description:** Additional directories to include in the workspace context. Missing directories will be skipped with a warning. + - **Default:** `[]` + +- **`context.loadFromIncludeDirectories`** (boolean): + - **Description:** Whether to load memory files from include directories. + - **Default:** `false` + +- **`context.fileFiltering.respectGitIgnore`** (boolean): + - **Description:** Respect .gitignore files when searching. + - **Default:** `true` + +- **`context.fileFiltering.respectGeminiIgnore`** (boolean): + - **Description:** Respect .geminiignore files when searching. + - **Default:** `true` + +- **`context.fileFiltering.enableRecursiveFileSearch`** (boolean): + - **Description:** Enable recursive file search functionality. + - **Default:** `true` + +#### `tools` + +- **`tools.sandbox`** (boolean or string): + - **Description:** Sandbox execution environment (can be a boolean or a path string). + - **Default:** `undefined` + +- **`tools.usePty`** (boolean): + - **Description:** Use node-pty for shell command execution. Fallback to child_process still applies. + - **Default:** `false` + +- **`tools.core`** (array of strings): + - **Description:** Paths to core tool definitions. + - **Default:** `undefined` + +- **`tools.exclude`** (array of strings): + - **Description:** Tool names to exclude from discovery. + - **Default:** `undefined` + +- **`tools.discoveryCommand`** (string): + - **Description:** Command to run for tool discovery. + - **Default:** `undefined` + +- **`tools.callCommand`** (string): + - **Description:** Command to run for tool calls. 
+ - **Default:** `undefined` + +#### `mcp` + +- **`mcp.serverCommand`** (string): + - **Description:** Command to start an MCP server. + - **Default:** `undefined` + +- **`mcp.allowed`** (array of strings): + - **Description:** A whitelist of MCP servers to allow. + - **Default:** `undefined` + +- **`mcp.excluded`** (array of strings): + - **Description:** A blacklist of MCP servers to exclude. + - **Default:** `undefined` + +#### `security` + +- **`security.folderTrust.featureEnabled`** (boolean): + - **Description:** Enable folder trust feature for enhanced security. + - **Default:** `false` + +- **`security.folderTrust.enabled`** (boolean): + - **Description:** Setting to track whether Folder trust is enabled. + - **Default:** `false` + +- **`security.auth.selectedType`** (string): + - **Description:** The currently selected authentication type. + - **Default:** `undefined` + +- **`security.auth.useExternal`** (boolean): + - **Description:** Whether to use an external authentication flow. + - **Default:** `undefined` + +#### `advanced` + +- **`advanced.autoConfigureMemory`** (boolean): + - **Description:** Automatically configure Node.js memory limits. + - **Default:** `false` + +- **`advanced.dnsResolutionOrder`** (string): + - **Description:** The DNS resolution order. + - **Default:** `undefined` + +- **`advanced.excludedEnvVars`** (array of strings): + - **Description:** Environment variables to exclude from project context. + - **Default:** `["DEBUG","DEBUG_MODE"]` + +- **`advanced.bugCommand`** (object): + - **Description:** Configuration for the bug report command. + - **Default:** `undefined` + +#### Top-Level Settings + +The following settings remain at the top level of the `settings.json` file. + +- **`mcpServers`** (object): + - **Description:** Configures connections to one or more Model-Context Protocol (MCP) servers for discovering and using custom tools. Gemini CLI attempts to connect to each configured MCP server to discover available tools. 
If multiple MCP servers expose a tool with the same name, the tool names will be prefixed with the server alias you defined in the configuration (e.g., `serverAlias__actualToolName`) to avoid conflicts. Note that the system might strip certain schema properties from MCP tool definitions for compatibility. At least one of `command`, `url`, or `httpUrl` must be provided. If multiple are specified, the order of precedence is `httpUrl`, then `url`, then `command`. + - **Default:** `{}` + - **Properties:** + - **`<SERVER_NAME>`** (object): The server parameters for the named server. + - `command` (string, optional): The command to execute to start the MCP server via standard I/O. + - `args` (array of strings, optional): Arguments to pass to the command. + - `env` (object, optional): Environment variables to set for the server process. + - `cwd` (string, optional): The working directory in which to start the server. + - `url` (string, optional): The URL of an MCP server that uses Server-Sent Events (SSE) for communication. + - `httpUrl` (string, optional): The URL of an MCP server that uses streamable HTTP for communication. + - `headers` (object, optional): A map of HTTP headers to send with requests to `url` or `httpUrl`. + - `timeout` (number, optional): Timeout in milliseconds for requests to this MCP server. + - `trust` (boolean, optional): Trust this server and bypass all tool call confirmations. + - `description` (string, optional): A brief description of the server, which may be used for display purposes. + - `includeTools` (array of strings, optional): List of tool names to include from this MCP server. When specified, only the tools listed here will be available from this server (whitelist behavior). If not specified, all tools from the server are enabled by default. + - `excludeTools` (array of strings, optional): List of tool names to exclude from this MCP server. Tools listed here will not be available to the model, even if they are exposed by the server.
**Note:** `excludeTools` takes precedence over `includeTools` - if a tool is in both lists, it will be excluded. + +- **`telemetry`** (object) + - **Description:** Configures logging and metrics collection for Gemini CLI. For more information, see [Telemetry](../telemetry.md). + - **Default:** `undefined` + - **Properties:** + - **`enabled`** (boolean): Whether or not telemetry is enabled. + - **`target`** (string): The destination for collected telemetry. Supported values are `local` and `gcp`. + - **`otlpEndpoint`** (string): The endpoint for the OTLP Exporter. + - **`otlpProtocol`** (string): The protocol for the OTLP Exporter (`grpc` or `http`). + - **`logPrompts`** (boolean): Whether or not to include the content of user prompts in the logs. + - **`outfile`** (string): The file to write telemetry to when `target` is `local`. + +### Example `settings.json` + +Here is an example of a `settings.json` file with the new nested structure: + +```json +{ + "general": { + "vimMode": true, + "preferredEditor": "code" + }, + "ui": { + "theme": "GitHub", + "hideBanner": true, + "hideTips": false + }, + "tools": { + "sandbox": "docker", + "discoveryCommand": "bin/get_tools", + "callCommand": "bin/call_tool", + "exclude": ["write_file"] + }, + "mcpServers": { + "mainServer": { + "command": "bin/mcp_server.py" + }, + "anotherServer": { + "command": "node", + "args": ["mcp_server.js", "--verbose"] + } + }, + "telemetry": { + "enabled": true, + "target": "local", + "otlpEndpoint": "http://localhost:4317", + "logPrompts": true + }, + "privacy": { + "usageStatisticsEnabled": true + }, + "model": { + "name": "gemini-1.5-pro-latest", + "maxSessionTurns": 10, + "summarizeToolOutput": { + "run_shell_command": { + "tokenBudget": 100 + } + } + }, + "context": { + "fileName": ["CONTEXT.md", "GEMINI.md"], + "includeDirectories": ["path/to/dir1", "~/path/to/dir2", "../path/to/dir3"], + "loadFromIncludeDirectories": true, + "fileFiltering": { + "respectGitIgnore": false + } + }, + 
 "advanced": {
+    "excludedEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"]
+  }
+}
+```
+
+## Shell History
+
+The CLI keeps a history of shell commands you run. To avoid conflicts between different projects, this history is stored in a project-specific directory within your user's home folder.
+
+- **Location:** `~/.gemini/tmp/<project_hash>/shell_history`
+  - `<project_hash>` is a unique identifier generated from your project's root path.
+  - The history is stored in a file named `shell_history`.
+
+## Environment Variables & `.env` Files
+
+Environment variables are a common way to configure applications, especially for sensitive information like API keys or for settings that might change between environments. For authentication setup, see the [Authentication documentation](./authentication.md) which covers all available authentication methods.
+
+The CLI automatically loads environment variables from an `.env` file. The loading order is:
+
+1. `.env` file in the current working directory.
+2. If not found, it searches upwards in parent directories until it finds an `.env` file or reaches the project root (identified by a `.git` folder) or the home directory.
+3. If still not found, it looks for `~/.env` (in the user's home directory).
+
+**Environment Variable Exclusion:** Some environment variables (like `DEBUG` and `DEBUG_MODE`) are automatically excluded from being loaded from project `.env` files to prevent interference with gemini-cli behavior. Variables from `.gemini/.env` files are never excluded. You can customize this behavior using the `advanced.excludedEnvVars` setting in your `settings.json` file.
+
+- **`GEMINI_API_KEY`**:
+  - Your API key for the Gemini API.
+  - One of several available [authentication methods](./authentication.md).
+  - Set this in your shell profile (e.g., `~/.bashrc`, `~/.zshrc`) or an `.env` file.
+- **`GEMINI_MODEL`**:
+  - Specifies the default Gemini model to use. 
+ - Overrides the hardcoded default + - Example: `export GEMINI_MODEL="gemini-2.5-flash"` +- **`GOOGLE_API_KEY`**: + - Your Google Cloud API key. + - Required for using Vertex AI in express mode. + - Ensure you have the necessary permissions. + - Example: `export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"`. +- **`GOOGLE_CLOUD_PROJECT`**: + - Your Google Cloud Project ID. + - Required for using Code Assist or Vertex AI. + - If using Vertex AI, ensure you have the necessary permissions in this project. + - **Cloud Shell Note:** When running in a Cloud Shell environment, this variable defaults to a special project allocated for Cloud Shell users. If you have `GOOGLE_CLOUD_PROJECT` set in your global environment in Cloud Shell, it will be overridden by this default. To use a different project in Cloud Shell, you must define `GOOGLE_CLOUD_PROJECT` in a `.env` file. + - Example: `export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`. +- **`GOOGLE_APPLICATION_CREDENTIALS`** (string): + - **Description:** The path to your Google Application Credentials JSON file. + - **Example:** `export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/credentials.json"` +- **`OTLP_GOOGLE_CLOUD_PROJECT`**: + - Your Google Cloud Project ID for Telemetry in Google Cloud + - Example: `export OTLP_GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`. +- **`GOOGLE_CLOUD_LOCATION`**: + - Your Google Cloud Project Location (e.g., us-central1). + - Required for using Vertex AI in non express mode. + - Example: `export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"`. +- **`GEMINI_SANDBOX`**: + - Alternative to the `sandbox` setting in `settings.json`. + - Accepts `true`, `false`, `docker`, `podman`, or a custom command string. +- **`SEATBELT_PROFILE`** (macOS specific): + - Switches the Seatbelt (`sandbox-exec`) profile on macOS. + - `permissive-open`: (Default) Restricts writes to the project folder (and a few other folders, see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) but allows other operations. 
+  - `strict`: Uses a strict profile that declines operations by default.
+  - `<profile_name>`: Uses a custom profile. To define a custom profile, create a file named `sandbox-macos-<profile_name>.sb` in your project's `.gemini/` directory (e.g., `my-project/.gemini/sandbox-macos-custom.sb`).
+- **`DEBUG` or `DEBUG_MODE`** (often used by underlying libraries or the CLI itself):
+  - Set to `true` or `1` to enable verbose debug logging, which can be helpful for troubleshooting.
+  - **Note:** These variables are automatically excluded from project `.env` files by default to prevent interference with gemini-cli behavior. Use `.gemini/.env` files if you need to set these for gemini-cli specifically.
+- **`NO_COLOR`**:
+  - Set to any value to disable all color output in the CLI.
+- **`CLI_TITLE`**:
+  - Set to a string to customize the title of the CLI.
+- **`CODE_ASSIST_ENDPOINT`**:
+  - Specifies the endpoint for the code assist server.
+  - This is useful for development and testing.
+
+## Command-Line Arguments
+
+Arguments passed directly when running the CLI can override other configurations for that specific session.
+
+- **`--model <model_name>`** (**`-m <model_name>`**):
+  - Specifies the Gemini model to use for this session.
+  - Example: `npm start -- --model gemini-1.5-pro-latest`
+- **`--prompt <your_prompt>`** (**`-p <your_prompt>`**):
+  - Used to pass a prompt directly to the command. This invokes Gemini CLI in a non-interactive mode.
+- **`--prompt-interactive <your_prompt>`** (**`-i <your_prompt>`**):
+  - Starts an interactive session with the provided prompt as the initial input.
+  - The prompt is processed within the interactive session, not before it.
+  - Cannot be used when piping input from stdin.
+  - Example: `gemini -i "explain this code"`
+- **`--sandbox`** (**`-s`**):
+  - Enables sandbox mode for this session.
+- **`--sandbox-image`**:
+  - Sets the sandbox image URI.
+- **`--debug`** (**`-d`**):
+  - Enables debug mode for this session, providing more verbose output. 
+- **`--all-files`** (**`-a`**):
+  - If set, recursively includes all files within the current directory as context for the prompt.
+- **`--help`** (or **`-h`**):
+  - Displays help information about command-line arguments.
+- **`--show-memory-usage`**:
+  - Displays the current memory usage.
+- **`--yolo`**:
+  - Enables YOLO mode, which automatically approves all tool calls.
+- **`--approval-mode <mode>`**:
+  - Sets the approval mode for tool calls. Available modes:
+    - `default`: Prompt for approval on each tool call (default behavior)
+    - `auto_edit`: Automatically approve edit tools (replace, write_file) while prompting for others
+    - `yolo`: Automatically approve all tool calls (equivalent to `--yolo`)
+  - Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach.
+  - Example: `gemini --approval-mode auto_edit`
+- **`--allowed-tools <tool1,tool2,...>`**:
+  - A comma-separated list of tool names that will bypass the confirmation dialog.
+  - Example: `gemini --allowed-tools "ShellTool(git status)"`
+- **`--telemetry`**:
+  - Enables [telemetry](../telemetry.md).
+- **`--telemetry-target`**:
+  - Sets the telemetry target. See [telemetry](../telemetry.md) for more information.
+- **`--telemetry-otlp-endpoint`**:
+  - Sets the OTLP endpoint for telemetry. See [telemetry](../telemetry.md) for more information.
+- **`--telemetry-otlp-protocol`**:
+  - Sets the OTLP protocol for telemetry (`grpc` or `http`). Defaults to `grpc`. See [telemetry](../telemetry.md) for more information.
+- **`--telemetry-log-prompts`**:
+  - Enables logging of prompts for telemetry. See [telemetry](../telemetry.md) for more information.
+- **`--checkpointing`**:
+  - Enables [checkpointing](../checkpointing.md).
+- **`--extensions <extension_name>`** (**`-e <extension_name>`**):
+  - Specifies a list of extensions to use for the session. If not provided, all available extensions are used.
+  - Use the special term `gemini -e none` to disable all extensions. 
+  - Example: `gemini -e my-extension -e my-other-extension`
+- **`--list-extensions`** (**`-l`**):
+  - Lists all available extensions and exits.
+- **`--proxy`**:
+  - Sets the proxy for the CLI.
+  - Example: `--proxy http://localhost:7890`.
+- **`--include-directories <dir1,dir2,...>`**:
+  - Includes additional directories in the workspace for multi-directory support.
+  - Can be specified multiple times or as comma-separated values.
+  - A maximum of 5 directories can be added.
+  - Example: `--include-directories /path/to/project1,/path/to/project2` or `--include-directories /path/to/project1 --include-directories /path/to/project2`
+- **`--screen-reader`**:
+  - Enables screen reader mode for accessibility.
+- **`--version`**:
+  - Displays the version of the CLI.
+
+## Context Files (Hierarchical Instructional Context)
+
+While not strictly configuration for the CLI's _behavior_, context files (defaulting to `GEMINI.md` but configurable via the `context.fileName` setting) are crucial for configuring the _instructional context_ (also referred to as "memory") provided to the Gemini model. This powerful feature allows you to give project-specific instructions, coding style guides, or any relevant background information to the AI, making its responses more tailored and accurate to your needs. The CLI includes UI elements, such as an indicator in the footer showing the number of loaded context files, to keep you informed about the active context.
+
+- **Purpose:** These Markdown files contain instructions, guidelines, or context that you want the Gemini model to be aware of during your interactions. The system is designed to manage this instructional context hierarchically. 
+ +### Example Context File Content (e.g., `GEMINI.md`) + +Here's a conceptual example of what a context file at the root of a TypeScript project might contain: + +```markdown +# Project: My Awesome TypeScript Library + +## General Instructions: + +- When generating new TypeScript code, please follow the existing coding style. +- Ensure all new functions and classes have JSDoc comments. +- Prefer functional programming paradigms where appropriate. +- All code should be compatible with TypeScript 5.0 and Node.js 20+. + +## Coding Style: + +- Use 2 spaces for indentation. +- Interface names should be prefixed with `I` (e.g., `IUserService`). +- Private class members should be prefixed with an underscore (`_`). +- Always use strict equality (`===` and `!==`). + +## Specific Component: `src/api/client.ts` + +- This file handles all outbound API requests. +- When adding new API call functions, ensure they include robust error handling and logging. +- Use the existing `fetchWithRetry` utility for all GET requests. + +## Regarding Dependencies: + +- Avoid introducing new external dependencies unless absolutely necessary. +- If a new dependency is required, please state the reason. +``` + +This example demonstrates how you can provide general project context, specific coding conventions, and even notes about particular files or components. The more relevant and precise your context files are, the better the AI can assist you. Project-specific context files are highly encouraged to establish conventions and context. + +- **Hierarchical Loading and Precedence:** The CLI implements a sophisticated hierarchical memory system by loading context files (e.g., `GEMINI.md`) from several locations. Content from files lower in this list (more specific) typically overrides or supplements content from files higher up (more general). The exact concatenation order and final context can be inspected using the `/memory show` command. The typical loading order is: + 1. 
**Global Context File:** + - Location: `~/.gemini/` (e.g., `~/.gemini/GEMINI.md` in your user home directory). + - Scope: Provides default instructions for all your projects. + 2. **Project Root & Ancestors Context Files:** + - Location: The CLI searches for the configured context file in the current working directory and then in each parent directory up to either the project root (identified by a `.git` folder) or your home directory. + - Scope: Provides context relevant to the entire project or a significant portion of it. + 3. **Sub-directory Context Files (Contextual/Local):** + - Location: The CLI also scans for the configured context file in subdirectories _below_ the current working directory (respecting common ignore patterns like `node_modules`, `.git`, etc.). The breadth of this search is limited to 200 directories by default, but can be configured with the `context.discoveryMaxDirs` setting in your `settings.json` file. + - Scope: Allows for highly specific instructions relevant to a particular component, module, or subsection of your project. +- **Concatenation & UI Indication:** The contents of all found context files are concatenated (with separators indicating their origin and path) and provided as part of the system prompt to the Gemini model. The CLI footer displays the count of loaded context files, giving you a quick visual cue about the active instructional context. +- **Importing Content:** You can modularize your context files by importing other Markdown files using the `@path/to/file.md` syntax. For more details, see the [Memory Import Processor documentation](../core/memport.md). +- **Commands for Memory Management:** + - Use `/memory refresh` to force a re-scan and reload of all context files from all configured locations. This updates the AI's instructional context. + - Use `/memory show` to display the combined instructional context currently loaded, allowing you to verify the hierarchy and content being used by the AI. 
+ - See the [Commands documentation](./commands.md#memory) for full details on the `/memory` command and its sub-commands (`show` and `refresh`). + +By understanding and utilizing these configuration layers and the hierarchical nature of context files, you can effectively manage the AI's memory and tailor the Gemini CLI's responses to your specific needs and projects. + +## Sandboxing + +The Gemini CLI can execute potentially unsafe operations (like shell commands and file modifications) within a sandboxed environment to protect your system. + +Sandboxing is disabled by default, but you can enable it in a few ways: + +- Using `--sandbox` or `-s` flag. +- Setting `GEMINI_SANDBOX` environment variable. +- Sandbox is enabled when using `--yolo` or `--approval-mode=yolo` by default. + +By default, it uses a pre-built `gemini-cli-sandbox` Docker image. + +For project-specific sandboxing needs, you can create a custom Dockerfile at `.gemini/sandbox.Dockerfile` in your project's root directory. This Dockerfile can be based on the base sandbox image: + +```dockerfile +FROM gemini-cli-sandbox + +# Add your custom dependencies or configurations here +# For example: +# RUN apt-get update && apt-get install -y some-package +# COPY ./my-config /app/my-config +``` + +When `.gemini/sandbox.Dockerfile` exists, you can use `BUILD_SANDBOX` environment variable when running Gemini CLI to automatically build the custom sandbox image: + +```bash +BUILD_SANDBOX=1 gemini -s +``` + +## Usage Statistics + +To help us improve the Gemini CLI, we collect anonymized usage statistics. This data helps us understand how the CLI is used, identify common issues, and prioritize new features. + +**What we collect:** + +- **Tool Calls:** We log the names of the tools that are called, whether they succeed or fail, and how long they take to execute. We do not collect the arguments passed to the tools or any data returned by them. 
+- **API Requests:** We log the Gemini model used for each request, the duration of the request, and whether it was successful. We do not collect the content of the prompts or responses. +- **Session Information:** We collect information about the configuration of the CLI, such as the enabled tools and the approval mode. + +**What we DON'T collect:** + +- **Personally Identifiable Information (PII):** We do not collect any personal information, such as your name, email address, or API keys. +- **Prompt and Response Content:** We do not log the content of your prompts or the responses from the Gemini model. +- **File Content:** We do not log the content of any files that are read or written by the CLI. + +**How to opt out:** + +You can opt out of usage statistics collection at any time by setting the `usageStatisticsEnabled` property to `false` under the `privacy` category in your `settings.json` file: + +```json +{ + "privacy": { + "usageStatisticsEnabled": false + } +} +``` diff --git a/projects/gemini-cli/docs/cli/enterprise.md b/projects/gemini-cli/docs/cli/enterprise.md new file mode 100644 index 0000000000000000000000000000000000000000..d4c0dcf96a75233200d753eaae23390f1c6c7930 --- /dev/null +++ b/projects/gemini-cli/docs/cli/enterprise.md @@ -0,0 +1,369 @@ +# Gemini CLI for the Enterprise + +This document outlines configuration patterns and best practices for deploying and managing Gemini CLI in an enterprise environment. By leveraging system-level settings, administrators can enforce security policies, manage tool access, and ensure a consistent experience for all users. + +> **A Note on Security:** The patterns described in this document are intended to help administrators create a more controlled and secure environment for using Gemini CLI. However, they should not be considered a foolproof security boundary. A determined user with sufficient privileges on their local machine may still be able to circumvent these configurations. 
These measures are designed to prevent accidental misuse and enforce corporate policy in a managed environment, not to defend against a malicious actor with local administrative rights.
+
+## Centralized Configuration: The System Settings File
+
+The most powerful tools for enterprise administration are the system-wide settings files. These files allow you to define a baseline configuration (`system-defaults.json`) and a set of overrides (`settings.json`) that apply to all users on a machine. For a complete overview of configuration options, see the [Configuration documentation](./configuration.md).
+
+Settings are merged from four files. The precedence order for single-value settings (like `theme`) is:
+
+1. System Defaults (`system-defaults.json`)
+2. User Settings (`~/.gemini/settings.json`)
+3. Workspace Settings (`<project>/.gemini/settings.json`)
+4. System Overrides (`settings.json`)
+
+This means the System Overrides file has the final say. For settings that are arrays (`includeDirectories`) or objects (`mcpServers`), the values are merged.
+
+**Example of Merging and Precedence:**
+
+Here is how settings from different levels are combined. 
+ +- **System Defaults `system-defaults.json`:** + + ```json + { + "ui": { + "theme": "default-corporate-theme" + }, + "context": { + "includeDirectories": ["/etc/gemini-cli/common-context"] + } + } + ``` + +- **User `settings.json` (`~/.gemini/settings.json`):** + + ```json + { + "ui": { + "theme": "user-preferred-dark-theme" + }, + "mcpServers": { + "corp-server": { + "command": "/usr/local/bin/corp-server-dev" + }, + "user-tool": { + "command": "npm start --prefix ~/tools/my-tool" + } + }, + "context": { + "includeDirectories": ["~/gemini-context"] + } + } + ``` + +- **Workspace `settings.json` (`/.gemini/settings.json`):** + + ```json + { + "ui": { + "theme": "project-specific-light-theme" + }, + "mcpServers": { + "project-tool": { + "command": "npm start" + } + }, + "context": { + "includeDirectories": ["./project-context"] + } + } + ``` + +- **System Overrides `settings.json`:** + ```json + { + "ui": { + "theme": "system-enforced-theme" + }, + "mcpServers": { + "corp-server": { + "command": "/usr/local/bin/corp-server-prod" + } + }, + "context": { + "includeDirectories": ["/etc/gemini-cli/global-context"] + } + } + ``` + +This results in the following merged configuration: + +- **Final Merged Configuration:** + ```json + { + "ui": { + "theme": "system-enforced-theme" + }, + "mcpServers": { + "corp-server": { + "command": "/usr/local/bin/corp-server-prod" + }, + "user-tool": { + "command": "npm start --prefix ~/tools/my-tool" + }, + "project-tool": { + "command": "npm start" + } + }, + "context": { + "includeDirectories": [ + "/etc/gemini-cli/common-context", + "~/gemini-context", + "./project-context", + "/etc/gemini-cli/global-context" + ] + } + } + ``` + +**Why:** + +- **`theme`**: The value from the system overrides (`system-enforced-theme`) is used, as it has the highest precedence. +- **`mcpServers`**: The objects are merged. The `corp-server` definition from the system overrides takes precedence over the user's definition. 
The unique `user-tool` and `project-tool` are included. +- **`includeDirectories`**: The arrays are concatenated in the order of System Defaults, User, Workspace, and then System Overrides. + +- **Location**: + - **Linux**: `/etc/gemini-cli/settings.json` + - **Windows**: `C:\ProgramData\gemini-cli\settings.json` + - **macOS**: `/Library/Application Support/GeminiCli/settings.json` + - The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable. +- **Control**: This file should be managed by system administrators and protected with appropriate file permissions to prevent unauthorized modification by users. + +By using the system settings file, you can enforce the security and configuration patterns described below. + +## Restricting Tool Access + +You can significantly enhance security by controlling which tools the Gemini model can use. This is achieved through the `tools.core` and `tools.exclude` settings. For a list of available tools, see the [Tools documentation](../tools/index.md). + +### Allowlisting with `coreTools` + +The most secure approach is to explicitly add the tools and commands that users are permitted to execute to an allowlist. This prevents the use of any tool not on the approved list. + +**Example:** Allow only safe, read-only file operations and listing files. + +```json +{ + "tools": { + "core": ["ReadFileTool", "GlobTool", "ShellTool(ls)"] + } +} +``` + +### Blocklisting with `excludeTools` + +Alternatively, you can add specific tools that are considered dangerous in your environment to a blocklist. + +**Example:** Prevent the use of the shell tool for removing files. + +```json +{ + "tools": { + "exclude": ["ShellTool(rm -rf)"] + } +} +``` + +**Security Note:** Blocklisting with `excludeTools` is less secure than allowlisting with `coreTools`, as it relies on blocking known-bad commands, and clever users may find ways to bypass simple string-based blocks. 
**Allowlisting is the recommended approach.** + +## Managing Custom Tools (MCP Servers) + +If your organization uses custom tools via [Model-Context Protocol (MCP) servers](../core/tools-api.md), it is crucial to understand how server configurations are managed to apply security policies effectively. + +### How MCP Server Configurations are Merged + +Gemini CLI loads `settings.json` files from three levels: System, Workspace, and User. When it comes to the `mcpServers` object, these configurations are **merged**: + +1. **Merging:** The lists of servers from all three levels are combined into a single list. +2. **Precedence:** If a server with the **same name** is defined at multiple levels (e.g., a server named `corp-api` exists in both system and user settings), the definition from the highest-precedence level is used. The order of precedence is: **System > Workspace > User**. + +This means a user **cannot** override the definition of a server that is already defined in the system-level settings. However, they **can** add new servers with unique names. + +### Enforcing a Catalog of Tools + +The security of your MCP tool ecosystem depends on a combination of defining the canonical servers and adding their names to an allowlist. + +### Restricting Tools Within an MCP Server + +For even greater security, especially when dealing with third-party MCP servers, you can restrict which specific tools from a server are exposed to the model. This is done using the `includeTools` and `excludeTools` properties within a server's definition. This allows you to use a subset of tools from a server without allowing potentially dangerous ones. + +Following the principle of least privilege, it is highly recommended to use `includeTools` to create an allowlist of only the necessary tools. + +**Example:** Only allow the `code-search` and `get-ticket-details` tools from a third-party MCP server, even if the server offers other tools like `delete-ticket`. 
+ +```json +{ + "mcp": { + "allowed": ["third-party-analyzer"] + }, + "mcpServers": { + "third-party-analyzer": { + "command": "/usr/local/bin/start-3p-analyzer.sh", + "includeTools": ["code-search", "get-ticket-details"] + } + } +} +``` + +#### More Secure Pattern: Define and Add to Allowlist in System Settings + +To create a secure, centrally-managed catalog of tools, the system administrator **must** do both of the following in the system-level `settings.json` file: + +1. **Define the full configuration** for every approved server in the `mcpServers` object. This ensures that even if a user defines a server with the same name, the secure system-level definition will take precedence. +2. **Add the names** of those servers to an allowlist using the `mcp.allowed` setting. This is a critical security step that prevents users from running any servers that are not on this list. If this setting is omitted, the CLI will merge and allow any server defined by the user. + +**Example System `settings.json`:** + +1. Add the _names_ of all approved servers to an allowlist. + This will prevent users from adding their own servers. + +2. Provide the canonical _definition_ for each server on the allowlist. + +```json +{ + "mcp": { + "allowed": ["corp-data-api", "source-code-analyzer"] + }, + "mcpServers": { + "corp-data-api": { + "command": "/usr/local/bin/start-corp-api.sh", + "timeout": 5000 + }, + "source-code-analyzer": { + "command": "/usr/local/bin/start-analyzer.sh" + } + } +} +``` + +This pattern is more secure because it uses both definition and an allowlist. Any server a user defines will either be overridden by the system definition (if it has the same name) or blocked because its name is not in the `mcp.allowed` list. + +### Less Secure Pattern: Omitting the Allowlist + +If the administrator defines the `mcpServers` object but fails to also specify the `mcp.allowed` allowlist, users may add their own servers. 
+ +**Example System `settings.json`:** + +This configuration defines servers but does not enforce the allowlist. +The administrator has NOT included the "mcp.allowed" setting. + +```json +{ + "mcpServers": { + "corp-data-api": { + "command": "/usr/local/bin/start-corp-api.sh" + } + } +} +``` + +In this scenario, a user can add their own server in their local `settings.json`. Because there is no `mcp.allowed` list to filter the merged results, the user's server will be added to the list of available tools and allowed to run. + +## Enforcing Sandboxing for Security + +To mitigate the risk of potentially harmful operations, you can enforce the use of sandboxing for all tool execution. The sandbox isolates tool execution in a containerized environment. + +**Example:** Force all tool execution to happen within a Docker sandbox. + +```json +{ + "tools": { + "sandbox": "docker" + } +} +``` + +You can also specify a custom, hardened Docker image for the sandbox using the `--sandbox-image` command-line argument or by building a custom `sandbox.Dockerfile` as described in the [Sandboxing documentation](./configuration.md#sandboxing). + +## Controlling Network Access via Proxy + +In corporate environments with strict network policies, you can configure Gemini CLI to route all outbound traffic through a corporate proxy. This can be set via an environment variable, but it can also be enforced for custom tools via the `mcpServers` configuration. + +**Example (for an MCP Server):** + +```json +{ + "mcpServers": { + "proxied-server": { + "command": "node", + "args": ["mcp_server.js"], + "env": { + "HTTP_PROXY": "http://proxy.example.com:8080", + "HTTPS_PROXY": "http://proxy.example.com:8080" + } + } + } +} +``` + +## Telemetry and Auditing + +For auditing and monitoring purposes, you can configure Gemini CLI to send telemetry data to a central location. This allows you to track tool usage and other events. For more information, see the [telemetry documentation](../telemetry.md). 
+ +**Example:** Enable telemetry and send it to a local OTLP collector. If `otlpEndpoint` is not specified, it defaults to `http://localhost:4317`. + +```json +{ + "telemetry": { + "enabled": true, + "target": "gcp", + "logPrompts": false + } +} +``` + +**Note:** Ensure that `logPrompts` is set to `false` in an enterprise setting to avoid collecting potentially sensitive information from user prompts. + +## Putting It All Together: Example System `settings.json` + +Here is an example of a system `settings.json` file that combines several of the patterns discussed above to create a secure, controlled environment for Gemini CLI. + +```json +{ + "tools": { + "sandbox": "docker", + "core": [ + "ReadFileTool", + "GlobTool", + "ShellTool(ls)", + "ShellTool(cat)", + "ShellTool(grep)" + ] + }, + "mcp": { + "allowed": ["corp-tools"] + }, + "mcpServers": { + "corp-tools": { + "command": "/opt/gemini-tools/start.sh", + "timeout": 5000 + } + }, + "telemetry": { + "enabled": true, + "target": "gcp", + "otlpEndpoint": "https://telemetry-prod.example.com:4317", + "logPrompts": false + }, + "advanced": { + "bugCommand": { + "urlTemplate": "https://servicedesk.example.com/new-ticket?title={title}&details={info}" + } + }, + "privacy": { + "usageStatisticsEnabled": false + } +} +``` + +This configuration: + +- Forces all tool execution into a Docker sandbox. +- Strictly uses an allowlist for a small set of safe shell commands and file tools. +- Defines and allows a single corporate MCP server for custom tools. +- Enables telemetry for auditing, without logging prompt content. +- Redirects the `/bug` command to an internal ticketing system. +- Disables general usage statistics collection. 
diff --git a/projects/gemini-cli/docs/cli/index.md b/projects/gemini-cli/docs/cli/index.md new file mode 100644 index 0000000000000000000000000000000000000000..1b5e1796d64e76ec99639df3593482d675b8492f --- /dev/null +++ b/projects/gemini-cli/docs/cli/index.md @@ -0,0 +1,29 @@ +# Gemini CLI + +Within Gemini CLI, `packages/cli` is the frontend for users to send and receive prompts with the Gemini AI model and its associated tools. For a general overview of Gemini CLI, see the [main documentation page](../index.md). + +## Navigating this section + +- **[Authentication](./authentication.md):** A guide to setting up authentication with Google's AI services. +- **[Commands](./commands.md):** A reference for Gemini CLI commands (e.g., `/help`, `/tools`, `/theme`). +- **[Configuration](./configuration.md):** A guide to tailoring Gemini CLI behavior using configuration files. +- **[Enterprise](./enterprise.md):** A guide to enterprise configuration. +- **[Token Caching](./token-caching.md):** Optimize API costs through token caching. +- **[Themes](./themes.md)**: A guide to customizing the CLI's appearance with different themes. +- **[Tutorials](tutorials.md)**: A tutorial showing how to use Gemini CLI to automate a development task. + +## Non-interactive mode + +Gemini CLI can be run in a non-interactive mode, which is useful for scripting and automation. In this mode, you pipe input to the CLI, it executes the command, and then it exits. + +The following example pipes a command to Gemini CLI from your terminal: + +```bash +echo "What is fine tuning?" | gemini +``` + +Gemini CLI executes the command and prints the output to your terminal. Note that you can achieve the same behavior by using the `--prompt` or `-p` flag. For example: + +```bash +gemini -p "What is fine tuning?" 
+``` diff --git a/projects/gemini-cli/docs/cli/themes.md b/projects/gemini-cli/docs/cli/themes.md new file mode 100644 index 0000000000000000000000000000000000000000..6881832ca0234f17f03e47f406859f01a8fbe60e --- /dev/null +++ b/projects/gemini-cli/docs/cli/themes.md @@ -0,0 +1,203 @@ +# Themes + +Gemini CLI supports a variety of themes to customize its color scheme and appearance. You can change the theme to suit your preferences via the `/theme` command or `"theme":` configuration setting. + +## Available Themes + +Gemini CLI comes with a selection of pre-defined themes, which you can list using the `/theme` command within Gemini CLI: + +- **Dark Themes:** + - `ANSI` + - `Atom One` + - `Ayu` + - `Default` + - `Dracula` + - `GitHub` +- **Light Themes:** + - `ANSI Light` + - `Ayu Light` + - `Default Light` + - `GitHub Light` + - `Google Code` + - `Xcode` + +### Changing Themes + +1. Enter `/theme` into Gemini CLI. +2. A dialog or selection prompt appears, listing the available themes. +3. Using the arrow keys, select a theme. Some interfaces might offer a live preview or highlight as you select. +4. Confirm your selection to apply the theme. + +**Note:** If a theme is defined in your `settings.json` file (either by name or by a file path), you must remove the `"theme"` setting from the file before you can change the theme using the `/theme` command. + +### Theme Persistence + +Selected themes are saved in Gemini CLI's [configuration](./configuration.md) so your preference is remembered across sessions. + +--- + +## Custom Color Themes + +Gemini CLI allows you to create your own custom color themes by specifying them in your `settings.json` file. This gives you full control over the color palette used in the CLI. + +### How to Define a Custom Theme + +Add a `customThemes` block to your user, project, or system `settings.json` file. Each custom theme is defined as an object with a unique name and a set of color keys. 
For example: + +```json +{ + "ui": { + "customThemes": { + "MyCustomTheme": { + "name": "MyCustomTheme", + "type": "custom", + "Background": "#181818", + ... + } + } + } +} +``` + +**Color keys:** + +- `Background` +- `Foreground` +- `LightBlue` +- `AccentBlue` +- `AccentPurple` +- `AccentCyan` +- `AccentGreen` +- `AccentYellow` +- `AccentRed` +- `Comment` +- `Gray` +- `DiffAdded` (optional, for added lines in diffs) +- `DiffRemoved` (optional, for removed lines in diffs) +- `DiffModified` (optional, for modified lines in diffs) + +**Required Properties:** + +- `name` (must match the key in the `customThemes` object and be a string) +- `type` (must be the string `"custom"`) +- `Background` +- `Foreground` +- `LightBlue` +- `AccentBlue` +- `AccentPurple` +- `AccentCyan` +- `AccentGreen` +- `AccentYellow` +- `AccentRed` +- `Comment` +- `Gray` + +You can use either hex codes (e.g., `#FF0000`) **or** standard CSS color names (e.g., `coral`, `teal`, `blue`) for any color value. See [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#color_keywords) for a full list of supported names. + +You can define multiple custom themes by adding more entries to the `customThemes` object. + +### Loading Themes from a File + +In addition to defining custom themes in `settings.json`, you can also load a theme directly from a JSON file by specifying the file path in your `settings.json`. This is useful for sharing themes or keeping them separate from your main configuration. + +To load a theme from a file, set the `theme` property in your `settings.json` to the path of your theme file: + +```json +{ + "ui": { + "theme": "/path/to/your/theme.json" + } +} +``` + +The theme file must be a valid JSON file that follows the same structure as a custom theme defined in `settings.json`. 
+ +**Example `my-theme.json`:** + +```json +{ + "name": "My File Theme", + "type": "custom", + "Background": "#282A36", + "Foreground": "#F8F8F2", + "LightBlue": "#82AAFF", + "AccentBlue": "#61AFEF", + "AccentPurple": "#BD93F9", + "AccentCyan": "#8BE9FD", + "AccentGreen": "#50FA7B", + "AccentYellow": "#F1FA8C", + "AccentRed": "#FF5555", + "Comment": "#6272A4", + "Gray": "#ABB2BF", + "DiffAdded": "#A6E3A1", + "DiffRemoved": "#F38BA8", + "DiffModified": "#89B4FA", + "GradientColors": ["#4796E4", "#847ACE", "#C3677F"] +} +``` + +**Security Note:** For your safety, Gemini CLI will only load theme files that are located within your home directory. If you attempt to load a theme from outside your home directory, a warning will be displayed and the theme will not be loaded. This is to prevent loading potentially malicious theme files from untrusted sources. + +### Example Custom Theme + +Custom theme example + +### Using Your Custom Theme + +- Select your custom theme using the `/theme` command in Gemini CLI. Your custom theme will appear in the theme selection dialog. +- Or, set it as the default by adding `"theme": "MyCustomTheme"` to the `ui` object in your `settings.json`. +- Custom themes can be set at the user, project, or system level, and follow the same [configuration precedence](./configuration.md) as other settings. 
+ +--- + +## Dark Themes + +### ANSI + +ANSI theme + +### Atom OneDark + +Atom One theme + +### Ayu + +Ayu theme + +### Default + +Default theme + +### Dracula + +Dracula theme + +### GitHub + +GitHub theme + +## Light Themes + +### ANSI Light + +ANSI Light theme + +### Ayu Light + +Ayu Light theme + +### Default Light + +Default Light theme + +### GitHub Light + +GitHub Light theme + +### Google Code + +Google Code theme + +### Xcode + +Xcode Light theme diff --git a/projects/gemini-cli/docs/cli/token-caching.md b/projects/gemini-cli/docs/cli/token-caching.md new file mode 100644 index 0000000000000000000000000000000000000000..17e103e19027ee7f4a74db6457abb4d360b1f812 --- /dev/null +++ b/projects/gemini-cli/docs/cli/token-caching.md @@ -0,0 +1,14 @@ +# Token Caching and Cost Optimization + +Gemini CLI automatically optimizes API costs through token caching when using API key authentication (Gemini API key or Vertex AI). This feature reuses previous system instructions and context to reduce the number of tokens processed in subsequent requests. + +**Token caching is available for:** + +- API key users (Gemini API key) +- Vertex AI users (with project and location setup) + +**Token caching is not available for:** + +- OAuth users (Google Personal/Enterprise accounts) - the Code Assist API does not support cached content creation at this time + +You can view your token usage and cached token savings using the `/stats` command. When cached tokens are available, they will be displayed in the stats output. diff --git a/projects/gemini-cli/docs/cli/tutorials.md b/projects/gemini-cli/docs/cli/tutorials.md new file mode 100644 index 0000000000000000000000000000000000000000..1f77791dc9e64c3144b470c5e00c4a176983110c --- /dev/null +++ b/projects/gemini-cli/docs/cli/tutorials.md @@ -0,0 +1,69 @@ +# Tutorials + +This page contains tutorials for interacting with Gemini CLI. 
+

## Setting up a Model Context Protocol (MCP) server

> [!CAUTION]
> Before using a third-party MCP server, ensure you trust its source and understand the tools it provides. Your use of third-party servers is at your own risk.

This tutorial demonstrates how to set up an MCP server, using the [GitHub MCP server](https://github.com/github/github-mcp-server) as an example. The GitHub MCP server provides tools for interacting with GitHub repositories, such as creating issues and commenting on pull requests.

### Prerequisites

Before you begin, ensure you have the following installed and configured:

- **Docker:** Install and run [Docker].
- **GitHub Personal Access Token (PAT):** Create a new [classic] or [fine-grained] PAT with the necessary scopes.

[Docker]: https://www.docker.com/
[classic]: https://github.com/settings/tokens/new
[fine-grained]: https://github.com/settings/personal-access-tokens/new

### Guide

#### Configure the MCP server in `settings.json`

In your project's root directory, create or open the [`.gemini/settings.json` file](./configuration.md). Within the file, add the `mcpServers` configuration block, which provides instructions for how to launch the GitHub MCP server.

```json
{
  "mcpServers": {
    "github": {
      "command": "docker",
      "args": [
        "run",
        "-i",
        "--rm",
        "-e",
        "GITHUB_PERSONAL_ACCESS_TOKEN",
        "ghcr.io/github/github-mcp-server"
      ],
      "env": {
        "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}"
      }
    }
  }
}
```

#### Set your GitHub token

> [!CAUTION]
> Using a broadly scoped personal access token that has access to personal and private repositories can lead to information from the private repository being leaked into the public repository. We recommend using a fine-grained access token that doesn't share access to both public and private repositories. 
+

Use an environment variable to store your GitHub PAT (it must be `export`ed so that it is visible to the Gemini CLI process and the Docker container it launches):

```bash
export GITHUB_PERSONAL_ACCESS_TOKEN="pat_YourActualGitHubTokenHere"
```

Gemini CLI uses this value in the `mcpServers` configuration that you defined in the `settings.json` file.

#### Launch Gemini CLI and verify the connection

When you launch Gemini CLI, it automatically reads your configuration and launches the GitHub MCP server in the background. You can then use natural language prompts to ask Gemini CLI to perform GitHub actions. For example:

```bash
"get all open issues assigned to me in the 'foo/bar' repo and prioritize them"
```
diff --git a/projects/gemini-cli/docs/core/index.md b/projects/gemini-cli/docs/core/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..035e00c73ec72bcf5235beea90ed1c26bd35a256
--- /dev/null
+++ b/projects/gemini-cli/docs/core/index.md
@@ -0,0 +1,55 @@
+# Gemini CLI Core

Gemini CLI's core package (`packages/core`) is the backend portion of Gemini CLI, handling communication with the Gemini API, managing tools, and processing requests sent from `packages/cli`. For a general overview of Gemini CLI, see the [main documentation page](../index.md).

## Navigating this section

- **[Core tools API](./tools-api.md):** Information on how tools are defined, registered, and used by the core.
- **[Memory Import Processor](./memport.md):** Documentation for the modular GEMINI.md import feature using @file.md syntax.

## Role of the core

While the `packages/cli` portion of Gemini CLI provides the user interface, `packages/core` is responsible for:

- **Gemini API interaction:** Securely communicating with the Google Gemini API, sending user prompts, and receiving model responses.
- **Prompt engineering:** Constructing effective prompts for the Gemini model, potentially incorporating conversation history, tool definitions, and instructional context from `GEMINI.md` files. 
+- **Tool management & orchestration:** + - Registering available tools (e.g., file system tools, shell command execution). + - Interpreting tool use requests from the Gemini model. + - Executing the requested tools with the provided arguments. + - Returning tool execution results to the Gemini model for further processing. +- **Session and state management:** Keeping track of the conversation state, including history and any relevant context required for coherent interactions. +- **Configuration:** Managing core-specific configurations, such as API key access, model selection, and tool settings. + +## Security considerations + +The core plays a vital role in security: + +- **API key management:** It handles the `GEMINI_API_KEY` and ensures it's used securely when communicating with the Gemini API. +- **Tool execution:** When tools interact with the local system (e.g., `run_shell_command`), the core (and its underlying tool implementations) must do so with appropriate caution, often involving sandboxing mechanisms to prevent unintended modifications. + +## Chat history compression + +To ensure that long conversations don't exceed the token limits of the Gemini model, the core includes a chat history compression feature. + +When a conversation approaches the token limit for the configured model, the core automatically compresses the conversation history before sending it to the model. This compression is designed to be lossless in terms of the information conveyed, but it reduces the overall number of tokens used. + +You can find the token limits for each model in the [Google AI documentation](https://ai.google.dev/gemini-api/docs/models). + +## Model fallback + +Gemini CLI includes a model fallback mechanism to ensure that you can continue to use the CLI even if the default "pro" model is rate-limited. 
+ +If you are using the default "pro" model and the CLI detects that you are being rate-limited, it automatically switches to the "flash" model for the current session. This allows you to continue working without interruption. + +## File discovery service + +The file discovery service is responsible for finding files in the project that are relevant to the current context. It is used by the `@` command and other tools that need to access files. + +## Memory discovery service + +The memory discovery service is responsible for finding and loading the `GEMINI.md` files that provide context to the model. It searches for these files in a hierarchical manner, starting from the current working directory and moving up to the project root and the user's home directory. It also searches in subdirectories. + +This allows you to have global, project-level, and component-level context files, which are all combined to provide the model with the most relevant information. + +You can use the [`/memory` command](../cli/commands.md) to `show`, `add`, and `refresh` the content of loaded `GEMINI.md` files. diff --git a/projects/gemini-cli/docs/core/memport.md b/projects/gemini-cli/docs/core/memport.md new file mode 100644 index 0000000000000000000000000000000000000000..cc96aad3fdf879f05dbba191f912bf6cb180d070 --- /dev/null +++ b/projects/gemini-cli/docs/core/memport.md @@ -0,0 +1,215 @@ +# Memory Import Processor + +The Memory Import Processor is a feature that allows you to modularize your GEMINI.md files by importing content from other files using the `@file.md` syntax. + +## Overview + +This feature enables you to break down large GEMINI.md files into smaller, more manageable components that can be reused across different contexts. The import processor supports both relative and absolute paths, with built-in safety features to prevent circular imports and ensure file access security. 
+ +## Syntax + +Use the `@` symbol followed by the path to the file you want to import: + +```markdown +# Main GEMINI.md file + +This is the main content. + +@./components/instructions.md + +More content here. + +@./shared/configuration.md +``` + +## Supported Path Formats + +### Relative Paths + +- `@./file.md` - Import from the same directory +- `@../file.md` - Import from parent directory +- `@./components/file.md` - Import from subdirectory + +### Absolute Paths + +- `@/absolute/path/to/file.md` - Import using absolute path + +## Examples + +### Basic Import + +```markdown +# My GEMINI.md + +Welcome to my project! + +@./getting-started.md + +## Features + +@./features/overview.md +``` + +### Nested Imports + +The imported files can themselves contain imports, creating a nested structure: + +```markdown +# main.md + +@./header.md +@./content.md +@./footer.md +``` + +```markdown +# header.md + +# Project Header + +@./shared/title.md +``` + +## Safety Features + +### Circular Import Detection + +The processor automatically detects and prevents circular imports: + +```markdown +# file-a.md + +@./file-b.md + +# file-b.md + +@./file-a.md +``` + +### File Access Security + +The `validateImportPath` function ensures that imports are only allowed from specified directories, preventing access to sensitive files outside the allowed scope. + +### Maximum Import Depth + +To prevent infinite recursion, there's a configurable maximum import depth (default: 5 levels). + +## Error Handling + +### Missing Files + +If a referenced file doesn't exist, the import will fail gracefully with an error comment in the output. + +### File Access Errors + +Permission issues or other file system errors are handled gracefully with appropriate error messages. + +## Code Region Detection + +The import processor uses the `marked` library to detect code blocks and inline code spans, ensuring that `@` imports inside these regions are properly ignored. 
This provides robust handling of nested code blocks and complex Markdown structures. + +## Import Tree Structure + +The processor returns an import tree that shows the hierarchy of imported files, similar to Claude's `/memory` feature. This helps users debug problems with their GEMINI.md files by showing which files were read and their import relationships. + +Example tree structure: + +``` +Memory Files + L project: GEMINI.md + L a.md + L b.md + L c.md + L d.md + L e.md + L f.md + L included.md +``` + +The tree preserves the order that files were imported and shows the complete import chain for debugging purposes. + +## Comparison to Claude Code's `/memory` (`claude.md`) Approach + +Claude Code's `/memory` feature (as seen in `claude.md`) produces a flat, linear document by concatenating all included files, always marking file boundaries with clear comments and path names. It does not explicitly present the import hierarchy, but the LLM receives all file contents and paths, which is sufficient for reconstructing the hierarchy if needed. + +Note: The import tree is mainly for clarity during development and has limited relevance to LLM consumption. + +## API Reference + +### `processImports(content, basePath, debugMode?, importState?)` + +Processes import statements in GEMINI.md content. 
+

**Parameters:**

- `content` (string): The content to process for imports
- `basePath` (string): The directory path where the current file is located
- `debugMode` (boolean, optional): Whether to enable debug logging (default: false)
- `importState` (ImportState, optional): State tracking for circular import prevention

**Returns:** `Promise<ProcessImportsResult>` - Object containing processed content and import tree

### `ProcessImportsResult`

```typescript
interface ProcessImportsResult {
  content: string; // The processed content with imports resolved
  importTree: MemoryFile; // Tree structure showing the import hierarchy
}
```

### `MemoryFile`

```typescript
interface MemoryFile {
  path: string; // The file path
  imports?: MemoryFile[]; // Direct imports, in the order they were imported
}
```

### `validateImportPath(importPath, basePath, allowedDirectories)`

Validates import paths to ensure they are safe and within allowed directories.

**Parameters:**

- `importPath` (string): The import path to validate
- `basePath` (string): The base directory for resolving relative paths
- `allowedDirectories` (string[]): Array of allowed directory paths

**Returns:** boolean - Whether the import path is valid

### `findProjectRoot(startDir)`

Finds the project root by searching for a `.git` directory upwards from the given start directory. Implemented as an **async** function using non-blocking file system APIs to avoid blocking the Node.js event loop.

**Parameters:**

- `startDir` (string): The directory to start searching from

**Returns:** `Promise<string>` - The project root directory (or the start directory if no `.git` is found)

## Best Practices

1. **Use descriptive file names** for imported components
2. **Keep imports shallow** - avoid deeply nested import chains
3. **Document your structure** - maintain a clear hierarchy of imported files
4. **Test your imports** - ensure all referenced files exist and are accessible
5. 
**Use relative paths** when possible for better portability + +## Troubleshooting + +### Common Issues + +1. **Import not working**: Check that the file exists and the path is correct +2. **Circular import warnings**: Review your import structure for circular references +3. **Permission errors**: Ensure the files are readable and within allowed directories +4. **Path resolution issues**: Use absolute paths if relative paths aren't resolving correctly + +### Debug Mode + +Enable debug mode to see detailed logging of the import process: + +```typescript +const result = await processImports(content, basePath, true); +``` diff --git a/projects/gemini-cli/docs/core/tools-api.md b/projects/gemini-cli/docs/core/tools-api.md new file mode 100644 index 0000000000000000000000000000000000000000..73ac903fd47d7c27516c22494669020ef5b315b9 --- /dev/null +++ b/projects/gemini-cli/docs/core/tools-api.md @@ -0,0 +1,75 @@ +# Gemini CLI Core: Tools API + +The Gemini CLI core (`packages/core`) features a robust system for defining, registering, and executing tools. These tools extend the capabilities of the Gemini model, allowing it to interact with the local environment, fetch web content, and perform various actions beyond simple text generation. + +## Core Concepts + +- **Tool (`tools.ts`):** An interface and base class (`BaseTool`) that defines the contract for all tools. Each tool must have: + - `name`: A unique internal name (used in API calls to Gemini). + - `displayName`: A user-friendly name. + - `description`: A clear explanation of what the tool does, which is provided to the Gemini model. + - `parameterSchema`: A JSON schema defining the parameters that the tool accepts. This is crucial for the Gemini model to understand how to call the tool correctly. + - `validateToolParams()`: A method to validate incoming parameters. + - `getDescription()`: A method to provide a human-readable description of what the tool will do with specific parameters before execution. 
+ - `shouldConfirmExecute()`: A method to determine if user confirmation is required before execution (e.g., for potentially destructive operations). + - `execute()`: The core method that performs the tool's action and returns a `ToolResult`. + +- **`ToolResult` (`tools.ts`):** An interface defining the structure of a tool's execution outcome: + - `llmContent`: The factual content to be included in the history sent back to the LLM for context. This can be a simple string or a `PartListUnion` (an array of `Part` objects and strings) for rich content. + - `returnDisplay`: A user-friendly string (often Markdown) or a special object (like `FileDiff`) for display in the CLI. + +- **Returning Rich Content:** Tools are not limited to returning simple text. The `llmContent` can be a `PartListUnion`, which is an array that can contain a mix of `Part` objects (for images, audio, etc.) and `string`s. This allows a single tool execution to return multiple pieces of rich content. + +- **Tool Registry (`tool-registry.ts`):** A class (`ToolRegistry`) responsible for: + - **Registering Tools:** Holding a collection of all available built-in tools (e.g., `ReadFileTool`, `ShellTool`). + - **Discovering Tools:** It can also discover tools dynamically: + - **Command-based Discovery:** If `tools.discoveryCommand` is configured in settings, this command is executed. It's expected to output JSON describing custom tools, which are then registered as `DiscoveredTool` instances. + - **MCP-based Discovery:** If `mcp.serverCommand` is configured, the registry can connect to a Model Context Protocol (MCP) server to list and register tools (`DiscoveredMCPTool`). + - **Providing Schemas:** Exposing the `FunctionDeclaration` schemas of all registered tools to the Gemini model, so it knows what tools are available and how to use them. + - **Retrieving Tools:** Allowing the core to get a specific tool by name for execution. 
+ +## Built-in Tools + +The core comes with a suite of pre-defined tools, typically found in `packages/core/src/tools/`. These include: + +- **File System Tools:** + - `LSTool` (`ls.ts`): Lists directory contents. + - `ReadFileTool` (`read-file.ts`): Reads the content of a single file. It takes an `absolute_path` parameter, which must be an absolute path. + - `WriteFileTool` (`write-file.ts`): Writes content to a file. + - `GrepTool` (`grep.ts`): Searches for patterns in files. + - `GlobTool` (`glob.ts`): Finds files matching glob patterns. + - `EditTool` (`edit.ts`): Performs in-place modifications to files (often requiring confirmation). + - `ReadManyFilesTool` (`read-many-files.ts`): Reads and concatenates content from multiple files or glob patterns (used by the `@` command in CLI). +- **Execution Tools:** + - `ShellTool` (`shell.ts`): Executes arbitrary shell commands (requires careful sandboxing and user confirmation). +- **Web Tools:** + - `WebFetchTool` (`web-fetch.ts`): Fetches content from a URL. + - `WebSearchTool` (`web-search.ts`): Performs a web search. +- **Memory Tools:** + - `MemoryTool` (`memoryTool.ts`): Interacts with the AI's memory. + +Each of these tools extends `BaseTool` and implements the required methods for its specific functionality. + +## Tool Execution Flow + +1. **Model Request:** The Gemini model, based on the user's prompt and the provided tool schemas, decides to use a tool and returns a `FunctionCall` part in its response, specifying the tool name and arguments. +2. **Core Receives Request:** The core parses this `FunctionCall`. +3. **Tool Retrieval:** It looks up the requested tool in the `ToolRegistry`. +4. **Parameter Validation:** The tool's `validateToolParams()` method is called. +5. **Confirmation (if needed):** + - The tool's `shouldConfirmExecute()` method is called. + - If it returns details for confirmation, the core communicates this back to the CLI, which prompts the user. 
+ - The user's decision (e.g., proceed, cancel) is sent back to the core. +6. **Execution:** If validated and confirmed (or if no confirmation is needed), the core calls the tool's `execute()` method with the provided arguments and an `AbortSignal` (for potential cancellation). +7. **Result Processing:** The `ToolResult` from `execute()` is received by the core. +8. **Response to Model:** The `llmContent` from the `ToolResult` is packaged as a `FunctionResponse` and sent back to the Gemini model so it can continue generating a user-facing response. +9. **Display to User:** The `returnDisplay` from the `ToolResult` is sent to the CLI to show the user what the tool did. + +## Extending with Custom Tools + +While direct programmatic registration of new tools by users isn't explicitly detailed as a primary workflow in the provided files for typical end-users, the architecture supports extension through: + +- **Command-based Discovery:** Advanced users or project administrators can define a `tools.discoveryCommand` in `settings.json`. This command, when run by the Gemini CLI core, should output a JSON array of `FunctionDeclaration` objects. The core will then make these available as `DiscoveredTool` instances. The corresponding `tools.callCommand` would then be responsible for actually executing these custom tools. +- **MCP Server(s):** For more complex scenarios, one or more MCP servers can be set up and configured via the `mcpServers` setting in `settings.json`. The Gemini CLI core can then discover and use tools exposed by these servers. As mentioned, if you have multiple MCP servers, the tool names will be prefixed with the server name from your configuration (e.g., `serverAlias__actualToolName`). + +This tool system provides a flexible and powerful way to augment the Gemini model's capabilities, making the Gemini CLI a versatile assistant for a wide range of tasks. 
diff --git a/projects/gemini-cli/docs/examples/proxy-script.md b/projects/gemini-cli/docs/examples/proxy-script.md new file mode 100644 index 0000000000000000000000000000000000000000..78299001ba42c6695f6ce4dcc26d93559d1af95e --- /dev/null +++ b/projects/gemini-cli/docs/examples/proxy-script.md @@ -0,0 +1,81 @@ +# Example Proxy Script + +The following is an example of a proxy script that can be used with the `GEMINI_SANDBOX_PROXY_COMMAND` environment variable. This script only allows `HTTPS` connections to `example.com:443` and declines all other requests. + +```javascript +#!/usr/bin/env node + +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Example proxy server that listens on :::8877 and only allows HTTPS connections to example.com. +// Set `GEMINI_SANDBOX_PROXY_COMMAND=scripts/example-proxy.js` to run proxy alongside sandbox +// Test via `curl https://example.com` inside sandbox (in shell mode or via shell tool) + +import http from 'node:http'; +import net from 'node:net'; +import { URL } from 'node:url'; +import console from 'node:console'; + +const PROXY_PORT = 8877; +const ALLOWED_DOMAINS = ['example.com', 'googleapis.com']; +const ALLOWED_PORT = '443'; + +const server = http.createServer((req, res) => { + // Deny all requests other than CONNECT for HTTPS + console.log( + `[PROXY] Denying non-CONNECT request for: ${req.method} ${req.url}`, + ); + res.writeHead(405, { 'Content-Type': 'text/plain' }); + res.end('Method Not Allowed'); +}); + +server.on('connect', (req, clientSocket, head) => { + // req.url will be in the format "hostname:port" for a CONNECT request. 
+
  const { port, hostname } = new URL(`http://${req.url}`);

  console.log(`[PROXY] Intercepted CONNECT request for: ${hostname}:${port}`);

  if (
    ALLOWED_DOMAINS.some(
      (domain) => hostname === domain || hostname.endsWith(`.${domain}`),
    ) &&
    port === ALLOWED_PORT
  ) {
    console.log(`[PROXY] Allowing connection to ${hostname}:${port}`);

    // Establish a TCP connection to the original destination.
    const serverSocket = net.connect(port, hostname, () => {
      clientSocket.write('HTTP/1.1 200 Connection Established\r\n\r\n');
      // Create a tunnel by piping data between the client and the destination server.
      serverSocket.write(head);
      serverSocket.pipe(clientSocket);
      clientSocket.pipe(serverSocket);
    });

    serverSocket.on('error', (err) => {
      console.error(`[PROXY] Error connecting to destination: ${err.message}`);
      clientSocket.end(`HTTP/1.1 502 Bad Gateway\r\n\r\n`);
    });
  } else {
    console.log(`[PROXY] Denying connection to ${hostname}:${port}`);
    clientSocket.end('HTTP/1.1 403 Forbidden\r\n\r\n');
  }

  clientSocket.on('error', (err) => {
    // This can happen if the client hangs up.
    console.error(`[PROXY] Client socket error: ${err.message}`);
  });
});

server.listen(PROXY_PORT, () => {
  const address = server.address();
  console.log(`[PROXY] Proxy listening on ${address.address}:${address.port}`);
  console.log(
    `[PROXY] Allowing HTTPS connections to domains: ${ALLOWED_DOMAINS.join(', ')}`,
  );
});
```
diff --git a/projects/gemini-cli/docs/tools/file-system.md b/projects/gemini-cli/docs/tools/file-system.md
new file mode 100644
index 0000000000000000000000000000000000000000..ec741096a6429ff76732b36b6c3477778db9c5ee
--- /dev/null
+++ b/projects/gemini-cli/docs/tools/file-system.md
@@ -0,0 +1,143 @@
+# Gemini CLI file system tools

The Gemini CLI provides a comprehensive suite of tools for interacting with the local file system. 
These tools allow the Gemini model to read from, write to, list, search, and modify files and directories, all under your control and typically with confirmation for sensitive operations. + +**Note:** All file system tools operate within a `rootDirectory` (usually the current working directory where you launched the CLI) for security. Paths that you provide to these tools are generally expected to be absolute or are resolved relative to this root directory. + +## 1. `list_directory` (ReadFolder) + +`list_directory` lists the names of files and subdirectories directly within a specified directory path. It can optionally ignore entries matching provided glob patterns. + +- **Tool name:** `list_directory` +- **Display name:** ReadFolder +- **File:** `ls.ts` +- **Parameters:** + - `path` (string, required): The absolute path to the directory to list. + - `ignore` (array of strings, optional): A list of glob patterns to exclude from the listing (e.g., `["*.log", ".git"]`). + - `respect_git_ignore` (boolean, optional): Whether to respect `.gitignore` patterns when listing files. Defaults to `true`. +- **Behavior:** + - Returns a list of file and directory names. + - Indicates whether each entry is a directory. + - Sorts entries with directories first, then alphabetically. +- **Output (`llmContent`):** A string like: `Directory listing for /path/to/your/folder:\n[DIR] subfolder1\nfile1.txt\nfile2.png` +- **Confirmation:** No. + +## 2. `read_file` (ReadFile) + +`read_file` reads and returns the content of a specified file. This tool handles text, images (PNG, JPG, GIF, WEBP, SVG, BMP), and PDF files. For text files, it can read specific line ranges. Other binary file types are generally skipped. + +- **Tool name:** `read_file` +- **Display name:** ReadFile +- **File:** `read-file.ts` +- **Parameters:** + - `path` (string, required): The absolute path to the file to read. + - `offset` (number, optional): For text files, the 0-based line number to start reading from. 
Requires `limit` to be set. + - `limit` (number, optional): For text files, the maximum number of lines to read. If omitted, reads a default maximum (e.g., 2000 lines) or the entire file if feasible. +- **Behavior:** + - For text files: Returns the content. If `offset` and `limit` are used, returns only that slice of lines. Indicates if content was truncated due to line limits or line length limits. + - For image and PDF files: Returns the file content as a base64-encoded data structure suitable for model consumption. + - For other binary files: Attempts to identify and skip them, returning a message indicating it's a generic binary file. +- **Output:** (`llmContent`): + - For text files: The file content, potentially prefixed with a truncation message (e.g., `[File content truncated: showing lines 1-100 of 500 total lines...]\nActual file content...`). + - For image/PDF files: An object containing `inlineData` with `mimeType` and base64 `data` (e.g., `{ inlineData: { mimeType: 'image/png', data: 'base64encodedstring' } }`). + - For other binary files: A message like `Cannot display content of binary file: /path/to/data.bin`. +- **Confirmation:** No. + +## 3. `write_file` (WriteFile) + +`write_file` writes content to a specified file. If the file exists, it will be overwritten. If the file doesn't exist, it (and any necessary parent directories) will be created. + +- **Tool name:** `write_file` +- **Display name:** WriteFile +- **File:** `write-file.ts` +- **Parameters:** + - `file_path` (string, required): The absolute path to the file to write to. + - `content` (string, required): The content to write into the file. +- **Behavior:** + - Writes the provided `content` to the `file_path`. + - Creates parent directories if they don't exist. +- **Output (`llmContent`):** A success message, e.g., `Successfully overwrote file: /path/to/your/file.txt` or `Successfully created and wrote to new file: /path/to/new/file.txt`. +- **Confirmation:** Yes. 
Shows a diff of changes and asks for user approval before writing. + +## 4. `glob` (FindFiles) + +`glob` finds files matching specific glob patterns (e.g., `src/**/*.ts`, `*.md`), returning absolute paths sorted by modification time (newest first). + +- **Tool name:** `glob` +- **Display name:** FindFiles +- **File:** `glob.ts` +- **Parameters:** + - `pattern` (string, required): The glob pattern to match against (e.g., `"*.py"`, `"src/**/*.js"`). + - `path` (string, optional): The absolute path to the directory to search within. If omitted, searches the tool's root directory. + - `case_sensitive` (boolean, optional): Whether the search should be case-sensitive. Defaults to `false`. + - `respect_git_ignore` (boolean, optional): Whether to respect .gitignore patterns when finding files. Defaults to `true`. +- **Behavior:** + - Searches for files matching the glob pattern within the specified directory. + - Returns a list of absolute paths, sorted with the most recently modified files first. + - Ignores common nuisance directories like `node_modules` and `.git` by default. +- **Output (`llmContent`):** A message like: `Found 5 file(s) matching "*.ts" within src, sorted by modification time (newest first):\nsrc/file1.ts\nsrc/subdir/file2.ts...` +- **Confirmation:** No. + +## 5. `search_file_content` (SearchText) + +`search_file_content` searches for a regular expression pattern within the content of files in a specified directory. Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers. + +- **Tool name:** `search_file_content` +- **Display name:** SearchText +- **File:** `grep.ts` +- **Parameters:** + - `pattern` (string, required): The regular expression (regex) to search for (e.g., `"function\s+myFunction"`). + - `path` (string, optional): The absolute path to the directory to search within. Defaults to the current working directory. 
+ - `include` (string, optional): A glob pattern to filter which files are searched (e.g., `"*.js"`, `"src/**/*.{ts,tsx}"`). If omitted, searches most files (respecting common ignores). +- **Behavior:** + - Uses `git grep` if available in a Git repository for speed; otherwise, falls back to system `grep` or a JavaScript-based search. + - Returns a list of matching lines, each prefixed with its file path (relative to the search directory) and line number. +- **Output (`llmContent`):** A formatted string of matches, e.g.: + ``` + Found 3 matches for pattern "myFunction" in path "." (filter: "*.ts"): + --- + File: src/utils.ts + L15: export function myFunction() { + L22: myFunction.call(); + --- + File: src/index.ts + L5: import { myFunction } from './utils'; + --- + ``` +- **Confirmation:** No. + +## 6. `replace` (Edit) + +`replace` replaces text within a file. By default, replaces a single occurrence, but can replace multiple occurrences when `expected_replacements` is specified. This tool is designed for precise, targeted changes and requires significant context around the `old_string` to ensure it modifies the correct location. + +- **Tool name:** `replace` +- **Display name:** Edit +- **File:** `edit.ts` +- **Parameters:** + - `file_path` (string, required): The absolute path to the file to modify. + - `old_string` (string, required): The exact literal text to replace. + + **CRITICAL:** This string must uniquely identify the single instance to change. It should include at least 3 lines of context _before_ and _after_ the target text, matching whitespace and indentation precisely. If `old_string` is empty, the tool attempts to create a new file at `file_path` with `new_string` as content. + + - `new_string` (string, required): The exact literal text to replace `old_string` with. + - `expected_replacements` (number, optional): The number of occurrences to replace. Defaults to `1`. 
+ +- **Behavior:** + - If `old_string` is empty and `file_path` does not exist, creates a new file with `new_string` as content. + - If `old_string` is provided, it reads the `file_path` and attempts to find exactly one occurrence of `old_string`. + - If one occurrence is found, it replaces it with `new_string`. + - **Enhanced Reliability (Multi-Stage Edit Correction):** To significantly improve the success rate of edits, especially when the model-provided `old_string` might not be perfectly precise, the tool incorporates a multi-stage edit correction mechanism. + - If the initial `old_string` isn't found or matches multiple locations, the tool can leverage the Gemini model to iteratively refine `old_string` (and potentially `new_string`). + - This self-correction process attempts to identify the unique segment the model intended to modify, making the `replace` operation more robust even with slightly imperfect initial context. +- **Failure conditions:** Despite the correction mechanism, the tool will fail if: + - `file_path` is not absolute or is outside the root directory. + - `old_string` is not empty, but the `file_path` does not exist. + - `old_string` is empty, but the `file_path` already exists. + - `old_string` is not found in the file after attempts to correct it. + - `old_string` is found multiple times, and the self-correction mechanism cannot resolve it to a single, unambiguous match. +- **Output (`llmContent`):** + - On success: `Successfully modified file: /path/to/file.txt (1 replacements).` or `Created new file: /path/to/new_file.txt with provided content.` + - On failure: An error message explaining the reason (e.g., `Failed to edit, 0 occurrences found...`, `Failed to edit, expected 1 occurrences but found 2...`). +- **Confirmation:** Yes. Shows a diff of the proposed changes and asks for user approval before writing to the file. 
+ +These file system tools provide a foundation for the Gemini CLI to understand and interact with your local project context. diff --git a/projects/gemini-cli/docs/tools/index.md b/projects/gemini-cli/docs/tools/index.md new file mode 100644 index 0000000000000000000000000000000000000000..4fa98c03b8938e187d5fe8d934aa8e824daf6545 --- /dev/null +++ b/projects/gemini-cli/docs/tools/index.md @@ -0,0 +1,56 @@ +# Gemini CLI tools + +The Gemini CLI includes built-in tools that the Gemini model uses to interact with your local environment, access information, and perform actions. These tools enhance the CLI's capabilities, enabling it to go beyond text generation and assist with a wide range of tasks. + +## Overview of Gemini CLI tools + +In the context of the Gemini CLI, tools are specific functions or modules that the Gemini model can request to be executed. For example, if you ask Gemini to "Summarize the contents of `my_document.txt`," the model will likely identify the need to read that file and will request the execution of the `read_file` tool. + +The core component (`packages/core`) manages these tools, presents their definitions (schemas) to the Gemini model, executes them when requested, and returns the results to the model for further processing into a user-facing response. + +These tools provide the following capabilities: + +- **Access local information:** Tools allow Gemini to access your local file system, read file contents, list directories, etc. +- **Execute commands:** With tools like `run_shell_command`, Gemini can run shell commands (with appropriate safety measures and user confirmation). +- **Interact with the web:** Tools can fetch content from URLs. +- **Take actions:** Tools can modify files, write new files, or perform other actions on your system (again, typically with safeguards). 
+- **Ground responses:** By using tools to fetch real-time or specific local data, Gemini's responses can be more accurate, relevant, and grounded in your actual context. + +## How to use Gemini CLI tools + +To use Gemini CLI tools, provide a prompt to the Gemini CLI. The process works as follows: + +1. You provide a prompt to the Gemini CLI. +2. The CLI sends the prompt to the core. +3. The core, along with your prompt and conversation history, sends a list of available tools and their descriptions/schemas to the Gemini API. +4. The Gemini model analyzes your request. If it determines that a tool is needed, its response will include a request to execute a specific tool with certain parameters. +5. The core receives this tool request, validates it, and (often after user confirmation for sensitive operations) executes the tool. +6. The output from the tool is sent back to the Gemini model. +7. The Gemini model uses the tool's output to formulate its final answer, which is then sent back through the core to the CLI and displayed to you. + +You will typically see messages in the CLI indicating when a tool is being called and whether it succeeded or failed. + +## Security and confirmation + +Many tools, especially those that can modify your file system or execute commands (`write_file`, `edit`, `run_shell_command`), are designed with safety in mind. The Gemini CLI will typically: + +- **Require confirmation:** Prompt you before executing potentially sensitive operations, showing you what action is about to be taken. +- **Utilize sandboxing:** All tools are subject to restrictions enforced by sandboxing (see [Sandboxing in the Gemini CLI](../sandbox.md)). This means that when operating in a sandbox, any tools (including MCP servers) you wish to use must be available _inside_ the sandbox environment. For example, to run an MCP server through `npx`, the `npx` executable must be installed within the sandbox's Docker image or be available in the `sandbox-exec` environment. 
+ +It's important to always review confirmation prompts carefully before allowing a tool to proceed. + +## Learn more about Gemini CLI's tools + +Gemini CLI's built-in tools can be broadly categorized as follows: + +- **[File System Tools](./file-system.md):** For interacting with files and directories (reading, writing, listing, searching, etc.). +- **[Shell Tool](./shell.md) (`run_shell_command`):** For executing shell commands. +- **[Web Fetch Tool](./web-fetch.md) (`web_fetch`):** For retrieving content from URLs. +- **[Web Search Tool](./web-search.md) (`web_search`):** For searching the web. +- **[Multi-File Read Tool](./multi-file.md) (`read_many_files`):** A specialized tool for reading content from multiple files or directories, often used by the `@` command. +- **[Memory Tool](./memory.md) (`save_memory`):** For saving and recalling information across sessions. + +Additionally, these tools incorporate: + +- **[MCP servers](./mcp-server.md)**: MCP servers act as a bridge between the Gemini model and your local environment or other services like APIs. +- **[Sandboxing](../sandbox.md)**: Sandboxing isolates the model and its changes from your environment to reduce potential risk. diff --git a/projects/gemini-cli/docs/tools/mcp-server.md b/projects/gemini-cli/docs/tools/mcp-server.md new file mode 100644 index 0000000000000000000000000000000000000000..9aa7a847996b4f80c4d64db9173cd2b65669618c --- /dev/null +++ b/projects/gemini-cli/docs/tools/mcp-server.md @@ -0,0 +1,828 @@ +# MCP servers with the Gemini CLI + +This document provides a guide to configuring and using Model Context Protocol (MCP) servers with the Gemini CLI. + +## What is an MCP server? + +An MCP server is an application that exposes tools and resources to the Gemini CLI through the Model Context Protocol, allowing it to interact with external systems and data sources. MCP servers act as a bridge between the Gemini model and your local environment or other services like APIs. 
+ +An MCP server enables the Gemini CLI to: + +- **Discover tools:** List available tools, their descriptions, and parameters through standardized schema definitions. +- **Execute tools:** Call specific tools with defined arguments and receive structured responses. +- **Access resources:** Read data from specific resources (though the Gemini CLI primarily focuses on tool execution). + +With an MCP server, you can extend the Gemini CLI's capabilities to perform actions beyond its built-in features, such as interacting with databases, APIs, custom scripts, or specialized workflows. + +## Core Integration Architecture + +The Gemini CLI integrates with MCP servers through a sophisticated discovery and execution system built into the core package (`packages/core/src/tools/`): + +### Discovery Layer (`mcp-client.ts`) + +The discovery process is orchestrated by `discoverMcpTools()`, which: + +1. **Iterates through configured servers** from your `settings.json` `mcpServers` configuration +2. **Establishes connections** using appropriate transport mechanisms (Stdio, SSE, or Streamable HTTP) +3. **Fetches tool definitions** from each server using the MCP protocol +4. **Sanitizes and validates** tool schemas for compatibility with the Gemini API +5. 
**Registers tools** in the global tool registry with conflict resolution + +### Execution Layer (`mcp-tool.ts`) + +Each discovered MCP tool is wrapped in a `DiscoveredMCPTool` instance that: + +- **Handles confirmation logic** based on server trust settings and user preferences +- **Manages tool execution** by calling the MCP server with proper parameters +- **Processes responses** for both the LLM context and user display +- **Maintains connection state** and handles timeouts + +### Transport Mechanisms + +The Gemini CLI supports three MCP transport types: + +- **Stdio Transport:** Spawns a subprocess and communicates via stdin/stdout +- **SSE Transport:** Connects to Server-Sent Events endpoints +- **Streamable HTTP Transport:** Uses HTTP streaming for communication + +## How to set up your MCP server + +The Gemini CLI uses the `mcpServers` configuration in your `settings.json` file to locate and connect to MCP servers. This configuration supports multiple servers with different transport mechanisms. + +### Configure the MCP server in settings.json + +You can configure MCP servers in your `settings.json` file in two main ways: through the top-level `mcpServers` object for specific server definitions, and through the `mcp` object for global settings that control server discovery and execution. + +#### Global MCP Settings (`mcp`) + +The `mcp` object in your `settings.json` allows you to define global rules for all MCP servers. + +- **`mcp.serverCommand`** (string): A global command to start an MCP server. +- **`mcp.allowed`** (array of strings): A whitelist of MCP server names to allow. If this is set, only servers from this list (matching the keys in the `mcpServers` object) will be connected to. +- **`mcp.excluded`** (array of strings): A blacklist of MCP server names to exclude. Servers in this list will not be connected to. 
+ +**Example:** + +```json +{ + "mcp": { + "allowed": ["my-trusted-server"], + "excluded": ["experimental-server"] + } +} +``` + +#### Server-Specific Configuration (`mcpServers`) + +The `mcpServers` object is where you define each individual MCP server you want the CLI to connect to. + +### Configuration Structure + +Add an `mcpServers` object to your `settings.json` file: + +```json +{ ...file contains other config objects + "mcpServers": { + "serverName": { + "command": "path/to/server", + "args": ["--arg1", "value1"], + "env": { + "API_KEY": "$MY_API_TOKEN" + }, + "cwd": "./server-directory", + "timeout": 30000, + "trust": false + } + } +} +``` + +### Configuration Properties + +Each server configuration supports the following properties: + +#### Required (one of the following) + +- **`command`** (string): Path to the executable for Stdio transport +- **`url`** (string): SSE endpoint URL (e.g., `"http://localhost:8080/sse"`) +- **`httpUrl`** (string): HTTP streaming endpoint URL + +#### Optional + +- **`args`** (string[]): Command-line arguments for Stdio transport +- **`headers`** (object): Custom HTTP headers when using `url` or `httpUrl` +- **`env`** (object): Environment variables for the server process. Values can reference environment variables using `$VAR_NAME` or `${VAR_NAME}` syntax +- **`cwd`** (string): Working directory for Stdio transport +- **`timeout`** (number): Request timeout in milliseconds (default: 600,000ms = 10 minutes) +- **`trust`** (boolean): When `true`, bypasses all tool call confirmations for this server (default: `false`) +- **`includeTools`** (string[]): List of tool names to include from this MCP server. When specified, only the tools listed here will be available from this server (whitelist behavior). If not specified, all tools from the server are enabled by default. +- **`excludeTools`** (string[]): List of tool names to exclude from this MCP server. 
Tools listed here will not be available to the model, even if they are exposed by the server. **Note:** `excludeTools` takes precedence over `includeTools` - if a tool is in both lists, it will be excluded. + +### OAuth Support for Remote MCP Servers + +The Gemini CLI supports OAuth 2.0 authentication for remote MCP servers using SSE or HTTP transports. This enables secure access to MCP servers that require authentication. + +#### Automatic OAuth Discovery + +For servers that support OAuth discovery, you can omit the OAuth configuration and let the CLI discover it automatically: + +```json +{ + "mcpServers": { + "discoveredServer": { + "url": "https://api.example.com/sse" + } + } +} +``` + +The CLI will automatically: + +- Detect when a server requires OAuth authentication (401 responses) +- Discover OAuth endpoints from server metadata +- Perform dynamic client registration if supported +- Handle the OAuth flow and token management + +#### Authentication Flow + +When connecting to an OAuth-enabled server: + +1. **Initial connection attempt** fails with 401 Unauthorized +2. **OAuth discovery** finds authorization and token endpoints +3. **Browser opens** for user authentication (requires local browser access) +4. **Authorization code** is exchanged for access tokens +5. **Tokens are stored** securely for future use +6. 
**Connection retry** succeeds with valid tokens + +#### Browser Redirect Requirements + +**Important:** OAuth authentication requires that your local machine can: + +- Open a web browser for authentication +- Receive redirects on `http://localhost:7777/oauth/callback` + +This feature will not work in: + +- Headless environments without browser access +- Remote SSH sessions without X11 forwarding +- Containerized environments without browser support + +#### Managing OAuth Authentication + +Use the `/mcp auth` command to manage OAuth authentication: + +```bash +# List servers requiring authentication +/mcp auth + +# Authenticate with a specific server +/mcp auth serverName + +# Re-authenticate if tokens expire +/mcp auth serverName +``` + +#### OAuth Configuration Properties + +- **`enabled`** (boolean): Enable OAuth for this server +- **`clientId`** (string): OAuth client identifier (optional with dynamic registration) +- **`clientSecret`** (string): OAuth client secret (optional for public clients) +- **`authorizationUrl`** (string): OAuth authorization endpoint (auto-discovered if omitted) +- **`tokenUrl`** (string): OAuth token endpoint (auto-discovered if omitted) +- **`scopes`** (string[]): Required OAuth scopes +- **`redirectUri`** (string): Custom redirect URI (defaults to `http://localhost:7777/oauth/callback`) +- **`tokenParamName`** (string): Query parameter name for tokens in SSE URLs +- **`audiences`** (string[]): Audiences the token is valid for + +#### Token Management + +OAuth tokens are automatically: + +- **Stored securely** in `~/.gemini/mcp-oauth-tokens.json` +- **Refreshed** when expired (if refresh tokens are available) +- **Validated** before each connection attempt +- **Cleaned up** when invalid or expired + +#### Authentication Provider Type + +You can specify the authentication provider type using the `authProviderType` property: + +- **`authProviderType`** (string): Specifies the authentication provider. 
Can be one of the following: + - **`dynamic_discovery`** (default): The CLI will automatically discover the OAuth configuration from the server. + - **`google_credentials`**: The CLI will use the Google Application Default Credentials (ADC) to authenticate with the server. When using this provider, you must specify the required scopes. + +```json +{ + "mcpServers": { + "googleCloudServer": { + "httpUrl": "https://my-gcp-service.run.app/mcp", + "authProviderType": "google_credentials", + "oauth": { + "scopes": ["https://www.googleapis.com/auth/userinfo.email"] + } + } + } +} +``` + +### Example Configurations + +#### Python MCP Server (Stdio) + +```json +{ + "mcpServers": { + "pythonTools": { + "command": "python", + "args": ["-m", "my_mcp_server", "--port", "8080"], + "cwd": "./mcp-servers/python", + "env": { + "DATABASE_URL": "$DB_CONNECTION_STRING", + "API_KEY": "${EXTERNAL_API_KEY}" + }, + "timeout": 15000 + } + } +} +``` + +#### Node.js MCP Server (Stdio) + +```json +{ + "mcpServers": { + "nodeServer": { + "command": "node", + "args": ["dist/server.js", "--verbose"], + "cwd": "./mcp-servers/node", + "trust": true + } + } +} +``` + +#### Docker-based MCP Server + +```json +{ + "mcpServers": { + "dockerizedServer": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "API_KEY", + "-v", + "${PWD}:/workspace", + "my-mcp-server:latest" + ], + "env": { + "API_KEY": "$EXTERNAL_SERVICE_TOKEN" + } + } + } +} +``` + +#### HTTP-based MCP Server + +```json +{ + "mcpServers": { + "httpServer": { + "httpUrl": "http://localhost:3000/mcp", + "timeout": 5000 + } + } +} +``` + +#### HTTP-based MCP Server with Custom Headers + +```json +{ + "mcpServers": { + "httpServerWithAuth": { + "httpUrl": "http://localhost:3000/mcp", + "headers": { + "Authorization": "Bearer your-api-token", + "X-Custom-Header": "custom-value", + "Content-Type": "application/json" + }, + "timeout": 5000 + } + } +} +``` + +#### MCP Server with Tool Filtering + +```json +{ + "mcpServers": 
{ + "filteredServer": { + "command": "python", + "args": ["-m", "my_mcp_server"], + "includeTools": ["safe_tool", "file_reader", "data_processor"], + // "excludeTools": ["dangerous_tool", "file_deleter"], + "timeout": 30000 + } + } +} +``` + +## Discovery Process Deep Dive + +When the Gemini CLI starts, it performs MCP server discovery through the following detailed process: + +### 1. Server Iteration and Connection + +For each configured server in `mcpServers`: + +1. **Status tracking begins:** Server status is set to `CONNECTING` +2. **Transport selection:** Based on configuration properties: + - `httpUrl` → `StreamableHTTPClientTransport` + - `url` → `SSEClientTransport` + - `command` → `StdioClientTransport` +3. **Connection establishment:** The MCP client attempts to connect with the configured timeout +4. **Error handling:** Connection failures are logged and the server status is set to `DISCONNECTED` + +### 2. Tool Discovery + +Upon successful connection: + +1. **Tool listing:** The client calls the MCP server's tool listing endpoint +2. **Schema validation:** Each tool's function declaration is validated +3. **Tool filtering:** Tools are filtered based on `includeTools` and `excludeTools` configuration +4. **Name sanitization:** Tool names are cleaned to meet Gemini API requirements: + - Invalid characters (non-alphanumeric, underscore, dot, hyphen) are replaced with underscores + - Names longer than 63 characters are truncated with middle replacement (`___`) + +### 3. Conflict Resolution + +When multiple servers expose tools with the same name: + +1. **First registration wins:** The first server to register a tool name gets the unprefixed name +2. **Automatic prefixing:** Subsequent servers get prefixed names: `serverName__toolName` +3. **Registry tracking:** The tool registry maintains mappings between server names and their tools + +### 4. 
Schema Processing + +Tool parameter schemas undergo sanitization for Gemini API compatibility: + +- **`$schema` properties** are removed +- **`additionalProperties`** are stripped +- **`anyOf` with `default`** have their default values removed (Vertex AI compatibility) +- **Recursive processing** applies to nested schemas + +### 5. Connection Management + +After discovery: + +- **Persistent connections:** Servers that successfully register tools maintain their connections +- **Cleanup:** Servers that provide no usable tools have their connections closed +- **Status updates:** Final server statuses are set to `CONNECTED` or `DISCONNECTED` + +## Tool Execution Flow + +When the Gemini model decides to use an MCP tool, the following execution flow occurs: + +### 1. Tool Invocation + +The model generates a `FunctionCall` with: + +- **Tool name:** The registered name (potentially prefixed) +- **Arguments:** JSON object matching the tool's parameter schema + +### 2. Confirmation Process + +Each `DiscoveredMCPTool` implements sophisticated confirmation logic: + +#### Trust-based Bypass + +```typescript +if (this.trust) { + return false; // No confirmation needed +} +``` + +#### Dynamic Allow-listing + +The system maintains internal allow-lists for: + +- **Server-level:** `serverName` → All tools from this server are trusted +- **Tool-level:** `serverName.toolName` → This specific tool is trusted + +#### User Choice Handling + +When confirmation is required, users can choose: + +- **Proceed once:** Execute this time only +- **Always allow this tool:** Add to tool-level allow-list +- **Always allow this server:** Add to server-level allow-list +- **Cancel:** Abort execution + +### 3. Execution + +Upon confirmation (or trust bypass): + +1. **Parameter preparation:** Arguments are validated against the tool's schema +2. 
**MCP call:** The underlying `CallableTool` invokes the server with: + + ```typescript + const functionCalls = [ + { + name: this.serverToolName, // Original server tool name + args: params, + }, + ]; + ``` + +3. **Response processing:** Results are formatted for both LLM context and user display + +### 4. Response Handling + +The execution result contains: + +- **`llmContent`:** Raw response parts for the language model's context +- **`returnDisplay`:** Formatted output for user display (often JSON in markdown code blocks) + +## How to interact with your MCP server + +### Using the `/mcp` Command + +The `/mcp` command provides comprehensive information about your MCP server setup: + +```bash +/mcp +``` + +This displays: + +- **Server list:** All configured MCP servers +- **Connection status:** `CONNECTED`, `CONNECTING`, or `DISCONNECTED` +- **Server details:** Configuration summary (excluding sensitive data) +- **Available tools:** List of tools from each server with descriptions +- **Discovery state:** Overall discovery process status + +### Example `/mcp` Output + +``` +MCP Servers Status: + +📡 pythonTools (CONNECTED) + Command: python -m my_mcp_server --port 8080 + Working Directory: ./mcp-servers/python + Timeout: 15000ms + Tools: calculate_sum, file_analyzer, data_processor + +🔌 nodeServer (DISCONNECTED) + Command: node dist/server.js --verbose + Error: Connection refused + +🐳 dockerizedServer (CONNECTED) + Command: docker run -i --rm -e API_KEY my-mcp-server:latest + Tools: docker__deploy, docker__status + +Discovery State: COMPLETED +``` + +### Tool Usage + +Once discovered, MCP tools are available to the Gemini model like built-in tools. The model will automatically: + +1. **Select appropriate tools** based on your requests +2. **Present confirmation dialogs** (unless the server is trusted) +3. **Execute tools** with proper parameters +4. 
**Display results** in a user-friendly format + +## Status Monitoring and Troubleshooting + +### Connection States + +The MCP integration tracks several states: + +#### Server Status (`MCPServerStatus`) + +- **`DISCONNECTED`:** Server is not connected or has errors +- **`CONNECTING`:** Connection attempt in progress +- **`CONNECTED`:** Server is connected and ready + +#### Discovery State (`MCPDiscoveryState`) + +- **`NOT_STARTED`:** Discovery hasn't begun +- **`IN_PROGRESS`:** Currently discovering servers +- **`COMPLETED`:** Discovery finished (with or without errors) + +### Common Issues and Solutions + +#### Server Won't Connect + +**Symptoms:** Server shows `DISCONNECTED` status + +**Troubleshooting:** + +1. **Check configuration:** Verify `command`, `args`, and `cwd` are correct +2. **Test manually:** Run the server command directly to ensure it works +3. **Check dependencies:** Ensure all required packages are installed +4. **Review logs:** Look for error messages in the CLI output +5. **Verify permissions:** Ensure the CLI can execute the server command + +#### No Tools Discovered + +**Symptoms:** Server connects but no tools are available + +**Troubleshooting:** + +1. **Verify tool registration:** Ensure your server actually registers tools +2. **Check MCP protocol:** Confirm your server implements the MCP tool listing correctly +3. **Review server logs:** Check stderr output for server-side errors +4. **Test tool listing:** Manually test your server's tool discovery endpoint + +#### Tools Not Executing + +**Symptoms:** Tools are discovered but fail during execution + +**Troubleshooting:** + +1. **Parameter validation:** Ensure your tool accepts the expected parameters +2. **Schema compatibility:** Verify your input schemas are valid JSON Schema +3. **Error handling:** Check if your tool is throwing unhandled exceptions +4. 
**Timeout issues:** Consider increasing the `timeout` setting + +#### Sandbox Compatibility + +**Symptoms:** MCP servers fail when sandboxing is enabled + +**Solutions:** + +1. **Docker-based servers:** Use Docker containers that include all dependencies +2. **Path accessibility:** Ensure server executables are available in the sandbox +3. **Network access:** Configure sandbox to allow necessary network connections +4. **Environment variables:** Verify required environment variables are passed through + +### Debugging Tips + +1. **Enable debug mode:** Run the CLI with `--debug` for verbose output +2. **Check stderr:** MCP server stderr is captured and logged (INFO messages filtered) +3. **Test isolation:** Test your MCP server independently before integrating +4. **Incremental setup:** Start with simple tools before adding complex functionality +5. **Use `/mcp` frequently:** Monitor server status during development + +## Important Notes + +### Security Considerations + +- **Trust settings:** The `trust` option bypasses all confirmation dialogs. 
Use cautiously and only for servers you completely control +- **Access tokens:** Be security-aware when configuring environment variables containing API keys or tokens +- **Sandbox compatibility:** When using sandboxing, ensure MCP servers are available within the sandbox environment +- **Private data:** Using broadly scoped personal access tokens can lead to information leakage between repositories + +### Performance and Resource Management + +- **Connection persistence:** The CLI maintains persistent connections to servers that successfully register tools +- **Automatic cleanup:** Connections to servers providing no tools are automatically closed +- **Timeout management:** Configure appropriate timeouts based on your server's response characteristics +- **Resource monitoring:** MCP servers run as separate processes and consume system resources + +### Schema Compatibility + +- **Property stripping:** The system automatically removes certain schema properties (`$schema`, `additionalProperties`) for Gemini API compatibility +- **Name sanitization:** Tool names are automatically sanitized to meet API requirements +- **Conflict resolution:** Tool name conflicts between servers are resolved through automatic prefixing + +This comprehensive integration makes MCP servers a powerful way to extend the Gemini CLI's capabilities while maintaining security, reliability, and ease of use. + +## Returning Rich Content from Tools + +MCP tools are not limited to returning simple text. You can return rich, multi-part content, including text, images, audio, and other binary data in a single tool response. This allows you to build powerful tools that can provide diverse information to the model in a single turn. + +All data returned from the tool is processed and sent to the model as context for its next generation, enabling it to reason about or summarize the provided information. 
+ +### How It Works + +To return rich content, your tool's response must adhere to the MCP specification for a [`CallToolResult`](https://modelcontextprotocol.io/specification/2025-06-18/server/tools#tool-result). The `content` field of the result should be an array of `ContentBlock` objects. The Gemini CLI will correctly process this array, separating text from binary data and packaging it for the model. + +You can mix and match different content block types in the `content` array. The supported block types include: + +- `text` +- `image` +- `audio` +- `resource` (embedded content) +- `resource_link` + +### Example: Returning Text and an Image + +Here is an example of a valid JSON response from an MCP tool that returns both a text description and an image: + +```json +{ + "content": [ + { + "type": "text", + "text": "Here is the logo you requested." + }, + { + "type": "image", + "data": "BASE64_ENCODED_IMAGE_DATA_HERE", + "mimeType": "image/png" + }, + { + "type": "text", + "text": "The logo was created in 2025." + } + ] +} +``` + +When the Gemini CLI receives this response, it will: + +1. Extract all the text and combine it into a single `functionResponse` part for the model. +2. Present the image data as a separate `inlineData` part. +3. Provide a clean, user-friendly summary in the CLI, indicating that both text and an image were received. + +This enables you to build sophisticated tools that can provide rich, multi-modal context to the Gemini model. + +## MCP Prompts as Slash Commands + +In addition to tools, MCP servers can expose predefined prompts that can be executed as slash commands within the Gemini CLI. This allows you to create shortcuts for common or complex queries that can be easily invoked by name. 
+ +### Defining Prompts on the Server + +Here's a small example of a stdio MCP server that defines prompts: + +```ts +import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; +import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; +import { z } from 'zod'; + +const server = new McpServer({ + name: 'prompt-server', + version: '1.0.0', +}); + +server.registerPrompt( + 'poem-writer', + { + title: 'Poem Writer', + description: 'Write a nice haiku', + argsSchema: { title: z.string(), mood: z.string().optional() }, + }, + ({ title, mood }) => ({ + messages: [ + { + role: 'user', + content: { + type: 'text', + text: `Write a haiku${mood ? ` with the mood ${mood}` : ''} called ${title}. Note that a haiku is 5 syllables followed by 7 syllables followed by 5 syllables `, + }, + }, + ], + }), +); + +const transport = new StdioServerTransport(); +await server.connect(transport); +``` + +This can be included in `settings.json` under `mcpServers` with: + +```json +{ + "mcpServers": { + "nodeServer": { + "command": "node", + "args": ["filename.ts"] + } + } +} +``` + +### Invoking Prompts + +Once a prompt is discovered, you can invoke it using its name as a slash command. The CLI will automatically handle parsing arguments. + +```bash +/poem-writer --title="Gemini CLI" --mood="reverent" +``` + +or, using positional arguments: + +```bash +/poem-writer "Gemini CLI" reverent +``` + +When you run this command, the Gemini CLI executes the `prompts/get` method on the MCP server with the provided arguments. The server is responsible for substituting the arguments into the prompt template and returning the final prompt text. The CLI then sends this prompt to the model for execution. This provides a convenient way to automate and share common workflows. 
+
+## Managing MCP Servers with `gemini mcp`
+
+While you can always configure MCP servers by manually editing your `settings.json` file, the Gemini CLI provides a convenient set of commands to manage your server configurations programmatically. These commands streamline the process of adding, listing, and removing MCP servers without needing to directly edit JSON files.
+
+### Adding a Server (`gemini mcp add`)
+
+The `add` command configures a new MCP server in your `settings.json`. Based on the scope (`-s, --scope`), it will be added to either the user config `~/.gemini/settings.json` or the project config `.gemini/settings.json` file.
+
+**Command:**
+
+```bash
+gemini mcp add [options] <name> <commandOrUrl> [args...]
+```
+
+- `<name>`: A unique name for the server.
+- `<commandOrUrl>`: The command to execute (for `stdio`) or the URL (for `http`/`sse`).
+- `[args...]`: Optional arguments for a `stdio` command.
+
+**Options (Flags):**
+
+- `-s, --scope`: Configuration scope (user or project). [default: "project"]
+- `-t, --transport`: Transport type (stdio, sse, http). [default: "stdio"]
+- `-e, --env`: Set environment variables (e.g. -e KEY=value).
+- `-H, --header`: Set HTTP headers for SSE and HTTP transports (e.g. -H "X-Api-Key: abc123" -H "Authorization: Bearer abc123").
+- `--timeout`: Set connection timeout in milliseconds.
+- `--trust`: Trust the server (bypass all tool call confirmation prompts).
+- `--description`: Set the description for the server.
+- `--include-tools`: A comma-separated list of tools to include.
+- `--exclude-tools`: A comma-separated list of tools to exclude.
+
+#### Adding an stdio server
+
+This is the default transport for running local servers.
+
+```bash
+# Basic syntax
+gemini mcp add <name> <command> [args...]
+
+# Example: Adding a local server
+gemini mcp add my-stdio-server -e API_KEY=123 /path/to/server arg1 arg2 arg3
+
+# Example: Adding a local python server
+gemini mcp add python-server python server.py --port 8080
+```
+
+#### Adding an HTTP server
+
+This transport is for servers that use the streamable HTTP transport.
+
+```bash
+# Basic syntax
+gemini mcp add --transport http <name> <url>
+
+# Example: Adding an HTTP server
+gemini mcp add --transport http http-server https://api.example.com/mcp/
+
+# Example: Adding an HTTP server with an authentication header
+gemini mcp add --transport http secure-http https://api.example.com/mcp/ --header "Authorization: Bearer abc123"
+```
+
+#### Adding an SSE server
+
+This transport is for servers that use Server-Sent Events (SSE).
+
+```bash
+# Basic syntax
+gemini mcp add --transport sse <name> <url>
+
+# Example: Adding an SSE server
+gemini mcp add --transport sse sse-server https://api.example.com/sse/
+
+# Example: Adding an SSE server with an authentication header
+gemini mcp add --transport sse secure-sse https://api.example.com/sse/ --header "Authorization: Bearer abc123"
+```
+
+### Listing Servers (`gemini mcp list`)
+
+To view all MCP servers currently configured, use the `list` command. It displays each server's name, configuration details, and connection status.
+
+**Command:**
+
+```bash
+gemini mcp list
+```
+
+**Example Output:**
+
+```sh
+✓ stdio-server: command: python3 server.py (stdio) - Connected
+✓ http-server: https://api.example.com/mcp (http) - Connected
+✗ sse-server: https://api.example.com/sse (sse) - Disconnected
+```
+
+### Removing a Server (`gemini mcp remove`)
+
+To delete a server from your configuration, use the `remove` command with the server's name.
+
+**Command:**
+
+```bash
+gemini mcp remove <name>
+```
+
+**Example:**
+
+```bash
+gemini mcp remove my-server
+```
+
+This will find and delete the "my-server" entry from the `mcpServers` object in the appropriate `settings.json` file based on the scope (`-s, --scope`).
diff --git a/projects/gemini-cli/docs/tools/memory.md b/projects/gemini-cli/docs/tools/memory.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa2dac577552aafbe56f1f4d775c941b8926ff0b
--- /dev/null
+++ b/projects/gemini-cli/docs/tools/memory.md
@@ -0,0 +1,44 @@
+# Memory Tool (`save_memory`)
+
+This document describes the `save_memory` tool for the Gemini CLI.
+
+## Description
+
+Use `save_memory` to save and recall information across your Gemini CLI sessions. With `save_memory`, you can direct the CLI to remember key details across sessions, providing personalized and directed assistance.
+
+### Arguments
+
+`save_memory` takes one argument:
+
+- `fact` (string, required): The specific fact or piece of information to remember. This should be a clear, self-contained statement written in natural language.
+
+## How to use `save_memory` with the Gemini CLI
+
+The tool appends the provided `fact` to a special `GEMINI.md` file located in the user's home directory (`~/.gemini/GEMINI.md`). This file can be configured to have a different name.
+
+Once added, the facts are stored under a `## Gemini Added Memories` section. This file is loaded as context in subsequent sessions, allowing the CLI to recall the saved information.
+
+Usage:
+
+```
+save_memory(fact="Your fact here.")
+```
+
+### `save_memory` examples
+
+Remember a user preference:
+
+```
+save_memory(fact="My preferred programming language is Python.")
+```
+
+Store a project-specific detail:
+
+```
+save_memory(fact="The project I'm currently working on is called 'gemini-cli'.")
+```
+
+## Important notes
+
+- **General usage:** This tool should be used for concise, important facts.
It is not intended for storing large amounts of data or conversational history. +- **Memory file:** The memory file is a plain text Markdown file, so you can view and edit it manually if needed. diff --git a/projects/gemini-cli/docs/tools/multi-file.md b/projects/gemini-cli/docs/tools/multi-file.md new file mode 100644 index 0000000000000000000000000000000000000000..7aaff147ddaaaf55c8acae46702d8b8e8ff17a78 --- /dev/null +++ b/projects/gemini-cli/docs/tools/multi-file.md @@ -0,0 +1,69 @@ +# Multi File Read Tool (`read_many_files`) + +This document describes the `read_many_files` tool for the Gemini CLI. + +## Description + +Use `read_many_files` to read content from multiple files specified by paths or glob patterns. The behavior of this tool depends on the provided files: + +- For text files, this tool concatenates their content into a single string. +- For image (e.g., PNG, JPEG), PDF, audio (MP3, WAV), and video (MP4, MOV) files, it reads and returns them as base64-encoded data, provided they are explicitly requested by name or extension. + +`read_many_files` can be used to perform tasks such as getting an overview of a codebase, finding where specific functionality is implemented, reviewing documentation, or gathering context from multiple configuration files. + +**Note:** `read_many_files` looks for files following the provided paths or glob patterns. A directory path such as `"/docs"` will return an empty result; the tool requires a pattern such as `"/docs/*"` or `"/docs/*.md"` to identify the relevant files. + +### Arguments + +`read_many_files` takes the following arguments: + +- `paths` (list[string], required): An array of glob patterns or paths relative to the tool's target directory (e.g., `["src/**/*.ts"]`, `["README.md", "docs/*", "assets/logo.png"]`). +- `exclude` (list[string], optional): Glob patterns for files/directories to exclude (e.g., `["**/*.log", "temp/"]`). These are added to default excludes if `useDefaultExcludes` is true. 
+- `include` (list[string], optional): Additional glob patterns to include. These are merged with `paths` (e.g., `["*.test.ts"]` to specifically add test files if they were broadly excluded, or `["images/*.jpg"]` to include specific image types). +- `recursive` (boolean, optional): Whether to search recursively. This is primarily controlled by `**` in glob patterns. Defaults to `true`. +- `useDefaultExcludes` (boolean, optional): Whether to apply a list of default exclusion patterns (e.g., `node_modules`, `.git`, non image/pdf binary files). Defaults to `true`. +- `respect_git_ignore` (boolean, optional): Whether to respect .gitignore patterns when finding files. Defaults to true. + +## How to use `read_many_files` with the Gemini CLI + +`read_many_files` searches for files matching the provided `paths` and `include` patterns, while respecting `exclude` patterns and default excludes (if enabled). + +- For text files: it reads the content of each matched file (attempting to skip binary files not explicitly requested as image/PDF) and concatenates it into a single string, with a separator `--- {filePath} ---` between the content of each file. Uses UTF-8 encoding by default. +- The tool inserts a `--- End of content ---` after the last file. +- For image and PDF files: if explicitly requested by name or extension (e.g., `paths: ["logo.png"]` or `include: ["*.pdf"]`), the tool reads the file and returns its content as a base64 encoded string. +- The tool attempts to detect and skip other binary files (those not matching common image/PDF types or not explicitly requested) by checking for null bytes in their initial content. 
+ +Usage: + +``` +read_many_files(paths=["Your files or paths here."], include=["Additional files to include."], exclude=["Files to exclude."], recursive=False, useDefaultExcludes=false, respect_git_ignore=true) +``` + +## `read_many_files` examples + +Read all TypeScript files in the `src` directory: + +``` +read_many_files(paths=["src/**/*.ts"]) +``` + +Read the main README, all Markdown files in the `docs` directory, and a specific logo image, excluding a specific file: + +``` +read_many_files(paths=["README.md", "docs/**/*.md", "assets/logo.png"], exclude=["docs/OLD_README.md"]) +``` + +Read all JavaScript files but explicitly include test files and all JPEGs in an `images` folder: + +``` +read_many_files(paths=["**/*.js"], include=["**/*.test.js", "images/**/*.jpg"], useDefaultExcludes=False) +``` + +## Important notes + +- **Binary file handling:** + - **Image/PDF/Audio/Video files:** The tool can read common image types (PNG, JPEG, etc.), PDF, audio (mp3, wav), and video (mp4, mov) files, returning them as base64 encoded data. These files _must_ be explicitly targeted by the `paths` or `include` patterns (e.g., by specifying the exact filename like `video.mp4` or a pattern like `*.mov`). + - **Other binary files:** The tool attempts to detect and skip other types of binary files by examining their initial content for null bytes. The tool excludes these files from its output. +- **Performance:** Reading a very large number of files or very large individual files can be resource-intensive. +- **Path specificity:** Ensure paths and glob patterns are correctly specified relative to the tool's target directory. For image/PDF files, ensure the patterns are specific enough to include them. +- **Default excludes:** Be aware of the default exclusion patterns (like `node_modules`, `.git`) and use `useDefaultExcludes=False` if you need to override them, but do so cautiously. 
diff --git a/projects/gemini-cli/docs/tools/shell.md b/projects/gemini-cli/docs/tools/shell.md new file mode 100644 index 0000000000000000000000000000000000000000..bb03c868c79a26dfe1d9a3a29c5fbc1e20126560 --- /dev/null +++ b/projects/gemini-cli/docs/tools/shell.md @@ -0,0 +1,149 @@ +# Shell Tool (`run_shell_command`) + +This document describes the `run_shell_command` tool for the Gemini CLI. + +## Description + +Use `run_shell_command` to interact with the underlying system, run scripts, or perform command-line operations. `run_shell_command` executes a given shell command. On Windows, the command will be executed with `cmd.exe /c`. On other platforms, the command will be executed with `bash -c`. + +### Arguments + +`run_shell_command` takes the following arguments: + +- `command` (string, required): The exact shell command to execute. +- `description` (string, optional): A brief description of the command's purpose, which will be shown to the user. +- `directory` (string, optional): The directory (relative to the project root) in which to execute the command. If not provided, the command runs in the project root. + +## How to use `run_shell_command` with the Gemini CLI + +When using `run_shell_command`, the command is executed as a subprocess. `run_shell_command` can start background processes using `&`. The tool returns detailed information about the execution, including: + +- `Command`: The command that was executed. +- `Directory`: The directory where the command was run. +- `Stdout`: Output from the standard output stream. +- `Stderr`: Output from the standard error stream. +- `Error`: Any error message reported by the subprocess. +- `Exit Code`: The exit code of the command. +- `Signal`: The signal number if the command was terminated by a signal. +- `Background PIDs`: A list of PIDs for any background processes started. 
+ +Usage: + +``` +run_shell_command(command="Your commands.", description="Your description of the command.", directory="Your execution directory.") +``` + +## `run_shell_command` examples + +List files in the current directory: + +``` +run_shell_command(command="ls -la") +``` + +Run a script in a specific directory: + +``` +run_shell_command(command="./my_script.sh", directory="scripts", description="Run my custom script") +``` + +Start a background server: + +``` +run_shell_command(command="npm run dev &", description="Start development server in background") +``` + +## Important notes + +- **Security:** Be cautious when executing commands, especially those constructed from user input, to prevent security vulnerabilities. +- **Interactive commands:** Avoid commands that require interactive user input, as this can cause the tool to hang. Use non-interactive flags if available (e.g., `npm init -y`). +- **Error handling:** Check the `Stderr`, `Error`, and `Exit Code` fields to determine if a command executed successfully. +- **Background processes:** When a command is run in the background with `&`, the tool will return immediately and the process will continue to run in the background. The `Background PIDs` field will contain the process ID of the background process. + +## Environment Variables + +When `run_shell_command` executes a command, it sets the `GEMINI_CLI=1` environment variable in the subprocess's environment. This allows scripts or tools to detect if they are being run from within the Gemini CLI. + +## Command Restrictions + +You can restrict the commands that can be executed by the `run_shell_command` tool by using the `tools.core` and `tools.exclude` settings in your configuration file. + +- `tools.core`: To restrict `run_shell_command` to a specific set of commands, add entries to the `core` list under the `tools` category in the format `run_shell_command()`. For example, `"tools": {"core": ["run_shell_command(git)"]}` will only allow `git` commands. 
Including the generic `run_shell_command` acts as a wildcard, allowing any command not explicitly blocked.
+- `tools.exclude`: To block specific commands, add entries to the `exclude` list under the `tools` category in the format `run_shell_command(<command>)`. For example, `"tools": {"exclude": ["run_shell_command(rm)"]}` will block `rm` commands.
+
+The validation logic is designed to be secure and flexible:
+
+1. **Command Chaining Disabled**: The tool automatically splits commands chained with `&&`, `||`, or `;` and validates each part separately. If any part of the chain is disallowed, the entire command is blocked.
+2. **Prefix Matching**: The tool uses prefix matching. For example, if you allow `git`, you can run `git status` or `git log`.
+3. **Blocklist Precedence**: The `tools.exclude` list is always checked first. If a command matches a blocked prefix, it will be denied, even if it also matches an allowed prefix in `tools.core`.
+
+### Command Restriction Examples
+
+**Allow only specific command prefixes**
+
+To allow only `git` and `npm` commands, and block all others:
+
+```json
+{
+  "tools": {
+    "core": ["run_shell_command(git)", "run_shell_command(npm)"]
+  }
+}
+```
+
+- `git status`: Allowed
+- `npm install`: Allowed
+- `ls -l`: Blocked
+
+**Block specific command prefixes**
+
+To block `rm` and allow all other commands:
+
+```json
+{
+  "tools": {
+    "core": ["run_shell_command"],
+    "exclude": ["run_shell_command(rm)"]
+  }
+}
+```
+
+- `rm -rf /`: Blocked
+- `git status`: Allowed
+- `npm install`: Allowed
+
+**Blocklist takes precedence**
+
+If a command prefix is in both `tools.core` and `tools.exclude`, it will be blocked.
+ +```json +{ + "tools": { + "core": ["run_shell_command(git)"], + "exclude": ["run_shell_command(git push)"] + } +} +``` + +- `git push origin main`: Blocked +- `git status`: Allowed + +**Block all shell commands** + +To block all shell commands, add the `run_shell_command` wildcard to `tools.exclude`: + +```json +{ + "tools": { + "exclude": ["run_shell_command"] + } +} +``` + +- `ls -l`: Blocked +- `any other command`: Blocked + +## Security Note for `excludeTools` + +Command-specific restrictions in `excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands +that can be executed. diff --git a/projects/gemini-cli/docs/tools/web-fetch.md b/projects/gemini-cli/docs/tools/web-fetch.md new file mode 100644 index 0000000000000000000000000000000000000000..a9647df229f27e67e983632ed8f328441b390de7 --- /dev/null +++ b/projects/gemini-cli/docs/tools/web-fetch.md @@ -0,0 +1,44 @@ +# Web Fetch Tool (`web_fetch`) + +This document describes the `web_fetch` tool for the Gemini CLI. + +## Description + +Use `web_fetch` to summarize, compare, or extract information from web pages. The `web_fetch` tool processes content from one or more URLs (up to 20) embedded in a prompt. `web_fetch` takes a natural language prompt and returns a generated response. + +### Arguments + +`web_fetch` takes one argument: + +- `prompt` (string, required): A comprehensive prompt that includes the URL(s) (up to 20) to fetch and specific instructions on how to process their content. For example: `"Summarize https://example.com/article and extract key points from https://another.com/data"`. The prompt must contain at least one URL starting with `http://` or `https://`. 
+ +## How to use `web_fetch` with the Gemini CLI + +To use `web_fetch` with the Gemini CLI, provide a natural language prompt that contains URLs. The tool will ask for confirmation before fetching any URLs. Once confirmed, the tool will process URLs through Gemini API's `urlContext`. + +If the Gemini API cannot access the URL, the tool will fall back to fetching content directly from the local machine. The tool will format the response, including source attribution and citations where possible. The tool will then provide the response to the user. + +Usage: + +``` +web_fetch(prompt="Your prompt, including a URL such as https://google.com.") +``` + +## `web_fetch` examples + +Summarize a single article: + +``` +web_fetch(prompt="Can you summarize the main points of https://example.com/news/latest") +``` + +Compare two articles: + +``` +web_fetch(prompt="What are the differences in the conclusions of these two papers: https://arxiv.org/abs/2401.0001 and https://arxiv.org/abs/2401.0002?") +``` + +## Important notes + +- **URL processing:** `web_fetch` relies on the Gemini API's ability to access and process the given URLs. +- **Output quality:** The quality of the output will depend on the clarity of the instructions in the prompt. diff --git a/projects/gemini-cli/docs/tools/web-search.md b/projects/gemini-cli/docs/tools/web-search.md new file mode 100644 index 0000000000000000000000000000000000000000..9ba56bfbd40d0abb73519f01445e89138cd4860a --- /dev/null +++ b/projects/gemini-cli/docs/tools/web-search.md @@ -0,0 +1,36 @@ +# Web Search Tool (`google_web_search`) + +This document describes the `google_web_search` tool. + +## Description + +Use `google_web_search` to perform a web search using Google Search via the Gemini API. The `google_web_search` tool returns a summary of web results with sources. + +### Arguments + +`google_web_search` takes one argument: + +- `query` (string, required): The search query. 
+ +## How to use `google_web_search` with the Gemini CLI + +The `google_web_search` tool sends a query to the Gemini API, which then performs a web search. `google_web_search` will return a generated response based on the search results, including citations and sources. + +Usage: + +``` +google_web_search(query="Your query goes here.") +``` + +## `google_web_search` examples + +Get information on a topic: + +``` +google_web_search(query="latest advancements in AI-powered code generation") +``` + +## Important notes + +- **Response returned:** The `google_web_search` tool returns a processed summary, not a raw list of search results. +- **Citations:** The response includes citations to the sources used to generate the summary. diff --git a/projects/gemini-cli/packages/a2a-server/README.md b/projects/gemini-cli/packages/a2a-server/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bd6a2fac45aa6370e2b476c4637eb64289d6c832 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/README.md @@ -0,0 +1,5 @@ +# Gemini CLI A2A Server + +## All code in this package is experimental and under active development + +This package contains the A2A server implementation for the Gemini CLI. 
diff --git a/projects/gemini-cli/packages/a2a-server/index.ts b/projects/gemini-cli/packages/a2a-server/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..3e74d6beda7ba356f3b3fd22540a54c4fba43468 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/index.ts @@ -0,0 +1,7 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +export * from './src/index.js'; diff --git a/projects/gemini-cli/packages/a2a-server/package.json b/projects/gemini-cli/packages/a2a-server/package.json new file mode 100644 index 0000000000000000000000000000000000000000..edbd64f17d702b13dab2c93163cbe57230b2b353 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/package.json @@ -0,0 +1,48 @@ +{ + "name": "@google/gemini-cli-a2a-server", + "version": "0.1.0", + "private": true, + "description": "Gemini CLI A2A Server", + "repository": { + "type": "git", + "url": "git+https://github.com/google-gemini/gemini-cli.git", + "directory": "packages/a2a-server" + }, + "type": "module", + "main": "dist/server.js", + "scripts": { + "start": "node dist/src/server.js", + "build": "node ../../scripts/build_package.js", + "lint": "eslint . 
--ext .ts,.tsx", + "format": "prettier --write .", + "test": "vitest run", + "test:ci": "vitest run --coverage", + "typecheck": "tsc --noEmit" + }, + "files": [ + "dist" + ], + "dependencies": { + "@a2a-js/sdk": "^0.3.2", + "@google-cloud/storage": "^7.16.0", + "@google/gemini-cli-core": "file:../core", + "express": "^5.1.0", + "fs-extra": "^11.3.0", + "tar": "^7.4.3", + "uuid": "^11.1.0", + "winston": "^3.17.0" + }, + "devDependencies": { + "@types/express": "^5.0.3", + "@types/fs-extra": "^11.0.4", + "@types/supertest": "^6.0.3", + "@types/tar": "^6.1.13", + "dotenv": "^16.4.5", + "supertest": "^7.1.4", + "typescript": "^5.3.3", + "vitest": "^3.1.1" + }, + "engines": { + "node": ">=20" + } +} diff --git a/projects/gemini-cli/packages/a2a-server/src/agent.test.ts b/projects/gemini-cli/packages/a2a-server/src/agent.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..28110d4bad2ed109896c9277fbd76fafd492797d --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/agent.test.ts @@ -0,0 +1,625 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { Config } from '@google/gemini-cli-core'; +import { + GeminiEventType, + ApprovalMode, + type ToolCallConfirmationDetails, +} from '@google/gemini-cli-core'; +import type { + TaskStatusUpdateEvent, + SendStreamingMessageSuccessResponse, +} from '@a2a-js/sdk'; +import type express from 'express'; +import type { Server } from 'node:http'; +import request from 'supertest'; +import { + afterAll, + afterEach, + beforeEach, + beforeAll, + describe, + expect, + it, + vi, +} from 'vitest'; +import { createApp } from './agent.js'; +import { + assertUniqueFinalEventIsLast, + assertTaskCreationAndWorkingStatus, + createStreamMessageRequest, + createMockConfig, +} from './testing_utils.js'; +import { MockTool } from '@google/gemini-cli-core'; + +const mockToolConfirmationFn = async () => + ({}) as unknown as ToolCallConfirmationDetails; + +const 
streamToSSEEvents = ( + stream: string, +): SendStreamingMessageSuccessResponse[] => + stream + .split('\n\n') + .filter(Boolean) // Remove empty strings from trailing newlines + .map((chunk) => { + const dataLine = chunk + .split('\n') + .find((line) => line.startsWith('data: ')); + if (!dataLine) { + throw new Error(`Invalid SSE chunk found: "${chunk}"`); + } + return JSON.parse(dataLine.substring(6)); + }); + +// Mock the logger to avoid polluting test output +// Comment out to debug tests +vi.mock('./logger.js', () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, +})); + +let config: Config; +const getToolRegistrySpy = vi.fn().mockReturnValue(ApprovalMode.DEFAULT); +const getApprovalModeSpy = vi.fn(); +vi.mock('./config.js', async () => { + const actual = await vi.importActual('./config.js'); + return { + ...actual, + loadConfig: vi.fn().mockImplementation(async () => { + const mockConfig = createMockConfig({ + getToolRegistry: getToolRegistrySpy, + getApprovalMode: getApprovalModeSpy, + }); + config = mockConfig as Config; + return config; + }), + }; +}); + +// Mock the GeminiClient to avoid actual API calls +const sendMessageStreamSpy = vi.fn(); +vi.mock('@google/gemini-cli-core', async () => { + const actual = await vi.importActual('@google/gemini-cli-core'); + return { + ...actual, + GeminiClient: vi.fn().mockImplementation(() => ({ + sendMessageStream: sendMessageStreamSpy, + getUserTier: vi.fn().mockReturnValue('free'), + initialize: vi.fn(), + })), + }; +}); + +describe('E2E Tests', () => { + let app: express.Express; + let server: Server; + + beforeAll(async () => { + app = await createApp(); + server = app.listen(0); // Listen on a random available port + }); + + beforeEach(() => { + getApprovalModeSpy.mockReturnValue(ApprovalMode.DEFAULT); + }); + + afterAll( + () => + new Promise((resolve) => { + server.close(() => { + resolve(); + }); + }), + ); + + afterEach(() => { + vi.clearAllMocks(); + }); + + it('should create a new task and 
stream status updates (text-content) via POST /', async () => { + sendMessageStreamSpy.mockImplementation(async function* () { + yield* [{ type: 'content', value: 'Hello how are you?' }]; + }); + + const agent = request.agent(app); + const res = await agent + .post('/') + .send(createStreamMessageRequest('hello', 'a2a-test-message')) + .set('Content-Type', 'application/json') + .expect(200); + + const events = streamToSSEEvents(res.text); + + assertTaskCreationAndWorkingStatus(events); + + // Status update: text-content + const textContentEvent = events[2].result as TaskStatusUpdateEvent; + expect(textContentEvent.kind).toBe('status-update'); + expect(textContentEvent.status.state).toBe('working'); + expect(textContentEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'text-content', + }); + expect(textContentEvent.status.message?.parts).toMatchObject([ + { kind: 'text', text: 'Hello how are you?' }, + ]); + + // Status update: input-required (final) + const finalEvent = events[3].result as TaskStatusUpdateEvent; + expect(finalEvent.kind).toBe('status-update'); + expect(finalEvent.status?.state).toBe('input-required'); + expect(finalEvent.final).toBe(true); + + assertUniqueFinalEventIsLast(events); + expect(events.length).toBe(4); + }); + + it('should create a new task, schedule a tool call, and wait for approval', async () => { + // First call yields the tool request + sendMessageStreamSpy.mockImplementationOnce(async function* () { + yield* [ + { + type: GeminiEventType.ToolCallRequest, + value: { + callId: 'test-call-id', + name: 'test-tool', + args: {}, + }, + }, + ]; + }); + // Subsequent calls yield nothing + sendMessageStreamSpy.mockImplementation(async function* () { + yield* []; + }); + + const mockTool = new MockTool({ + name: 'test-tool', + shouldConfirmExecute: vi.fn(mockToolConfirmationFn), + }); + + getToolRegistrySpy.mockReturnValue({ + getAllTools: vi.fn().mockReturnValue([mockTool]), + getToolsByServer: vi.fn().mockReturnValue([]), + getTool: 
vi.fn().mockReturnValue(mockTool), + }); + + const agent = request.agent(app); + const res = await agent + .post('/') + .send(createStreamMessageRequest('run a tool', 'a2a-tool-test-message')) + .set('Content-Type', 'application/json') + .expect(200); + + const events = streamToSSEEvents(res.text); + assertTaskCreationAndWorkingStatus(events); + + // Status update: working + const workingEvent2 = events[2].result as TaskStatusUpdateEvent; + expect(workingEvent2.kind).toBe('status-update'); + expect(workingEvent2.status.state).toBe('working'); + expect(workingEvent2.metadata?.['coderAgent']).toMatchObject({ + kind: 'state-change', + }); + + // Status update: tool-call-update + const toolCallUpdateEvent = events[3].result as TaskStatusUpdateEvent; + expect(toolCallUpdateEvent.kind).toBe('status-update'); + expect(toolCallUpdateEvent.status.state).toBe('working'); + expect(toolCallUpdateEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(toolCallUpdateEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'validating', + request: { callId: 'test-call-id' }, + }, + }, + ]); + + // State update: awaiting_approval update + const toolCallConfirmationEvent = events[4].result as TaskStatusUpdateEvent; + expect(toolCallConfirmationEvent.kind).toBe('status-update'); + expect(toolCallConfirmationEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-confirmation', + }); + expect(toolCallConfirmationEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'awaiting_approval', + request: { callId: 'test-call-id' }, + }, + }, + ]); + expect(toolCallConfirmationEvent.status?.state).toBe('working'); + + assertUniqueFinalEventIsLast(events); + expect(events.length).toBe(6); + }); + + it('should handle multiple tool calls in a single turn', async () => { + // First call yields the tool request + sendMessageStreamSpy.mockImplementationOnce(async function* () { + yield* [ + { + type: 
GeminiEventType.ToolCallRequest, + value: { + callId: 'test-call-id-1', + name: 'test-tool-1', + args: {}, + }, + }, + { + type: GeminiEventType.ToolCallRequest, + value: { + callId: 'test-call-id-2', + name: 'test-tool-2', + args: {}, + }, + }, + ]; + }); + // Subsequent calls yield nothing + sendMessageStreamSpy.mockImplementation(async function* () { + yield* []; + }); + + const mockTool1 = new MockTool({ + name: 'test-tool-1', + displayName: 'Test Tool 1', + shouldConfirmExecute: vi.fn(mockToolConfirmationFn), + }); + const mockTool2 = new MockTool({ + name: 'test-tool-2', + displayName: 'Test Tool 2', + shouldConfirmExecute: vi.fn(mockToolConfirmationFn), + }); + + getToolRegistrySpy.mockReturnValue({ + getAllTools: vi.fn().mockReturnValue([mockTool1, mockTool2]), + getToolsByServer: vi.fn().mockReturnValue([]), + getTool: vi.fn().mockImplementation((name: string) => { + if (name === 'test-tool-1') return mockTool1; + if (name === 'test-tool-2') return mockTool2; + return undefined; + }), + }); + + const agent = request.agent(app); + const res = await agent + .post('/') + .send( + createStreamMessageRequest( + 'run two tools', + 'a2a-multi-tool-test-message', + ), + ) + .set('Content-Type', 'application/json') + .expect(200); + + const events = streamToSSEEvents(res.text); + assertTaskCreationAndWorkingStatus(events); + + // Second working update + const workingEvent = events[2].result as TaskStatusUpdateEvent; + expect(workingEvent.kind).toBe('status-update'); + expect(workingEvent.status.state).toBe('working'); + + // State Update: Validate each tool call + const toolCallValidateEvent1 = events[3].result as TaskStatusUpdateEvent; + expect(toolCallValidateEvent1.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(toolCallValidateEvent1.status.message?.parts).toMatchObject([ + { + data: { + status: 'validating', + request: { callId: 'test-call-id-1' }, + }, + }, + ]); + const toolCallValidateEvent2 = events[4].result as 
TaskStatusUpdateEvent; + expect(toolCallValidateEvent2.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(toolCallValidateEvent2.status.message?.parts).toMatchObject([ + { + data: { + status: 'validating', + request: { callId: 'test-call-id-2' }, + }, + }, + ]); + + // State Update: Set each tool call to awaiting + const toolCallAwaitEvent1 = events[5].result as TaskStatusUpdateEvent; + expect(toolCallAwaitEvent1.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-confirmation', + }); + expect(toolCallAwaitEvent1.status.message?.parts).toMatchObject([ + { + data: { + status: 'awaiting_approval', + request: { callId: 'test-call-id-1' }, + }, + }, + ]); + const toolCallAwaitEvent2 = events[6].result as TaskStatusUpdateEvent; + expect(toolCallAwaitEvent2.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-confirmation', + }); + expect(toolCallAwaitEvent2.status.message?.parts).toMatchObject([ + { + data: { + status: 'awaiting_approval', + request: { callId: 'test-call-id-2' }, + }, + }, + ]); + + assertUniqueFinalEventIsLast(events); + expect(events.length).toBe(8); + }); + + it('should handle tool calls that do not require approval', async () => { + // First call yields the tool request + sendMessageStreamSpy.mockImplementationOnce(async function* () { + yield* [ + { + type: GeminiEventType.ToolCallRequest, + value: { + callId: 'test-call-id-no-approval', + name: 'test-tool-no-approval', + args: {}, + }, + }, + ]; + }); + // Second call, after the tool runs, yields the final text + sendMessageStreamSpy.mockImplementationOnce(async function* () { + yield* [{ type: 'content', value: 'Tool executed successfully.' 
}]; + }); + + const mockTool = new MockTool({ + name: 'test-tool-no-approval', + displayName: 'Test Tool No Approval', + execute: vi.fn().mockResolvedValue({ + llmContent: 'Tool executed successfully.', + returnDisplay: 'Tool executed successfully.', + }), + }); + + getToolRegistrySpy.mockReturnValue({ + getAllTools: vi.fn().mockReturnValue([mockTool]), + getToolsByServer: vi.fn().mockReturnValue([]), + getTool: vi.fn().mockReturnValue(mockTool), + }); + + const agent = request.agent(app); + const res = await agent + .post('/') + .send( + createStreamMessageRequest( + 'run a tool without approval', + 'a2a-no-approval-test-message', + ), + ) + .set('Content-Type', 'application/json') + .expect(200); + + const events = streamToSSEEvents(res.text); + assertTaskCreationAndWorkingStatus(events); + + // Status update: working + const workingEvent2 = events[2].result as TaskStatusUpdateEvent; + expect(workingEvent2.kind).toBe('status-update'); + expect(workingEvent2.status.state).toBe('working'); + + // Status update: tool-call-update (validating) + const validatingEvent = events[3].result as TaskStatusUpdateEvent; + expect(validatingEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(validatingEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'validating', + request: { callId: 'test-call-id-no-approval' }, + }, + }, + ]); + + // Status update: tool-call-update (scheduled) + const scheduledEvent = events[4].result as TaskStatusUpdateEvent; + expect(scheduledEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(scheduledEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'scheduled', + request: { callId: 'test-call-id-no-approval' }, + }, + }, + ]); + + // Status update: tool-call-update (executing) + const executingEvent = events[5].result as TaskStatusUpdateEvent; + expect(executingEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + 
}); + expect(executingEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'executing', + request: { callId: 'test-call-id-no-approval' }, + }, + }, + ]); + + // Status update: tool-call-update (success) + const successEvent = events[6].result as TaskStatusUpdateEvent; + expect(successEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(successEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'success', + request: { callId: 'test-call-id-no-approval' }, + }, + }, + ]); + + // Status update: working (before sending tool result to LLM) + const workingEvent3 = events[7].result as TaskStatusUpdateEvent; + expect(workingEvent3.kind).toBe('status-update'); + expect(workingEvent3.status.state).toBe('working'); + + // Status update: text-content (final LLM response) + const textContentEvent = events[8].result as TaskStatusUpdateEvent; + expect(textContentEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'text-content', + }); + expect(textContentEvent.status.message?.parts).toMatchObject([ + { text: 'Tool executed successfully.' }, + ]); + + assertUniqueFinalEventIsLast(events); + expect(events.length).toBe(10); + }); + + it('should bypass tool approval in YOLO mode', async () => { + // First call yields the tool request + sendMessageStreamSpy.mockImplementationOnce(async function* () { + yield* [ + { + type: GeminiEventType.ToolCallRequest, + value: { + callId: 'test-call-id-yolo', + name: 'test-tool-yolo', + args: {}, + }, + }, + ]; + }); + // Second call, after the tool runs, yields the final text + sendMessageStreamSpy.mockImplementationOnce(async function* () { + yield* [{ type: 'content', value: 'Tool executed successfully.' 
}]; + }); + + // Set approval mode to yolo + getApprovalModeSpy.mockReturnValue(ApprovalMode.YOLO); + + const mockTool = new MockTool({ + name: 'test-tool-yolo', + displayName: 'Test Tool YOLO', + execute: vi.fn().mockResolvedValue({ + llmContent: 'Tool executed successfully.', + returnDisplay: 'Tool executed successfully.', + }), + }); + + getToolRegistrySpy.mockReturnValue({ + getAllTools: vi.fn().mockReturnValue([mockTool]), + getToolsByServer: vi.fn().mockReturnValue([]), + getTool: vi.fn().mockReturnValue(mockTool), + }); + + const agent = request.agent(app); + const res = await agent + .post('/') + .send( + createStreamMessageRequest( + 'run a tool in yolo mode', + 'a2a-yolo-mode-test-message', + ), + ) + .set('Content-Type', 'application/json') + .expect(200); + + const events = streamToSSEEvents(res.text); + assertTaskCreationAndWorkingStatus(events); + + // Status update: working + const workingEvent2 = events[2].result as TaskStatusUpdateEvent; + expect(workingEvent2.kind).toBe('status-update'); + expect(workingEvent2.status.state).toBe('working'); + + // Status update: tool-call-update (validating) + const validatingEvent = events[3].result as TaskStatusUpdateEvent; + expect(validatingEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(validatingEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'validating', + request: { callId: 'test-call-id-yolo' }, + }, + }, + ]); + + // Status update: tool-call-update (scheduled) + const awaitingEvent = events[4].result as TaskStatusUpdateEvent; + expect(awaitingEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(awaitingEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'scheduled', + request: { callId: 'test-call-id-yolo' }, + }, + }, + ]); + + // Status update: tool-call-update (executing) + const executingEvent = events[5].result as TaskStatusUpdateEvent; + 
expect(executingEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(executingEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'executing', + request: { callId: 'test-call-id-yolo' }, + }, + }, + ]); + + // Status update: tool-call-update (success) + const successEvent = events[6].result as TaskStatusUpdateEvent; + expect(successEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'tool-call-update', + }); + expect(successEvent.status.message?.parts).toMatchObject([ + { + data: { + status: 'success', + request: { callId: 'test-call-id-yolo' }, + }, + }, + ]); + + // Status update: working (before sending tool result to LLM) + const workingEvent3 = events[7].result as TaskStatusUpdateEvent; + expect(workingEvent3.kind).toBe('status-update'); + expect(workingEvent3.status.state).toBe('working'); + + // Status update: text-content (final LLM response) + const textContentEvent = events[8].result as TaskStatusUpdateEvent; + expect(textContentEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'text-content', + }); + expect(textContentEvent.status.message?.parts).toMatchObject([ + { text: 'Tool executed successfully.' 
}, + ]); + + assertUniqueFinalEventIsLast(events); + expect(events.length).toBe(10); + }); +}); diff --git a/projects/gemini-cli/packages/a2a-server/src/agent.ts b/projects/gemini-cli/packages/a2a-server/src/agent.ts new file mode 100644 index 0000000000000000000000000000000000000000..501b8cdbbbb901203504a017fb3ecbff3bdba274 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/agent.ts @@ -0,0 +1,785 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import express from 'express'; +import { AsyncLocalStorage } from 'node:async_hooks'; + +import type { Message, Task as SDKTask, AgentCard } from '@a2a-js/sdk'; +import type { + TaskStore, + AgentExecutor, + AgentExecutionEvent, + RequestContext, + ExecutionEventBus, +} from '@a2a-js/sdk/server'; +import { DefaultRequestHandler, InMemoryTaskStore } from '@a2a-js/sdk/server'; +import { A2AExpressApp } from '@a2a-js/sdk/server/express'; // Import server components +import type { + ToolCallRequestInfo, + ServerGeminiToolCallRequestEvent, + Config, +} from '@google/gemini-cli-core'; +import { GeminiEventType } from '@google/gemini-cli-core'; +import { v4 as uuidv4 } from 'uuid'; +import { logger } from './logger.js'; +import type { StateChange, AgentSettings } from './types.js'; +import { CoderAgentEvent } from './types.js'; +import { loadConfig, loadEnvironment, setTargetDir } from './config.js'; +import { loadSettings } from './settings.js'; +import { loadExtensions } from './extension.js'; +import { Task } from './task.js'; +import { GCSTaskStore, NoOpTaskStore } from './gcs.js'; +import type { PersistedStateMetadata } from './metadata_types.js'; +import { getPersistedState, setPersistedState } from './metadata_types.js'; + +const requestStorage = new AsyncLocalStorage<{ req: express.Request }>(); + +/** + * Provides a wrapper for Task. Passes data from Task to SDKTask. + * The idea is to use this class inside CoderAgentExecutor to replace Task. 
+ */ +class TaskWrapper { + task: Task; + agentSettings: AgentSettings; + + constructor(task: Task, agentSettings: AgentSettings) { + this.task = task; + this.agentSettings = agentSettings; + } + + get id() { + return this.task.id; + } + + toSDKTask(): SDKTask { + const persistedState: PersistedStateMetadata = { + _agentSettings: this.agentSettings, + _taskState: this.task.taskState, + }; + + const sdkTask: SDKTask = { + id: this.task.id, + contextId: this.task.contextId, + kind: 'task', + status: { + state: this.task.taskState, + timestamp: new Date().toISOString(), + }, + metadata: setPersistedState({}, persistedState), + history: [], + artifacts: [], + }; + sdkTask.metadata!['_contextId'] = this.task.contextId; + return sdkTask; + } +} + +const coderAgentCard: AgentCard = { + name: 'Gemini SDLC Agent', + description: + 'An agent that generates code based on natural language instructions and streams file outputs.', + url: 'http://localhost:41242/', + provider: { + organization: 'Google', + url: 'https://google.com', + }, + protocolVersion: '0.3.0', + version: '0.0.2', // Incremented version + capabilities: { + streaming: true, + pushNotifications: false, + stateTransitionHistory: true, + }, + securitySchemes: undefined, + security: undefined, + defaultInputModes: ['text'], + defaultOutputModes: ['text'], + skills: [ + { + id: 'code_generation', + name: 'Code Generation', + description: + 'Generates code snippets or complete files based on user requests, streaming the results.', + tags: ['code', 'development', 'programming'], + examples: [ + 'Write a python function to calculate fibonacci numbers.', + 'Create an HTML file with a basic button that alerts "Hello!" when clicked.', + ], + inputModes: ['text'], + outputModes: ['text'], + }, + ], + supportsAuthenticatedExtendedCard: false, +}; + +/** + * CoderAgentExecutor implements the agent's core logic for code generation. 
+ */ +class CoderAgentExecutor implements AgentExecutor { + private tasks: Map = new Map(); + // Track tasks with an active execution loop. + private executingTasks = new Set(); + + constructor(private taskStore?: TaskStore) {} + + private async getConfig( + agentSettings: AgentSettings, + taskId: string, + ): Promise { + const workspaceRoot = setTargetDir(agentSettings); + loadEnvironment(); // Will override any global env with workspace envs + const settings = loadSettings(workspaceRoot); + const extensions = loadExtensions(workspaceRoot); + return await loadConfig(settings, extensions, taskId); + } + + /** + * Reconstructs TaskWrapper from SDKTask. + */ + async reconstruct( + sdkTask: SDKTask, + eventBus?: ExecutionEventBus, + ): Promise { + const metadata = sdkTask.metadata || {}; + const persistedState = getPersistedState(metadata); + + if (!persistedState) { + throw new Error( + `Cannot reconstruct task ${sdkTask.id}: missing persisted state in metadata.`, + ); + } + + const agentSettings = persistedState._agentSettings; + const config = await this.getConfig(agentSettings, sdkTask.id); + const contextId = + (metadata['_contextId'] as string) || (sdkTask.contextId as string); + const runtimeTask = await Task.create( + sdkTask.id, + contextId, + config, + eventBus, + ); + runtimeTask.taskState = persistedState._taskState; + await runtimeTask.geminiClient.initialize( + runtimeTask.config.getContentGeneratorConfig(), + ); + + const wrapper = new TaskWrapper(runtimeTask, agentSettings); + this.tasks.set(sdkTask.id, wrapper); + logger.info(`Task ${sdkTask.id} reconstructed from store.`); + return wrapper; + } + + async createTask( + taskId: string, + contextId: string, + agentSettingsInput?: AgentSettings, + eventBus?: ExecutionEventBus, + ): Promise { + const agentSettings = agentSettingsInput || ({} as AgentSettings); + const config = await this.getConfig(agentSettings, taskId); + const runtimeTask = await Task.create(taskId, contextId, config, eventBus); + await 
runtimeTask.geminiClient.initialize( + runtimeTask.config.getContentGeneratorConfig(), + ); + + const wrapper = new TaskWrapper(runtimeTask, agentSettings); + this.tasks.set(taskId, wrapper); + logger.info(`New task ${taskId} created.`); + return wrapper; + } + + getTask(taskId: string): TaskWrapper | undefined { + return this.tasks.get(taskId); + } + + getAllTasks(): TaskWrapper[] { + return Array.from(this.tasks.values()); + } + + cancelTask = async ( + taskId: string, + eventBus: ExecutionEventBus, + ): Promise => { + logger.info( + `[CoderAgentExecutor] Received cancel request for task ${taskId}`, + ); + const wrapper = this.tasks.get(taskId); + + if (!wrapper) { + logger.warn( + `[CoderAgentExecutor] Task ${taskId} not found for cancellation.`, + ); + eventBus.publish({ + kind: 'status-update', + taskId, + contextId: uuidv4(), + status: { + state: 'failed', + message: { + kind: 'message', + role: 'agent', + parts: [{ kind: 'text', text: `Task ${taskId} not found.` }], + messageId: uuidv4(), + taskId, + }, + }, + final: true, + }); + return; + } + + const { task } = wrapper; + + if (task.taskState === 'canceled' || task.taskState === 'failed') { + logger.info( + `[CoderAgentExecutor] Task ${taskId} is already in a final state: ${task.taskState}. 
No action needed for cancellation.`, + ); + eventBus.publish({ + kind: 'status-update', + taskId, + contextId: task.contextId, + status: { + state: task.taskState, + message: { + kind: 'message', + role: 'agent', + parts: [ + { + kind: 'text', + text: `Task ${taskId} is already ${task.taskState}.`, + }, + ], + messageId: uuidv4(), + taskId, + }, + }, + final: true, + }); + return; + } + + try { + logger.info( + `[CoderAgentExecutor] Initiating cancellation for task ${taskId}.`, + ); + task.cancelPendingTools('Task canceled by user request.'); + + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + task.setTaskStateAndPublishUpdate( + 'canceled', + stateChange, + 'Task canceled by user request.', + undefined, + true, + ); + logger.info( + `[CoderAgentExecutor] Task ${taskId} cancellation processed. Saving state.`, + ); + await this.taskStore?.save(wrapper.toSDKTask()); + logger.info(`[CoderAgentExecutor] Task ${taskId} state CANCELED saved.`); + } catch (error) { + const errorMessage = + error instanceof Error ? 
error.message : 'Unknown error'; + logger.error( + `[CoderAgentExecutor] Error during task cancellation for ${taskId}: ${errorMessage}`, + error, + ); + eventBus.publish({ + kind: 'status-update', + taskId, + contextId: task.contextId, + status: { + state: 'failed', + message: { + kind: 'message', + role: 'agent', + parts: [ + { + kind: 'text', + text: `Failed to process cancellation for task ${taskId}: ${errorMessage}`, + }, + ], + messageId: uuidv4(), + taskId, + }, + }, + final: true, + }); + } + }; + + async execute( + requestContext: RequestContext, + eventBus: ExecutionEventBus, + ): Promise { + const userMessage = requestContext.userMessage as Message; + const sdkTask = requestContext.task as SDKTask | undefined; + + const taskId = sdkTask?.id || userMessage.taskId || uuidv4(); + const contextId = + userMessage.contextId || + sdkTask?.contextId || + sdkTask?.metadata?.['_contextId'] || + uuidv4(); + + logger.info( + `[CoderAgentExecutor] Executing for taskId: ${taskId}, contextId: ${contextId}`, + ); + logger.info( + `[CoderAgentExecutor] userMessage: ${JSON.stringify(userMessage)}`, + ); + eventBus.on('event', (event: AgentExecutionEvent) => + logger.info('[EventBus event]: ', event), + ); + + const store = requestStorage.getStore(); + if (!store) { + logger.error( + '[CoderAgentExecutor] Could not get request from async local storage. Cancellation on socket close will not be handled for this request.', + ); + } + + const abortController = new AbortController(); + const abortSignal = abortController.signal; + + if (store) { + // Grab the raw socket from the request object + const socket = store.req.socket; + const onClientEnd = () => { + logger.info( + `[CoderAgentExecutor] Client socket closed for task ${taskId}. 
Cancelling execution.`, + ); + if (!abortController.signal.aborted) { + abortController.abort(); + } + // Clean up the listener to prevent memory leaks + socket.removeListener('close', onClientEnd); + }; + + // Listen on the socket's 'end' event (remote closed the connection) + socket.on('end', onClientEnd); + + // It's also good practice to remove the listener if the task completes successfully + abortSignal.addEventListener('abort', () => { + socket.removeListener('end', onClientEnd); + }); + logger.info( + `[CoderAgentExecutor] Socket close handler set up for task ${taskId}.`, + ); + } + + let wrapper: TaskWrapper | undefined = this.tasks.get(taskId); + + if (wrapper) { + wrapper.task.eventBus = eventBus; + logger.info(`[CoderAgentExecutor] Task ${taskId} found in memory cache.`); + } else if (sdkTask) { + logger.info( + `[CoderAgentExecutor] Task ${taskId} found in TaskStore. Reconstructing...`, + ); + try { + wrapper = await this.reconstruct(sdkTask, eventBus); + } catch (e) { + logger.error( + `[CoderAgentExecutor] Failed to hydrate task ${taskId}:`, + e, + ); + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + eventBus.publish({ + kind: 'status-update', + taskId, + contextId: sdkTask.contextId, + status: { + state: 'failed', + message: { + kind: 'message', + role: 'agent', + parts: [ + { + kind: 'text', + text: 'Internal error: Task state lost or corrupted.', + }, + ], + messageId: uuidv4(), + taskId, + contextId: sdkTask.contextId, + } as Message, + }, + final: true, + metadata: { coderAgent: stateChange }, + }); + return; + } + } else { + logger.info(`[CoderAgentExecutor] Creating new task ${taskId}.`); + const agentSettings = userMessage.metadata?.[ + 'coderAgent' + ] as AgentSettings; + wrapper = await this.createTask( + taskId, + contextId as string, + agentSettings, + eventBus, + ); + const newTaskSDK = wrapper.toSDKTask(); + eventBus.publish({ + ...newTaskSDK, + kind: 'task', + status: { state: 'submitted', timestamp: 
new Date().toISOString() }, + history: [userMessage], + }); + try { + await this.taskStore?.save(newTaskSDK); + logger.info(`[CoderAgentExecutor] New task ${taskId} saved to store.`); + } catch (saveError) { + logger.error( + `[CoderAgentExecutor] Failed to save new task ${taskId} to store:`, + saveError, + ); + } + } + + if (!wrapper) { + logger.error( + `[CoderAgentExecutor] Task ${taskId} is unexpectedly undefined after load/create.`, + ); + return; + } + + const currentTask = wrapper.task; + + if (['canceled', 'failed', 'completed'].includes(currentTask.taskState)) { + logger.warn( + `[CoderAgentExecutor] Attempted to execute task ${taskId} which is already in state ${currentTask.taskState}. Ignoring.`, + ); + return; + } + + if (this.executingTasks.has(taskId)) { + logger.info( + `[CoderAgentExecutor] Task ${taskId} has a pending execution. Processing message and yielding.`, + ); + currentTask.eventBus = eventBus; + for await (const _ of currentTask.acceptUserMessage( + requestContext, + abortController.signal, + )) { + logger.info( + `[CoderAgentExecutor] Processing user message ${userMessage.messageId} in secondary execution loop for task ${taskId}.`, + ); + } + // End this execution-- the original/source will be resumed. 
+ return; + } + + logger.info( + `[CoderAgentExecutor] Starting main execution for message ${userMessage.messageId} for task ${taskId}.`, + ); + this.executingTasks.add(taskId); + + try { + let agentTurnActive = true; + logger.info(`[CoderAgentExecutor] Task ${taskId}: Processing user turn.`); + let agentEvents = currentTask.acceptUserMessage( + requestContext, + abortSignal, + ); + + while (agentTurnActive) { + logger.info( + `[CoderAgentExecutor] Task ${taskId}: Processing agent turn (LLM stream).`, + ); + const toolCallRequests: ToolCallRequestInfo[] = []; + for await (const event of agentEvents) { + if (abortSignal.aborted) { + logger.warn( + `[CoderAgentExecutor] Task ${taskId}: Abort signal received during agent event processing.`, + ); + throw new Error('Execution aborted'); + } + if (event.type === GeminiEventType.ToolCallRequest) { + toolCallRequests.push( + (event as ServerGeminiToolCallRequestEvent).value, + ); + continue; + } + await currentTask.acceptAgentMessage(event); + } + + if (abortSignal.aborted) throw new Error('Execution aborted'); + + if (toolCallRequests.length > 0) { + logger.info( + `[CoderAgentExecutor] Task ${taskId}: Found ${toolCallRequests.length} tool call requests. 
Scheduling as a batch.`, + ); + await currentTask.scheduleToolCalls(toolCallRequests, abortSignal); + } + + logger.info( + `[CoderAgentExecutor] Task ${taskId}: Waiting for pending tools if any.`, + ); + await currentTask.waitForPendingTools(); + logger.info( + `[CoderAgentExecutor] Task ${taskId}: All pending tools completed or none were pending.`, + ); + + if (abortSignal.aborted) throw new Error('Execution aborted'); + + const completedTools = currentTask.getAndClearCompletedTools(); + + if (completedTools.length > 0) { + // If all completed tool calls were canceled, manually add them to history and set state to input-required, final:true + if (completedTools.every((tool) => tool.status === 'cancelled')) { + logger.info( + `[CoderAgentExecutor] Task ${taskId}: All tool calls were cancelled. Updating history and ending agent turn.`, + ); + currentTask.addToolResponsesToHistory(completedTools); + agentTurnActive = false; + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + currentTask.setTaskStateAndPublishUpdate( + 'input-required', + stateChange, + undefined, + undefined, + true, + ); + } else { + logger.info( + `[CoderAgentExecutor] Task ${taskId}: Found ${completedTools.length} completed tool calls. Sending results back to LLM.`, + ); + + agentEvents = currentTask.sendCompletedToolsToLlm( + completedTools, + abortSignal, + ); + // Continue the loop to process the LLM response to the tool results. + } + } else { + logger.info( + `[CoderAgentExecutor] Task ${taskId}: No more tool calls to process. 
Ending agent turn.`, + ); + agentTurnActive = false; + } + } + + logger.info( + `[CoderAgentExecutor] Task ${taskId}: Agent turn finished, setting to input-required.`, + ); + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + currentTask.setTaskStateAndPublishUpdate( + 'input-required', + stateChange, + undefined, + undefined, + true, + ); + } catch (error) { + if (abortSignal.aborted) { + logger.warn(`[CoderAgentExecutor] Task ${taskId} execution aborted.`); + currentTask.cancelPendingTools('Execution aborted'); + if ( + currentTask.taskState !== 'canceled' && + currentTask.taskState !== 'failed' + ) { + currentTask.setTaskStateAndPublishUpdate( + 'input-required', + { kind: CoderAgentEvent.StateChangeEvent }, + 'Execution aborted by client.', + undefined, + true, + ); + } + } else { + const errorMessage = + error instanceof Error ? error.message : 'Agent execution error'; + logger.error( + `[CoderAgentExecutor] Error executing agent for task ${taskId}:`, + error, + ); + currentTask.cancelPendingTools(errorMessage); + if (currentTask.taskState !== 'failed') { + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + currentTask.setTaskStateAndPublishUpdate( + 'failed', + stateChange, + errorMessage, + undefined, + true, + ); + } + } + } finally { + this.executingTasks.delete(taskId); + logger.info( + `[CoderAgentExecutor] Saving final state for task ${taskId}.`, + ); + try { + await this.taskStore?.save(wrapper.toSDKTask()); + logger.info(`[CoderAgentExecutor] Task ${taskId} state saved.`); + } catch (saveError) { + logger.error( + `[CoderAgentExecutor] Failed to save task ${taskId} state in finally block:`, + saveError, + ); + } + } + } +} + +export function updateCoderAgentCardUrl(port: number) { + coderAgentCard.url = `http://localhost:${port}/`; +} + +export async function main() { + try { + const expressApp = await createApp(); + const port = process.env['CODER_AGENT_PORT'] || 0; + + const server = 
expressApp.listen(port, () => { + const address = server.address(); + let actualPort; + if (process.env['CODER_AGENT_PORT']) { + actualPort = process.env['CODER_AGENT_PORT']; + } else if (address && typeof address !== 'string') { + actualPort = address.port; + } else { + throw new Error('[Core Agent] Could not find port number.'); + } + updateCoderAgentCardUrl(Number(actualPort)); + logger.info( + `[CoreAgent] Agent Server started on http://localhost:${actualPort}`, + ); + logger.info( + `[CoreAgent] Agent Card: http://localhost:${actualPort}/.well-known/agent-card.json`, + ); + logger.info('[CoreAgent] Press Ctrl+C to stop the server'); + }); + } catch (error) { + logger.error('[CoreAgent] Error during startup:', error); + process.exit(1); + } +} + +export async function createApp() { + try { + // loadEnvironment() is called within getConfig now + const bucketName = process.env['GCS_BUCKET_NAME']; + let taskStoreForExecutor: TaskStore; + let taskStoreForHandler: TaskStore; + + if (bucketName) { + logger.info(`Using GCSTaskStore with bucket: ${bucketName}`); + const gcsTaskStore = new GCSTaskStore(bucketName); + taskStoreForExecutor = gcsTaskStore; + taskStoreForHandler = new NoOpTaskStore(gcsTaskStore); + } else { + logger.info('Using InMemoryTaskStore'); + const inMemoryTaskStore = new InMemoryTaskStore(); + taskStoreForExecutor = inMemoryTaskStore; + taskStoreForHandler = inMemoryTaskStore; + } + + const agentExecutor = new CoderAgentExecutor(taskStoreForExecutor); + + const requestHandler = new DefaultRequestHandler( + coderAgentCard, + taskStoreForHandler, + agentExecutor, + ); + + let expressApp = express(); + expressApp.use((req, res, next) => { + requestStorage.run({ req }, next); + }); + + const appBuilder = new A2AExpressApp(requestHandler); + expressApp = appBuilder.setupRoutes(expressApp, ''); + expressApp.use(express.json()); + + expressApp.post('/tasks', async (req, res) => { + try { + const taskId = uuidv4(); + const agentSettings = 
req.body.agentSettings as + | AgentSettings + | undefined; + const contextId = req.body.contextId || uuidv4(); + const wrapper = await agentExecutor.createTask( + taskId, + contextId, + agentSettings, + ); + await taskStoreForExecutor.save(wrapper.toSDKTask()); + res.status(201).json(wrapper.id); + } catch (error) { + logger.error('[CoreAgent] Error creating task:', error); + const errorMessage = + error instanceof Error + ? error.message + : 'Unknown error creating task'; + res.status(500).send({ error: errorMessage }); + } + }); + + expressApp.get('/tasks/metadata', async (req, res) => { + // This endpoint is only meaningful if the task store is in-memory. + if (!(taskStoreForExecutor instanceof InMemoryTaskStore)) { + res.status(501).send({ + error: + 'Listing all task metadata is only supported when using InMemoryTaskStore.', + }); + } + try { + const wrappers = agentExecutor.getAllTasks(); + if (wrappers && wrappers.length > 0) { + const tasksMetadata = await Promise.all( + wrappers.map((wrapper) => wrapper.task.getMetadata()), + ); + res.status(200).json(tasksMetadata); + } else { + res.status(204).send(); + } + } catch (error) { + logger.error('[CoreAgent] Error getting all task metadata:', error); + const errorMessage = + error instanceof Error + ? 
error.message + : 'Unknown error getting task metadata'; + res.status(500).send({ error: errorMessage }); + } + }); + + expressApp.get('/tasks/:taskId/metadata', async (req, res) => { + const taskId = req.params.taskId; + let wrapper = agentExecutor.getTask(taskId); + if (!wrapper) { + const sdkTask = await taskStoreForExecutor.load(taskId); + if (sdkTask) { + wrapper = await agentExecutor.reconstruct(sdkTask); + } + } + if (!wrapper) { + res.status(404).send({ error: 'Task not found' }); + return; + } + res.json({ metadata: await wrapper.task.getMetadata() }); + }); + return expressApp; + } catch (error) { + logger.error('[CoreAgent] Error during startup:', error); + process.exit(1); + } +} diff --git a/projects/gemini-cli/packages/a2a-server/src/config.ts b/projects/gemini-cli/packages/a2a-server/src/config.ts new file mode 100644 index 0000000000000000000000000000000000000000..cfbee1bcc87acbde50cbced5e68c46c65abd0325 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/config.ts @@ -0,0 +1,203 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { homedir } from 'node:os'; +import * as dotenv from 'dotenv'; + +import type { TelemetryTarget } from '@google/gemini-cli-core'; +import { + AuthType, + Config, + type ConfigParameters, + FileDiscoveryService, + ApprovalMode, + loadServerHierarchicalMemory, + GEMINI_CONFIG_DIR, + DEFAULT_GEMINI_EMBEDDING_MODEL, + DEFAULT_GEMINI_MODEL, +} from '@google/gemini-cli-core'; + +import { logger } from './logger.js'; +import type { Settings } from './settings.js'; +import type { Extension } from './extension.js'; +import { type AgentSettings, CoderAgentEvent } from './types.js'; + +export async function loadConfig( + settings: Settings, + extensions: Extension[], + taskId: string, +): Promise { + const mcpServers = mergeMcpServers(settings, extensions); + const workspaceDir = process.cwd(); + const 
adcFilePath = process.env['GOOGLE_APPLICATION_CREDENTIALS']; + + const configParams: ConfigParameters = { + sessionId: taskId, + model: DEFAULT_GEMINI_MODEL, + embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL, + sandbox: undefined, // Sandbox might not be relevant for a server-side agent + targetDir: workspaceDir, // Or a specific directory the agent operates on + debugMode: process.env['DEBUG'] === 'true' || false, + question: '', // Not used in server mode directly like CLI + fullContext: false, // Server might have different context needs + coreTools: settings.coreTools || undefined, + excludeTools: settings.excludeTools || undefined, + showMemoryUsage: settings.showMemoryUsage || false, + approvalMode: + process.env['GEMINI_YOLO_MODE'] === 'true' + ? ApprovalMode.YOLO + : ApprovalMode.DEFAULT, + mcpServers, + cwd: workspaceDir, + telemetry: { + enabled: settings.telemetry?.enabled, + target: settings.telemetry?.target as TelemetryTarget, + otlpEndpoint: + process.env['OTEL_EXPORTER_OTLP_ENDPOINT'] ?? + settings.telemetry?.otlpEndpoint, + logPrompts: settings.telemetry?.logPrompts, + }, + // Git-aware file filtering settings + fileFiltering: { + respectGitIgnore: settings.fileFiltering?.respectGitIgnore, + enableRecursiveFileSearch: + settings.fileFiltering?.enableRecursiveFileSearch, + }, + ideMode: false, + }; + + const fileService = new FileDiscoveryService(workspaceDir); + const extensionContextFilePaths = extensions.flatMap((e) => e.contextFiles); + const { memoryContent, fileCount } = await loadServerHierarchicalMemory( + workspaceDir, + [workspaceDir], + false, + fileService, + extensionContextFilePaths, + true, /// TODO: Wire up folder trust logic here. 
+ ); + configParams.userMemory = memoryContent; + configParams.geminiMdFileCount = fileCount; + const config = new Config({ + ...configParams, + }); + // Needed to initialize ToolRegistry, and git checkpointing if enabled + await config.initialize(); + + if (process.env['USE_CCPA']) { + logger.info('[Config] Using CCPA Auth:'); + try { + if (adcFilePath) { + path.resolve(adcFilePath); + } + } catch (e) { + logger.error( + `[Config] USE_CCPA env var is true but unable to resolve GOOGLE_APPLICATION_CREDENTIALS file path ${adcFilePath}. Error ${e}`, + ); + } + await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE); + logger.info( + `[Config] GOOGLE_CLOUD_PROJECT: ${process.env['GOOGLE_CLOUD_PROJECT']}`, + ); + } else if (process.env['GEMINI_API_KEY']) { + logger.info('[Config] Using Gemini API Key'); + await config.refreshAuth(AuthType.USE_GEMINI); + } else { + logger.error( + `[Config] Unable to set GeneratorConfig. Please provide a GEMINI_API_KEY or set USE_CCPA.`, + ); + } + + return config; +} + +export function mergeMcpServers(settings: Settings, extensions: Extension[]) { + const mcpServers = { ...(settings.mcpServers || {}) }; + for (const extension of extensions) { + Object.entries(extension.config.mcpServers || {}).forEach( + ([key, server]) => { + if (mcpServers[key]) { + console.warn( + `Skipping extension MCP config for server with key "${key}" as it already exists.`, + ); + return; + } + mcpServers[key] = server; + }, + ); + } + return mcpServers; +} + +export function setTargetDir(agentSettings: AgentSettings | undefined): string { + const originalCWD = process.cwd(); + const targetDir = + process.env['CODER_AGENT_WORKSPACE_PATH'] ?? + (agentSettings?.kind === CoderAgentEvent.StateAgentSettingsEvent + ? 
agentSettings.workspacePath + : undefined); + + if (!targetDir) { + return originalCWD; + } + + logger.info( + `[CoderAgentExecutor] Overriding workspace path to: ${targetDir}`, + ); + + try { + const resolvedPath = path.resolve(targetDir); + process.chdir(resolvedPath); + return resolvedPath; + } catch (e) { + logger.error( + `[CoderAgentExecutor] Error resolving workspace path: ${e}, returning original os.cwd()`, + ); + return originalCWD; + } +} + +export function loadEnvironment(): void { + const envFilePath = findEnvFile(process.cwd()); + if (envFilePath) { + dotenv.config({ path: envFilePath, override: true }); + } +} + +function findEnvFile(startDir: string): string | null { + let currentDir = path.resolve(startDir); + while (true) { + // prefer gemini-specific .env under GEMINI_DIR + const geminiEnvPath = path.join(currentDir, GEMINI_CONFIG_DIR, '.env'); + if (fs.existsSync(geminiEnvPath)) { + return geminiEnvPath; + } + const envPath = path.join(currentDir, '.env'); + if (fs.existsSync(envPath)) { + return envPath; + } + const parentDir = path.dirname(currentDir); + if (parentDir === currentDir || !parentDir) { + // check .env under home as fallback, again preferring gemini-specific .env + const homeGeminiEnvPath = path.join( + process.cwd(), + GEMINI_CONFIG_DIR, + '.env', + ); + if (fs.existsSync(homeGeminiEnvPath)) { + return homeGeminiEnvPath; + } + const homeEnvPath = path.join(homedir(), '.env'); + if (fs.existsSync(homeEnvPath)) { + return homeEnvPath; + } + return null; + } + currentDir = parentDir; + } +} diff --git a/projects/gemini-cli/packages/a2a-server/src/endpoints.test.ts b/projects/gemini-cli/packages/a2a-server/src/endpoints.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..77a1e59ac0aaa2ffe51f77d2cbb5e63d3afbaecc --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/endpoints.test.ts @@ -0,0 +1,146 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import 
{ describe, it, expect, beforeAll, afterAll, vi } from 'vitest'; +import request from 'supertest'; +import type express from 'express'; +import { createApp, updateCoderAgentCardUrl } from './agent.js'; +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import * as os from 'node:os'; +import type { Server } from 'node:http'; +import type { TaskMetadata } from './types.js'; +import type { AddressInfo } from 'node:net'; + +// Mock the logger to avoid polluting test output +// Comment out to help debug +vi.mock('./logger.js', () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, +})); + +// Mock Task.create to avoid its complex setup +vi.mock('./task.js', () => { + class MockTask { + id: string; + contextId: string; + taskState = 'submitted'; + config = { + getContentGeneratorConfig: vi + .fn() + .mockReturnValue({ model: 'gemini-pro' }), + }; + geminiClient = { + initialize: vi.fn().mockResolvedValue(undefined), + }; + constructor(id: string, contextId: string) { + this.id = id; + this.contextId = contextId; + } + static create = vi + .fn() + .mockImplementation((id, contextId) => + Promise.resolve(new MockTask(id, contextId)), + ); + getMetadata = vi.fn().mockImplementation(async () => ({ + id: this.id, + contextId: this.contextId, + taskState: this.taskState, + model: 'gemini-pro', + mcpServers: [], + availableTools: [], + })); + } + return { Task: MockTask }; +}); + +describe('Agent Server Endpoints', () => { + let app: express.Express; + let server: Server; + let testWorkspace: string; + + const createTask = (contextId: string) => + request(app) + .post('/tasks') + .send({ + contextId, + agentSettings: { + kind: 'agent-settings', + workspacePath: testWorkspace, + }, + }) + .set('Content-Type', 'application/json'); + + beforeAll(async () => { + // Create a unique temporary directory for the workspace to avoid conflicts + testWorkspace = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-agent-test-'), + ); + app = await createApp(); + 
await new Promise((resolve) => { + server = app.listen(0, () => { + const port = (server.address() as AddressInfo).port; + updateCoderAgentCardUrl(port); + resolve(); + }); + }); + }); + + afterAll( + () => + new Promise((resolve, reject) => { + server.close((err) => { + if (err) return reject(err); + + try { + fs.rmSync(testWorkspace, { recursive: true, force: true }); + } catch (e) { + console.warn(`Could not remove temp dir '${testWorkspace}':`, e); + } + resolve(); + }); + }), + ); + + it('should create a new task via POST /tasks', async () => { + const response = await createTask('test-context'); + expect(response.status).toBe(201); + expect(response.body).toBeTypeOf('string'); // Should return the task ID + }, 7000); + + it('should get metadata for a specific task via GET /tasks/:taskId/metadata', async () => { + const createResponse = await createTask('test-context-2'); + const taskId = createResponse.body; + const response = await request(app).get(`/tasks/${taskId}/metadata`); + expect(response.status).toBe(200); + expect(response.body.metadata.id).toBe(taskId); + }, 6000); + + it('should get metadata for all tasks via GET /tasks/metadata', async () => { + const createResponse = await createTask('test-context-3'); + const taskId = createResponse.body; + const response = await request(app).get('/tasks/metadata'); + expect(response.status).toBe(200); + expect(Array.isArray(response.body)).toBe(true); + expect(response.body.length).toBeGreaterThan(0); + const taskMetadata = response.body.find( + (m: TaskMetadata) => m.id === taskId, + ); + expect(taskMetadata).toBeDefined(); + }); + + it('should return 404 for a non-existent task', async () => { + const response = await request(app).get('/tasks/fake-task/metadata'); + expect(response.status).toBe(404); + }); + + it('should return agent metadata via GET /.well-known/agent-card.json', async () => { + const response = await request(app).get('/.well-known/agent-card.json'); + const port = (server.address() as 
AddressInfo).port; + expect(response.status).toBe(200); + expect(response.body.name).toBe('Gemini SDLC Agent'); + expect(response.body.url).toBe(`http://localhost:${port}/`); + }); +}); diff --git a/projects/gemini-cli/packages/a2a-server/src/extension.ts b/projects/gemini-cli/packages/a2a-server/src/extension.ts new file mode 100644 index 0000000000000000000000000000000000000000..3a33eebe3d70ac64c75a972b6d06a5fe619de81e --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/extension.ts @@ -0,0 +1,118 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Copied exactly from packages/cli/src/config/extension.ts, last PR #1026 + +import type { MCPServerConfig } from '@google/gemini-cli-core'; +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import * as os from 'node:os'; +import { logger } from './logger.js'; + +export const EXTENSIONS_DIRECTORY_NAME = path.join('.gemini', 'extensions'); +export const EXTENSIONS_CONFIG_FILENAME = 'gemini-extension.json'; + +export interface Extension { + config: ExtensionConfig; + contextFiles: string[]; +} + +export interface ExtensionConfig { + name: string; + version: string; + mcpServers?: Record; + contextFileName?: string | string[]; +} + +export function loadExtensions(workspaceDir: string): Extension[] { + const allExtensions = [ + ...loadExtensionsFromDir(workspaceDir), + ...loadExtensionsFromDir(os.homedir()), + ]; + + const uniqueExtensions: Extension[] = []; + const seenNames = new Set(); + for (const extension of allExtensions) { + if (!seenNames.has(extension.config.name)) { + logger.info( + `Loading extension: ${extension.config.name} (version: ${extension.config.version})`, + ); + uniqueExtensions.push(extension); + seenNames.add(extension.config.name); + } + } + + return uniqueExtensions; +} + +function loadExtensionsFromDir(dir: string): Extension[] { + const extensionsDir = path.join(dir, EXTENSIONS_DIRECTORY_NAME); + if 
(!fs.existsSync(extensionsDir)) { + return []; + } + + const extensions: Extension[] = []; + for (const subdir of fs.readdirSync(extensionsDir)) { + const extensionDir = path.join(extensionsDir, subdir); + + const extension = loadExtension(extensionDir); + if (extension != null) { + extensions.push(extension); + } + } + return extensions; +} + +function loadExtension(extensionDir: string): Extension | null { + if (!fs.statSync(extensionDir).isDirectory()) { + logger.error( + `Warning: unexpected file ${extensionDir} in extensions directory.`, + ); + return null; + } + + const configFilePath = path.join(extensionDir, EXTENSIONS_CONFIG_FILENAME); + if (!fs.existsSync(configFilePath)) { + logger.error( + `Warning: extension directory ${extensionDir} does not contain a config file ${configFilePath}.`, + ); + return null; + } + + try { + const configContent = fs.readFileSync(configFilePath, 'utf-8'); + const config = JSON.parse(configContent) as ExtensionConfig; + if (!config.name || !config.version) { + logger.error( + `Invalid extension config in ${configFilePath}: missing name or version.`, + ); + return null; + } + + const contextFiles = getContextFileNames(config) + .map((contextFileName) => path.join(extensionDir, contextFileName)) + .filter((contextFilePath) => fs.existsSync(contextFilePath)); + + return { + config, + contextFiles, + }; + } catch (e) { + logger.error( + `Warning: error parsing extension config in ${configFilePath}: ${e}`, + ); + return null; + } +} + +function getContextFileNames(config: ExtensionConfig): string[] { + if (!config.contextFileName) { + return ['GEMINI.md']; + } else if (!Array.isArray(config.contextFileName)) { + return [config.contextFileName]; + } + return config.contextFileName; +} diff --git a/projects/gemini-cli/packages/a2a-server/src/gcs.test.ts b/projects/gemini-cli/packages/a2a-server/src/gcs.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..3553ccc6cc9e64da7c96698acc8dda6252cd0f5f --- /dev/null 
+++ b/projects/gemini-cli/packages/a2a-server/src/gcs.test.ts @@ -0,0 +1,340 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Storage } from '@google-cloud/storage'; +import * as fse from 'fs-extra'; +import { promises as fsPromises, createReadStream } from 'node:fs'; +import * as tar from 'tar'; +import { gzipSync, gunzipSync } from 'node:zlib'; +import { v4 as uuidv4 } from 'uuid'; +import type { Task as SDKTask } from '@a2a-js/sdk'; +import type { TaskStore } from '@a2a-js/sdk/server'; +import type { Mocked, MockedClass, Mock } from 'vitest'; +import { describe, it, expect, beforeEach, vi } from 'vitest'; + +import { GCSTaskStore, NoOpTaskStore } from './gcs.js'; +import { logger } from './logger.js'; +import * as configModule from './config.js'; +import * as metadataModule from './metadata_types.js'; + +// Mock dependencies +vi.mock('@google-cloud/storage'); +vi.mock('fs-extra', () => ({ + pathExists: vi.fn(), + readdir: vi.fn(), + remove: vi.fn(), + ensureDir: vi.fn(), +})); +vi.mock('node:fs', async () => { + const actual = await vi.importActual('node:fs'); + return { + ...actual, + promises: { + ...actual.promises, + readdir: vi.fn(), + }, + createReadStream: vi.fn(), + }; +}); +vi.mock('tar'); +vi.mock('zlib'); +vi.mock('uuid'); +vi.mock('./logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); +vi.mock('./config'); +vi.mock('./metadata_types'); +vi.mock('node:stream/promises', () => ({ + pipeline: vi.fn(), +})); + +const mockStorage = Storage as MockedClass; +const mockFse = fse as Mocked; +const mockCreateReadStream = createReadStream as Mock; +const mockTar = tar as Mocked; +const mockGzipSync = gzipSync as Mock; +const mockGunzipSync = gunzipSync as Mock; +const mockUuidv4 = uuidv4 as Mock; +const mockSetTargetDir = configModule.setTargetDir as Mock; +const mockGetPersistedState = metadataModule.getPersistedState as Mock; +const 
METADATA_KEY = metadataModule.METADATA_KEY || '__persistedState'; + +type MockWriteStream = { + on: Mock< + (event: string, cb: (error?: Error | null) => void) => MockWriteStream + >; + destroy: Mock<() => void>; + destroyed: boolean; +}; + +type MockFile = { + save: Mock<(data: Buffer | string) => Promise>; + download: Mock<() => Promise<[Buffer]>>; + exists: Mock<() => Promise<[boolean]>>; + createWriteStream: Mock<() => MockWriteStream>; +}; + +type MockBucket = { + exists: Mock<() => Promise<[boolean]>>; + file: Mock<(path: string) => MockFile>; + name: string; +}; + +type MockStorageInstance = { + bucket: Mock<(name: string) => MockBucket>; + getBuckets: Mock<() => Promise<[Array<{ name: string }>]>>; + createBucket: Mock<(name: string) => Promise<[MockBucket]>>; +}; + +describe('GCSTaskStore', () => { + let bucketName: string; + let mockBucket: MockBucket; + let mockFile: MockFile; + let mockWriteStream: MockWriteStream; + let mockStorageInstance: MockStorageInstance; + + beforeEach(() => { + vi.clearAllMocks(); + bucketName = 'test-bucket'; + + mockWriteStream = { + on: vi.fn((event, cb) => { + if (event === 'finish') setTimeout(cb, 0); // Simulate async finish + return mockWriteStream; + }), + destroy: vi.fn(), + destroyed: false, + }; + + mockFile = { + save: vi.fn().mockResolvedValue(undefined), + download: vi.fn().mockResolvedValue([Buffer.from('')]), + exists: vi.fn().mockResolvedValue([true]), + createWriteStream: vi.fn().mockReturnValue(mockWriteStream), + }; + + mockBucket = { + exists: vi.fn().mockResolvedValue([true]), + file: vi.fn().mockReturnValue(mockFile), + name: bucketName, + }; + + mockStorageInstance = { + bucket: vi.fn().mockReturnValue(mockBucket), + getBuckets: vi.fn().mockResolvedValue([[{ name: bucketName }]]), + createBucket: vi.fn().mockResolvedValue([mockBucket]), + }; + mockStorage.mockReturnValue(mockStorageInstance as unknown as Storage); + + mockUuidv4.mockReturnValue('test-uuid'); + 
mockSetTargetDir.mockReturnValue('/tmp/workdir'); + mockGetPersistedState.mockReturnValue({ + _agentSettings: {}, + _taskState: 'submitted', + }); + (fse.pathExists as Mock).mockResolvedValue(true); + (fsPromises.readdir as Mock).mockResolvedValue(['file1.txt']); + mockTar.c.mockResolvedValue(undefined); + mockTar.x.mockResolvedValue(undefined); + mockFse.remove.mockResolvedValue(undefined); + mockFse.ensureDir.mockResolvedValue(undefined); + mockGzipSync.mockReturnValue(Buffer.from('compressed')); + mockGunzipSync.mockReturnValue(Buffer.from('{}')); + mockCreateReadStream.mockReturnValue({ on: vi.fn(), pipe: vi.fn() }); + }); + + describe('Constructor & Initialization', () => { + it('should initialize and check bucket existence', async () => { + const store = new GCSTaskStore(bucketName); + await store['ensureBucketInitialized'](); + expect(mockStorage).toHaveBeenCalledTimes(1); + expect(mockStorageInstance.getBuckets).toHaveBeenCalled(); + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('Bucket test-bucket exists'), + ); + }); + + it('should create bucket if it does not exist', async () => { + mockStorageInstance.getBuckets.mockResolvedValue([[]]); + const store = new GCSTaskStore(bucketName); + await store['ensureBucketInitialized'](); + expect(mockStorageInstance.createBucket).toHaveBeenCalledWith(bucketName); + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('Bucket test-bucket created successfully'), + ); + }); + + it('should throw if bucket creation fails', async () => { + mockStorageInstance.getBuckets.mockResolvedValue([[]]); + mockStorageInstance.createBucket.mockRejectedValue( + new Error('Create failed'), + ); + const store = new GCSTaskStore(bucketName); + await expect(store['ensureBucketInitialized']()).rejects.toThrow( + 'Failed to create GCS bucket test-bucket: Error: Create failed', + ); + }); + }); + + describe('save', () => { + const mockTask: SDKTask = { + id: 'task1', + contextId: 'ctx1', + kind: 'task', 
+ status: { state: 'working' }, + metadata: {}, + }; + + it('should save metadata and workspace', async () => { + const store = new GCSTaskStore(bucketName); + await store.save(mockTask); + + expect(mockFile.save).toHaveBeenCalledTimes(1); + expect(mockTar.c).toHaveBeenCalledTimes(1); + expect(mockCreateReadStream).toHaveBeenCalledTimes(1); + expect(mockFse.remove).toHaveBeenCalledTimes(1); + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('metadata saved to GCS'), + ); + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('workspace saved to GCS'), + ); + }); + + it('should handle tar creation failure', async () => { + mockFse.pathExists.mockImplementation( + async (path) => + !path.toString().includes('task-task1-workspace-test-uuid.tar.gz'), + ); + const store = new GCSTaskStore(bucketName); + await expect(store.save(mockTask)).rejects.toThrow( + 'tar.c command failed to create', + ); + }); + }); + + describe('load', () => { + it('should load task metadata and workspace', async () => { + mockGunzipSync.mockReturnValue( + Buffer.from( + JSON.stringify({ + [METADATA_KEY]: { _agentSettings: {}, _taskState: 'submitted' }, + _contextId: 'ctx1', + }), + ), + ); + mockFile.download.mockResolvedValue([Buffer.from('compressed metadata')]); + mockFile.download.mockResolvedValueOnce([ + Buffer.from('compressed metadata'), + ]); + mockBucket.file = vi.fn((path) => { + const newMockFile = { ...mockFile }; + if (path.includes('metadata')) { + newMockFile.download = vi + .fn() + .mockResolvedValue([Buffer.from('compressed metadata')]); + newMockFile.exists = vi.fn().mockResolvedValue([true]); + } else { + newMockFile.download = vi + .fn() + .mockResolvedValue([Buffer.from('compressed workspace')]); + newMockFile.exists = vi.fn().mockResolvedValue([true]); + } + return newMockFile; + }); + + const store = new GCSTaskStore(bucketName); + const task = await store.load('task1'); + + expect(task).toBeDefined(); + expect(task?.id).toBe('task1'); + 
expect(mockBucket.file).toHaveBeenCalledWith( + 'tasks/task1/metadata.tar.gz', + ); + expect(mockBucket.file).toHaveBeenCalledWith( + 'tasks/task1/workspace.tar.gz', + ); + expect(mockTar.x).toHaveBeenCalledTimes(1); + expect(mockFse.remove).toHaveBeenCalledTimes(1); + }); + + it('should return undefined if metadata not found', async () => { + mockFile.exists.mockResolvedValue([false]); + const store = new GCSTaskStore(bucketName); + const task = await store.load('task1'); + expect(task).toBeUndefined(); + expect(mockBucket.file).toHaveBeenCalledWith( + 'tasks/task1/metadata.tar.gz', + ); + }); + + it('should load metadata even if workspace not found', async () => { + mockGunzipSync.mockReturnValue( + Buffer.from( + JSON.stringify({ + [METADATA_KEY]: { _agentSettings: {}, _taskState: 'submitted' }, + _contextId: 'ctx1', + }), + ), + ); + + mockBucket.file = vi.fn((path) => { + const newMockFile = { ...mockFile }; + if (path.includes('workspace.tar.gz')) { + newMockFile.exists = vi.fn().mockResolvedValue([false]); + } else { + newMockFile.exists = vi.fn().mockResolvedValue([true]); + newMockFile.download = vi + .fn() + .mockResolvedValue([Buffer.from('compressed metadata')]); + } + return newMockFile; + }); + + const store = new GCSTaskStore(bucketName); + const task = await store.load('task1'); + + expect(task).toBeDefined(); + expect(mockTar.x).not.toHaveBeenCalled(); + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('workspace archive not found'), + ); + }); + }); +}); + +describe('NoOpTaskStore', () => { + let realStore: TaskStore; + let noOpStore: NoOpTaskStore; + + beforeEach(() => { + // Create a mock of the real store to delegate to + realStore = { + save: vi.fn(), + load: vi.fn().mockResolvedValue({ id: 'task-123' } as SDKTask), + }; + noOpStore = new NoOpTaskStore(realStore); + }); + + it("should not call the real store's save method", async () => { + const mockTask: SDKTask = { id: 'test-task' } as SDKTask; + await 
noOpStore.save(mockTask); + expect(realStore.save).not.toHaveBeenCalled(); + }); + + it('should delegate the load method to the real store', async () => { + const taskId = 'task-123'; + const result = await noOpStore.load(taskId); + expect(realStore.load).toHaveBeenCalledWith(taskId); + expect(result).toBeDefined(); + expect(result?.id).toBe(taskId); + }); +}); diff --git a/projects/gemini-cli/packages/a2a-server/src/gcs.ts b/projects/gemini-cli/packages/a2a-server/src/gcs.ts new file mode 100644 index 0000000000000000000000000000000000000000..8591d454629e7b175a5d396d85c35e140c2386f7 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/gcs.ts @@ -0,0 +1,308 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Storage } from '@google-cloud/storage'; +import { gzipSync, gunzipSync } from 'node:zlib'; +import * as tar from 'tar'; +import * as fse from 'fs-extra'; +import { promises as fsPromises, createReadStream } from 'node:fs'; +import { tmpdir } from 'node:os'; +import { join } from 'node:path'; +import type { Task as SDKTask } from '@a2a-js/sdk'; +import type { TaskStore } from '@a2a-js/sdk/server'; +import { logger } from './logger.js'; +import { setTargetDir } from './config.js'; +import { + getPersistedState, + type PersistedTaskMetadata, +} from './metadata_types.js'; +import { v4 as uuidv4 } from 'uuid'; + +type ObjectType = 'metadata' | 'workspace'; + +const getTmpArchiveFilename = (taskId: string): string => + `task-${taskId}-workspace-${uuidv4()}.tar.gz`; + +export class GCSTaskStore implements TaskStore { + private storage: Storage; + private bucketName: string; + private bucketInitialized: Promise; + + constructor(bucketName: string) { + if (!bucketName) { + throw new Error('GCS bucket name is required.'); + } + this.storage = new Storage(); + this.bucketName = bucketName; + logger.info(`GCSTaskStore initializing with bucket: ${this.bucketName}`); + // Prerequisites: user account or 
service account must have storage admin IAM role + // and the bucket name must be unique. + this.bucketInitialized = this.initializeBucket(); + } + + private async initializeBucket(): Promise { + try { + const [buckets] = await this.storage.getBuckets(); + const exists = buckets.some((bucket) => bucket.name === this.bucketName); + + if (!exists) { + logger.info( + `Bucket ${this.bucketName} does not exist in the list. Attempting to create...`, + ); + try { + await this.storage.createBucket(this.bucketName); + logger.info(`Bucket ${this.bucketName} created successfully.`); + } catch (createError) { + logger.info( + `Failed to create bucket ${this.bucketName}: ${createError}`, + ); + throw new Error( + `Failed to create GCS bucket ${this.bucketName}: ${createError}`, + ); + } + } else { + logger.info(`Bucket ${this.bucketName} exists.`); + } + } catch (error) { + logger.info( + `Error during bucket initialization for ${this.bucketName}: ${error}`, + ); + throw new Error( + `Failed to initialize GCS bucket ${this.bucketName}: ${error}`, + ); + } + } + + private async ensureBucketInitialized(): Promise { + await this.bucketInitialized; + } + + private getObjectPath(taskId: string, type: ObjectType): string { + return `tasks/${taskId}/${type}.tar.gz`; + } + + async save(task: SDKTask): Promise { + await this.ensureBucketInitialized(); + const taskId = task.id; + const persistedState = getPersistedState( + task.metadata as PersistedTaskMetadata, + ); + + if (!persistedState) { + throw new Error(`Task ${taskId} is missing persisted state in metadata.`); + } + const workDir = process.cwd(); + + const metadataObjectPath = this.getObjectPath(taskId, 'metadata'); + const workspaceObjectPath = this.getObjectPath(taskId, 'workspace'); + + const dataToStore = task.metadata; + + try { + const jsonString = JSON.stringify(dataToStore); + const compressedMetadata = gzipSync(Buffer.from(jsonString)); + const metadataFile = this.storage + .bucket(this.bucketName) + 
.file(metadataObjectPath); + await metadataFile.save(compressedMetadata, { + contentType: 'application/gzip', + }); + logger.info( + `Task ${taskId} metadata saved to GCS: gs://${this.bucketName}/${metadataObjectPath}`, + ); + + if (await fse.pathExists(workDir)) { + const entries = await fsPromises.readdir(workDir); + if (entries.length > 0) { + const tmpArchiveFile = join(tmpdir(), getTmpArchiveFilename(taskId)); + try { + await tar.c( + { + gzip: true, + file: tmpArchiveFile, + cwd: workDir, + portable: true, + }, + entries, + ); + + if (!(await fse.pathExists(tmpArchiveFile))) { + throw new Error( + `tar.c command failed to create ${tmpArchiveFile}`, + ); + } + + const workspaceFile = this.storage + .bucket(this.bucketName) + .file(workspaceObjectPath); + const sourceStream = createReadStream(tmpArchiveFile); + const destStream = workspaceFile.createWriteStream({ + contentType: 'application/gzip', + resumable: true, + }); + + await new Promise((resolve, reject) => { + sourceStream.on('error', (err) => { + logger.error( + `Error in source stream for ${tmpArchiveFile}:`, + err, + ); + // Attempt to close destStream if source fails + if (!destStream.destroyed) { + destStream.destroy(err); + } + reject(err); + }); + + destStream.on('error', (err) => { + logger.error( + `Error in GCS dest stream for ${workspaceObjectPath}:`, + err, + ); + reject(err); + }); + + destStream.on('finish', () => { + logger.info( + `GCS destStream finished for ${workspaceObjectPath}`, + ); + resolve(); + }); + + logger.info( + `Piping ${tmpArchiveFile} to GCS object ${workspaceObjectPath}`, + ); + sourceStream.pipe(destStream); + }); + logger.info( + `Task ${taskId} workspace saved to GCS: gs://${this.bucketName}/${workspaceObjectPath}`, + ); + } catch (error) { + logger.error( + `Error during workspace save process for ${taskId}:`, + error, + ); + throw error; + } finally { + logger.info(`Cleaning up temporary file: ${tmpArchiveFile}`); + try { + if (await fse.pathExists(tmpArchiveFile)) 
{ + await fse.remove(tmpArchiveFile); + logger.info( + `Successfully removed temporary file: ${tmpArchiveFile}`, + ); + } else { + logger.warn( + `Temporary file not found for cleanup: ${tmpArchiveFile}`, + ); + } + } catch (removeError) { + logger.error( + `Error removing temporary file ${tmpArchiveFile}:`, + removeError, + ); + } + } + } else { + logger.info( + `Workspace directory ${workDir} is empty, skipping workspace save for task ${taskId}.`, + ); + } + } else { + logger.info( + `Workspace directory ${workDir} not found, skipping workspace save for task ${taskId}.`, + ); + } + } catch (error) { + logger.error(`Failed to save task ${taskId} to GCS:`, error); + throw error; + } + } + + async load(taskId: string): Promise { + await this.ensureBucketInitialized(); + const metadataObjectPath = this.getObjectPath(taskId, 'metadata'); + const workspaceObjectPath = this.getObjectPath(taskId, 'workspace'); + + try { + const metadataFile = this.storage + .bucket(this.bucketName) + .file(metadataObjectPath); + const [metadataExists] = await metadataFile.exists(); + if (!metadataExists) { + logger.info(`Task ${taskId} metadata not found in GCS.`); + return undefined; + } + const [compressedMetadata] = await metadataFile.download(); + const jsonData = gunzipSync(compressedMetadata).toString(); + const loadedMetadata = JSON.parse(jsonData); + logger.info(`Task ${taskId} metadata loaded from GCS.`); + + const persistedState = getPersistedState(loadedMetadata); + if (!persistedState) { + throw new Error( + `Loaded metadata for task ${taskId} is missing internal persisted state.`, + ); + } + const agentSettings = persistedState._agentSettings; + + const workDir = setTargetDir(agentSettings); + await fse.ensureDir(workDir); + const workspaceFile = this.storage + .bucket(this.bucketName) + .file(workspaceObjectPath); + const [workspaceExists] = await workspaceFile.exists(); + if (workspaceExists) { + const tmpArchiveFile = join(tmpdir(), getTmpArchiveFilename(taskId)); + try { 
+ await workspaceFile.download({ destination: tmpArchiveFile }); + await tar.x({ file: tmpArchiveFile, cwd: workDir }); + logger.info( + `Task ${taskId} workspace restored from GCS to ${workDir}`, + ); + } finally { + if (await fse.pathExists(tmpArchiveFile)) { + await fse.remove(tmpArchiveFile); + } + } + } else { + logger.info(`Task ${taskId} workspace archive not found in GCS.`); + } + + return { + id: taskId, + contextId: loadedMetadata._contextId || uuidv4(), + kind: 'task', + status: { + state: persistedState._taskState, + timestamp: new Date().toISOString(), + }, + metadata: loadedMetadata, + history: [], + artifacts: [], + }; + } catch (error) { + logger.error(`Failed to load task ${taskId} from GCS:`, error); + throw error; + } + } +} + +export class NoOpTaskStore implements TaskStore { + constructor(private realStore: TaskStore) {} + + async save(task: SDKTask): Promise { + logger.info(`[NoOpTaskStore] save called for task ${task.id} - IGNORED`); + return Promise.resolve(); + } + + async load(taskId: string): Promise { + logger.info( + `[NoOpTaskStore] load called for task ${taskId}, delegating to real store.`, + ); + return this.realStore.load(taskId); + } +} diff --git a/projects/gemini-cli/packages/a2a-server/src/index.ts b/projects/gemini-cli/packages/a2a-server/src/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..2d0221fe82e00e4aadd9b7536379033fb1ff3e4e --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/index.ts @@ -0,0 +1,8 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +export * from './agent.js'; +export * from './types.js'; diff --git a/projects/gemini-cli/packages/a2a-server/src/logger.ts b/projects/gemini-cli/packages/a2a-server/src/logger.ts new file mode 100644 index 0000000000000000000000000000000000000000..8dca944b910ca4644c90cbec617803a5326aa416 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/logger.ts @@ -0,0 +1,28 @@ +/** + * 
@license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import winston from 'winston'; + +const logger = winston.createLogger({ + level: 'info', + format: winston.format.combine( + // First, add a timestamp to the log info object + winston.format.timestamp({ + format: 'YYYY-MM-DD HH:mm:ss.SSS A', // Custom timestamp format + }), + // Here we define the custom output format + winston.format.printf((info) => { + const { level, timestamp, message, ...rest } = info; + return ( + `[${level.toUpperCase()}] ${timestamp} -- ${message}` + + `${Object.keys(rest).length > 0 ? `\n${JSON.stringify(rest, null, 2)}` : ''}` + ); // Only print ...rest if present + }), + ), + transports: [new winston.transports.Console()], +}); + +export { logger }; diff --git a/projects/gemini-cli/packages/a2a-server/src/metadata_types.ts b/projects/gemini-cli/packages/a2a-server/src/metadata_types.ts new file mode 100644 index 0000000000000000000000000000000000000000..4e3383826fe34830e81c3f07dea98f25d289589e --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/metadata_types.ts @@ -0,0 +1,33 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { AgentSettings } from './types.js'; +import type { TaskState } from '@a2a-js/sdk'; + +export interface PersistedStateMetadata { + _agentSettings: AgentSettings; + _taskState: TaskState; +} + +export type PersistedTaskMetadata = { [k: string]: unknown }; + +export const METADATA_KEY = '__persistedState'; + +export function getPersistedState( + metadata: PersistedTaskMetadata, +): PersistedStateMetadata | undefined { + return metadata?.[METADATA_KEY] as PersistedStateMetadata | undefined; +} + +export function setPersistedState( + metadata: PersistedTaskMetadata, + state: PersistedStateMetadata, +): PersistedTaskMetadata { + return { + ...metadata, + [METADATA_KEY]: state, + }; +} diff --git a/projects/gemini-cli/packages/a2a-server/src/server.ts 
b/projects/gemini-cli/packages/a2a-server/src/server.ts new file mode 100644 index 0000000000000000000000000000000000000000..34a406ebad6724bdfab53cf5538b6e3afc575ab9 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/server.ts @@ -0,0 +1,33 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as url from 'node:url'; +import * as path from 'node:path'; + +import { logger } from './logger.js'; +import { main } from './agent.js'; + +// Check if the module is the main script being run. path.resolve() creates a +// canonical, absolute path, which avoids cross-platform issues. +const isMainModule = + path.resolve(process.argv[1]) === + path.resolve(url.fileURLToPath(import.meta.url)); + +process.on('uncaughtException', (error) => { + logger.error('Unhandled exception:', error); + process.exit(1); +}); + +if ( + import.meta.url.startsWith('file:') && + isMainModule && + process.env['NODE_ENV'] !== 'test' +) { + main().catch((error) => { + logger.error('[CoreAgent] Unhandled error in main:', error); + process.exit(1); + }); +} diff --git a/projects/gemini-cli/packages/a2a-server/src/settings.ts b/projects/gemini-cli/packages/a2a-server/src/settings.ts new file mode 100644 index 0000000000000000000000000000000000000000..dbe5129955c4469f224bbb3cdc7f0e6d26d86810 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/settings.ts @@ -0,0 +1,154 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { homedir } from 'node:os'; + +import type { MCPServerConfig } from '@google/gemini-cli-core'; +import { + getErrorMessage, + type TelemetrySettings, +} from '@google/gemini-cli-core'; +import stripJsonComments from 'strip-json-comments'; + +export const SETTINGS_DIRECTORY_NAME = '.gemini'; +export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME); +export const 
USER_SETTINGS_PATH = path.join(USER_SETTINGS_DIR, 'settings.json'); + +// Reconcile with https://github.com/google-gemini/gemini-cli/blob/b09bc6656080d4d12e1d06734aae2ec33af5c1ed/packages/cli/src/config/settings.ts#L53 +export interface Settings { + mcpServers?: Record; + coreTools?: string[]; + excludeTools?: string[]; + telemetry?: TelemetrySettings; + showMemoryUsage?: boolean; + checkpointing?: CheckpointingSettings; + + // Git-aware file filtering settings + fileFiltering?: { + respectGitIgnore?: boolean; + enableRecursiveFileSearch?: boolean; + }; +} + +export interface SettingsError { + message: string; + path: string; +} + +export interface CheckpointingSettings { + enabled?: boolean; +} + +/** + * Loads settings from user and workspace directories. + * Project settings override user settings. + * + * How is it different to gemini-cli/cli: Returns already merged settings rather + * than `LoadedSettings` (unnecessary since we are not modifying users + * settings.json). + */ +export function loadSettings(workspaceDir: string): Settings { + let userSettings: Settings = {}; + let workspaceSettings: Settings = {}; + const settingsErrors: SettingsError[] = []; + + // Load user settings + try { + if (fs.existsSync(USER_SETTINGS_PATH)) { + const userContent = fs.readFileSync(USER_SETTINGS_PATH, 'utf-8'); + const parsedUserSettings = JSON.parse( + stripJsonComments(userContent), + ) as Settings; + userSettings = resolveEnvVarsInObject(parsedUserSettings); + } + } catch (error: unknown) { + settingsErrors.push({ + message: getErrorMessage(error), + path: USER_SETTINGS_PATH, + }); + } + + const workspaceSettingsPath = path.join( + workspaceDir, + SETTINGS_DIRECTORY_NAME, + 'settings.json', + ); + + // Load workspace settings + try { + if (fs.existsSync(workspaceSettingsPath)) { + const projectContent = fs.readFileSync(workspaceSettingsPath, 'utf-8'); + const parsedWorkspaceSettings = JSON.parse( + stripJsonComments(projectContent), + ) as Settings; + workspaceSettings 
= resolveEnvVarsInObject(parsedWorkspaceSettings); + } + } catch (error: unknown) { + settingsErrors.push({ + message: getErrorMessage(error), + path: workspaceSettingsPath, + }); + } + + if (settingsErrors.length > 0) { + console.error('Errors loading settings:'); + for (const error of settingsErrors) { + console.error(` Path: ${error.path}`); + console.error(` Message: ${error.message}`); + } + } + + // If there are overlapping keys, the values of workspaceSettings will + // override values from userSettings + return { + ...userSettings, + ...workspaceSettings, + }; +} + +function resolveEnvVarsInString(value: string): string { + const envVarRegex = /\$(?:(\w+)|{([^}]+)})/g; // Find $VAR_NAME or ${VAR_NAME} + return value.replace(envVarRegex, (match, varName1, varName2) => { + const varName = varName1 || varName2; + if (process && process.env && typeof process.env[varName] === 'string') { + return process.env[varName]!; + } + return match; + }); +} + +function resolveEnvVarsInObject(obj: T): T { + if ( + obj === null || + obj === undefined || + typeof obj === 'boolean' || + typeof obj === 'number' + ) { + return obj; + } + + if (typeof obj === 'string') { + return resolveEnvVarsInString(obj) as unknown as T; + } + + if (Array.isArray(obj)) { + return obj.map((item) => resolveEnvVarsInObject(item)) as unknown as T; + } + + if (typeof obj === 'object') { + const newObj = { ...obj } as T; + for (const key in newObj) { + if (Object.prototype.hasOwnProperty.call(newObj, key)) { + newObj[key] = resolveEnvVarsInObject(newObj[key]); + } + } + return newObj; + } + + return obj; +} diff --git a/projects/gemini-cli/packages/a2a-server/src/task.test.ts b/projects/gemini-cli/packages/a2a-server/src/task.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..6c14392f8ca8e43eb1cd0503069e78752b0a5ea7 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/task.test.ts @@ -0,0 +1,59 @@ +/** + * @license + * Copyright 2025 Google LLC + * 
SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi } from 'vitest'; +import { Task } from './task.js'; +import type { Config, ToolCallRequestInfo } from '@google/gemini-cli-core'; +import { createMockConfig } from './testing_utils.js'; +import type { ExecutionEventBus } from '@a2a-js/sdk/server'; + +describe('Task', () => { + it('scheduleToolCalls should not modify the input requests array', async () => { + const mockConfig = createMockConfig(); + + const mockEventBus: ExecutionEventBus = { + publish: vi.fn(), + on: vi.fn(), + off: vi.fn(), + once: vi.fn(), + removeAllListeners: vi.fn(), + finished: vi.fn(), + }; + + // The Task constructor is private. We'll bypass it for this unit test. + // @ts-expect-error - Calling private constructor for test purposes. + const task = new Task( + 'task-id', + 'context-id', + mockConfig as Config, + mockEventBus, + ); + + task['setTaskStateAndPublishUpdate'] = vi.fn(); + task['getProposedContent'] = vi.fn().mockResolvedValue('new content'); + + const requests: ToolCallRequestInfo[] = [ + { + callId: '1', + name: 'replace', + args: { + file_path: 'test.txt', + old_string: 'old', + new_string: 'new', + }, + isClientInitiated: false, + prompt_id: 'prompt-id-1', + }, + ]; + + const originalRequests = JSON.parse(JSON.stringify(requests)); + const abortController = new AbortController(); + + await task.scheduleToolCalls(requests, abortController.signal); + + expect(requests).toEqual(originalRequests); + }); +}); diff --git a/projects/gemini-cli/packages/a2a-server/src/task.ts b/projects/gemini-cli/packages/a2a-server/src/task.ts new file mode 100644 index 0000000000000000000000000000000000000000..dbdbbf495b2b5640173523b597e9477bea27808b --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/task.ts @@ -0,0 +1,936 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + CoreToolScheduler, + GeminiClient, + GeminiEventType, + 
ToolConfirmationOutcome, + ApprovalMode, + getAllMCPServerStatuses, + MCPServerStatus, + isNodeError, + parseAndFormatApiError, +} from '@google/gemini-cli-core'; +import type { + ToolConfirmationPayload, + CompletedToolCall, + ToolCall, + ToolCallRequestInfo, + ServerGeminiErrorEvent, + ServerGeminiStreamEvent, + ToolCallConfirmationDetails, + Config, + UserTierId, +} from '@google/gemini-cli-core'; +import type { RequestContext } from '@a2a-js/sdk/server'; +import { type ExecutionEventBus } from '@a2a-js/sdk/server'; +import type { + TaskStatusUpdateEvent, + TaskArtifactUpdateEvent, + TaskState, + Message, + Part, + Artifact, +} from '@a2a-js/sdk'; +import { v4 as uuidv4 } from 'uuid'; +import { logger } from './logger.js'; +import * as fs from 'node:fs'; + +import { CoderAgentEvent } from './types.js'; +import type { + CoderAgentMessage, + StateChange, + ToolCallUpdate, + TextContent, + TaskMetadata, + Thought, + ThoughtSummary, +} from './types.js'; +import type { PartUnion, Part as genAiPart } from '@google/genai'; + +export class Task { + id: string; + contextId: string; + scheduler: CoreToolScheduler; + config: Config; + geminiClient: GeminiClient; + pendingToolConfirmationDetails: Map; + taskState: TaskState; + eventBus?: ExecutionEventBus; + completedToolCalls: CompletedToolCall[]; + skipFinalTrueAfterInlineEdit = false; + + // For tool waiting logic + private pendingToolCalls: Map = new Map(); //toolCallId --> status + private toolCompletionPromise?: Promise; + private toolCompletionNotifier?: { + resolve: () => void; + reject: (reason?: Error) => void; + }; + + private constructor( + id: string, + contextId: string, + config: Config, + eventBus?: ExecutionEventBus, + ) { + this.id = id; + this.contextId = contextId; + this.config = config; + this.scheduler = this.createScheduler(); + this.geminiClient = new GeminiClient(this.config); + this.pendingToolConfirmationDetails = new Map(); + this.taskState = 'submitted'; + this.eventBus = eventBus; + 
this.completedToolCalls = []; + this._resetToolCompletionPromise(); + this.config.setFlashFallbackHandler( + async (currentModel: string, fallbackModel: string): Promise => { + config.setModel(fallbackModel); // gemini-cli-core sets to DEFAULT_GEMINI_FLASH_MODEL + // Switch model for future use but return false to stop current retry + return false; + }, + ); + } + + static async create( + id: string, + contextId: string, + config: Config, + eventBus?: ExecutionEventBus, + ): Promise { + return new Task(id, contextId, config, eventBus); + } + + // Note: `getAllMCPServerStatuses` retrieves the status of all MCP servers for the entire + // process. This is not scoped to the individual task but reflects the global connection + // state managed within the @gemini-cli/core module. + async getMetadata(): Promise { + const toolRegistry = await this.config.getToolRegistry(); + const mcpServers = this.config.getMcpServers() || {}; + const serverStatuses = getAllMCPServerStatuses(); + const servers = Object.keys(mcpServers).map((serverName) => ({ + name: serverName, + status: serverStatuses.get(serverName) || MCPServerStatus.DISCONNECTED, + tools: toolRegistry.getToolsByServer(serverName).map((tool) => ({ + name: tool.name, + description: tool.description, + parameterSchema: tool.schema.parameters, + })), + })); + + const availableTools = toolRegistry.getAllTools().map((tool) => ({ + name: tool.name, + description: tool.description, + parameterSchema: tool.schema.parameters, + })); + + const metadata: TaskMetadata = { + id: this.id, + contextId: this.contextId, + taskState: this.taskState, + model: this.config.getContentGeneratorConfig().model, + mcpServers: servers, + availableTools, + }; + return metadata; + } + + private _resetToolCompletionPromise(): void { + this.toolCompletionPromise = new Promise((resolve, reject) => { + this.toolCompletionNotifier = { resolve, reject }; + }); + // If there are no pending calls when reset, resolve immediately. 
+ if (this.pendingToolCalls.size === 0 && this.toolCompletionNotifier) { + this.toolCompletionNotifier.resolve(); + } + } + + private _registerToolCall(toolCallId: string, status: string): void { + const wasEmpty = this.pendingToolCalls.size === 0; + this.pendingToolCalls.set(toolCallId, status); + if (wasEmpty) { + this._resetToolCompletionPromise(); + } + logger.info( + `[Task] Registered tool call: ${toolCallId}. Pending: ${this.pendingToolCalls.size}`, + ); + } + + private _resolveToolCall(toolCallId: string): void { + if (this.pendingToolCalls.has(toolCallId)) { + this.pendingToolCalls.delete(toolCallId); + logger.info( + `[Task] Resolved tool call: ${toolCallId}. Pending: ${this.pendingToolCalls.size}`, + ); + if (this.pendingToolCalls.size === 0 && this.toolCompletionNotifier) { + this.toolCompletionNotifier.resolve(); + } + } + } + + async waitForPendingTools(): Promise { + if (this.pendingToolCalls.size === 0) { + return Promise.resolve(); + } + logger.info( + `[Task] Waiting for ${this.pendingToolCalls.size} pending tool(s)...`, + ); + return this.toolCompletionPromise; + } + + cancelPendingTools(reason: string): void { + if (this.pendingToolCalls.size > 0) { + logger.info( + `[Task] Cancelling all ${this.pendingToolCalls.size} pending tool calls. Reason: ${reason}`, + ); + } + if (this.toolCompletionNotifier) { + this.toolCompletionNotifier.reject(new Error(reason)); + } + this.pendingToolCalls.clear(); + // Reset the promise for any future operations, ensuring it's in a clean state. 
+ this._resetToolCompletionPromise(); + } + + private _createTextMessage( + text: string, + role: 'agent' | 'user' = 'agent', + ): Message { + return { + kind: 'message', + role, + parts: [{ kind: 'text', text }], + messageId: uuidv4(), + taskId: this.id, + contextId: this.contextId, + }; + } + + private _createStatusUpdateEvent( + stateToReport: TaskState, + coderAgentMessage: CoderAgentMessage, + message?: Message, + final = false, + timestamp?: string, + metadataError?: string, + ): TaskStatusUpdateEvent { + const metadata: { + coderAgent: CoderAgentMessage; + model: string; + userTier?: UserTierId; + error?: string; + } = { + coderAgent: coderAgentMessage, + model: this.config.getModel(), + userTier: this.geminiClient.getUserTier(), + }; + + if (metadataError) { + metadata.error = metadataError; + } + + return { + kind: 'status-update', + taskId: this.id, + contextId: this.contextId, + status: { + state: stateToReport, + message, // Shorthand property + timestamp: timestamp || new Date().toISOString(), + }, + final, + metadata, + }; + } + + setTaskStateAndPublishUpdate( + newState: TaskState, + coderAgentMessage: CoderAgentMessage, + messageText?: string, + messageParts?: Part[], // For more complex messages + final = false, + metadataError?: string, + ): void { + this.taskState = newState; + let message: Message | undefined; + + if (messageText) { + message = this._createTextMessage(messageText); + } else if (messageParts) { + message = { + kind: 'message', + role: 'agent', + parts: messageParts, + messageId: uuidv4(), + taskId: this.id, + contextId: this.contextId, + }; + } + + const event = this._createStatusUpdateEvent( + this.taskState, + coderAgentMessage, + message, + final, + undefined, + metadataError, + ); + this.eventBus?.publish(event); + } + + private _schedulerOutputUpdate( + toolCallId: string, + outputChunk: string, + ): void { + logger.info( + '[Task] Scheduler output update for tool call ' + + toolCallId + + ': ' + + outputChunk, + ); + const 
artifact: Artifact = { + artifactId: `tool-${toolCallId}-output`, + parts: [ + { + kind: 'text', + text: outputChunk, + } as Part, + ], + }; + const artifactEvent: TaskArtifactUpdateEvent = { + kind: 'artifact-update', + taskId: this.id, + contextId: this.contextId, + artifact, + append: true, + lastChunk: false, + }; + this.eventBus?.publish(artifactEvent); + } + + private async _schedulerAllToolCallsComplete( + completedToolCalls: CompletedToolCall[], + ): Promise { + logger.info( + '[Task] All tool calls completed by scheduler (batch):', + completedToolCalls.map((tc) => tc.request.callId), + ); + this.completedToolCalls.push(...completedToolCalls); + completedToolCalls.forEach((tc) => { + this._resolveToolCall(tc.request.callId); + }); + } + + private _schedulerToolCallsUpdate(toolCalls: ToolCall[]): void { + logger.info( + '[Task] Scheduler tool calls updated:', + toolCalls.map((tc) => `${tc.request.callId} (${tc.status})`), + ); + + // Update state and send continuous, non-final updates + toolCalls.forEach((tc) => { + const previousStatus = this.pendingToolCalls.get(tc.request.callId); + const hasChanged = previousStatus !== tc.status; + + // Resolve tool call if it has reached a terminal state + if (['success', 'error', 'cancelled'].includes(tc.status)) { + this._resolveToolCall(tc.request.callId); + } else { + // This will update the map + this._registerToolCall(tc.request.callId, tc.status); + } + + if (tc.status === 'awaiting_approval' && tc.confirmationDetails) { + this.pendingToolConfirmationDetails.set( + tc.request.callId, + tc.confirmationDetails, + ); + } + + // Only send an update if the status has actually changed. + if (hasChanged) { + const message = this.toolStatusMessage(tc, this.id, this.contextId); + const coderAgentMessage: CoderAgentMessage = + tc.status === 'awaiting_approval' + ? 
{ kind: CoderAgentEvent.ToolCallConfirmationEvent } + : { kind: CoderAgentEvent.ToolCallUpdateEvent }; + + const event = this._createStatusUpdateEvent( + this.taskState, + coderAgentMessage, + message, + false, // Always false for these continuous updates + ); + this.eventBus?.publish(event); + } + }); + + if (this.config.getApprovalMode() === ApprovalMode.YOLO) { + logger.info('[Task] YOLO mode enabled. Auto-approving all tool calls.'); + toolCalls.forEach((tc: ToolCall) => { + if (tc.status === 'awaiting_approval' && tc.confirmationDetails) { + tc.confirmationDetails.onConfirm(ToolConfirmationOutcome.ProceedOnce); + this.pendingToolConfirmationDetails.delete(tc.request.callId); + } + }); + return; + } + + const allPendingStatuses = Array.from(this.pendingToolCalls.values()); + const isAwaitingApproval = allPendingStatuses.some( + (status) => status === 'awaiting_approval', + ); + const allPendingAreStable = allPendingStatuses.every( + (status) => + status === 'awaiting_approval' || + status === 'success' || + status === 'error' || + status === 'cancelled', + ); + + // 1. Are any pending tool calls awaiting_approval + // 2. Are all pending tool calls in a stable state (i.e. not in validing or executing) + // 3. After an inline edit, the edited tool call will send awaiting_approval THEN scheduled. We wait for the next update in this case. + if ( + isAwaitingApproval && + allPendingAreStable && + !this.skipFinalTrueAfterInlineEdit + ) { + this.skipFinalTrueAfterInlineEdit = false; + + // We don't need to send another message, just a final status update. 
+ this.setTaskStateAndPublishUpdate( + 'input-required', + { kind: CoderAgentEvent.StateChangeEvent }, + undefined, + undefined, + /*final*/ true, + ); + } + } + + private createScheduler(): CoreToolScheduler { + const scheduler = new CoreToolScheduler({ + outputUpdateHandler: this._schedulerOutputUpdate.bind(this), + onAllToolCallsComplete: this._schedulerAllToolCallsComplete.bind(this), + onToolCallsUpdate: this._schedulerToolCallsUpdate.bind(this), + getPreferredEditor: () => 'vscode', + config: this.config, + onEditorClose: () => {}, + }); + return scheduler; + } + + private toolStatusMessage( + tc: ToolCall, + taskId: string, + contextId: string, + ): Message { + const messageParts: Part[] = []; + + // Create a serializable version of the ToolCall (pick necesssary + // properties/avoic methods causing circular reference errors) + const serializableToolCall: { [key: string]: unknown } = { + request: tc.request, + status: tc.status, + }; + + // For WaitingToolCall type + if ('confirmationDetails' in tc) { + serializableToolCall['confirmationDetails'] = tc.confirmationDetails; + } + + if (tc.tool) { + serializableToolCall['tool'] = { + name: tc.tool.name, + displayName: tc.tool.displayName, + description: tc.tool.description, + kind: tc.tool.kind, + isOutputMarkdown: tc.tool.isOutputMarkdown, + canUpdateOutput: tc.tool.canUpdateOutput, + schema: tc.tool.schema, + parameterSchema: tc.tool.parameterSchema, + }; + } + + messageParts.push({ + kind: 'data', + data: serializableToolCall as ToolCall, + } as Part); + + return { + kind: 'message', + role: 'agent', + parts: messageParts, + messageId: uuidv4(), + taskId, + contextId, + }; + } + + private async getProposedContent( + file_path: string, + old_string: string, + new_string: string, + ): Promise { + try { + const currentContent = fs.readFileSync(file_path, 'utf8'); + return this._applyReplacement( + currentContent, + old_string, + new_string, + old_string === '' && currentContent === '', + ); + } catch (err) { + 
if (!isNodeError(err) || err.code !== 'ENOENT') throw err; + return ''; + } + } + + private _applyReplacement( + currentContent: string | null, + oldString: string, + newString: string, + isNewFile: boolean, + ): string { + if (isNewFile) { + return newString; + } + if (currentContent === null) { + // Should not happen if not a new file, but defensively return empty or newString if oldString is also empty + return oldString === '' ? newString : ''; + } + // If oldString is empty and it's not a new file, do not modify the content. + if (oldString === '' && !isNewFile) { + return currentContent; + } + return currentContent.replaceAll(oldString, newString); + } + + async scheduleToolCalls( + requests: ToolCallRequestInfo[], + abortSignal: AbortSignal, + ): Promise { + if (requests.length === 0) { + return; + } + + const updatedRequests = await Promise.all( + requests.map(async (request) => { + if ( + request.name === 'replace' && + request.args && + !request.args['newContent'] && + request.args['file_path'] && + request.args['old_string'] && + request.args['new_string'] + ) { + const newContent = await this.getProposedContent( + request.args['file_path'] as string, + request.args['old_string'] as string, + request.args['new_string'] as string, + ); + return { ...request, args: { ...request.args, newContent } }; + } + return request; + }), + ); + + logger.info( + `[Task] Scheduling batch of ${updatedRequests.length} tool calls.`, + ); + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + this.setTaskStateAndPublishUpdate('working', stateChange); + + await this.scheduler.schedule(updatedRequests, abortSignal); + } + + async acceptAgentMessage(event: ServerGeminiStreamEvent): Promise { + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + switch (event.type) { + case GeminiEventType.Content: + logger.info('[Task] Sending agent message content...'); + this._sendTextContent(event.value); + break; + case 
GeminiEventType.ToolCallRequest: + // This is now handled by the agent loop, which collects all requests + // and calls scheduleToolCalls once. + logger.warn( + '[Task] A single tool call request was passed to acceptAgentMessage. This should be handled in a batch by the agent. Ignoring.', + ); + break; + case GeminiEventType.ToolCallResponse: + // This event type from ServerGeminiStreamEvent might be for when LLM *generates* a tool response part. + // The actual execution result comes via user message. + logger.info( + '[Task] Received tool call response from LLM (part of generation):', + event.value, + ); + break; + case GeminiEventType.ToolCallConfirmation: + // This is when LLM requests confirmation, not when user provides it. + logger.info( + '[Task] Received tool call confirmation request from LLM:', + event.value.request.callId, + ); + this.pendingToolConfirmationDetails.set( + event.value.request.callId, + event.value.details, + ); + // This will be handled by the scheduler and _schedulerToolCallsUpdate will set InputRequired if needed. + // No direct state change here, scheduler drives it. + break; + case GeminiEventType.UserCancelled: + logger.info('[Task] Received user cancelled event from LLM stream.'); + this.cancelPendingTools('User cancelled via LLM stream event'); + this.setTaskStateAndPublishUpdate( + 'input-required', + stateChange, + 'Task cancelled by user', + undefined, + true, + ); + break; + case GeminiEventType.Thought: + logger.info('[Task] Sending agent thought...'); + this._sendThought(event.value); + break; + case GeminiEventType.ChatCompressed: + break; + case GeminiEventType.Finished: + logger.info(`[Task ${this.id}] Agent finished its turn.`); + break; + case GeminiEventType.Error: + default: { + // Block scope for lexical declaration + const errorEvent = event as ServerGeminiErrorEvent; // Type assertion + const errorMessage = + errorEvent.value?.error.message ?? 
'Unknown error from LLM stream'; + logger.error( + '[Task] Received error event from LLM stream:', + errorMessage, + ); + + let errMessage = 'Unknown error from LLM stream'; + if (errorEvent.value) { + errMessage = parseAndFormatApiError(errorEvent.value); + } + this.cancelPendingTools(`LLM stream error: ${errorMessage}`); + this.setTaskStateAndPublishUpdate( + this.taskState, + stateChange, + `Agent Error, unknown agent message: ${errorMessage}`, + undefined, + false, + errMessage, + ); + break; + } + } + } + + private async _handleToolConfirmationPart(part: Part): Promise { + if ( + part.kind !== 'data' || + !part.data || + typeof part.data['callId'] !== 'string' || + typeof part.data['outcome'] !== 'string' + ) { + return false; + } + + const callId = part.data['callId'] as string; + const outcomeString = part.data['outcome'] as string; + let confirmationOutcome: ToolConfirmationOutcome | undefined; + + if (outcomeString === 'proceed_once') { + confirmationOutcome = ToolConfirmationOutcome.ProceedOnce; + } else if (outcomeString === 'cancel') { + confirmationOutcome = ToolConfirmationOutcome.Cancel; + } else if (outcomeString === 'proceed_always') { + confirmationOutcome = ToolConfirmationOutcome.ProceedAlways; + } else if (outcomeString === 'proceed_always_server') { + confirmationOutcome = ToolConfirmationOutcome.ProceedAlwaysServer; + } else if (outcomeString === 'proceed_always_tool') { + confirmationOutcome = ToolConfirmationOutcome.ProceedAlwaysTool; + } else if (outcomeString === 'modify_with_editor') { + confirmationOutcome = ToolConfirmationOutcome.ModifyWithEditor; + } else { + logger.warn( + `[Task] Unknown tool confirmation outcome: "${outcomeString}" for callId: ${callId}`, + ); + return false; + } + + const confirmationDetails = this.pendingToolConfirmationDetails.get(callId); + + if (!confirmationDetails) { + logger.warn( + `[Task] Received tool confirmation for unknown or already processed callId: ${callId}`, + ); + return false; + } + + 
logger.info( + `[Task] Handling tool confirmation for callId: ${callId} with outcome: ${outcomeString}`, + ); + try { + // Temporarily unset GCP environment variables so they do not leak into + // tool calls. + const gcpProject = process.env['GOOGLE_CLOUD_PROJECT']; + const gcpCreds = process.env['GOOGLE_APPLICATION_CREDENTIALS']; + try { + delete process.env['GOOGLE_CLOUD_PROJECT']; + delete process.env['GOOGLE_APPLICATION_CREDENTIALS']; + + // This will trigger the scheduler to continue or cancel the specific tool. + // The scheduler's onToolCallsUpdate will then reflect the new state (e.g., executing or cancelled). + + // If `edit` tool call, pass updated payload if presesent + if (confirmationDetails.type === 'edit') { + const payload = part.data['newContent'] + ? ({ + newContent: part.data['newContent'] as string, + } as ToolConfirmationPayload) + : undefined; + this.skipFinalTrueAfterInlineEdit = !!payload; + await confirmationDetails.onConfirm(confirmationOutcome, payload); + } else { + await confirmationDetails.onConfirm(confirmationOutcome); + } + } finally { + if (gcpProject) { + process.env['GOOGLE_CLOUD_PROJECT'] = gcpProject; + } + if (gcpCreds) { + process.env['GOOGLE_APPLICATION_CREDENTIALS'] = gcpCreds; + } + } + + // Do not delete if modifying, a subsequent tool confirmation for the same + // callId will be passed with ProceedOnce/Cancel/etc + // Note !== ToolConfirmationOutcome.ModifyWithEditor does not work! + if (confirmationOutcome !== 'modify_with_editor') { + this.pendingToolConfirmationDetails.delete(callId); + } + + // If outcome is Cancel, scheduler should update status to 'cancelled', which then resolves the tool. + // If ProceedOnce, scheduler updates to 'executing', then eventually 'success'/'error', which resolves. 
+ return true; + } catch (error) { + logger.error( + `[Task] Error during tool confirmation for callId ${callId}:`, + error, + ); + // If confirming fails, we should probably mark this tool as failed + this._resolveToolCall(callId); // Resolve it as it won't proceed. + const errorMessageText = + error instanceof Error + ? error.message + : `Error processing tool confirmation for ${callId}`; + const message = this._createTextMessage(errorMessageText); + const toolCallUpdate: ToolCallUpdate = { + kind: CoderAgentEvent.ToolCallUpdateEvent, + }; + const event = this._createStatusUpdateEvent( + this.taskState, + toolCallUpdate, + message, + false, + ); + this.eventBus?.publish(event); + return false; + } + } + + getAndClearCompletedTools(): CompletedToolCall[] { + const tools = [...this.completedToolCalls]; + this.completedToolCalls = []; + return tools; + } + + addToolResponsesToHistory(completedTools: CompletedToolCall[]): void { + logger.info( + `[Task] Adding ${completedTools.length} tool responses to history without generating a new response.`, + ); + const responsesToAdd = completedTools.flatMap( + (toolCall) => toolCall.response.responseParts, + ); + + for (const response of responsesToAdd) { + let parts: genAiPart[]; + if (Array.isArray(response)) { + parts = response; + } else if (typeof response === 'string') { + parts = [{ text: response }]; + } else { + parts = [response]; + } + this.geminiClient.addHistory({ + role: 'user', + parts, + }); + } + } + + async *sendCompletedToolsToLlm( + completedToolCalls: CompletedToolCall[], + aborted: AbortSignal, + ): AsyncGenerator { + if (completedToolCalls.length === 0) { + yield* (async function* () {})(); // Yield nothing + return; + } + + const llmParts: PartUnion[] = []; + logger.info( + `[Task] Feeding ${completedToolCalls.length} tool responses to LLM.`, + ); + for (const completedToolCall of completedToolCalls) { + logger.info( + `[Task] Adding tool response for "${completedToolCall.request.name}" (callId: 
${completedToolCall.request.callId}) to LLM input.`, + ); + const responseParts = completedToolCall.response.responseParts; + if (Array.isArray(responseParts)) { + llmParts.push(...responseParts); + } else { + llmParts.push(responseParts); + } + } + + logger.info('[Task] Sending new parts to agent.'); + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + // Set task state to working as we are about to call LLM + this.setTaskStateAndPublishUpdate('working', stateChange); + // TODO: Determine what it mean to have, then add a prompt ID. + yield* this.geminiClient.sendMessageStream( + llmParts, + aborted, + /*prompt_id*/ '', + ); + } + + async *acceptUserMessage( + requestContext: RequestContext, + aborted: AbortSignal, + ): AsyncGenerator { + const userMessage = requestContext.userMessage; + const llmParts: PartUnion[] = []; + let anyConfirmationHandled = false; + let hasContentForLlm = false; + + for (const part of userMessage.parts) { + const confirmationHandled = await this._handleToolConfirmationPart(part); + if (confirmationHandled) { + anyConfirmationHandled = true; + // If a confirmation was handled, the scheduler will now run the tool (or cancel it). + // We don't send anything to the LLM for this part. + // The subsequent tool execution will eventually lead to resolveToolCall. + continue; + } + + if (part.kind === 'text') { + llmParts.push({ text: part.text }); + hasContentForLlm = true; + } + } + + if (hasContentForLlm) { + logger.info('[Task] Sending new parts to LLM.'); + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + // Set task state to working as we are about to call LLM + this.setTaskStateAndPublishUpdate('working', stateChange); + // TODO: Determine what it mean to have, then add a prompt ID. 
+ yield* this.geminiClient.sendMessageStream( + llmParts, + aborted, + /*prompt_id*/ '', + ); + } else if (anyConfirmationHandled) { + logger.info( + '[Task] User message only contained tool confirmations. Scheduler is active. No new input for LLM this turn.', + ); + // Ensure task state reflects that scheduler might be working due to confirmation. + // If scheduler is active, it will emit its own status updates. + // If all pending tools were just confirmed, waitForPendingTools will handle the wait. + // If some tools are still pending approval, scheduler would have set InputRequired. + // If not, and no new text, we are just waiting. + if ( + this.pendingToolCalls.size > 0 && + this.taskState !== 'input-required' + ) { + const stateChange: StateChange = { + kind: CoderAgentEvent.StateChangeEvent, + }; + this.setTaskStateAndPublishUpdate('working', stateChange); // Reflect potential background activity + } + yield* (async function* () {})(); // Yield nothing + } else { + logger.info( + '[Task] No relevant parts in user message for LLM interaction or tool confirmation.', + ); + // If there's no new text and no confirmations, and no pending tools, + // it implies we might need to signal input required if nothing else is happening. + // However, the agent.ts will make this determination after waitForPendingTools. 
+ yield* (async function* () {})(); // Yield nothing + } + } + + _sendTextContent(content: string): void { + if (content === '') { + return; + } + logger.info('[Task] Sending text content to event bus.'); + const message = this._createTextMessage(content); + const textContent: TextContent = { + kind: CoderAgentEvent.TextContentEvent, + }; + this.eventBus?.publish( + this._createStatusUpdateEvent( + this.taskState, + textContent, + message, + false, + ), + ); + } + + _sendThought(content: ThoughtSummary): void { + if (!content.subject && !content.description) { + return; + } + logger.info('[Task] Sending thought to event bus.'); + const message: Message = { + kind: 'message', + role: 'agent', + parts: [ + { + kind: 'data', + data: content, + } as Part, + ], + messageId: uuidv4(), + taskId: this.id, + contextId: this.contextId, + }; + const thought: Thought = { + kind: CoderAgentEvent.ThoughtEvent, + }; + this.eventBus?.publish( + this._createStatusUpdateEvent(this.taskState, thought, message, false), + ); + } +} diff --git a/projects/gemini-cli/packages/a2a-server/src/testing_utils.ts b/projects/gemini-cli/packages/a2a-server/src/testing_utils.ts new file mode 100644 index 0000000000000000000000000000000000000000..07cfe783f1bed4cb70fdd61f7068a3f499ad9ae9 --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/testing_utils.ts @@ -0,0 +1,131 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { + Task as SDKTask, + TaskStatusUpdateEvent, + SendStreamingMessageSuccessResponse, +} from '@a2a-js/sdk'; +import { ApprovalMode } from '@google/gemini-cli-core'; +import type { Config } from '@google/gemini-cli-core'; +import { expect, vi } from 'vitest'; + +export function createMockConfig( + overrides: Partial = {}, +): Partial { + const mockConfig = { + getToolRegistry: vi.fn().mockReturnValue({ + getTool: vi.fn(), + getAllToolNames: vi.fn().mockReturnValue([]), + }), + getApprovalMode: 
vi.fn().mockReturnValue(ApprovalMode.DEFAULT), + getIdeMode: vi.fn().mockReturnValue(false), + getAllowedTools: vi.fn().mockReturnValue([]), + getIdeClient: vi.fn(), + getWorkspaceContext: vi.fn().mockReturnValue({ + isPathWithinWorkspace: () => true, + }), + getTargetDir: () => '/test', + getGeminiClient: vi.fn(), + getDebugMode: vi.fn().mockReturnValue(false), + getContentGeneratorConfig: vi.fn().mockReturnValue({ model: 'gemini-pro' }), + getModel: vi.fn().mockReturnValue('gemini-pro'), + getUsageStatisticsEnabled: vi.fn().mockReturnValue(false), + setFlashFallbackHandler: vi.fn(), + initialize: vi.fn().mockResolvedValue(undefined), + getProxy: vi.fn().mockReturnValue(undefined), + getHistory: vi.fn().mockReturnValue([]), + getEmbeddingModel: vi.fn().mockReturnValue('text-embedding-004'), + getSessionId: vi.fn().mockReturnValue('test-session-id'), + ...overrides, + }; + return mockConfig; +} + +export function createStreamMessageRequest( + text: string, + messageId: string, + taskId?: string, +) { + const request: { + jsonrpc: string; + id: string; + method: string; + params: { + message: { + kind: string; + role: string; + parts: [{ kind: string; text: string }]; + messageId: string; + }; + metadata: { + coderAgent: { + kind: string; + workspacePath: string; + }; + }; + taskId?: string; + }; + } = { + jsonrpc: '2.0', + id: '1', + method: 'message/stream', + params: { + message: { + kind: 'message', + role: 'user', + parts: [{ kind: 'text', text }], + messageId, + }, + metadata: { + coderAgent: { + kind: 'agent-settings', + workspacePath: '/tmp', + }, + }, + }, + }; + + if (taskId) { + request.params.taskId = taskId; + } + + return request; +} + +export function assertUniqueFinalEventIsLast( + events: SendStreamingMessageSuccessResponse[], +) { + // Final event is input-required & final + const finalEvent = events[events.length - 1].result as TaskStatusUpdateEvent; + expect(finalEvent.metadata?.['coderAgent']).toMatchObject({ + kind: 'state-change', + }); + 
expect(finalEvent.status?.state).toBe('input-required'); + expect(finalEvent.final).toBe(true); + + // There is only one event with final and its the last + expect( + events.filter((e) => (e.result as TaskStatusUpdateEvent).final).length, + ).toBe(1); + expect( + events.findIndex((e) => (e.result as TaskStatusUpdateEvent).final), + ).toBe(events.length - 1); +} + +export function assertTaskCreationAndWorkingStatus( + events: SendStreamingMessageSuccessResponse[], +) { + // Initial task creation event + const taskEvent = events[0].result as SDKTask; + expect(taskEvent.kind).toBe('task'); + expect(taskEvent.status.state).toBe('submitted'); + + // Status update: working + const workingEvent = events[1].result as TaskStatusUpdateEvent; + expect(workingEvent.kind).toBe('status-update'); + expect(workingEvent.status.state).toBe('working'); +} diff --git a/projects/gemini-cli/packages/a2a-server/src/types.ts b/projects/gemini-cli/packages/a2a-server/src/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..5a82059b473fae98b6012ea2c014f1967860c8be --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/src/types.ts @@ -0,0 +1,104 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { + MCPServerStatus, + ToolConfirmationOutcome, +} from '@google/gemini-cli-core'; +import type { TaskState } from '@a2a-js/sdk'; + +// Interfaces and enums for the CoderAgent protocol. + +export enum CoderAgentEvent { + /** + * An event requesting one or more tool call confirmations. + */ + ToolCallConfirmationEvent = 'tool-call-confirmation', + /** + * An event updating on the status of one or more tool calls. + */ + ToolCallUpdateEvent = 'tool-call-update', + /** + * An event providing text updates on the task. + */ + TextContentEvent = 'text-content', + /** + * An event that indicates a change in the task's execution state. 
+ */ + StateChangeEvent = 'state-change', + /** + * An user-sent event to initiate the agent. + */ + StateAgentSettingsEvent = 'agent-settings', + /** + * An event that contains a thought from the agent. + */ + ThoughtEvent = 'thought', +} + +export interface AgentSettings { + kind: CoderAgentEvent.StateAgentSettingsEvent; + workspacePath: string; +} + +export interface ToolCallConfirmation { + kind: CoderAgentEvent.ToolCallConfirmationEvent; +} + +export interface ToolCallUpdate { + kind: CoderAgentEvent.ToolCallUpdateEvent; +} + +export interface TextContent { + kind: CoderAgentEvent.TextContentEvent; +} + +export interface StateChange { + kind: CoderAgentEvent.StateChangeEvent; +} + +export interface Thought { + kind: CoderAgentEvent.ThoughtEvent; +} + +export type ThoughtSummary = { + subject: string; + description: string; +}; + +export interface ToolConfirmationResponse { + outcome: ToolConfirmationOutcome; + callId: string; +} + +export type CoderAgentMessage = + | AgentSettings + | ToolCallConfirmation + | ToolCallUpdate + | TextContent + | StateChange + | Thought; + +export interface TaskMetadata { + id: string; + contextId: string; + taskState: TaskState; + model: string; + mcpServers: Array<{ + name: string; + status: MCPServerStatus; + tools: Array<{ + name: string; + description: string; + parameterSchema: unknown; + }>; + }>; + availableTools: Array<{ + name: string; + description: string; + parameterSchema: unknown; + }>; +} diff --git a/projects/gemini-cli/packages/a2a-server/tsconfig.json b/projects/gemini-cli/packages/a2a-server/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..b788af471a25fcf152cca0cb570469e5e8abb62d --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/tsconfig.json @@ -0,0 +1,11 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "lib": ["DOM", "DOM.Iterable", "ES2021"], + "composite": true, + "types": ["node", "vitest/globals"] + }, + "include": 
["index.ts", "src/**/*.ts", "src/**/*.json"], + "exclude": ["node_modules", "dist"] +} diff --git a/projects/gemini-cli/packages/a2a-server/vitest.config.ts b/projects/gemini-cli/packages/a2a-server/vitest.config.ts new file mode 100644 index 0000000000000000000000000000000000000000..68332c394acf39cc054a8a4f035386d7d2dc833a --- /dev/null +++ b/projects/gemini-cli/packages/a2a-server/vitest.config.ts @@ -0,0 +1,26 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + reporters: [['default'], ['junit', { outputFile: 'junit.xml' }]], + passWithNoTests: true, + coverage: { + provider: 'v8', + reportsDirectory: './coverage', + reporter: [ + ['text', { file: 'full-text-summary.txt' }], + 'html', + 'json', + 'lcov', + 'cobertura', + ['json-summary', { outputFile: 'coverage-summary.json' }], + ], + }, + }, +}); diff --git a/projects/gemini-cli/packages/cli/index.ts b/projects/gemini-cli/packages/cli/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..e247b0959017cd696a88b47a4345036bf3e0701e --- /dev/null +++ b/projects/gemini-cli/packages/cli/index.ts @@ -0,0 +1,30 @@ +#!/usr/bin/env node + +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import './src/gemini.js'; +import { main } from './src/gemini.js'; +import { FatalError } from '@google/gemini-cli-core'; + +// --- Global Entry Point --- +main().catch((error) => { + if (error instanceof FatalError) { + let errorMessage = error.message; + if (!process.env['NO_COLOR']) { + errorMessage = `\x1b[31m${errorMessage}\x1b[0m`; + } + console.error(errorMessage); + process.exit(error.exitCode); + } + console.error('An unexpected critical error occurred:'); + if (error instanceof Error) { + console.error(error.stack); + } else { + console.error(String(error)); + } + process.exit(1); +}); diff --git 
a/projects/gemini-cli/packages/cli/package.json b/projects/gemini-cli/packages/cli/package.json new file mode 100644 index 0000000000000000000000000000000000000000..b44c3ae6acebf46cbb0c31732cf26a2f3f7a5920 --- /dev/null +++ b/projects/gemini-cli/packages/cli/package.json @@ -0,0 +1,84 @@ +{ + "name": "@google/gemini-cli", + "version": "0.2.2", + "description": "Gemini CLI", + "repository": { + "type": "git", + "url": "git+https://github.com/google-gemini/gemini-cli.git" + }, + "type": "module", + "main": "dist/index.js", + "bin": { + "gemini": "dist/index.js" + }, + "scripts": { + "build": "node ../../scripts/build_package.js", + "start": "node dist/index.js", + "debug": "node --inspect-brk dist/index.js", + "lint": "eslint . --ext .ts,.tsx", + "format": "prettier --write .", + "test": "vitest run", + "test:ci": "vitest run --coverage", + "typecheck": "tsc --noEmit" + }, + "files": [ + "dist" + ], + "config": { + "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.2.2" + }, + "dependencies": { + "@google/gemini-cli-core": "file:../core", + "@google/genai": "1.16.0", + "@iarna/toml": "^2.2.5", + "@modelcontextprotocol/sdk": "^1.15.1", + "@types/update-notifier": "^6.0.8", + "command-exists": "^1.2.9", + "diff": "^7.0.0", + "dotenv": "^17.1.0", + "glob": "^10.4.1", + "highlight.js": "^11.11.1", + "ink": "^6.2.3", + "ink-gradient": "^3.0.0", + "ink-spinner": "^5.0.0", + "lodash-es": "^4.17.21", + "lowlight": "^3.3.0", + "mime-types": "^3.0.1", + "open": "^10.1.2", + "react": "^19.1.0", + "read-package-up": "^11.0.0", + "simple-git": "^3.28.0", + "shell-quote": "^1.8.3", + "string-width": "^7.1.0", + "strip-ansi": "^7.1.0", + "strip-json-comments": "^3.1.1", + "undici": "^7.10.0", + "update-notifier": "^7.3.1", + "yargs": "^17.7.2", + "zod": "^3.23.8" + }, + "devDependencies": { + "@babel/runtime": "^7.27.6", + "@google/gemini-cli-test-utils": "file:../test-utils", + "@testing-library/react": "^16.3.0", + "@types/command-exists": "^1.2.3", + 
"@types/diff": "^7.0.2", + "@types/dotenv": "^6.1.1", + "@types/lodash-es": "^4.17.12", + "@types/node": "^20.11.24", + "@types/react": "^19.1.8", + "@types/react-dom": "^19.1.6", + "@types/semver": "^7.7.0", + "@types/shell-quote": "^1.7.5", + "@types/yargs": "^17.0.32", + "ink-testing-library": "^4.0.0", + "jsdom": "^26.1.0", + "pretty-format": "^30.0.2", + "react-dom": "^19.1.0", + "typescript": "^5.3.3", + "vitest": "^3.1.1" + }, + "engines": { + "node": ">=20" + } +} diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions.tsx b/projects/gemini-cli/packages/cli/src/commands/extensions.tsx new file mode 100644 index 0000000000000000000000000000000000000000..733866a1853737022d3137e78aaf4bb406f6cbc8 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/extensions.tsx @@ -0,0 +1,32 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { CommandModule } from 'yargs'; +import { installCommand } from './extensions/install.js'; +import { uninstallCommand } from './extensions/uninstall.js'; +import { listCommand } from './extensions/list.js'; +import { updateCommand } from './extensions/update.js'; +import { disableCommand } from './extensions/disable.js'; +import { enableCommand } from './extensions/enable.js'; + +export const extensionsCommand: CommandModule = { + command: 'extensions ', + describe: 'Manage Gemini CLI extensions.', + builder: (yargs) => + yargs + .command(installCommand) + .command(uninstallCommand) + .command(listCommand) + .command(updateCommand) + .command(disableCommand) + .command(enableCommand) + .demandCommand(1, 'You need at least one command before continuing.') + .version(false), + handler: () => { + // This handler is not called when a subcommand is provided. + // Yargs will show the help menu. 
+ }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions/disable.ts b/projects/gemini-cli/packages/cli/src/commands/extensions/disable.ts new file mode 100644 index 0000000000000000000000000000000000000000..139e7da8c41d1205457b18f1df559e14a4d708f7 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/extensions/disable.ts @@ -0,0 +1,51 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { type CommandModule } from 'yargs'; +import { disableExtension } from '../../config/extension.js'; +import { SettingScope } from '../../config/settings.js'; +import { getErrorMessage } from '../../utils/errors.js'; + +interface DisableArgs { + name: string; + scope: SettingScope; +} + +export async function handleDisable(args: DisableArgs) { + try { + disableExtension(args.name, args.scope); + console.log( + `Extension "${args.name}" successfully disabled for scope "${args.scope}".`, + ); + } catch (error) { + console.error(getErrorMessage(error)); + process.exit(1); + } +} + +export const disableCommand: CommandModule = { + command: 'disable [--scope] ', + describe: 'Disables an extension.', + builder: (yargs) => + yargs + .positional('name', { + describe: 'The name of the extension to disable.', + type: 'string', + }) + .option('scope', { + describe: 'The scope to disable the extenison in.', + type: 'string', + default: SettingScope.User, + choices: [SettingScope.User, SettingScope.Workspace], + }) + .check((_argv) => true), + handler: async (argv) => { + await handleDisable({ + name: argv['name'] as string, + scope: argv['scope'] as SettingScope, + }); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions/enable.ts b/projects/gemini-cli/packages/cli/src/commands/extensions/enable.ts new file mode 100644 index 0000000000000000000000000000000000000000..0d4474208a2478af60e2ebfc72f039c5055730ee --- /dev/null +++ 
b/projects/gemini-cli/packages/cli/src/commands/extensions/enable.ts @@ -0,0 +1,59 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { type CommandModule } from 'yargs'; +import { FatalConfigError, getErrorMessage } from '@google/gemini-cli-core'; +import { enableExtension } from '../../config/extension.js'; +import { SettingScope } from '../../config/settings.js'; + +interface EnableArgs { + name: string; + scope?: SettingScope; +} + +export async function handleEnable(args: EnableArgs) { + try { + const scopes = args.scope + ? [args.scope] + : [SettingScope.User, SettingScope.Workspace]; + enableExtension(args.name, scopes); + if (args.scope) { + console.log( + `Extension "${args.name}" successfully enabled for scope "${args.scope}".`, + ); + } else { + console.log( + `Extension "${args.name}" successfully enabled in all scopes.`, + ); + } + } catch (error) { + throw new FatalConfigError(getErrorMessage(error)); + } +} + +export const enableCommand: CommandModule = { + command: 'enable [--scope] ', + describe: 'Enables an extension.', + builder: (yargs) => + yargs + .positional('name', { + describe: 'The name of the extension to enable.', + type: 'string', + }) + .option('scope', { + describe: + 'The scope to enable the extenison in. 
If not set, will be enabled in all scopes.', + type: 'string', + choices: [SettingScope.User, SettingScope.Workspace], + }) + .check((_argv) => true), + handler: async (argv) => { + await handleEnable({ + name: argv['name'] as string, + scope: argv['scope'] as SettingScope, + }); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions/install.test.ts b/projects/gemini-cli/packages/cli/src/commands/extensions/install.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..6bf5c8608f7240f46ac882e6e3b36705f4ca633a --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/extensions/install.test.ts @@ -0,0 +1,25 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { installCommand } from './install.js'; +import yargs from 'yargs'; + +describe('extensions install command', () => { + it('should fail if no source is provided', () => { + const validationParser = yargs([]).command(installCommand).fail(false); + expect(() => validationParser.parse('install')).toThrow( + 'Either a git URL --source or a --path must be provided.', + ); + }); + + it('should fail if both git source and local path are provided', () => { + const validationParser = yargs([]).command(installCommand).fail(false); + expect(() => + validationParser.parse('install --source some-url --path /some/path'), + ).toThrow('Arguments source and path are mutually exclusive'); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions/install.ts b/projects/gemini-cli/packages/cli/src/commands/extensions/install.ts new file mode 100644 index 0000000000000000000000000000000000000000..af411c3d470371a0ebbdcc3c04b859e78ef2d8e6 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/extensions/install.ts @@ -0,0 +1,64 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { 
CommandModule } from 'yargs'; +import { + installExtension, + type ExtensionInstallMetadata, +} from '../../config/extension.js'; + +import { getErrorMessage } from '../../utils/errors.js'; + +interface InstallArgs { + source?: string; + path?: string; +} + +export async function handleInstall(args: InstallArgs) { + try { + const installMetadata: ExtensionInstallMetadata = { + source: (args.source || args.path) as string, + type: args.source ? 'git' : 'local', + }; + const extensionName = await installExtension(installMetadata); + console.log( + `Extension "${extensionName}" installed successfully and enabled.`, + ); + } catch (error) { + console.error(getErrorMessage(error)); + process.exit(1); + } +} + +export const installCommand: CommandModule = { + command: 'install [--source | --path ]', + describe: 'Installs an extension from a git repository or a local path.', + builder: (yargs) => + yargs + .option('source', { + describe: 'The git URL of the extension to install.', + type: 'string', + }) + .option('path', { + describe: 'Path to a local extension directory.', + type: 'string', + }) + .conflicts('source', 'path') + .check((argv) => { + if (!argv.source && !argv.path) { + throw new Error( + 'Either a git URL --source or a --path must be provided.', + ); + } + return true; + }), + handler: async (argv) => { + await handleInstall({ + source: argv['source'] as string | undefined, + path: argv['path'] as string | undefined, + }); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions/list.ts b/projects/gemini-cli/packages/cli/src/commands/extensions/list.ts new file mode 100644 index 0000000000000000000000000000000000000000..4611062505bb0187a7c4afd2d33849385daba678 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/extensions/list.ts @@ -0,0 +1,36 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { CommandModule } from 'yargs'; +import { loadUserExtensions, 
toOutputString } from '../../config/extension.js'; +import { getErrorMessage } from '../../utils/errors.js'; + +export async function handleList() { + try { + const extensions = loadUserExtensions(); + if (extensions.length === 0) { + console.log('No extensions installed.'); + return; + } + console.log( + extensions + .map((extension, _): string => toOutputString(extension)) + .join('\n\n'), + ); + } catch (error) { + console.error(getErrorMessage(error)); + process.exit(1); + } +} + +export const listCommand: CommandModule = { + command: 'list', + describe: 'Lists installed extensions.', + builder: (yargs) => yargs, + handler: async () => { + await handleList(); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions/uninstall.test.ts b/projects/gemini-cli/packages/cli/src/commands/extensions/uninstall.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..927e805b312214f48bcc82ede1c8471dff9d3192 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/extensions/uninstall.test.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { uninstallCommand } from './uninstall.js'; +import yargs from 'yargs'; + +describe('extensions uninstall command', () => { + it('should fail if no source is provided', () => { + const validationParser = yargs([]).command(uninstallCommand).fail(false); + expect(() => validationParser.parse('uninstall')).toThrow( + 'Not enough non-option arguments: got 0, need at least 1', + ); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions/uninstall.ts b/projects/gemini-cli/packages/cli/src/commands/extensions/uninstall.ts new file mode 100644 index 0000000000000000000000000000000000000000..ff93b79723f70ca640538c7a49f63b90324d1027 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/extensions/uninstall.ts @@ -0,0 +1,47 @@ +/** + * @license 
+ * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { CommandModule } from 'yargs'; +import { uninstallExtension } from '../../config/extension.js'; +import { getErrorMessage } from '../../utils/errors.js'; + +interface UninstallArgs { + name: string; +} + +export async function handleUninstall(args: UninstallArgs) { + try { + await uninstallExtension(args.name); + console.log(`Extension "${args.name}" successfully uninstalled.`); + } catch (error) { + console.error(getErrorMessage(error)); + process.exit(1); + } +} + +export const uninstallCommand: CommandModule = { + command: 'uninstall ', + describe: 'Uninstalls an extension.', + builder: (yargs) => + yargs + .positional('name', { + describe: 'The name of the extension to uninstall.', + type: 'string', + }) + .check((argv) => { + if (!argv.name) { + throw new Error( + 'Please include the name of the extension to uninstall as a positional argument.', + ); + } + return true; + }), + handler: async (argv) => { + await handleUninstall({ + name: argv['name'] as string, + }); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/extensions/update.ts b/projects/gemini-cli/packages/cli/src/commands/extensions/update.ts new file mode 100644 index 0000000000000000000000000000000000000000..f235304957ef1298af48ae1968fa6718ea2946b6 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/extensions/update.ts @@ -0,0 +1,76 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { CommandModule } from 'yargs'; +import { + updateExtensionByName, + updateAllUpdatableExtensions, + type ExtensionUpdateInfo, +} from '../../config/extension.js'; +import { getErrorMessage } from '../../utils/errors.js'; + +interface UpdateArgs { + name?: string; + all?: boolean; +} + +const updateOutput = (info: ExtensionUpdateInfo) => + `Extension "${info.name}" successfully updated: ${info.originalVersion} → ${info.updatedVersion}.`; 
+ +export async function handleUpdate(args: UpdateArgs) { + if (args.all) { + try { + const updateInfos = await updateAllUpdatableExtensions(); + if (updateInfos.length === 0) { + console.log('No extensions to update.'); + return; + } + console.log(updateInfos.map((info) => updateOutput(info)).join('\n')); + } catch (error) { + console.error(getErrorMessage(error)); + } + return; + } + if (args.name) + try { + // TODO(chrstnb): we should list extensions if the requested extension is not installed. + const updatedExtensionInfo = await updateExtensionByName(args.name); + console.log( + `Extension "${args.name}" successfully updated: ${updatedExtensionInfo.originalVersion} → ${updatedExtensionInfo.updatedVersion}.`, + ); + } catch (error) { + console.error(getErrorMessage(error)); + } +} + +export const updateCommand: CommandModule = { + command: 'update [--all] [name]', + describe: + 'Updates all extensions or a named extension to the latest version.', + builder: (yargs) => + yargs + .positional('name', { + describe: 'The name of the extension to update.', + type: 'string', + }) + .option('all', { + describe: 'Update all extensions.', + type: 'boolean', + }) + .conflicts('name', 'all') + .check((argv) => { + if (!argv.all && !argv.name) { + throw new Error('Either an extension name or --all must be provided'); + } + return true; + }), + handler: async (argv) => { + await handleUpdate({ + name: argv['name'] as string | undefined, + all: argv['all'] as boolean | undefined, + }); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/mcp.test.ts b/projects/gemini-cli/packages/cli/src/commands/mcp.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..b4e9980cb990241ce9a7f9cc2136711e782a4f4a --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/mcp.test.ts @@ -0,0 +1,55 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi } from 'vitest'; +import 
{ mcpCommand } from './mcp.js'; +import { type Argv } from 'yargs'; +import yargs from 'yargs'; + +describe('mcp command', () => { + it('should have correct command definition', () => { + expect(mcpCommand.command).toBe('mcp'); + expect(mcpCommand.describe).toBe('Manage MCP servers'); + expect(typeof mcpCommand.builder).toBe('function'); + expect(typeof mcpCommand.handler).toBe('function'); + }); + + it('should have exactly one option (help flag)', () => { + // Test to ensure that the global 'gemini' flags are not added to the mcp command + const yargsInstance = yargs(); + const builtYargs = mcpCommand.builder(yargsInstance); + const options = builtYargs.getOptions(); + + // Should have exactly 1 option (help flag) + expect(Object.keys(options.key).length).toBe(1); + expect(options.key).toHaveProperty('help'); + }); + + it('should register add, remove, and list subcommands', () => { + const mockYargs = { + command: vi.fn().mockReturnThis(), + demandCommand: vi.fn().mockReturnThis(), + version: vi.fn().mockReturnThis(), + }; + + mcpCommand.builder(mockYargs as unknown as Argv); + + expect(mockYargs.command).toHaveBeenCalledTimes(3); + + // Verify that the specific subcommands are registered + const commandCalls = mockYargs.command.mock.calls; + const commandNames = commandCalls.map((call) => call[0].command); + + expect(commandNames).toContain('add [args...]'); + expect(commandNames).toContain('remove '); + expect(commandNames).toContain('list'); + + expect(mockYargs.demandCommand).toHaveBeenCalledWith( + 1, + 'You need at least one command before continuing.', + ); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/commands/mcp.ts b/projects/gemini-cli/packages/cli/src/commands/mcp.ts new file mode 100644 index 0000000000000000000000000000000000000000..5e55286c1db0bd53a1a739a831bab5080656bb2c --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/mcp.ts @@ -0,0 +1,27 @@ +/** + * @license + * Copyright 2025 Google LLC + * 
SPDX-License-Identifier: Apache-2.0 + */ + +// File for 'gemini mcp' command +import type { CommandModule, Argv } from 'yargs'; +import { addCommand } from './mcp/add.js'; +import { removeCommand } from './mcp/remove.js'; +import { listCommand } from './mcp/list.js'; + +export const mcpCommand: CommandModule = { + command: 'mcp', + describe: 'Manage MCP servers', + builder: (yargs: Argv) => + yargs + .command(addCommand) + .command(removeCommand) + .command(listCommand) + .demandCommand(1, 'You need at least one command before continuing.') + .version(false), + handler: () => { + // yargs will automatically show help if no subcommand is provided + // thanks to demandCommand(1) in the builder. + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/mcp/add.test.ts b/projects/gemini-cli/packages/cli/src/commands/mcp/add.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..fc1ffb64cdafd360fd1a0aac6185f6ebfe2f4ce3 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/mcp/add.test.ts @@ -0,0 +1,122 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import yargs from 'yargs'; +import { addCommand } from './add.js'; +import { loadSettings, SettingScope } from '../../config/settings.js'; + +vi.mock('fs/promises', () => ({ + readFile: vi.fn(), + writeFile: vi.fn(), +})); + +vi.mock('../../config/settings.js', async () => { + const actual = await vi.importActual('../../config/settings.js'); + return { + ...actual, + loadSettings: vi.fn(), + }; +}); + +const mockedLoadSettings = loadSettings as vi.Mock; + +describe('mcp add command', () => { + let parser: yargs.Argv; + let mockSetValue: vi.Mock; + + beforeEach(() => { + vi.resetAllMocks(); + const yargsInstance = yargs([]).command(addCommand); + parser = yargsInstance; + mockSetValue = vi.fn(); + mockedLoadSettings.mockReturnValue({ + forScope: () => ({ settings: {} }), + setValue: mockSetValue, + }); + }); + + it('should add a 
stdio server to project settings', async () => { + await parser.parseAsync( + 'add my-server /path/to/server arg1 arg2 -e FOO=bar', + ); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + { + 'my-server': { + command: '/path/to/server', + args: ['arg1', 'arg2'], + env: { FOO: 'bar' }, + }, + }, + ); + }); + + it('should add an sse server to user settings', async () => { + await parser.parseAsync( + 'add --transport sse sse-server https://example.com/sse-endpoint --scope user -H "X-API-Key: your-key"', + ); + + expect(mockSetValue).toHaveBeenCalledWith(SettingScope.User, 'mcpServers', { + 'sse-server': { + url: 'https://example.com/sse-endpoint', + headers: { 'X-API-Key': 'your-key' }, + }, + }); + }); + + it('should add an http server to project settings', async () => { + await parser.parseAsync( + 'add --transport http http-server https://example.com/mcp -H "Authorization: Bearer your-token"', + ); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + { + 'http-server': { + httpUrl: 'https://example.com/mcp', + headers: { Authorization: 'Bearer your-token' }, + }, + }, + ); + }); + + it('should handle MCP server args with -- separator', async () => { + await parser.parseAsync( + 'add my-server npx -- -y http://example.com/some-package', + ); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + { + 'my-server': { + command: 'npx', + args: ['-y', 'http://example.com/some-package'], + }, + }, + ); + }); + + it('should handle unknown options as MCP server args', async () => { + await parser.parseAsync( + 'add test-server npx -y http://example.com/some-package', + ); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + { + 'test-server': { + command: 'npx', + args: ['-y', 'http://example.com/some-package'], + }, + }, + ); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/commands/mcp/add.ts 
b/projects/gemini-cli/packages/cli/src/commands/mcp/add.ts new file mode 100644 index 0000000000000000000000000000000000000000..47c481a29b60526b4f65db252251b0d8d9e16db6 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/mcp/add.ts @@ -0,0 +1,222 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// File for 'gemini mcp add' command +import type { CommandModule } from 'yargs'; +import { loadSettings, SettingScope } from '../../config/settings.js'; +import type { MCPServerConfig } from '@google/gemini-cli-core'; + +async function addMcpServer( + name: string, + commandOrUrl: string, + args: Array | undefined, + options: { + scope: string; + transport: string; + env: string[] | undefined; + header: string[] | undefined; + timeout?: number; + trust?: boolean; + description?: string; + includeTools?: string[]; + excludeTools?: string[]; + }, +) { + const { + scope, + transport, + env, + header, + timeout, + trust, + description, + includeTools, + excludeTools, + } = options; + const settingsScope = + scope === 'user' ? 
SettingScope.User : SettingScope.Workspace; + const settings = loadSettings(process.cwd()); + + let newServer: Partial = {}; + + const headers = header?.reduce( + (acc, curr) => { + const [key, ...valueParts] = curr.split(':'); + const value = valueParts.join(':').trim(); + if (key.trim() && value) { + acc[key.trim()] = value; + } + return acc; + }, + {} as Record, + ); + + switch (transport) { + case 'sse': + newServer = { + url: commandOrUrl, + headers, + timeout, + trust, + description, + includeTools, + excludeTools, + }; + break; + case 'http': + newServer = { + httpUrl: commandOrUrl, + headers, + timeout, + trust, + description, + includeTools, + excludeTools, + }; + break; + case 'stdio': + default: + newServer = { + command: commandOrUrl, + args: args?.map(String), + env: env?.reduce( + (acc, curr) => { + const [key, value] = curr.split('='); + if (key && value) { + acc[key] = value; + } + return acc; + }, + {} as Record, + ), + timeout, + trust, + description, + includeTools, + excludeTools, + }; + break; + } + + const existingSettings = settings.forScope(settingsScope).settings; + const mcpServers = existingSettings.mcpServers || {}; + + const isExistingServer = !!mcpServers[name]; + if (isExistingServer) { + console.log( + `MCP server "${name}" is already configured within ${scope} settings.`, + ); + } + + mcpServers[name] = newServer as MCPServerConfig; + + settings.setValue(settingsScope, 'mcpServers', mcpServers); + + if (isExistingServer) { + console.log(`MCP server "${name}" updated in ${scope} settings.`); + } else { + console.log( + `MCP server "${name}" added to ${scope} settings. 
(${transport})`, + ); + } +} + +export const addCommand: CommandModule = { + command: 'add [args...]', + describe: 'Add a server', + builder: (yargs) => + yargs + .usage('Usage: gemini mcp add [options] [args...]') + .parserConfiguration({ + 'unknown-options-as-args': true, // Pass unknown options as server args + 'populate--': true, // Populate server args after -- separator + }) + .positional('name', { + describe: 'Name of the server', + type: 'string', + demandOption: true, + }) + .positional('commandOrUrl', { + describe: 'Command (stdio) or URL (sse, http)', + type: 'string', + demandOption: true, + }) + .option('scope', { + alias: 's', + describe: 'Configuration scope (user or project)', + type: 'string', + default: 'project', + choices: ['user', 'project'], + }) + .option('transport', { + alias: 't', + describe: 'Transport type (stdio, sse, http)', + type: 'string', + default: 'stdio', + choices: ['stdio', 'sse', 'http'], + }) + .option('env', { + alias: 'e', + describe: 'Set environment variables (e.g. -e KEY=value)', + type: 'array', + string: true, + }) + .option('header', { + alias: 'H', + describe: + 'Set HTTP headers for SSE and HTTP transports (e.g. 
-H "X-Api-Key: abc123" -H "Authorization: Bearer abc123")', + type: 'array', + string: true, + }) + .option('timeout', { + describe: 'Set connection timeout in milliseconds', + type: 'number', + }) + .option('trust', { + describe: + 'Trust the server (bypass all tool call confirmation prompts)', + type: 'boolean', + }) + .option('description', { + describe: 'Set the description for the server', + type: 'string', + }) + .option('include-tools', { + describe: 'A comma-separated list of tools to include', + type: 'array', + string: true, + }) + .option('exclude-tools', { + describe: 'A comma-separated list of tools to exclude', + type: 'array', + string: true, + }) + .middleware((argv) => { + // Handle -- separator args as server args if present + if (argv['--']) { + const existingArgs = (argv['args'] as Array) || []; + argv['args'] = [...existingArgs, ...(argv['--'] as string[])]; + } + }), + handler: async (argv) => { + await addMcpServer( + argv['name'] as string, + argv['commandOrUrl'] as string, + argv['args'] as Array, + { + scope: argv['scope'] as string, + transport: argv['transport'] as string, + env: argv['env'] as string[], + header: argv['header'] as string[], + timeout: argv['timeout'] as number | undefined, + trust: argv['trust'] as boolean | undefined, + description: argv['description'] as string | undefined, + includeTools: argv['includeTools'] as string[] | undefined, + excludeTools: argv['excludeTools'] as string[] | undefined, + }, + ); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/mcp/list.test.ts b/projects/gemini-cli/packages/cli/src/commands/mcp/list.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..f3eb72ea22ea4950562f4a5ad7df398bfc95feab --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/mcp/list.test.ts @@ -0,0 +1,172 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, afterEach } from 
'vitest'; +import { listMcpServers } from './list.js'; +import { loadSettings } from '../../config/settings.js'; +import { loadExtensions } from '../../config/extension.js'; +import { createTransport } from '@google/gemini-cli-core'; +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; + +vi.mock('../../config/settings.js', () => ({ + loadSettings: vi.fn(), +})); +vi.mock('../../config/extension.js', () => ({ + loadExtensions: vi.fn(), +})); +vi.mock('@google/gemini-cli-core', () => ({ + createTransport: vi.fn(), + MCPServerStatus: { + CONNECTED: 'CONNECTED', + CONNECTING: 'CONNECTING', + DISCONNECTED: 'DISCONNECTED', + }, + Storage: vi.fn().mockImplementation((_cwd: string) => ({ + getGlobalSettingsPath: () => '/tmp/gemini/settings.json', + getWorkspaceSettingsPath: () => '/tmp/gemini/workspace-settings.json', + getProjectTempDir: () => '/test/home/.gemini/tmp/mocked_hash', + })), + GEMINI_CONFIG_DIR: '.gemini', + getErrorMessage: (e: unknown) => (e instanceof Error ? e.message : String(e)), +})); +vi.mock('@modelcontextprotocol/sdk/client/index.js'); + +const mockedLoadSettings = loadSettings as vi.Mock; +const mockedLoadExtensions = loadExtensions as vi.Mock; +const mockedCreateTransport = createTransport as vi.Mock; +const MockedClient = Client as vi.Mock; + +interface MockClient { + connect: vi.Mock; + ping: vi.Mock; + close: vi.Mock; +} + +interface MockTransport { + close: vi.Mock; +} + +describe('mcp list command', () => { + let consoleSpy: vi.SpyInstance; + let mockClient: MockClient; + let mockTransport: MockTransport; + + beforeEach(() => { + vi.resetAllMocks(); + + consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + + mockTransport = { close: vi.fn() }; + mockClient = { + connect: vi.fn(), + ping: vi.fn(), + close: vi.fn(), + }; + + MockedClient.mockImplementation(() => mockClient); + mockedCreateTransport.mockResolvedValue(mockTransport); + mockedLoadExtensions.mockReturnValue([]); + }); + + afterEach(() => { + 
consoleSpy.mockRestore(); + }); + + it('should display message when no servers configured', async () => { + mockedLoadSettings.mockReturnValue({ merged: { mcpServers: {} } }); + + await listMcpServers(); + + expect(consoleSpy).toHaveBeenCalledWith('No MCP servers configured.'); + }); + + it('should display different server types with connected status', async () => { + mockedLoadSettings.mockReturnValue({ + merged: { + mcpServers: { + 'stdio-server': { command: '/path/to/server', args: ['arg1'] }, + 'sse-server': { url: 'https://example.com/sse' }, + 'http-server': { httpUrl: 'https://example.com/http' }, + }, + }, + }); + + mockClient.connect.mockResolvedValue(undefined); + mockClient.ping.mockResolvedValue(undefined); + + await listMcpServers(); + + expect(consoleSpy).toHaveBeenCalledWith('Configured MCP servers:\n'); + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'stdio-server: /path/to/server arg1 (stdio) - Connected', + ), + ); + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'sse-server: https://example.com/sse (sse) - Connected', + ), + ); + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'http-server: https://example.com/http (http) - Connected', + ), + ); + }); + + it('should display disconnected status when connection fails', async () => { + mockedLoadSettings.mockReturnValue({ + merged: { + mcpServers: { + 'test-server': { command: '/test/server' }, + }, + }, + }); + + mockClient.connect.mockRejectedValue(new Error('Connection failed')); + + await listMcpServers(); + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'test-server: /test/server (stdio) - Disconnected', + ), + ); + }); + + it('should merge extension servers with config servers', async () => { + mockedLoadSettings.mockReturnValue({ + merged: { + mcpServers: { 'config-server': { command: '/config/server' } }, + }, + }); + + mockedLoadExtensions.mockReturnValue([ + { + config: { + name: 'test-extension', 
+ mcpServers: { 'extension-server': { command: '/ext/server' } }, + }, + }, + ]); + + mockClient.connect.mockResolvedValue(undefined); + mockClient.ping.mockResolvedValue(undefined); + + await listMcpServers(); + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'config-server: /config/server (stdio) - Connected', + ), + ); + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'extension-server: /ext/server (stdio) - Connected', + ), + ); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/commands/mcp/list.ts b/projects/gemini-cli/packages/cli/src/commands/mcp/list.ts new file mode 100644 index 0000000000000000000000000000000000000000..3d0f6e27cb48ff043bee7713c7ad3b7259dbcfce --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/mcp/list.ts @@ -0,0 +1,136 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// File for 'gemini mcp list' command +import type { CommandModule } from 'yargs'; +import { loadSettings } from '../../config/settings.js'; +import type { MCPServerConfig } from '@google/gemini-cli-core'; +import { MCPServerStatus, createTransport } from '@google/gemini-cli-core'; +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { loadExtensions } from '../../config/extension.js'; + +const COLOR_GREEN = '\u001b[32m'; +const COLOR_YELLOW = '\u001b[33m'; +const COLOR_RED = '\u001b[31m'; +const RESET_COLOR = '\u001b[0m'; + +async function getMcpServersFromConfig(): Promise< + Record +> { + const settings = loadSettings(process.cwd()); + const extensions = loadExtensions(process.cwd()); + const mcpServers = { ...(settings.merged.mcpServers || {}) }; + for (const extension of extensions) { + Object.entries(extension.config.mcpServers || {}).forEach( + ([key, server]) => { + if (mcpServers[key]) { + return; + } + mcpServers[key] = { + ...server, + extensionName: extension.config.name, + }; + }, + ); + } + return mcpServers; +} + 
+async function testMCPConnection( + serverName: string, + config: MCPServerConfig, +): Promise { + const client = new Client({ + name: 'mcp-test-client', + version: '0.0.1', + }); + + let transport; + try { + // Use the same transport creation logic as core + transport = await createTransport(serverName, config, false); + } catch (_error) { + await client.close(); + return MCPServerStatus.DISCONNECTED; + } + + try { + // Attempt actual MCP connection with short timeout + await client.connect(transport, { timeout: 5000 }); // 5s timeout + + // Test basic MCP protocol by pinging the server + await client.ping(); + + await client.close(); + return MCPServerStatus.CONNECTED; + } catch (_error) { + await transport.close(); + return MCPServerStatus.DISCONNECTED; + } +} + +async function getServerStatus( + serverName: string, + server: MCPServerConfig, +): Promise { + // Test all server types by attempting actual connection + return await testMCPConnection(serverName, server); +} + +export async function listMcpServers(): Promise { + const mcpServers = await getMcpServersFromConfig(); + const serverNames = Object.keys(mcpServers); + + if (serverNames.length === 0) { + console.log('No MCP servers configured.'); + return; + } + + console.log('Configured MCP servers:\n'); + + for (const serverName of serverNames) { + const server = mcpServers[serverName]; + + const status = await getServerStatus(serverName, server); + + let statusIndicator = ''; + let statusText = ''; + switch (status) { + case MCPServerStatus.CONNECTED: + statusIndicator = COLOR_GREEN + '✓' + RESET_COLOR; + statusText = 'Connected'; + break; + case MCPServerStatus.CONNECTING: + statusIndicator = COLOR_YELLOW + '…' + RESET_COLOR; + statusText = 'Connecting'; + break; + case MCPServerStatus.DISCONNECTED: + default: + statusIndicator = COLOR_RED + '✗' + RESET_COLOR; + statusText = 'Disconnected'; + break; + } + + let serverInfo = `${serverName}: `; + if (server.httpUrl) { + serverInfo += `${server.httpUrl} 
(http)`; + } else if (server.url) { + serverInfo += `${server.url} (sse)`; + } else if (server.command) { + serverInfo += `${server.command} ${server.args?.join(' ') || ''} (stdio)`; + } + + console.log(`${statusIndicator} ${serverInfo} - ${statusText}`); + } +} + +export const listCommand: CommandModule = { + command: 'list', + describe: 'List all configured MCP servers', + handler: async () => { + await listMcpServers(); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/commands/mcp/remove.test.ts b/projects/gemini-cli/packages/cli/src/commands/mcp/remove.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..eb7dedce50e1d62661f94baf1f20eab26a0bd00a --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/mcp/remove.test.ts @@ -0,0 +1,69 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach } from 'vitest'; +import yargs from 'yargs'; +import { loadSettings, SettingScope } from '../../config/settings.js'; +import { removeCommand } from './remove.js'; + +vi.mock('fs/promises', () => ({ + readFile: vi.fn(), + writeFile: vi.fn(), +})); + +vi.mock('../../config/settings.js', async () => { + const actual = await vi.importActual('../../config/settings.js'); + return { + ...actual, + loadSettings: vi.fn(), + }; +}); + +const mockedLoadSettings = loadSettings as vi.Mock; + +describe('mcp remove command', () => { + let parser: yargs.Argv; + let mockSetValue: vi.Mock; + let mockSettings: Record; + + beforeEach(() => { + vi.resetAllMocks(); + const yargsInstance = yargs([]).command(removeCommand); + parser = yargsInstance; + mockSetValue = vi.fn(); + mockSettings = { + mcpServers: { + 'test-server': { + command: 'echo "hello"', + }, + }, + }; + mockedLoadSettings.mockReturnValue({ + forScope: () => ({ settings: mockSettings }), + setValue: mockSetValue, + }); + }); + + it('should remove a server from project settings', async () => { + await 
parser.parseAsync('remove test-server'); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + {}, + ); + }); + + it('should show a message if server not found', async () => { + const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + await parser.parseAsync('remove non-existent-server'); + + expect(mockSetValue).not.toHaveBeenCalled(); + expect(consoleSpy).toHaveBeenCalledWith( + 'Server "non-existent-server" not found in project settings.', + ); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/commands/mcp/remove.ts b/projects/gemini-cli/packages/cli/src/commands/mcp/remove.ts new file mode 100644 index 0000000000000000000000000000000000000000..e05478e37ec7dae31286646c73f1526f333522ab --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/commands/mcp/remove.ts @@ -0,0 +1,60 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// File for 'gemini mcp remove' command +import type { CommandModule } from 'yargs'; +import { loadSettings, SettingScope } from '../../config/settings.js'; + +async function removeMcpServer( + name: string, + options: { + scope: string; + }, +) { + const { scope } = options; + const settingsScope = + scope === 'user' ? 
SettingScope.User : SettingScope.Workspace; + const settings = loadSettings(process.cwd()); + + const existingSettings = settings.forScope(settingsScope).settings; + const mcpServers = existingSettings.mcpServers || {}; + + if (!mcpServers[name]) { + console.log(`Server "${name}" not found in ${scope} settings.`); + return; + } + + delete mcpServers[name]; + + settings.setValue(settingsScope, 'mcpServers', mcpServers); + + console.log(`Server "${name}" removed from ${scope} settings.`); +} + +export const removeCommand: CommandModule = { + command: 'remove ', + describe: 'Remove a server', + builder: (yargs) => + yargs + .usage('Usage: gemini mcp remove [options] ') + .positional('name', { + describe: 'Name of the server', + type: 'string', + demandOption: true, + }) + .option('scope', { + alias: 's', + describe: 'Configuration scope (user or project)', + type: 'string', + default: 'project', + choices: ['user', 'project'], + }), + handler: async (argv) => { + await removeMcpServer(argv['name'] as string, { + scope: argv['scope'] as string, + }); + }, +}; diff --git a/projects/gemini-cli/packages/cli/src/config/auth.test.ts b/projects/gemini-cli/packages/cli/src/config/auth.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..8d565dca132ab336a0519bb8ed2cd9edab665eb6 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/auth.test.ts @@ -0,0 +1,78 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { AuthType } from '@google/gemini-cli-core'; +import { vi } from 'vitest'; +import { validateAuthMethod } from './auth.js'; + +vi.mock('./settings.js', () => ({ + loadEnvironment: vi.fn(), + loadSettings: vi.fn().mockReturnValue({ + merged: vi.fn().mockReturnValue({}), + }), +})); + +describe('validateAuthMethod', () => { + beforeEach(() => { + vi.resetModules(); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + }); + + it('should return null for LOGIN_WITH_GOOGLE', () => { + 
expect(validateAuthMethod(AuthType.LOGIN_WITH_GOOGLE)).toBeNull(); + }); + + it('should return null for CLOUD_SHELL', () => { + expect(validateAuthMethod(AuthType.CLOUD_SHELL)).toBeNull(); + }); + + describe('USE_GEMINI', () => { + it('should return null if GEMINI_API_KEY is set', () => { + vi.stubEnv('GEMINI_API_KEY', 'test-key'); + expect(validateAuthMethod(AuthType.USE_GEMINI)).toBeNull(); + }); + + it('should return an error message if GEMINI_API_KEY is not set', () => { + vi.stubEnv('GEMINI_API_KEY', undefined); + expect(validateAuthMethod(AuthType.USE_GEMINI)).toBe( + 'GEMINI_API_KEY environment variable not found. Add that to your environment and try again (no reload needed if using .env)!', + ); + }); + }); + + describe('USE_VERTEX_AI', () => { + it('should return null if GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION are set', () => { + vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project'); + vi.stubEnv('GOOGLE_CLOUD_LOCATION', 'test-location'); + expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull(); + }); + + it('should return null if GOOGLE_API_KEY is set', () => { + vi.stubEnv('GOOGLE_API_KEY', 'test-api-key'); + expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull(); + }); + + it('should return an error message if no required environment variables are set', () => { + vi.stubEnv('GOOGLE_CLOUD_PROJECT', undefined); + vi.stubEnv('GOOGLE_CLOUD_LOCATION', undefined); + expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBe( + 'When using Vertex AI, you must specify either:\n' + + '• GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables.\n' + + '• GOOGLE_API_KEY environment variable (if using express mode).\n' + + 'Update your environment and try again (no reload needed if using .env)!', + ); + }); + }); + + it('should return an error message for an invalid auth method', () => { + expect(validateAuthMethod('invalid-method')).toBe( + 'Invalid auth method selected.', + ); + }); +}); diff --git 
a/projects/gemini-cli/packages/cli/src/config/auth.ts b/projects/gemini-cli/packages/cli/src/config/auth.ts new file mode 100644 index 0000000000000000000000000000000000000000..234a4d907af0b27d07b165f4a9e361784629fa5a --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/auth.ts @@ -0,0 +1,43 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { AuthType } from '@google/gemini-cli-core'; +import { loadEnvironment, loadSettings } from './settings.js'; + +export function validateAuthMethod(authMethod: string): string | null { + loadEnvironment(loadSettings(process.cwd()).merged); + if ( + authMethod === AuthType.LOGIN_WITH_GOOGLE || + authMethod === AuthType.CLOUD_SHELL + ) { + return null; + } + + if (authMethod === AuthType.USE_GEMINI) { + if (!process.env['GEMINI_API_KEY']) { + return 'GEMINI_API_KEY environment variable not found. Add that to your environment and try again (no reload needed if using .env)!'; + } + return null; + } + + if (authMethod === AuthType.USE_VERTEX_AI) { + const hasVertexProjectLocationConfig = + !!process.env['GOOGLE_CLOUD_PROJECT'] && + !!process.env['GOOGLE_CLOUD_LOCATION']; + const hasGoogleApiKey = !!process.env['GOOGLE_API_KEY']; + if (!hasVertexProjectLocationConfig && !hasGoogleApiKey) { + return ( + 'When using Vertex AI, you must specify either:\n' + + '• GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables.\n' + + '• GOOGLE_API_KEY environment variable (if using express mode).\n' + + 'Update your environment and try again (no reload needed if using .env)!' 
+ ); + } + return null; + } + + return 'Invalid auth method selected.'; +} diff --git a/projects/gemini-cli/packages/cli/src/config/config.integration.test.ts b/projects/gemini-cli/packages/cli/src/config/config.integration.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..78f69bda9875738d540e6456f6a3eca27c533e57 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/config.integration.test.ts @@ -0,0 +1,407 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { tmpdir } from 'node:os'; +import type { + ConfigParameters, + ContentGeneratorConfig, +} from '@google/gemini-cli-core'; +import { Config } from '@google/gemini-cli-core'; +import { http, HttpResponse } from 'msw'; +import { setupServer } from 'msw/node'; + +export const server = setupServer(); + +// TODO(richieforeman): Consider moving this to test setup globally. 
+beforeAll(() => { + server.listen({}); +}); + +afterEach(() => { + server.resetHandlers(); +}); + +afterAll(() => { + server.close(); +}); + +const CLEARCUT_URL = 'https://play.googleapis.com/log'; + +const TEST_CONTENT_GENERATOR_CONFIG: ContentGeneratorConfig = { + apiKey: 'test-key', + model: 'test-model', + userAgent: 'test-agent', +}; + +// Mock file discovery service and tool registry +vi.mock('@google/gemini-cli-core', async () => { + const actual = await vi.importActual('@google/gemini-cli-core'); + return { + ...actual, + FileDiscoveryService: vi.fn().mockImplementation(() => ({ + initialize: vi.fn(), + })), + createToolRegistry: vi.fn().mockResolvedValue({}), + }; +}); + +describe('Configuration Integration Tests', () => { + let tempDir: string; + + beforeEach(() => { + server.resetHandlers(http.post(CLEARCUT_URL, () => HttpResponse.text())); + + tempDir = fs.mkdtempSync(path.join(tmpdir(), 'gemini-cli-test-')); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + vi.clearAllMocks(); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + if (fs.existsSync(tempDir)) { + fs.rmSync(tempDir, { recursive: true }); + } + }); + + describe('File Filtering Configuration', () => { + it('should load default file filtering settings', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + fileFilteringRespectGitIgnore: undefined, // Should default to true + }; + + const config = new Config(configParams); + + expect(config.getFileFilteringRespectGitIgnore()).toBe(true); + }); + + it('should load custom file filtering settings from configuration', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + fileFiltering: { + 
respectGitIgnore: false, + }, + }; + + const config = new Config(configParams); + + expect(config.getFileFilteringRespectGitIgnore()).toBe(false); + }); + + it('should merge user and workspace file filtering settings', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + fileFilteringRespectGitIgnore: true, + }; + + const config = new Config(configParams); + + expect(config.getFileFilteringRespectGitIgnore()).toBe(true); + }); + }); + + describe('Configuration Integration', () => { + it('should handle partial configuration objects gracefully', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + fileFiltering: { + respectGitIgnore: false, + }, + }; + + const config = new Config(configParams); + + // Specified settings should be applied + expect(config.getFileFilteringRespectGitIgnore()).toBe(false); + }); + + it('should handle empty configuration objects gracefully', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + fileFilteringRespectGitIgnore: undefined, + }; + + const config = new Config(configParams); + + // All settings should use defaults + expect(config.getFileFilteringRespectGitIgnore()).toBe(true); + }); + + it('should handle missing configuration sections gracefully', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + // Missing fileFiltering 
configuration + }; + + const config = new Config(configParams); + + // All git-aware settings should use defaults + expect(config.getFileFilteringRespectGitIgnore()).toBe(true); + }); + }); + + describe('Real-world Configuration Scenarios', () => { + it('should handle a security-focused configuration', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + fileFilteringRespectGitIgnore: true, + }; + + const config = new Config(configParams); + + expect(config.getFileFilteringRespectGitIgnore()).toBe(true); + }); + + it('should handle a CI/CD environment configuration', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + fileFiltering: { + respectGitIgnore: false, + }, // CI might need to see all files + }; + + const config = new Config(configParams); + + expect(config.getFileFilteringRespectGitIgnore()).toBe(false); + }); + }); + + describe('Checkpointing Configuration', () => { + it('should enable checkpointing when the setting is true', async () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + checkpointing: true, + }; + + const config = new Config(configParams); + + expect(config.getCheckpointingEnabled()).toBe(true); + }); + }); + + describe('Extension Context Files', () => { + it('should have an empty array for extension context files by default', () => { + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + 
targetDir: tempDir, + debugMode: false, + }; + const config = new Config(configParams); + expect(config.getExtensionContextFilePaths()).toEqual([]); + }); + + it('should correctly store and return extension context file paths', () => { + const contextFiles = ['/path/to/file1.txt', '/path/to/file2.js']; + const configParams: ConfigParameters = { + cwd: '/tmp', + contentGeneratorConfig: TEST_CONTENT_GENERATOR_CONFIG, + embeddingModel: 'test-embedding-model', + sandbox: false, + targetDir: tempDir, + debugMode: false, + extensionContextFilePaths: contextFiles, + }; + const config = new Config(configParams); + expect(config.getExtensionContextFilePaths()).toEqual(contextFiles); + }); + }); + + describe('Approval Mode Integration Tests', () => { + let parseArguments: typeof import('./config').parseArguments; + + beforeEach(async () => { + // Import the argument parsing function for integration testing + const { parseArguments: parseArgs } = await import('./config'); + parseArguments = parseArgs; + }); + + it('should parse --approval-mode=auto_edit correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + + const argv = await parseArguments({} as Settings); + + // Verify that the argument was parsed correctly + expect(argv.approvalMode).toBe('auto_edit'); + expect(argv.prompt).toBe('test'); + expect(argv.yolo).toBe(false); + } finally { + process.argv = originalArgv; + } + }); + + it('should parse --approval-mode=yolo correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'yolo', + '-p', + 'test', + ]; + + const argv = await parseArguments({} as Settings); + + expect(argv.approvalMode).toBe('yolo'); + expect(argv.prompt).toBe('test'); + expect(argv.yolo).toBe(false); // Should NOT be set when 
using --approval-mode + } finally { + process.argv = originalArgv; + } + }); + + it('should parse --approval-mode=default correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'default', + '-p', + 'test', + ]; + + const argv = await parseArguments({} as Settings); + + expect(argv.approvalMode).toBe('default'); + expect(argv.prompt).toBe('test'); + expect(argv.yolo).toBe(false); + } finally { + process.argv = originalArgv; + } + }); + + it('should parse legacy --yolo flag correctly', async () => { + const originalArgv = process.argv; + + try { + process.argv = ['node', 'script.js', '--yolo', '-p', 'test']; + + const argv = await parseArguments({} as Settings); + + expect(argv.yolo).toBe(true); + expect(argv.approvalMode).toBeUndefined(); // Should NOT be set when using --yolo + expect(argv.prompt).toBe('test'); + } finally { + process.argv = originalArgv; + } + }); + + it('should reject invalid approval mode values during argument parsing', async () => { + const originalArgv = process.argv; + + try { + process.argv = ['node', 'script.js', '--approval-mode', 'invalid_mode']; + + // Should throw during argument parsing due to yargs validation + await expect(parseArguments({} as Settings)).rejects.toThrow(); + } finally { + process.argv = originalArgv; + } + }); + + it('should reject conflicting --yolo and --approval-mode flags', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--yolo', + '--approval-mode', + 'default', + ]; + + // Should throw during argument parsing due to conflict validation + await expect(parseArguments({} as Settings)).rejects.toThrow(); + } finally { + process.argv = originalArgv; + } + }); + + it('should handle backward compatibility with mixed scenarios', async () => { + const originalArgv = process.argv; + + try { + // Test that no approval mode arguments 
defaults to no flags set + process.argv = ['node', 'script.js', '-p', 'test']; + + const argv = await parseArguments({} as Settings); + + expect(argv.approvalMode).toBeUndefined(); + expect(argv.yolo).toBe(false); + expect(argv.prompt).toBe('test'); + } finally { + process.argv = originalArgv; + } + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/config/config.test.ts b/projects/gemini-cli/packages/cli/src/config/config.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..79cf6d29c788c11f34245d88ad06f730e81f59b4 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/config.test.ts @@ -0,0 +1,2101 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + describe, + it, + expect, + vi, + beforeEach, + afterEach, + type Mock, +} from 'vitest'; +import * as os from 'node:os'; +import * as path from 'node:path'; +import { ShellTool, EditTool, WriteFileTool } from '@google/gemini-cli-core'; +import { loadCliConfig, parseArguments, type CliArgs } from './config.js'; +import type { Settings } from './settings.js'; +import type { Extension } from './extension.js'; +import * as ServerConfig from '@google/gemini-cli-core'; +import { isWorkspaceTrusted } from './trustedFolders.js'; + +vi.mock('./trustedFolders.js', () => ({ + isWorkspaceTrusted: vi.fn().mockReturnValue(true), // Default to trusted +})); + +vi.mock('fs', async (importOriginal) => { + const actualFs = await importOriginal(); + const pathMod = await import('node:path'); + const mockHome = '/mock/home/user'; + const MOCK_CWD1 = process.cwd(); + const MOCK_CWD2 = pathMod.resolve(pathMod.sep, 'home', 'user', 'project'); + + const mockPaths = new Set([ + MOCK_CWD1, + MOCK_CWD2, + pathMod.resolve(pathMod.sep, 'cli', 'path1'), + pathMod.resolve(pathMod.sep, 'settings', 'path1'), + pathMod.join(mockHome, 'settings', 'path2'), + pathMod.join(MOCK_CWD2, 'cli', 'path2'), + pathMod.join(MOCK_CWD2, 'settings', 'path3'), + 
]); + + return { + ...actualFs, + mkdirSync: vi.fn(), + writeFileSync: vi.fn(), + existsSync: vi.fn((p) => mockPaths.has(p.toString())), + statSync: vi.fn((p) => { + if (mockPaths.has(p.toString())) { + return { isDirectory: () => true } as unknown as import('fs').Stats; + } + return (actualFs as typeof import('fs')).statSync(p as unknown as string); + }), + realpathSync: vi.fn((p) => p), + }; +}); + +vi.mock('os', async (importOriginal) => { + const actualOs = await importOriginal(); + return { + ...actualOs, + homedir: vi.fn(() => '/mock/home/user'), + }; +}); + +vi.mock('open', () => ({ + default: vi.fn(), +})); + +vi.mock('read-package-up', () => ({ + readPackageUp: vi.fn(() => + Promise.resolve({ packageJson: { version: 'test-version' } }), + ), +})); + +vi.mock('@google/gemini-cli-core', async () => { + const actualServer = await vi.importActual( + '@google/gemini-cli-core', + ); + return { + ...actualServer, + IdeClient: { + getInstance: vi.fn().mockResolvedValue({ + getConnectionStatus: vi.fn(), + initialize: vi.fn(), + shutdown: vi.fn(), + }), + }, + loadEnvironment: vi.fn(), + loadServerHierarchicalMemory: vi.fn( + (cwd, dirs, debug, fileService, extensionPaths, _maxDirs) => + Promise.resolve({ + memoryContent: extensionPaths?.join(',') || '', + fileCount: extensionPaths?.length || 0, + }), + ), + DEFAULT_MEMORY_FILE_FILTERING_OPTIONS: { + respectGitIgnore: false, + respectGeminiIgnore: true, + }, + DEFAULT_FILE_FILTERING_OPTIONS: { + respectGitIgnore: true, + respectGeminiIgnore: true, + }, + }; +}); + +describe('parseArguments', () => { + const originalArgv = process.argv; + + afterEach(() => { + process.argv = originalArgv; + }); + + it('should throw an error when both --prompt and --prompt-interactive are used together', async () => { + process.argv = [ + 'node', + 'script.js', + '--prompt', + 'test prompt', + '--prompt-interactive', + 'interactive prompt', + ]; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new 
Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments({} as Settings)).rejects.toThrow( + 'process.exit called', + ); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining( + 'Cannot use both --prompt (-p) and --prompt-interactive (-i) together', + ), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); + + it('should throw an error when using short flags -p and -i together', async () => { + process.argv = [ + 'node', + 'script.js', + '-p', + 'test prompt', + '-i', + 'interactive prompt', + ]; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments({} as Settings)).rejects.toThrow( + 'process.exit called', + ); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining( + 'Cannot use both --prompt (-p) and --prompt-interactive (-i) together', + ), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); + + it('should allow --prompt without --prompt-interactive', async () => { + process.argv = ['node', 'script.js', '--prompt', 'test prompt']; + const argv = await parseArguments({} as Settings); + expect(argv.prompt).toBe('test prompt'); + expect(argv.promptInteractive).toBeUndefined(); + }); + + it('should allow --prompt-interactive without --prompt', async () => { + process.argv = [ + 'node', + 'script.js', + '--prompt-interactive', + 'interactive prompt', + ]; + const argv = await parseArguments({} as Settings); + expect(argv.promptInteractive).toBe('interactive prompt'); + expect(argv.prompt).toBeUndefined(); + }); + + it('should allow -i flag as alias for --prompt-interactive', async () => { + process.argv = ['node', 'script.js', '-i', 'interactive prompt']; + const argv = await 
parseArguments({} as Settings); + expect(argv.promptInteractive).toBe('interactive prompt'); + expect(argv.prompt).toBeUndefined(); + }); + + it('should throw an error when both --yolo and --approval-mode are used together', async () => { + process.argv = [ + 'node', + 'script.js', + '--yolo', + '--approval-mode', + 'default', + ]; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments({} as Settings)).rejects.toThrow( + 'process.exit called', + ); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining( + 'Cannot use both --yolo (-y) and --approval-mode together. Use --approval-mode=yolo instead.', + ), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); + + it('should throw an error when using short flags -y and --approval-mode together', async () => { + process.argv = ['node', 'script.js', '-y', '--approval-mode', 'yolo']; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments({} as Settings)).rejects.toThrow( + 'process.exit called', + ); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining( + 'Cannot use both --yolo (-y) and --approval-mode together. 
Use --approval-mode=yolo instead.', + ), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); + + it('should allow --approval-mode without --yolo', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'auto_edit']; + const argv = await parseArguments({} as Settings); + expect(argv.approvalMode).toBe('auto_edit'); + expect(argv.yolo).toBe(false); + }); + + it('should allow --yolo without --approval-mode', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments({} as Settings); + expect(argv.yolo).toBe(true); + expect(argv.approvalMode).toBeUndefined(); + }); + + it('should reject invalid --approval-mode values', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'invalid']; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments({} as Settings)).rejects.toThrow( + 'process.exit called', + ); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining('Invalid values:'), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); +}); + +describe('loadCliConfig', () => { + const originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + }); + + afterEach(() => { + process.argv = originalArgv; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + it('should set showMemoryUsage to true when --show-memory-usage flag is present', async () => { + process.argv = ['node', 'script.js', '--show-memory-usage']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getShowMemoryUsage()).toBe(true); + }); + + 
it('should set showMemoryUsage to false when --memory flag is not present', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getShowMemoryUsage()).toBe(false); + }); + + it('should set showMemoryUsage to false by default from settings if CLI flag is not present', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { ui: { showMemoryUsage: false } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getShowMemoryUsage()).toBe(false); + }); + + it('should prioritize CLI flag over settings for showMemoryUsage (CLI true, settings false)', async () => { + process.argv = ['node', 'script.js', '--show-memory-usage']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { ui: { showMemoryUsage: false } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getShowMemoryUsage()).toBe(true); + }); + + describe('Proxy configuration', () => { + const originalProxyEnv: { [key: string]: string | undefined } = {}; + const proxyEnvVars = [ + 'HTTP_PROXY', + 'HTTPS_PROXY', + 'http_proxy', + 'https_proxy', + ]; + + beforeEach(() => { + for (const key of proxyEnvVars) { + originalProxyEnv[key] = process.env[key]; + delete process.env[key]; + } + }); + + afterEach(() => { + for (const key of proxyEnvVars) { + if (originalProxyEnv[key]) { + process.env[key] = originalProxyEnv[key]; + } else { + delete process.env[key]; + } + } + }); + + it(`should leave proxy to empty by default`, async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + 
expect(config.getProxy()).toBeFalsy(); + }); + + const proxy_url = 'http://localhost:7890'; + const testCases = [ + { + input: { + env_name: 'https_proxy', + proxy_url, + }, + expected: proxy_url, + }, + { + input: { + env_name: 'http_proxy', + proxy_url, + }, + expected: proxy_url, + }, + { + input: { + env_name: 'HTTPS_PROXY', + proxy_url, + }, + expected: proxy_url, + }, + { + input: { + env_name: 'HTTP_PROXY', + proxy_url, + }, + expected: proxy_url, + }, + ]; + testCases.forEach(({ input, expected }) => { + it(`should set proxy to ${expected} according to environment variable [${input.env_name}]`, async () => { + vi.stubEnv(input.env_name, input.proxy_url); + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getProxy()).toBe(expected); + }); + }); + + it('should set proxy when --proxy flag is present', async () => { + process.argv = ['node', 'script.js', '--proxy', 'http://localhost:7890']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getProxy()).toBe('http://localhost:7890'); + }); + + it('should prioritize CLI flag over environment variable for proxy (CLI http://localhost:7890, environment variable http://localhost:7891)', async () => { + vi.stubEnv('http_proxy', 'http://localhost:7891'); + process.argv = ['node', 'script.js', '--proxy', 'http://localhost:7890']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getProxy()).toBe('http://localhost:7890'); + }); + }); +}); + +describe('loadCliConfig telemetry', () => { + const originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + 
vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + }); + + afterEach(() => { + process.argv = originalArgv; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + it('should set telemetry to false by default when no flag or setting is present', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryEnabled()).toBe(false); + }); + + it('should set telemetry to true when --telemetry flag is present', async () => { + process.argv = ['node', 'script.js', '--telemetry']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryEnabled()).toBe(true); + }); + + it('should set telemetry to false when --no-telemetry flag is present', async () => { + process.argv = ['node', 'script.js', '--no-telemetry']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryEnabled()).toBe(false); + }); + + it('should use telemetry value from settings if CLI flag is not present (settings true)', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { enabled: true } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryEnabled()).toBe(true); + }); + + it('should use telemetry value from settings if CLI flag is not present (settings false)', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { enabled: false } }; + const config = await 
loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryEnabled()).toBe(false); + }); + + it('should prioritize --telemetry CLI flag (true) over settings (false)', async () => { + process.argv = ['node', 'script.js', '--telemetry']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { enabled: false } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryEnabled()).toBe(true); + }); + + it('should prioritize --no-telemetry CLI flag (false) over settings (true)', async () => { + process.argv = ['node', 'script.js', '--no-telemetry']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { enabled: true } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryEnabled()).toBe(false); + }); + + it('should use telemetry OTLP endpoint from settings if CLI flag is not present', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + telemetry: { otlpEndpoint: 'http://settings.example.com' }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryOtlpEndpoint()).toBe( + 'http://settings.example.com', + ); + }); + + it('should prioritize --telemetry-otlp-endpoint CLI flag over settings', async () => { + process.argv = [ + 'node', + 'script.js', + '--telemetry-otlp-endpoint', + 'http://cli.example.com', + ]; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + telemetry: { otlpEndpoint: 'http://settings.example.com' }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryOtlpEndpoint()).toBe('http://cli.example.com'); + }); + + it('should use default endpoint if no OTLP endpoint is provided via CLI or settings', async () => { + process.argv = ['node', 
'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { enabled: true } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryOtlpEndpoint()).toBe('http://localhost:4317'); + }); + + it('should use telemetry target from settings if CLI flag is not present', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + telemetry: { target: ServerConfig.DEFAULT_TELEMETRY_TARGET }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryTarget()).toBe( + ServerConfig.DEFAULT_TELEMETRY_TARGET, + ); + }); + + it('should prioritize --telemetry-target CLI flag over settings', async () => { + process.argv = ['node', 'script.js', '--telemetry-target', 'gcp']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + telemetry: { target: ServerConfig.DEFAULT_TELEMETRY_TARGET }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryTarget()).toBe('gcp'); + }); + + it('should use default target if no target is provided via CLI or settings', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { enabled: true } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryTarget()).toBe( + ServerConfig.DEFAULT_TELEMETRY_TARGET, + ); + }); + + it('should use telemetry log prompts from settings if CLI flag is not present', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { logPrompts: false } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryLogPromptsEnabled()).toBe(false); + }); 
+ + it('should prioritize --telemetry-log-prompts CLI flag (true) over settings (false)', async () => { + process.argv = ['node', 'script.js', '--telemetry-log-prompts']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { logPrompts: false } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryLogPromptsEnabled()).toBe(true); + }); + + it('should prioritize --no-telemetry-log-prompts CLI flag (false) over settings (true)', async () => { + process.argv = ['node', 'script.js', '--no-telemetry-log-prompts']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { logPrompts: true } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryLogPromptsEnabled()).toBe(false); + }); + + it('should use default log prompts (true) if no value is provided via CLI or settings', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { enabled: true } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryLogPromptsEnabled()).toBe(true); + }); + + it('should use telemetry OTLP protocol from settings if CLI flag is not present', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + telemetry: { otlpProtocol: 'http' }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryOtlpProtocol()).toBe('http'); + }); + + it('should prioritize --telemetry-otlp-protocol CLI flag over settings', async () => { + process.argv = ['node', 'script.js', '--telemetry-otlp-protocol', 'http']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + telemetry: { otlpProtocol: 'grpc' }, + }; + const config = await 
loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryOtlpProtocol()).toBe('http'); + }); + + it('should use default protocol if no OTLP protocol is provided via CLI or settings', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { telemetry: { enabled: true } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getTelemetryOtlpProtocol()).toBe('grpc'); + }); + + it('should reject invalid --telemetry-otlp-protocol values', async () => { + process.argv = [ + 'node', + 'script.js', + '--telemetry-otlp-protocol', + 'invalid', + ]; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments({} as Settings)).rejects.toThrow( + 'process.exit called', + ); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining('Invalid values:'), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); +}); + +describe('Hierarchical Memory Loading (config.ts) - Placeholder Suite', () => { + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + // Other common mocks would be reset here. 
+ }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should pass extension context file paths to loadServerHierarchicalMemory', async () => { + process.argv = ['node', 'script.js']; + const settings: Settings = {}; + const extensions: Extension[] = [ + { + path: '/path/to/ext1', + config: { + name: 'ext1', + version: '1.0.0', + }, + contextFiles: ['/path/to/ext1/GEMINI.md'], + }, + { + path: '/path/to/ext2', + config: { + name: 'ext2', + version: '1.0.0', + }, + contextFiles: [], + }, + { + path: '/path/to/ext3', + config: { + name: 'ext3', + version: '1.0.0', + }, + contextFiles: [ + '/path/to/ext3/context1.md', + '/path/to/ext3/context2.md', + ], + }, + ]; + const argv = await parseArguments({} as Settings); + await loadCliConfig(settings, extensions, 'session-id', argv); + expect(ServerConfig.loadServerHierarchicalMemory).toHaveBeenCalledWith( + expect.any(String), + [], + false, + expect.any(Object), + [ + '/path/to/ext1/GEMINI.md', + '/path/to/ext3/context1.md', + '/path/to/ext3/context2.md', + ], + true, + 'tree', + { + respectGitIgnore: false, + respectGeminiIgnore: true, + }, + undefined, // maxDirs + ); + }); + + // NOTE TO FUTURE DEVELOPERS: + // To re-enable tests for loadHierarchicalGeminiMemory, ensure that: + // 1. os.homedir() is reliably mocked *before* the config.ts module is loaded + // and its functions (which use os.homedir()) are called. + // 2. fs/promises and fs mocks correctly simulate file/directory existence, + // readability, and content based on paths derived from the mocked os.homedir(). + // 3. Spies on console functions (for logger output) are correctly set up if needed. 
+ // Example of a previously failing test structure: + it.skip('should correctly use mocked homedir for global path', async () => { + const MOCK_GEMINI_DIR_LOCAL = path.join('/mock/home/user', '.gemini'); + const MOCK_GLOBAL_PATH_LOCAL = path.join( + MOCK_GEMINI_DIR_LOCAL, + 'GEMINI.md', + ); + mockFs({ + [MOCK_GLOBAL_PATH_LOCAL]: { type: 'file', content: 'GlobalContentOnly' }, + }); + const memory = await loadHierarchicalGeminiMemory('/some/other/cwd', false); + expect(memory).toBe('GlobalContentOnly'); + expect(vi.mocked(os.homedir)).toHaveBeenCalled(); + expect(fsPromises.readFile).toHaveBeenCalledWith( + MOCK_GLOBAL_PATH_LOCAL, + 'utf-8', + ); + }); +}); + +describe('mergeMcpServers', () => { + it('should not modify the original settings object', async () => { + const settings: Settings = { + mcpServers: { + 'test-server': { + url: 'http://localhost:8080', + }, + }, + }; + const extensions: Extension[] = [ + { + path: '/path/to/ext1', + config: { + name: 'ext1', + version: '1.0.0', + mcpServers: { + 'ext1-server': { + url: 'http://localhost:8081', + }, + }, + }, + contextFiles: [], + }, + ]; + const originalSettings = JSON.parse(JSON.stringify(settings)); + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + await loadCliConfig(settings, extensions, 'test-session', argv); + expect(settings).toEqual(originalSettings); + }); +}); + +describe('mergeExcludeTools', () => { + const defaultExcludes = [ShellTool.Name, EditTool.Name, WriteFileTool.Name]; + const originalIsTTY = process.stdin.isTTY; + + beforeEach(() => { + process.stdin.isTTY = true; + }); + + afterEach(() => { + process.stdin.isTTY = originalIsTTY; + }); + + it('should merge excludeTools from settings and extensions', async () => { + const settings: Settings = { tools: { exclude: ['tool1', 'tool2'] } }; + const extensions: Extension[] = [ + { + path: '/path/to/ext1', + config: { + name: 'ext1', + version: '1.0.0', + excludeTools: ['tool3', 'tool4'], + }, + 
contextFiles: [], + }, + { + path: '/path/to/ext2', + config: { + name: 'ext2', + version: '1.0.0', + excludeTools: ['tool5'], + }, + contextFiles: [], + }, + ]; + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + expect(config.getExcludeTools()).toEqual( + expect.arrayContaining(['tool1', 'tool2', 'tool3', 'tool4', 'tool5']), + ); + expect(config.getExcludeTools()).toHaveLength(5); + }); + + it('should handle overlapping excludeTools between settings and extensions', async () => { + const settings: Settings = { tools: { exclude: ['tool1', 'tool2'] } }; + const extensions: Extension[] = [ + { + path: '/path/to/ext1', + config: { + name: 'ext1', + version: '1.0.0', + excludeTools: ['tool2', 'tool3'], + }, + contextFiles: [], + }, + ]; + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + expect(config.getExcludeTools()).toEqual( + expect.arrayContaining(['tool1', 'tool2', 'tool3']), + ); + expect(config.getExcludeTools()).toHaveLength(3); + }); + + it('should handle overlapping excludeTools between extensions', async () => { + const settings: Settings = { tools: { exclude: ['tool1'] } }; + const extensions: Extension[] = [ + { + path: '/path/to/ext1', + config: { + name: 'ext1', + version: '1.0.0', + excludeTools: ['tool2', 'tool3'], + }, + contextFiles: [], + }, + { + path: '/path/to/ext2', + config: { + name: 'ext2', + version: '1.0.0', + excludeTools: ['tool3', 'tool4'], + }, + contextFiles: [], + }, + ]; + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + expect(config.getExcludeTools()).toEqual( + expect.arrayContaining(['tool1', 'tool2', 'tool3', 
'tool4']), + ); + expect(config.getExcludeTools()).toHaveLength(4); + }); + + it('should return an empty array when no excludeTools are specified and it is interactive', async () => { + process.stdin.isTTY = true; + const settings: Settings = {}; + const extensions: Extension[] = []; + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + expect(config.getExcludeTools()).toEqual([]); + }); + + it('should return default excludes when no excludeTools are specified and it is not interactive', async () => { + process.stdin.isTTY = false; + const settings: Settings = {}; + const extensions: Extension[] = []; + process.argv = ['node', 'script.js', '-p', 'test']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + expect(config.getExcludeTools()).toEqual(defaultExcludes); + }); + + it('should handle settings with excludeTools but no extensions', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { tools: { exclude: ['tool1', 'tool2'] } }; + const extensions: Extension[] = []; + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + expect(config.getExcludeTools()).toEqual( + expect.arrayContaining(['tool1', 'tool2']), + ); + expect(config.getExcludeTools()).toHaveLength(2); + }); + + it('should handle extensions with excludeTools but no settings', async () => { + const settings: Settings = {}; + const extensions: Extension[] = [ + { + path: '/path/to/ext', + config: { + name: 'ext1', + version: '1.0.0', + excludeTools: ['tool1', 'tool2'], + }, + contextFiles: [], + }, + ]; + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig( + settings, + 
extensions, + 'test-session', + argv, + ); + expect(config.getExcludeTools()).toEqual( + expect.arrayContaining(['tool1', 'tool2']), + ); + expect(config.getExcludeTools()).toHaveLength(2); + }); + + it('should not modify the original settings object', async () => { + const settings: Settings = { tools: { exclude: ['tool1'] } }; + const extensions: Extension[] = [ + { + path: '/path/to/ext', + config: { + name: 'ext1', + version: '1.0.0', + excludeTools: ['tool2'], + }, + contextFiles: [], + }, + ]; + const originalSettings = JSON.parse(JSON.stringify(settings)); + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + await loadCliConfig(settings, extensions, 'test-session', argv); + expect(settings).toEqual(originalSettings); + }); +}); + +describe('Approval mode tool exclusion logic', () => { + const originalIsTTY = process.stdin.isTTY; + + beforeEach(() => { + process.stdin.isTTY = false; // Ensure non-interactive mode + vi.mocked(isWorkspaceTrusted).mockReturnValue(true); + }); + + afterEach(() => { + process.stdin.isTTY = originalIsTTY; + }); + + it('should exclude all interactive tools in non-interactive mode with default approval mode', async () => { + process.argv = ['node', 'script.js', '-p', 'test']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).toContain(EditTool.Name); + expect(excludedTools).toContain(WriteFileTool.Name); + }); + + it('should exclude all interactive tools in non-interactive mode with explicit default approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'default', + '-p', + 'test', + ]; + const argv = await parseArguments({} as Settings); + const settings: 
Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).toContain(EditTool.Name); + expect(excludedTools).toContain(WriteFileTool.Name); + }); + + it('should exclude only shell tools in non-interactive mode with auto_edit approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should exclude no interactive tools in non-interactive mode with yolo approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'yolo', + '-p', + 'test', + ]; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should exclude no interactive tools in non-interactive mode with legacy yolo flag', async () => { + process.argv = ['node', 'script.js', '--yolo', '-p', 'test']; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await 
loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should not exclude interactive tools in interactive mode regardless of approval mode', async () => { + process.stdin.isTTY = true; // Interactive mode + + const testCases = [ + { args: ['node', 'script.js'] }, // default + { args: ['node', 'script.js', '--approval-mode', 'default'] }, + { args: ['node', 'script.js', '--approval-mode', 'auto_edit'] }, + { args: ['node', 'script.js', '--approval-mode', 'yolo'] }, + { args: ['node', 'script.js', '--yolo'] }, + ]; + + for (const testCase of testCases) { + process.argv = testCase.args; + const argv = await parseArguments({} as Settings); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + } + }); + + it('should merge approval mode exclusions with settings exclusions in auto_edit mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + const argv = await parseArguments({} as Settings); + const settings: Settings = { tools: { exclude: ['custom_tool'] } }; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain('custom_tool'); // From settings + expect(excludedTools).toContain(ShellTool.Name); // From approval mode + 
expect(excludedTools).not.toContain(EditTool.Name); // Should be allowed in auto_edit + expect(excludedTools).not.toContain(WriteFileTool.Name); // Should be allowed in auto_edit + }); + + it('should throw an error for invalid approval mode values in loadCliConfig', async () => { + // Create a mock argv with an invalid approval mode that bypasses argument parsing validation + const invalidArgv: Partial & { approvalMode: string } = { + approvalMode: 'invalid_mode', + promptInteractive: '', + prompt: '', + yolo: false, + }; + + const settings: Settings = {}; + const extensions: Extension[] = []; + + await expect( + loadCliConfig( + settings, + extensions, + 'test-session', + invalidArgv as CliArgs, + ), + ).rejects.toThrow( + 'Invalid approval mode: invalid_mode. Valid values are: yolo, auto_edit, default', + ); + }); +}); + +describe('loadCliConfig with allowed-mcp-server-names', () => { + const originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + }); + + afterEach(() => { + process.argv = originalArgv; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + const baseSettings: Settings = { + mcpServers: { + server1: { url: 'http://localhost:8080' }, + server2: { url: 'http://localhost:8081' }, + server3: { url: 'http://localhost:8082' }, + }, + }; + + it('should allow all MCP servers if the flag is not provided', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig(baseSettings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual(baseSettings.mcpServers); + }); + + it('should allow only the specified MCP server', async () => { + process.argv = [ + 'node', + 'script.js', + '--allowed-mcp-server-names', + 'server1', + ]; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig(baseSettings, [], 
'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server1: { url: 'http://localhost:8080' }, + }); + }); + + it('should allow multiple specified MCP servers', async () => { + process.argv = [ + 'node', + 'script.js', + '--allowed-mcp-server-names', + 'server1', + '--allowed-mcp-server-names', + 'server3', + ]; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig(baseSettings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server1: { url: 'http://localhost:8080' }, + server3: { url: 'http://localhost:8082' }, + }); + }); + + it('should handle server names that do not exist', async () => { + process.argv = [ + 'node', + 'script.js', + '--allowed-mcp-server-names', + 'server1', + '--allowed-mcp-server-names', + 'server4', + ]; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig(baseSettings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server1: { url: 'http://localhost:8080' }, + }); + }); + + it('should allow no MCP servers if the flag is provided but empty', async () => { + process.argv = ['node', 'script.js', '--allowed-mcp-server-names', '']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig(baseSettings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({}); + }); + + it('should read allowMCPServers from settings', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + ...baseSettings, + mcp: { allowed: ['server1', 'server2'] }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server1: { url: 'http://localhost:8080' }, + server2: { url: 'http://localhost:8081' }, + }); + }); + + it('should read excludeMCPServers from settings', async () => { + process.argv = ['node', 'script.js']; + const argv = await 
parseArguments({} as Settings); + const settings: Settings = { + ...baseSettings, + mcp: { excluded: ['server1', 'server2'] }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server3: { url: 'http://localhost:8082' }, + }); + }); + + it('should override allowMCPServers with excludeMCPServers if overlapping', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + ...baseSettings, + mcp: { + excluded: ['server1'], + allowed: ['server1', 'server2'], + }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server2: { url: 'http://localhost:8081' }, + }); + }); + + it('should prioritize mcp server flag if set', async () => { + process.argv = [ + 'node', + 'script.js', + '--allowed-mcp-server-names', + 'server1', + ]; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + ...baseSettings, + mcp: { + excluded: ['server1'], + allowed: ['server2'], + }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server1: { url: 'http://localhost:8080' }, + }); + }); + + it('should prioritize CLI flag over both allowed and excluded settings', async () => { + process.argv = [ + 'node', + 'script.js', + '--allowed-mcp-server-names', + 'server2', + '--allowed-mcp-server-names', + 'server3', + ]; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + ...baseSettings, + mcp: { + allowed: ['server1', 'server2'], // Should be ignored + excluded: ['server3'], // Should be ignored + }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server2: { url: 'http://localhost:8081' }, + server3: { url: 'http://localhost:8082' }, + }); + }); +}); + 
// Verifies that the --extensions CLI flag filters which extensions contribute
// context files to the config (no flag = all extensions pass through).
describe('loadCliConfig extensions', () => {
  // Two minimal extensions, each contributing one context file, so filtering
  // by name is observable via getExtensionContextFilePaths().
  const mockExtensions: Extension[] = [
    {
      path: '/path/to/ext1',
      config: { name: 'ext1', version: '1.0.0' },
      contextFiles: ['/path/to/ext1.md'],
    },
    {
      path: '/path/to/ext2',
      config: { name: 'ext2', version: '1.0.0' },
      contextFiles: ['/path/to/ext2.md'],
    },
  ];

  it('should not filter extensions if --extensions flag is not used', async () => {
    process.argv = ['node', 'script.js'];
    const argv = await parseArguments({} as Settings);
    const settings: Settings = {};
    const config = await loadCliConfig(
      settings,
      mockExtensions,
      'test-session',
      argv,
    );
    // Both extensions' context files survive when no filter is given.
    expect(config.getExtensionContextFilePaths()).toEqual([
      '/path/to/ext1.md',
      '/path/to/ext2.md',
    ]);
  });

  it('should filter extensions if --extensions flag is used', async () => {
    process.argv = ['node', 'script.js', '--extensions', 'ext1'];
    const argv = await parseArguments({} as Settings);
    const settings: Settings = {};
    const config = await loadCliConfig(
      settings,
      mockExtensions,
      'test-session',
      argv,
    );
    // Only ext1 was named, so only its context file remains.
    expect(config.getExtensionContextFilePaths()).toEqual(['/path/to/ext1.md']);
  });
});

// Verifies model precedence: CLI --model > settings.model.name > built-in default.
describe('loadCliConfig model selection', () => {
  it('selects a model from settings.json if provided', async () => {
    process.argv = ['node', 'script.js'];
    const argv = await parseArguments({} as Settings);
    const config = await loadCliConfig(
      {
        model: {
          name: 'gemini-9001-ultra',
        },
      },
      [],
      'test-session',
      argv,
    );

    expect(config.getModel()).toBe('gemini-9001-ultra');
  });

  it('uses the default gemini model if nothing is set', async () => {
    process.argv = ['node', 'script.js']; // No model set.
    const argv = await parseArguments({} as Settings);
    const config = await loadCliConfig(
      {
        // No model set.
+ }, + [], + 'test-session', + argv, + ); + + expect(config.getModel()).toBe('gemini-2.5-pro'); + }); + + it('always prefers model from argvs', async () => { + process.argv = ['node', 'script.js', '--model', 'gemini-8675309-ultra']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig( + { + model: { + name: 'gemini-9001-ultra', + }, + }, + [], + 'test-session', + argv, + ); + + expect(config.getModel()).toBe('gemini-8675309-ultra'); + }); + + it('selects the model from argvs if provided', async () => { + process.argv = ['node', 'script.js', '--model', 'gemini-8675309-ultra']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig( + { + // No model provided via settings. + }, + [], + 'test-session', + argv, + ); + + expect(config.getModel()).toBe('gemini-8675309-ultra'); + }); +}); + +describe('loadCliConfig folderTrustFeature', () => { + const originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + }); + + afterEach(() => { + process.argv = originalArgv; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + it('should be false by default', async () => { + process.argv = ['node', 'script.js']; + const settings: Settings = {}; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getFolderTrustFeature()).toBe(false); + }); + + it('should be true when settings.folderTrustFeature is true', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + security: { folderTrust: { featureEnabled: true } }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getFolderTrustFeature()).toBe(true); + }); +}); + +describe('loadCliConfig folderTrust', () => { + const 
originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + }); + + afterEach(() => { + process.argv = originalArgv; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + it('should be false if folderTrustFeature is false and folderTrust is false', async () => { + process.argv = ['node', 'script.js']; + const settings: Settings = { + security: { + folderTrust: { + featureEnabled: false, + enabled: false, + }, + }, + }; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getFolderTrust()).toBe(false); + }); + + it('should be false if folderTrustFeature is true and folderTrust is false', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + security: { + folderTrust: { + featureEnabled: true, + enabled: false, + }, + }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getFolderTrust()).toBe(false); + }); + + it('should be false if folderTrustFeature is false and folderTrust is true', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + security: { + folderTrust: { + featureEnabled: false, + enabled: true, + }, + }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getFolderTrust()).toBe(false); + }); + + it('should be true when folderTrustFeature is true and folderTrust is true', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + security: { + folderTrust: { + featureEnabled: true, + enabled: true, + }, + }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + 
expect(config.getFolderTrust()).toBe(true); + }); +}); + +describe('loadCliConfig with includeDirectories', () => { + const originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + vi.spyOn(process, 'cwd').mockReturnValue( + path.resolve(path.sep, 'home', 'user', 'project'), + ); + }); + + afterEach(() => { + process.argv = originalArgv; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + it('should combine and resolve paths from settings and CLI arguments', async () => { + const mockCwd = path.resolve(path.sep, 'home', 'user', 'project'); + process.argv = [ + 'node', + 'script.js', + '--include-directories', + `${path.resolve(path.sep, 'cli', 'path1')},${path.join(mockCwd, 'cli', 'path2')}`, + ]; + const argv = await parseArguments({} as Settings); + const settings: Settings = { + context: { + includeDirectories: [ + path.resolve(path.sep, 'settings', 'path1'), + path.join(os.homedir(), 'settings', 'path2'), + path.join(mockCwd, 'settings', 'path3'), + ], + }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + const expected = [ + mockCwd, + path.resolve(path.sep, 'cli', 'path1'), + path.join(mockCwd, 'cli', 'path2'), + path.resolve(path.sep, 'settings', 'path1'), + path.join(os.homedir(), 'settings', 'path2'), + path.join(mockCwd, 'settings', 'path3'), + ]; + expect(config.getWorkspaceContext().getDirectories()).toEqual( + expect.arrayContaining(expected), + ); + expect(config.getWorkspaceContext().getDirectories()).toHaveLength( + expected.length, + ); + }); +}); + +describe('loadCliConfig chatCompression', () => { + const originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + }); + + afterEach(() => { + process.argv = originalArgv; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); 
  });

  it('should pass chatCompression settings to the core config', async () => {
    process.argv = ['node', 'script.js'];
    const argv = await parseArguments({} as Settings);
    // chatCompression lives under model.* in settings and is forwarded verbatim.
    const settings: Settings = {
      model: {
        chatCompression: {
          contextPercentageThreshold: 0.5,
        },
      },
    };
    const config = await loadCliConfig(settings, [], 'test-session', argv);
    expect(config.getChatCompression()).toEqual({
      contextPercentageThreshold: 0.5,
    });
  });

  it('should have undefined chatCompression if not in settings', async () => {
    process.argv = ['node', 'script.js'];
    const argv = await parseArguments({} as Settings);
    const settings: Settings = {};
    const config = await loadCliConfig(settings, [], 'test-session', argv);
    // Absent setting must surface as undefined, not a default object.
    expect(config.getChatCompression()).toBeUndefined();
  });
});

// Verifies that tools.useRipgrep is read from settings and defaults to false.
describe('loadCliConfig useRipgrep', () => {
  const originalArgv = process.argv;

  beforeEach(() => {
    vi.resetAllMocks();
    vi.mocked(os.homedir).mockReturnValue('/mock/home/user');
    vi.stubEnv('GEMINI_API_KEY', 'test-api-key');
  });

  afterEach(() => {
    // Restore the real argv since each test overwrites process.argv globally.
    process.argv = originalArgv;
    vi.unstubAllEnvs();
    vi.restoreAllMocks();
  });

  it('should be false by default when useRipgrep is not set in settings', async () => {
    process.argv = ['node', 'script.js'];
    const argv = await parseArguments({} as Settings);
    const settings: Settings = {};
    const config = await loadCliConfig(settings, [], 'test-session', argv);
    expect(config.getUseRipgrep()).toBe(false);
  });

  it('should be true when useRipgrep is set to true in settings', async () => {
    process.argv = ['node', 'script.js'];
    const argv = await parseArguments({} as Settings);
    const settings: Settings = { tools: { useRipgrep: true } };
    const config = await loadCliConfig(settings, [], 'test-session', argv);
    expect(config.getUseRipgrep()).toBe(true);
  });

  it('should be false when useRipgrep is explicitly set to false in settings', async () => {
    process.argv = ['node', 'script.js'];
const argv = await parseArguments({} as Settings); + const settings: Settings = { tools: { useRipgrep: false } }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getUseRipgrep()).toBe(false); + }); +}); + +describe('loadCliConfig tool exclusions', () => { + const originalArgv = process.argv; + const originalIsTTY = process.stdin.isTTY; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + process.stdin.isTTY = true; + vi.mocked(isWorkspaceTrusted).mockReturnValue(true); + }); + + afterEach(() => { + process.argv = originalArgv; + process.stdin.isTTY = originalIsTTY; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + it('should not exclude interactive tools in interactive mode without YOLO', async () => { + process.stdin.isTTY = true; + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getExcludeTools()).not.toContain('run_shell_command'); + expect(config.getExcludeTools()).not.toContain('replace'); + expect(config.getExcludeTools()).not.toContain('write_file'); + }); + + it('should not exclude interactive tools in interactive mode with YOLO', async () => { + process.stdin.isTTY = true; + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getExcludeTools()).not.toContain('run_shell_command'); + expect(config.getExcludeTools()).not.toContain('replace'); + expect(config.getExcludeTools()).not.toContain('write_file'); + }); + + it('should exclude interactive tools in non-interactive mode without YOLO', async () => { + process.stdin.isTTY = false; + process.argv = ['node', 'script.js', '-p', 'test']; + const argv = await parseArguments({} as Settings); + const config = 
await loadCliConfig({}, [], 'test-session', argv); + expect(config.getExcludeTools()).toContain('run_shell_command'); + expect(config.getExcludeTools()).toContain('replace'); + expect(config.getExcludeTools()).toContain('write_file'); + }); + + it('should not exclude interactive tools in non-interactive mode with YOLO', async () => { + process.stdin.isTTY = false; + process.argv = ['node', 'script.js', '-p', 'test', '--yolo']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getExcludeTools()).not.toContain('run_shell_command'); + expect(config.getExcludeTools()).not.toContain('replace'); + expect(config.getExcludeTools()).not.toContain('write_file'); + }); +}); + +describe('loadCliConfig interactive', () => { + const originalArgv = process.argv; + const originalIsTTY = process.stdin.isTTY; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + process.stdin.isTTY = true; + }); + + afterEach(() => { + process.argv = originalArgv; + process.stdin.isTTY = originalIsTTY; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + it('should be interactive if isTTY and no prompt', async () => { + process.stdin.isTTY = true; + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.isInteractive()).toBe(true); + }); + + it('should be interactive if prompt-interactive is set', async () => { + process.stdin.isTTY = false; + process.argv = ['node', 'script.js', '--prompt-interactive', 'test']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.isInteractive()).toBe(true); + }); + + it('should not be interactive if not isTTY and no prompt', async () => { + process.stdin.isTTY = false; + 
process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.isInteractive()).toBe(false); + }); + + it('should not be interactive if prompt is set', async () => { + process.stdin.isTTY = true; + process.argv = ['node', 'script.js', '--prompt', 'test']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.isInteractive()).toBe(false); + }); +}); + +describe('loadCliConfig approval mode', () => { + const originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + process.argv = ['node', 'script.js']; // Reset argv for each test + vi.mocked(isWorkspaceTrusted).mockReturnValue(true); + }); + + afterEach(() => { + process.argv = originalArgv; + vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + it('should default to DEFAULT approval mode when no flags are set', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should set YOLO approval mode when --yolo flag is used', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + it('should set YOLO approval mode when -y flag is used', async () => { + process.argv = ['node', 'script.js', '-y']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); 
+ + it('should set DEFAULT approval mode when --approval-mode=default', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'default']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should set AUTO_EDIT approval mode when --approval-mode=auto_edit', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'auto_edit']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.AUTO_EDIT); + }); + + it('should set YOLO approval mode when --approval-mode=yolo', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'yolo']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + it('should prioritize --approval-mode over --yolo when both would be valid (but validation prevents this)', async () => { + // Note: This test documents the intended behavior, but in practice the validation + // prevents both flags from being used together + process.argv = ['node', 'script.js', '--approval-mode', 'default']; + const argv = await parseArguments({} as Settings); + // Manually set yolo to true to simulate what would happen if validation didn't prevent it + argv.yolo = true; + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should fall back to --yolo behavior when --approval-mode is not set', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + 
expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + // --- Untrusted Folder Scenarios --- + describe('when folder is NOT trusted', () => { + beforeEach(() => { + vi.mocked(isWorkspaceTrusted).mockReturnValue(false); + }); + + it('should override --approval-mode=yolo to DEFAULT', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'yolo']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should override --approval-mode=auto_edit to DEFAULT', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'auto_edit']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should override --yolo flag to DEFAULT', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should remain DEFAULT when --approval-mode=default', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'default']; + const argv = await parseArguments({} as Settings); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + }); +}); + +describe('loadCliConfig trustedFolder', () => { + const originalArgv = process.argv; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + vi.stubEnv('GEMINI_API_KEY', 'test-api-key'); + process.argv = ['node', 'script.js']; // Reset argv for each test + }); + + afterEach(() => { + process.argv = originalArgv; + 
vi.unstubAllEnvs(); + vi.restoreAllMocks(); + }); + + const testCases = [ + // Cases where folderTrustFeature is false (feature disabled) + { + folderTrustFeature: false, + folderTrust: true, + isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust true, workspace trusted -> behave as trusted', + }, + { + folderTrustFeature: false, + folderTrust: true, + isWorkspaceTrusted: false, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust true, workspace not trusted -> behave as trusted', + }, + { + folderTrustFeature: false, + folderTrust: false, + isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust false, workspace trusted -> behave as trusted', + }, + { + folderTrustFeature: false, + folderTrust: false, + isWorkspaceTrusted: false, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust false, workspace not trusted -> behave as trusted', + }, + // Cases where folderTrustFeature is true (feature enabled) + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: true, + expectedFolderTrust: true, + expectedIsTrustedFolder: true, + description: + 'feature enabled, folderTrust true, workspace trusted -> behave as trusted', + }, + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: false, + expectedFolderTrust: true, + expectedIsTrustedFolder: false, + description: + 'feature enabled, folderTrust true, workspace not trusted -> behave as not trusted', + }, + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: undefined, + expectedFolderTrust: true, + expectedIsTrustedFolder: true, + description: + 'feature enabled, folderTrust false, workspace trust unknown -> behave as trusted', + }, + { + folderTrustFeature: true, + folderTrust: false, + 
isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature enabled, folderTrust false, workspace trusted -> behave as trusted', + }, + { + folderTrustFeature: true, + folderTrust: false, + isWorkspaceTrusted: false, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature enabled, folderTrust false, workspace not trusted -> behave as trusted', + }, + ]; + + for (const { + folderTrustFeature, + folderTrust, + isWorkspaceTrusted: isWorkspaceTrustedValue, + expectedFolderTrust, + expectedIsTrustedFolder, + description, + } of testCases) { + it(`should correctly set folderTrust and isTrustedFolder when ${description}`, async () => { + (isWorkspaceTrusted as Mock).mockImplementation((settings: Settings) => { + const folderTrustFeature = + settings.security?.folderTrust?.featureEnabled ?? false; + const folderTrustSetting = + settings.security?.folderTrust?.enabled ?? true; + const folderTrustEnabled = folderTrustFeature && folderTrustSetting; + + if (!folderTrustEnabled) { + return true; + } + return isWorkspaceTrustedValue; // This is the part that comes from the test case + }); + const argv = await parseArguments({} as Settings); + const settings: Settings = { + security: { + folderTrust: { + featureEnabled: folderTrustFeature, + enabled: folderTrust, + }, + }, + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + + expect(config.getFolderTrust()).toBe(expectedFolderTrust); + expect(config.isTrustedFolder()).toBe(expectedIsTrustedFolder); + }); + } +}); diff --git a/projects/gemini-cli/packages/cli/src/config/config.ts b/projects/gemini-cli/packages/cli/src/config/config.ts new file mode 100644 index 0000000000000000000000000000000000000000..0673fecb6ddfe24fdd8f2761d67b6efb2527c891 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/config.ts @@ -0,0 +1,696 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: 
Apache-2.0 + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { homedir } from 'node:os'; +import yargs from 'yargs/yargs'; +import { hideBin } from 'yargs/helpers'; +import process from 'node:process'; +import { mcpCommand } from '../commands/mcp.js'; +import type { + TelemetryTarget, + FileFilteringOptions, + MCPServerConfig, +} from '@google/gemini-cli-core'; +import { extensionsCommand } from '../commands/extensions.js'; +import { + Config, + loadServerHierarchicalMemory, + setGeminiMdFilename as setServerGeminiMdFilename, + getCurrentGeminiMdFilename, + ApprovalMode, + DEFAULT_GEMINI_MODEL, + DEFAULT_GEMINI_EMBEDDING_MODEL, + DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, + FileDiscoveryService, + ShellTool, + EditTool, + WriteFileTool, +} from '@google/gemini-cli-core'; +import type { Settings } from './settings.js'; + +import type { Extension } from './extension.js'; +import { annotateActiveExtensions } from './extension.js'; +import { getCliVersion } from '../utils/version.js'; +import { loadSandboxConfig } from './sandboxConfig.js'; +import { resolvePath } from '../utils/resolvePath.js'; +import { appEvents } from '../utils/events.js'; + +import { isWorkspaceTrusted } from './trustedFolders.js'; + +// Simple console logger for now - replace with actual logger if available +const logger = { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + debug: (...args: any[]) => console.debug('[DEBUG]', ...args), + // eslint-disable-next-line @typescript-eslint/no-explicit-any + warn: (...args: any[]) => console.warn('[WARN]', ...args), + // eslint-disable-next-line @typescript-eslint/no-explicit-any + error: (...args: any[]) => console.error('[ERROR]', ...args), +}; + +export interface CliArgs { + model: string | undefined; + sandbox: boolean | string | undefined; + sandboxImage: string | undefined; + debug: boolean | undefined; + prompt: string | undefined; + promptInteractive: string | undefined; + allFiles: boolean | 
undefined; + showMemoryUsage: boolean | undefined; + yolo: boolean | undefined; + approvalMode: string | undefined; + telemetry: boolean | undefined; + checkpointing: boolean | undefined; + telemetryTarget: string | undefined; + telemetryOtlpEndpoint: string | undefined; + telemetryOtlpProtocol: string | undefined; + telemetryLogPrompts: boolean | undefined; + telemetryOutfile: string | undefined; + allowedMcpServerNames: string[] | undefined; + allowedTools: string[] | undefined; + experimentalAcp: boolean | undefined; + extensions: string[] | undefined; + listExtensions: boolean | undefined; + proxy: string | undefined; + includeDirectories: string[] | undefined; + screenReader: boolean | undefined; + useSmartEdit: boolean | undefined; + sessionSummary: string | undefined; +} + +export async function parseArguments(settings: Settings): Promise { + const yargsInstance = yargs(hideBin(process.argv)) + .locale('en') + .scriptName('gemini') + .usage( + 'Usage: gemini [options] [command]\n\nGemini CLI - Launch an interactive CLI, use -p/--prompt for non-interactive mode', + ) + .command('$0', 'Launch Gemini CLI', (yargsInstance) => + yargsInstance + .option('model', { + alias: 'm', + type: 'string', + description: `Model`, + default: process.env['GEMINI_MODEL'], + }) + .option('prompt', { + alias: 'p', + type: 'string', + description: 'Prompt. 
Appended to input on stdin (if any).', + }) + .option('prompt-interactive', { + alias: 'i', + type: 'string', + description: + 'Execute the provided prompt and continue in interactive mode', + }) + .option('sandbox', { + alias: 's', + type: 'boolean', + description: 'Run in sandbox?', + }) + .option('sandbox-image', { + type: 'string', + description: 'Sandbox image URI.', + }) + .option('debug', { + alias: 'd', + type: 'boolean', + description: 'Run in debug mode?', + default: false, + }) + .option('all-files', { + alias: ['a'], + type: 'boolean', + description: 'Include ALL files in context?', + default: false, + }) + .option('show-memory-usage', { + type: 'boolean', + description: 'Show memory usage in status bar', + default: false, + }) + .option('yolo', { + alias: 'y', + type: 'boolean', + description: + 'Automatically accept all actions (aka YOLO mode, see https://www.youtube.com/watch?v=xvFZjo5PgG0 for more details)?', + default: false, + }) + .option('approval-mode', { + type: 'string', + choices: ['default', 'auto_edit', 'yolo'], + description: + 'Set the approval mode: default (prompt for approval), auto_edit (auto-approve edit tools), yolo (auto-approve all tools)', + }) + .option('telemetry', { + type: 'boolean', + description: + 'Enable telemetry? This flag specifically controls if telemetry is sent. Other --telemetry-* flags set specific values but do not enable telemetry on their own.', + }) + .option('telemetry-target', { + type: 'string', + choices: ['local', 'gcp'], + description: + 'Set the telemetry target (local or gcp). Overrides settings files.', + }) + .option('telemetry-otlp-endpoint', { + type: 'string', + description: + 'Set the OTLP endpoint for telemetry. Overrides environment variables and settings files.', + }) + .option('telemetry-otlp-protocol', { + type: 'string', + choices: ['grpc', 'http'], + description: + 'Set the OTLP protocol for telemetry (grpc or http). 
Overrides settings files.', + }) + .option('telemetry-log-prompts', { + type: 'boolean', + description: + 'Enable or disable logging of user prompts for telemetry. Overrides settings files.', + }) + .option('telemetry-outfile', { + type: 'string', + description: 'Redirect all telemetry output to the specified file.', + }) + .option('checkpointing', { + alias: 'c', + type: 'boolean', + description: 'Enables checkpointing of file edits', + default: false, + }) + .option('experimental-acp', { + type: 'boolean', + description: 'Starts the agent in ACP mode', + }) + .option('allowed-mcp-server-names', { + type: 'array', + string: true, + description: 'Allowed MCP server names', + }) + .option('allowed-tools', { + type: 'array', + string: true, + description: 'Tools that are allowed to run without confirmation', + }) + .option('extensions', { + alias: 'e', + type: 'array', + string: true, + description: + 'A list of extensions to use. If not provided, all extensions are used.', + }) + .option('list-extensions', { + alias: 'l', + type: 'boolean', + description: 'List all available extensions and exit.', + }) + .option('proxy', { + type: 'string', + description: + 'Proxy for gemini client, like schema://user:password@host:port', + }) + .option('include-directories', { + type: 'array', + string: true, + description: + 'Additional directories to include in the workspace (comma-separated or multiple --include-directories)', + coerce: (dirs: string[]) => + // Handle comma-separated values + dirs.flatMap((dir) => dir.split(',').map((d) => d.trim())), + }) + .option('screen-reader', { + type: 'boolean', + description: 'Enable screen reader mode for accessibility.', + default: false, + }) + .option('session-summary', { + type: 'string', + description: 'File to write session summary to.', + }) + .deprecateOption( + 'telemetry', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'telemetry-target', + 'Use settings.json instead. 
This flag will be removed in a future version.', + ) + .deprecateOption( + 'telemetry-otlp-endpoint', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'telemetry-otlp-protocol', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'telemetry-log-prompts', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'telemetry-outfile', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'show-memory-usage', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'sandbox-image', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'proxy', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'checkpointing', + 'Use settings.json instead. This flag will be removed in a future version.', + ) + .deprecateOption( + 'all-files', + 'Use @ includes in the application instead. This flag will be removed in a future version.', + ) + .check((argv) => { + if (argv.prompt && argv['promptInteractive']) { + throw new Error( + 'Cannot use both --prompt (-p) and --prompt-interactive (-i) together', + ); + } + if (argv.yolo && argv['approvalMode']) { + throw new Error( + 'Cannot use both --yolo (-y) and --approval-mode together. Use --approval-mode=yolo instead.', + ); + } + return true; + }), + ) + // Register MCP subcommands + .command(mcpCommand); + + if (settings?.experimental?.extensionManagement ?? 
false) { + yargsInstance.command(extensionsCommand); + } + + yargsInstance + .version(await getCliVersion()) // This will enable the --version flag based on package.json + .alias('v', 'version') + .help() + .alias('h', 'help') + .strict() + .demandCommand(0, 0); // Allow base command to run with no subcommands + + yargsInstance.wrap(yargsInstance.terminalWidth()); + const result = await yargsInstance.parse(); + + // Handle case where MCP subcommands are executed - they should exit the process + // and not return to main CLI logic + if ( + result._.length > 0 && + (result._[0] === 'mcp' || result._[0] === 'extensions') + ) { + // MCP commands handle their own execution and process exit + process.exit(0); + } + + // The import format is now only controlled by settings.memoryImportFormat + // We no longer accept it as a CLI argument + return result as unknown as CliArgs; +} + +// This function is now a thin wrapper around the server's implementation. +// It's kept in the CLI for now as App.tsx directly calls it for memory refresh. +// TODO: Consider if App.tsx should get memory via a server call or if Config should refresh itself. +export async function loadHierarchicalGeminiMemory( + currentWorkingDirectory: string, + includeDirectoriesToReadGemini: readonly string[] = [], + debugMode: boolean, + fileService: FileDiscoveryService, + settings: Settings, + extensionContextFilePaths: string[] = [], + folderTrust: boolean, + memoryImportFormat: 'flat' | 'tree' = 'tree', + fileFilteringOptions?: FileFilteringOptions, +): Promise<{ memoryContent: string; fileCount: number }> { + // FIX: Use real, canonical paths for a reliable comparison to handle symlinks. 
+ const realCwd = fs.realpathSync(path.resolve(currentWorkingDirectory)); + const realHome = fs.realpathSync(path.resolve(homedir())); + const isHomeDirectory = realCwd === realHome; + + // If it is the home directory, pass an empty string to the core memory + // function to signal that it should skip the workspace search. + const effectiveCwd = isHomeDirectory ? '' : currentWorkingDirectory; + + if (debugMode) { + logger.debug( + `CLI: Delegating hierarchical memory load to server for CWD: ${currentWorkingDirectory} (memoryImportFormat: ${memoryImportFormat})`, + ); + } + + // Directly call the server function with the corrected path. + return loadServerHierarchicalMemory( + effectiveCwd, + includeDirectoriesToReadGemini, + debugMode, + fileService, + extensionContextFilePaths, + folderTrust, + memoryImportFormat, + fileFilteringOptions, + settings.context?.discoveryMaxDirs, + ); +} + +export async function loadCliConfig( + settings: Settings, + extensions: Extension[], + sessionId: string, + argv: CliArgs, + cwd: string = process.cwd(), +): Promise { + const debugMode = + argv.debug || + [process.env['DEBUG'], process.env['DEBUG_MODE']].some( + (v) => v === 'true' || v === '1', + ) || + false; + const memoryImportFormat = settings.context?.importFormat || 'tree'; + + const ideMode = settings.ide?.enabled ?? false; + + const folderTrustFeature = + settings.security?.folderTrust?.featureEnabled ?? false; + const folderTrustSetting = settings.security?.folderTrust?.enabled ?? true; + const folderTrust = folderTrustFeature && folderTrustSetting; + const trustedFolder = isWorkspaceTrusted(settings) ?? true; + + const allExtensions = annotateActiveExtensions( + extensions, + argv.extensions || [], + cwd, + ); + + const activeExtensions = extensions.filter( + (_, i) => allExtensions[i].isActive, + ); + + // Set the context filename in the server's memoryTool module BEFORE loading memory + // TODO(b/343434939): This is a bit of a hack. 
The contextFileName should ideally be passed + // directly to the Config constructor in core, and have core handle setGeminiMdFilename. + // However, loadHierarchicalGeminiMemory is called *before* createServerConfig. + if (settings.context?.fileName) { + setServerGeminiMdFilename(settings.context.fileName); + } else { + // Reset to default if not provided in settings. + setServerGeminiMdFilename(getCurrentGeminiMdFilename()); + } + + const extensionContextFilePaths = activeExtensions.flatMap( + (e) => e.contextFiles, + ); + + const fileService = new FileDiscoveryService(cwd); + + const fileFiltering = { + ...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, + ...settings.context?.fileFiltering, + }; + + const includeDirectories = (settings.context?.includeDirectories || []) + .map(resolvePath) + .concat((argv.includeDirectories || []).map(resolvePath)); + + // Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version + const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory( + cwd, + settings.context?.loadMemoryFromIncludeDirectories + ? includeDirectories + : [], + debugMode, + fileService, + settings, + extensionContextFilePaths, + trustedFolder, + memoryImportFormat, + fileFiltering, + ); + + let mcpServers = mergeMcpServers(settings, activeExtensions); + const question = argv.promptInteractive || argv.prompt || ''; + + // Determine approval mode with backward compatibility + let approvalMode: ApprovalMode; + if (argv.approvalMode) { + // New --approval-mode flag takes precedence + switch (argv.approvalMode) { + case 'yolo': + approvalMode = ApprovalMode.YOLO; + break; + case 'auto_edit': + approvalMode = ApprovalMode.AUTO_EDIT; + break; + case 'default': + approvalMode = ApprovalMode.DEFAULT; + break; + default: + throw new Error( + `Invalid approval mode: ${argv.approvalMode}. Valid values are: yolo, auto_edit, default`, + ); + } + } else { + // Fallback to legacy --yolo flag behavior + approvalMode = + argv.yolo || false ? 
ApprovalMode.YOLO : ApprovalMode.DEFAULT; + } + + // Force approval mode to default if the folder is not trusted. + if (!trustedFolder && approvalMode !== ApprovalMode.DEFAULT) { + logger.warn( + `Approval mode overridden to "default" because the current folder is not trusted.`, + ); + approvalMode = ApprovalMode.DEFAULT; + } + + const interactive = + !!argv.promptInteractive || (process.stdin.isTTY && question.length === 0); + // In non-interactive mode, exclude tools that require a prompt. + const extraExcludes: string[] = []; + if (!interactive && !argv.experimentalAcp) { + switch (approvalMode) { + case ApprovalMode.DEFAULT: + // In default non-interactive mode, all tools that require approval are excluded. + extraExcludes.push(ShellTool.Name, EditTool.Name, WriteFileTool.Name); + break; + case ApprovalMode.AUTO_EDIT: + // In auto-edit non-interactive mode, only tools that still require a prompt are excluded. + extraExcludes.push(ShellTool.Name); + break; + case ApprovalMode.YOLO: + // No extra excludes for YOLO mode. + break; + default: + // This should never happen due to validation earlier, but satisfies the linter + break; + } + } + + const excludeTools = mergeExcludeTools( + settings, + activeExtensions, + extraExcludes.length > 0 ? 
extraExcludes : undefined, + ); + const blockedMcpServers: Array<{ name: string; extensionName: string }> = []; + + if (!argv.allowedMcpServerNames) { + if (settings.mcp?.allowed) { + mcpServers = allowedMcpServers( + mcpServers, + settings.mcp.allowed, + blockedMcpServers, + ); + } + + if (settings.mcp?.excluded) { + const excludedNames = new Set(settings.mcp.excluded.filter(Boolean)); + if (excludedNames.size > 0) { + mcpServers = Object.fromEntries( + Object.entries(mcpServers).filter(([key]) => !excludedNames.has(key)), + ); + } + } + } + + if (argv.allowedMcpServerNames) { + mcpServers = allowedMcpServers( + mcpServers, + argv.allowedMcpServerNames, + blockedMcpServers, + ); + } + + const sandboxConfig = await loadSandboxConfig(settings, argv); + const screenReader = + argv.screenReader !== undefined + ? argv.screenReader + : (settings.ui?.accessibility?.screenReader ?? false); + return new Config({ + sessionId, + embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL, + sandbox: sandboxConfig, + targetDir: cwd, + includeDirectories, + loadMemoryFromIncludeDirectories: + settings.context?.loadMemoryFromIncludeDirectories || false, + debugMode, + question, + fullContext: argv.allFiles || false, + coreTools: settings.tools?.core || undefined, + allowedTools: argv.allowedTools || settings.tools?.allowed || undefined, + excludeTools, + toolDiscoveryCommand: settings.tools?.discoveryCommand, + toolCallCommand: settings.tools?.callCommand, + mcpServerCommand: settings.mcp?.serverCommand, + mcpServers, + userMemory: memoryContent, + geminiMdFileCount: fileCount, + approvalMode, + showMemoryUsage: + argv.showMemoryUsage || settings.ui?.showMemoryUsage || false, + accessibility: { + ...settings.ui?.accessibility, + screenReader, + }, + telemetry: { + enabled: argv.telemetry ?? settings.telemetry?.enabled, + target: (argv.telemetryTarget ?? + settings.telemetry?.target) as TelemetryTarget, + otlpEndpoint: + argv.telemetryOtlpEndpoint ?? 
+ process.env['OTEL_EXPORTER_OTLP_ENDPOINT'] ?? + settings.telemetry?.otlpEndpoint, + otlpProtocol: (['grpc', 'http'] as const).find( + (p) => + p === + (argv.telemetryOtlpProtocol ?? settings.telemetry?.otlpProtocol), + ), + logPrompts: argv.telemetryLogPrompts ?? settings.telemetry?.logPrompts, + outfile: argv.telemetryOutfile ?? settings.telemetry?.outfile, + }, + usageStatisticsEnabled: settings.privacy?.usageStatisticsEnabled ?? true, + // Git-aware file filtering settings + fileFiltering: { + respectGitIgnore: settings.context?.fileFiltering?.respectGitIgnore, + respectGeminiIgnore: settings.context?.fileFiltering?.respectGeminiIgnore, + enableRecursiveFileSearch: + settings.context?.fileFiltering?.enableRecursiveFileSearch, + disableFuzzySearch: settings.context?.fileFiltering?.disableFuzzySearch, + }, + checkpointing: + argv.checkpointing || settings.general?.checkpointing?.enabled, + proxy: + argv.proxy || + process.env['HTTPS_PROXY'] || + process.env['https_proxy'] || + process.env['HTTP_PROXY'] || + process.env['http_proxy'], + cwd, + fileDiscoveryService: fileService, + bugCommand: settings.advanced?.bugCommand, + model: argv.model || settings.model?.name || DEFAULT_GEMINI_MODEL, + extensionContextFilePaths, + maxSessionTurns: settings.model?.maxSessionTurns ?? -1, + experimentalZedIntegration: argv.experimentalAcp || false, + listExtensions: argv.listExtensions || false, + extensions: allExtensions, + blockedMcpServers, + noBrowser: !!process.env['NO_BROWSER'], + summarizeToolOutput: settings.model?.summarizeToolOutput, + ideMode, + chatCompression: settings.model?.chatCompression, + folderTrustFeature, + folderTrust, + interactive, + trustedFolder, + useRipgrep: settings.tools?.useRipgrep, + shouldUseNodePtyShell: settings.tools?.usePty, + skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck, + enablePromptCompletion: settings.general?.enablePromptCompletion ?? false, + eventEmitter: appEvents, + useSmartEdit: argv.useSmartEdit ?? 
settings.useSmartEdit, + }); +} + +function allowedMcpServers( + mcpServers: { [x: string]: MCPServerConfig }, + allowMCPServers: string[], + blockedMcpServers: Array<{ name: string; extensionName: string }>, +) { + const allowedNames = new Set(allowMCPServers.filter(Boolean)); + if (allowedNames.size > 0) { + mcpServers = Object.fromEntries( + Object.entries(mcpServers).filter(([key, server]) => { + const isAllowed = allowedNames.has(key); + if (!isAllowed) { + blockedMcpServers.push({ + name: key, + extensionName: server.extensionName || '', + }); + } + return isAllowed; + }), + ); + } else { + blockedMcpServers.push( + ...Object.entries(mcpServers).map(([key, server]) => ({ + name: key, + extensionName: server.extensionName || '', + })), + ); + mcpServers = {}; + } + return mcpServers; +} + +function mergeMcpServers(settings: Settings, extensions: Extension[]) { + const mcpServers = { ...(settings.mcpServers || {}) }; + for (const extension of extensions) { + Object.entries(extension.config.mcpServers || {}).forEach( + ([key, server]) => { + if (mcpServers[key]) { + logger.warn( + `Skipping extension MCP config for server with key "${key}" as it already exists.`, + ); + return; + } + mcpServers[key] = { + ...server, + extensionName: extension.config.name, + }; + }, + ); + } + return mcpServers; +} + +function mergeExcludeTools( + settings: Settings, + extensions: Extension[], + extraExcludes?: string[] | undefined, +): string[] { + const allExcludeTools = new Set([ + ...(settings.tools?.exclude || []), + ...(extraExcludes || []), + ]); + for (const extension of extensions) { + for (const tool of extension.config.excludeTools || []) { + allExcludeTools.add(tool); + } + } + return [...allExcludeTools]; +} diff --git a/projects/gemini-cli/packages/cli/src/config/extension.test.ts b/projects/gemini-cli/packages/cli/src/config/extension.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..6db44665e26910167d4fe2af3096afdcd2d97532 --- /dev/null 
+++ b/projects/gemini-cli/packages/cli/src/config/extension.test.ts @@ -0,0 +1,898 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi } from 'vitest'; +import * as fs from 'node:fs'; +import * as os from 'node:os'; +import * as path from 'node:path'; +import { + EXTENSIONS_CONFIG_FILENAME, + INSTALL_METADATA_FILENAME, + annotateActiveExtensions, + disableExtension, + enableExtension, + installExtension, + loadExtension, + loadExtensions, + performWorkspaceExtensionMigration, + uninstallExtension, + updateExtension, + type Extension, +} from './extension.js'; +import { + GEMINI_DIR, + type GeminiCLIExtension, + type MCPServerConfig, +} from '@google/gemini-cli-core'; +import { execSync } from 'node:child_process'; +import { SettingScope, loadSettings } from './settings.js'; +import { type SimpleGit, simpleGit } from 'simple-git'; +import { isWorkspaceTrusted } from './trustedFolders.js'; + +vi.mock('simple-git', () => ({ + simpleGit: vi.fn(), +})); + +vi.mock('os', async (importOriginal) => { + const os = await importOriginal(); + return { + ...os, + homedir: vi.fn(), + }; +}); + +vi.mock('./trustedFolders.js', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isWorkspaceTrusted: vi.fn(), + }; +}); + +vi.mock('child_process', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + execSync: vi.fn(), + }; +}); + +const EXTENSIONS_DIRECTORY_NAME = path.join(GEMINI_DIR, 'extensions'); + +describe('loadExtensions', () => { + let tempWorkspaceDir: string; + let tempHomeDir: string; + let workspaceExtensionsDir: string; + + beforeEach(() => { + tempWorkspaceDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-workspace-'), + ); + tempHomeDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-home-'), + ); + vi.mocked(os.homedir).mockReturnValue(tempHomeDir); + 
vi.mocked(isWorkspaceTrusted).mockReturnValue(true); + + workspaceExtensionsDir = path.join( + tempWorkspaceDir, + EXTENSIONS_DIRECTORY_NAME, + ); + fs.mkdirSync(workspaceExtensionsDir, { recursive: true }); + }); + + afterEach(() => { + fs.rmSync(tempWorkspaceDir, { recursive: true, force: true }); + fs.rmSync(tempHomeDir, { recursive: true, force: true }); + vi.restoreAllMocks(); + }); + + it('ignores extensions in untrusted workspaces', () => { + vi.mocked(isWorkspaceTrusted).mockReturnValue(false); + + const extensionDir = path.join(workspaceExtensionsDir, 'test-extension'); + fs.mkdirSync(extensionDir, { recursive: true }); + createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + addContextFile: true, + }); + const extensions = loadExtensions(tempWorkspaceDir); + expect(extensions.length).toBe(0); + }); + + it('should include extension path in loaded extension', () => { + const extensionDir = path.join(workspaceExtensionsDir, 'test-extension'); + fs.mkdirSync(extensionDir, { recursive: true }); + + createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'test-extension', + version: '1.0.0', + }); + + const extensions = loadExtensions(tempWorkspaceDir); + expect(extensions).toHaveLength(1); + expect(extensions[0].path).toBe(extensionDir); + expect(extensions[0].config.name).toBe('test-extension'); + }); + + it('should load context file path when GEMINI.md is present', () => { + createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + addContextFile: true, + }); + createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext2', + version: '2.0.0', + }); + + const extensions = loadExtensions(tempWorkspaceDir); + + expect(extensions).toHaveLength(2); + const ext1 = extensions.find((e) => e.config.name === 'ext1'); + const ext2 = extensions.find((e) => e.config.name === 'ext2'); + expect(ext1?.contextFiles).toEqual([ + path.join(workspaceExtensionsDir, 'ext1', 
'GEMINI.md'), + ]); + expect(ext2?.contextFiles).toEqual([]); + }); + + it('should load context file path from the extension config', () => { + createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + addContextFile: false, + contextFileName: 'my-context-file.md', + }); + + const extensions = loadExtensions(tempWorkspaceDir); + + expect(extensions).toHaveLength(1); + const ext1 = extensions.find((e) => e.config.name === 'ext1'); + expect(ext1?.contextFiles).toEqual([ + path.join(workspaceExtensionsDir, 'ext1', 'my-context-file.md'), + ]); + }); + + it('should filter out disabled extensions', () => { + createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + }); + createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext2', + version: '2.0.0', + }); + + const settingsDir = path.join(tempWorkspaceDir, GEMINI_DIR); + fs.mkdirSync(settingsDir, { recursive: true }); + fs.writeFileSync( + path.join(settingsDir, 'settings.json'), + JSON.stringify({ extensions: { disabled: ['ext1'] } }), + ); + + const extensions = loadExtensions(tempWorkspaceDir); + const activeExtensions = annotateActiveExtensions( + extensions, + [], + tempWorkspaceDir, + ).filter((e) => e.isActive); + expect(activeExtensions).toHaveLength(1); + expect(activeExtensions[0].name).toBe('ext2'); + }); + + it('should hydrate variables', () => { + createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'test-extension', + version: '1.0.0', + addContextFile: false, + contextFileName: undefined, + mcpServers: { + 'test-server': { + cwd: '${extensionPath}${/}server', + }, + }, + }); + + const extensions = loadExtensions(tempWorkspaceDir); + expect(extensions).toHaveLength(1); + const loadedConfig = extensions[0].config; + const expectedCwd = path.join( + workspaceExtensionsDir, + 'test-extension', + 'server', + ); + expect(loadedConfig.mcpServers?.['test-server'].cwd).toBe(expectedCwd); + }); + + it('should 
resolve environment variables in extension configuration', () => { + process.env.TEST_API_KEY = 'test-api-key-123'; + process.env.TEST_DB_URL = 'postgresql://localhost:5432/testdb'; + + try { + const workspaceExtensionsDir = path.join( + tempWorkspaceDir, + EXTENSIONS_DIRECTORY_NAME, + ); + fs.mkdirSync(workspaceExtensionsDir, { recursive: true }); + + const extDir = path.join(workspaceExtensionsDir, 'test-extension'); + fs.mkdirSync(extDir); + + // Write config to a separate file for clarity and good practices + const configPath = path.join(extDir, EXTENSIONS_CONFIG_FILENAME); + const extensionConfig = { + name: 'test-extension', + version: '1.0.0', + mcpServers: { + 'test-server': { + command: 'node', + args: ['server.js'], + env: { + API_KEY: '$TEST_API_KEY', + DATABASE_URL: '${TEST_DB_URL}', + STATIC_VALUE: 'no-substitution', + }, + }, + }, + }; + fs.writeFileSync(configPath, JSON.stringify(extensionConfig)); + + const extensions = loadExtensions(tempWorkspaceDir); + + expect(extensions).toHaveLength(1); + const extension = extensions[0]; + expect(extension.config.name).toBe('test-extension'); + expect(extension.config.mcpServers).toBeDefined(); + + const serverConfig = extension.config.mcpServers?.['test-server']; + expect(serverConfig).toBeDefined(); + expect(serverConfig?.env).toBeDefined(); + expect(serverConfig?.env?.API_KEY).toBe('test-api-key-123'); + expect(serverConfig?.env?.DATABASE_URL).toBe( + 'postgresql://localhost:5432/testdb', + ); + expect(serverConfig?.env?.STATIC_VALUE).toBe('no-substitution'); + } finally { + delete process.env.TEST_API_KEY; + delete process.env.TEST_DB_URL; + } + }); + + it('should handle missing environment variables gracefully', () => { + const workspaceExtensionsDir = path.join( + tempWorkspaceDir, + EXTENSIONS_DIRECTORY_NAME, + ); + fs.mkdirSync(workspaceExtensionsDir, { recursive: true }); + + const extDir = path.join(workspaceExtensionsDir, 'test-extension'); + fs.mkdirSync(extDir); + + const extensionConfig = { + 
name: 'test-extension', + version: '1.0.0', + mcpServers: { + 'test-server': { + command: 'node', + args: ['server.js'], + env: { + MISSING_VAR: '$UNDEFINED_ENV_VAR', + MISSING_VAR_BRACES: '${ALSO_UNDEFINED}', + }, + }, + }, + }; + + fs.writeFileSync( + path.join(extDir, EXTENSIONS_CONFIG_FILENAME), + JSON.stringify(extensionConfig), + ); + + const extensions = loadExtensions(tempWorkspaceDir); + + expect(extensions).toHaveLength(1); + const extension = extensions[0]; + const serverConfig = extension.config.mcpServers!['test-server']; + expect(serverConfig.env).toBeDefined(); + expect(serverConfig.env!.MISSING_VAR).toBe('$UNDEFINED_ENV_VAR'); + expect(serverConfig.env!.MISSING_VAR_BRACES).toBe('${ALSO_UNDEFINED}'); + }); +}); + +describe('annotateActiveExtensions', () => { + const extensions: Extension[] = [ + { + path: '/path/to/ext1', + config: { name: 'ext1', version: '1.0.0' }, + contextFiles: [], + }, + { + path: '/path/to/ext2', + config: { name: 'ext2', version: '1.0.0' }, + contextFiles: [], + }, + { + path: '/path/to/ext3', + config: { name: 'ext3', version: '1.0.0' }, + contextFiles: [], + }, + ]; + + it('should mark all extensions as active if no enabled extensions are provided', () => { + const activeExtensions = annotateActiveExtensions( + extensions, + [], + '/path/to/workspace', + ); + expect(activeExtensions).toHaveLength(3); + expect(activeExtensions.every((e) => e.isActive)).toBe(true); + }); + + it('should mark only the enabled extensions as active', () => { + const activeExtensions = annotateActiveExtensions( + extensions, + ['ext1', 'ext3'], + '/path/to/workspace', + ); + expect(activeExtensions).toHaveLength(3); + expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe( + true, + ); + expect(activeExtensions.find((e) => e.name === 'ext2')?.isActive).toBe( + false, + ); + expect(activeExtensions.find((e) => e.name === 'ext3')?.isActive).toBe( + true, + ); + }); + + it('should mark all extensions as inactive when "none" is 
provided', () => { + const activeExtensions = annotateActiveExtensions( + extensions, + ['none'], + '/path/to/workspace', + ); + expect(activeExtensions).toHaveLength(3); + expect(activeExtensions.every((e) => !e.isActive)).toBe(true); + }); + + it('should handle case-insensitivity', () => { + const activeExtensions = annotateActiveExtensions( + extensions, + ['EXT1'], + '/path/to/workspace', + ); + expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe( + true, + ); + }); + + it('should log an error for unknown extensions', () => { + const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + annotateActiveExtensions(extensions, ['ext4'], '/path/to/workspace'); + expect(consoleSpy).toHaveBeenCalledWith('Extension not found: ext4'); + consoleSpy.mockRestore(); + }); +}); + +describe('installExtension', () => { + let tempHomeDir: string; + let userExtensionsDir: string; + + beforeEach(() => { + tempHomeDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-home-'), + ); + vi.mocked(os.homedir).mockReturnValue(tempHomeDir); + userExtensionsDir = path.join(tempHomeDir, GEMINI_DIR, 'extensions'); + // Clean up before each test + fs.rmSync(userExtensionsDir, { recursive: true, force: true }); + fs.mkdirSync(userExtensionsDir, { recursive: true }); + vi.mocked(isWorkspaceTrusted).mockReturnValue(true); + + vi.mocked(execSync).mockClear(); + }); + + afterEach(() => { + fs.rmSync(tempHomeDir, { recursive: true, force: true }); + }); + + it('should install an extension from a local path', async () => { + const sourceExtDir = createExtension({ + extensionsDir: tempHomeDir, + name: 'my-local-extension', + version: '1.0.0', + }); + const targetExtDir = path.join(userExtensionsDir, 'my-local-extension'); + const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME); + + await installExtension({ source: sourceExtDir, type: 'local' }); + + expect(fs.existsSync(targetExtDir)).toBe(true); + 
expect(fs.existsSync(metadataPath)).toBe(true); + const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8')); + expect(metadata).toEqual({ + source: sourceExtDir, + type: 'local', + }); + fs.rmSync(targetExtDir, { recursive: true, force: true }); + }); + + it('should throw an error if the extension already exists', async () => { + const sourceExtDir = createExtension({ + extensionsDir: tempHomeDir, + name: 'my-local-extension', + version: '1.0.0', + }); + await installExtension({ source: sourceExtDir, type: 'local' }); + await expect( + installExtension({ source: sourceExtDir, type: 'local' }), + ).rejects.toThrow( + 'Extension "my-local-extension" is already installed. Please uninstall it first.', + ); + }); + + it('should throw an error and cleanup if gemini-extension.json is missing', async () => { + const sourceExtDir = path.join(tempHomeDir, 'bad-extension'); + fs.mkdirSync(sourceExtDir, { recursive: true }); + + await expect( + installExtension({ source: sourceExtDir, type: 'local' }), + ).rejects.toThrow( + `Invalid extension at ${sourceExtDir}. 
Please make sure it has a valid gemini-extension.json file.`, + ); + + const targetExtDir = path.join(userExtensionsDir, 'bad-extension'); + expect(fs.existsSync(targetExtDir)).toBe(false); + }); + + it('should install an extension from a git URL', async () => { + const gitUrl = 'https://github.com/google/gemini-extensions.git'; + const extensionName = 'gemini-extensions'; + const targetExtDir = path.join(userExtensionsDir, extensionName); + const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME); + + const clone = vi.fn().mockImplementation(async (_, destination) => { + fs.mkdirSync(destination, { recursive: true }); + fs.writeFileSync( + path.join(destination, EXTENSIONS_CONFIG_FILENAME), + JSON.stringify({ name: extensionName, version: '1.0.0' }), + ); + }); + + const mockedSimpleGit = simpleGit as vi.MockedFunction; + mockedSimpleGit.mockReturnValue({ clone } as unknown as SimpleGit); + + await installExtension({ source: gitUrl, type: 'git' }); + + expect(fs.existsSync(targetExtDir)).toBe(true); + expect(fs.existsSync(metadataPath)).toBe(true); + const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8')); + expect(metadata).toEqual({ + source: gitUrl, + type: 'git', + }); + fs.rmSync(targetExtDir, { recursive: true, force: true }); + }); +}); + +describe('uninstallExtension', () => { + let tempHomeDir: string; + let userExtensionsDir: string; + + beforeEach(() => { + tempHomeDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-home-'), + ); + vi.mocked(os.homedir).mockReturnValue(tempHomeDir); + userExtensionsDir = path.join(tempHomeDir, GEMINI_DIR, 'extensions'); + // Clean up before each test + fs.rmSync(userExtensionsDir, { recursive: true, force: true }); + fs.mkdirSync(userExtensionsDir, { recursive: true }); + + vi.mocked(execSync).mockClear(); + }); + + afterEach(() => { + fs.rmSync(tempHomeDir, { recursive: true, force: true }); + }); + + it('should uninstall an extension by name', async () => { + const sourceExtDir = 
createExtension({ + extensionsDir: userExtensionsDir, + name: 'my-local-extension', + version: '1.0.0', + }); + + await uninstallExtension('my-local-extension'); + + expect(fs.existsSync(sourceExtDir)).toBe(false); + }); + + it('should uninstall an extension by name and retain existing extensions', async () => { + const sourceExtDir = createExtension({ + extensionsDir: userExtensionsDir, + name: 'my-local-extension', + version: '1.0.0', + }); + const otherExtDir = createExtension({ + extensionsDir: userExtensionsDir, + name: 'other-extension', + version: '1.0.0', + }); + + await uninstallExtension('my-local-extension'); + + expect(fs.existsSync(sourceExtDir)).toBe(false); + expect(loadExtensions(tempHomeDir)).toHaveLength(1); + expect(fs.existsSync(otherExtDir)).toBe(true); + }); + + it('should throw an error if the extension does not exist', async () => { + await expect(uninstallExtension('nonexistent-extension')).rejects.toThrow( + 'Extension "nonexistent-extension" not found.', + ); + }); +}); + +describe('performWorkspaceExtensionMigration', () => { + let tempWorkspaceDir: string; + let tempHomeDir: string; + let workspaceExtensionsDir: string; + + beforeEach(() => { + tempWorkspaceDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-workspace-'), + ); + tempHomeDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-home-'), + ); + vi.mocked(os.homedir).mockReturnValue(tempHomeDir); + vi.mocked(isWorkspaceTrusted).mockReturnValue(true); + + workspaceExtensionsDir = path.join( + tempWorkspaceDir, + EXTENSIONS_DIRECTORY_NAME, + ); + fs.mkdirSync(workspaceExtensionsDir, { recursive: true }); + }); + + afterEach(() => { + fs.rmSync(tempWorkspaceDir, { recursive: true, force: true }); + fs.rmSync(tempHomeDir, { recursive: true, force: true }); + vi.restoreAllMocks(); + }); + + describe('folder trust', () => { + it('refuses to install extensions from untrusted folders', async () => { + vi.mocked(isWorkspaceTrusted).mockReturnValue(false); + const 
ext1Path = createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + }); + + const failed = await performWorkspaceExtensionMigration([ + loadExtension(ext1Path)!, + ]); + + expect(failed).toEqual(['ext1']); + }); + + it('does not copy extensions to the user dir', async () => { + vi.mocked(isWorkspaceTrusted).mockReturnValue(false); + const ext1Path = createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + }); + + await performWorkspaceExtensionMigration([loadExtension(ext1Path)!]); + + const userExtensionsDir = path.join( + tempHomeDir, + GEMINI_DIR, + 'extensions', + ); + + expect(() => fs.readdirSync(userExtensionsDir)).toThrow(); + }); + + it('does not load any extensions in the workspace config', async () => { + vi.mocked(isWorkspaceTrusted).mockReturnValue(false); + const ext1Path = createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + }); + + await performWorkspaceExtensionMigration([loadExtension(ext1Path)!]); + const extensions = loadExtensions(tempWorkspaceDir); + + expect(extensions).toEqual([]); + }); + }); + + it('should install the extensions in the user directory', async () => { + const ext1Path = createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + }); + const ext2Path = createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext2', + version: '1.0.0', + }); + const extensionsToMigrate: Extension[] = [ + loadExtension(ext1Path)!, + loadExtension(ext2Path)!, + ]; + const failed = + await performWorkspaceExtensionMigration(extensionsToMigrate); + + expect(failed).toEqual([]); + + const userExtensionsDir = path.join(tempHomeDir, GEMINI_DIR, 'extensions'); + const userExt1Path = path.join(userExtensionsDir, 'ext1'); + const extensions = loadExtensions(tempWorkspaceDir); + + expect(extensions).toHaveLength(2); + const metadataPath = path.join(userExt1Path, 
INSTALL_METADATA_FILENAME); + expect(fs.existsSync(metadataPath)).toBe(true); + const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8')); + expect(metadata).toEqual({ + source: ext1Path, + type: 'local', + }); + }); + + it('should return the names of failed installations', async () => { + const ext1Path = createExtension({ + extensionsDir: workspaceExtensionsDir, + name: 'ext1', + version: '1.0.0', + }); + + const extensions: Extension[] = [ + loadExtension(ext1Path)!, + { + path: '/ext/path/1', + config: { name: 'ext2', version: '1.0.0' }, + contextFiles: [], + }, + ]; + + const failed = await performWorkspaceExtensionMigration(extensions); + expect(failed).toEqual(['ext2']); + }); +}); + +function createExtension({ + extensionsDir = 'extensions-dir', + name = 'my-extension', + version = '1.0.0', + addContextFile = false, + contextFileName = undefined as string | undefined, + mcpServers = {} as Record, +} = {}): string { + const extDir = path.join(extensionsDir, name); + fs.mkdirSync(extDir, { recursive: true }); + fs.writeFileSync( + path.join(extDir, EXTENSIONS_CONFIG_FILENAME), + JSON.stringify({ name, version, contextFileName, mcpServers }), + ); + + if (addContextFile) { + fs.writeFileSync(path.join(extDir, 'GEMINI.md'), 'context'); + } + + if (contextFileName) { + fs.writeFileSync(path.join(extDir, contextFileName), 'context'); + } + return extDir; +} + +describe('updateExtension', () => { + let tempHomeDir: string; + let userExtensionsDir: string; + + beforeEach(() => { + tempHomeDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-home-'), + ); + vi.mocked(os.homedir).mockReturnValue(tempHomeDir); + userExtensionsDir = path.join(tempHomeDir, GEMINI_DIR, 'extensions'); + // Clean up before each test + fs.rmSync(userExtensionsDir, { recursive: true, force: true }); + fs.mkdirSync(userExtensionsDir, { recursive: true }); + vi.mocked(isWorkspaceTrusted).mockReturnValue(true); + + vi.mocked(execSync).mockClear(); + }); + + afterEach(() => 
{ + fs.rmSync(tempHomeDir, { recursive: true, force: true }); + }); + + it('should update a git-installed extension', async () => { + const gitUrl = 'https://github.com/google/gemini-extensions.git'; + const extensionName = 'gemini-extensions'; + const targetExtDir = path.join(userExtensionsDir, extensionName); + const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME); + + fs.mkdirSync(targetExtDir, { recursive: true }); + fs.writeFileSync( + path.join(targetExtDir, EXTENSIONS_CONFIG_FILENAME), + JSON.stringify({ name: extensionName, version: '1.0.0' }), + ); + fs.writeFileSync( + metadataPath, + JSON.stringify({ source: gitUrl, type: 'git' }), + ); + + const clone = vi.fn().mockImplementation(async (_, destination) => { + fs.mkdirSync(destination, { recursive: true }); + fs.writeFileSync( + path.join(destination, EXTENSIONS_CONFIG_FILENAME), + JSON.stringify({ name: extensionName, version: '1.1.0' }), + ); + }); + + const mockedSimpleGit = simpleGit as vi.MockedFunction; + mockedSimpleGit.mockReturnValue({ + clone, + } as unknown as SimpleGit); + + const updateInfo = await updateExtension(loadExtension(targetExtDir)); + + expect(updateInfo).toEqual({ + name: 'gemini-extensions', + originalVersion: '1.0.0', + updatedVersion: '1.1.0', + }); + + const updatedConfig = JSON.parse( + fs.readFileSync( + path.join(targetExtDir, EXTENSIONS_CONFIG_FILENAME), + 'utf-8', + ), + ); + expect(updatedConfig.version).toBe('1.1.0'); + }); +}); + +describe('disableExtension', () => { + let tempWorkspaceDir: string; + let tempHomeDir: string; + + beforeEach(() => { + tempWorkspaceDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-workspace-'), + ); + tempHomeDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-home-'), + ); + vi.mocked(os.homedir).mockReturnValue(tempHomeDir); + vi.spyOn(process, 'cwd').mockReturnValue(tempWorkspaceDir); + }); + + afterEach(() => { + fs.rmSync(tempWorkspaceDir, { recursive: true, force: true }); + 
fs.rmSync(tempHomeDir, { recursive: true, force: true }); + }); + + it('should disable an extension at the user scope', () => { + disableExtension('my-extension', SettingScope.User); + const settings = loadSettings(tempWorkspaceDir); + expect( + settings.forScope(SettingScope.User).settings.extensions?.disabled, + ).toEqual(['my-extension']); + }); + + it('should disable an extension at the workspace scope', () => { + disableExtension('my-extension', SettingScope.Workspace); + const settings = loadSettings(tempWorkspaceDir); + expect( + settings.forScope(SettingScope.Workspace).settings.extensions?.disabled, + ).toEqual(['my-extension']); + }); + + it('should handle disabling the same extension twice', () => { + disableExtension('my-extension', SettingScope.User); + disableExtension('my-extension', SettingScope.User); + const settings = loadSettings(tempWorkspaceDir); + expect( + settings.forScope(SettingScope.User).settings.extensions?.disabled, + ).toEqual(['my-extension']); + }); + + it('should throw an error if you request system scope', () => { + expect(() => disableExtension('my-extension', SettingScope.System)).toThrow( + 'System and SystemDefaults scopes are not supported.', + ); + }); +}); + +describe('enableExtension', () => { + let tempWorkspaceDir: string; + let tempHomeDir: string; + let userExtensionsDir: string; + + beforeEach(() => { + tempWorkspaceDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-workspace-'), + ); + tempHomeDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'gemini-cli-test-home-'), + ); + userExtensionsDir = path.join(tempHomeDir, GEMINI_DIR, 'extensions'); + vi.mocked(os.homedir).mockReturnValue(tempHomeDir); + vi.spyOn(process, 'cwd').mockReturnValue(tempWorkspaceDir); + }); + + afterEach(() => { + fs.rmSync(tempWorkspaceDir, { recursive: true, force: true }); + fs.rmSync(tempHomeDir, { recursive: true, force: true }); + fs.rmSync(userExtensionsDir, { recursive: true, force: true }); + }); + + afterAll(() => { + 
vi.restoreAllMocks(); + }); + + const getActiveExtensions = (): GeminiCLIExtension[] => { + const extensions = loadExtensions(tempWorkspaceDir); + const activeExtensions = annotateActiveExtensions( + extensions, + [], + tempWorkspaceDir, + ); + return activeExtensions.filter((e) => e.isActive); + }; + + it('should enable an extension at the user scope', () => { + createExtension({ + extensionsDir: userExtensionsDir, + name: 'ext1', + version: '1.0.0', + }); + disableExtension('ext1', SettingScope.User); + let activeExtensions = getActiveExtensions(); + expect(activeExtensions).toHaveLength(0); + + enableExtension('ext1', [SettingScope.User]); + activeExtensions = getActiveExtensions(); + expect(activeExtensions).toHaveLength(1); + expect(activeExtensions[0].name).toBe('ext1'); + }); + + it('should enable an extension at the workspace scope', () => { + createExtension({ + extensionsDir: userExtensionsDir, + name: 'ext1', + version: '1.0.0', + }); + disableExtension('ext1', SettingScope.Workspace); + let activeExtensions = getActiveExtensions(); + expect(activeExtensions).toHaveLength(0); + + enableExtension('ext1', [SettingScope.Workspace]); + activeExtensions = getActiveExtensions(); + expect(activeExtensions).toHaveLength(1); + expect(activeExtensions[0].name).toBe('ext1'); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/config/extension.ts b/projects/gemini-cli/packages/cli/src/config/extension.ts new file mode 100644 index 0000000000000000000000000000000000000000..f8bd250d8706ac8581271437c62328aea45b4f67 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/extension.ts @@ -0,0 +1,560 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { + MCPServerConfig, + GeminiCLIExtension, +} from '@google/gemini-cli-core'; +import { GEMINI_DIR, Storage } from '@google/gemini-cli-core'; +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import * as os from 'node:os'; +import 
{ simpleGit } from 'simple-git'; +import { SettingScope, loadSettings } from '../config/settings.js'; +import { getErrorMessage } from '../utils/errors.js'; +import { recursivelyHydrateStrings } from './extensions/variables.js'; +import { isWorkspaceTrusted } from './trustedFolders.js'; +import { resolveEnvVarsInObject } from '../utils/envVarResolver.js'; + +export const EXTENSIONS_DIRECTORY_NAME = path.join(GEMINI_DIR, 'extensions'); + +export const EXTENSIONS_CONFIG_FILENAME = 'gemini-extension.json'; +export const INSTALL_METADATA_FILENAME = '.gemini-extension-install.json'; + +export interface Extension { + path: string; + config: ExtensionConfig; + contextFiles: string[]; + installMetadata?: ExtensionInstallMetadata | undefined; +} + +export interface ExtensionConfig { + name: string; + version: string; + mcpServers?: Record; + contextFileName?: string | string[]; + excludeTools?: string[]; +} + +export interface ExtensionInstallMetadata { + source: string; + type: 'git' | 'local'; +} + +export interface ExtensionUpdateInfo { + name: string; + originalVersion: string; + updatedVersion: string; +} + +export class ExtensionStorage { + private readonly extensionName: string; + + constructor(extensionName: string) { + this.extensionName = extensionName; + } + + getExtensionDir(): string { + return path.join( + ExtensionStorage.getUserExtensionsDir(), + this.extensionName, + ); + } + + getConfigPath(): string { + return path.join(this.getExtensionDir(), EXTENSIONS_CONFIG_FILENAME); + } + + static getUserExtensionsDir(): string { + const storage = new Storage(os.homedir()); + return storage.getExtensionsDir(); + } + + static async createTmpDir(): Promise { + return await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'gemini-extension'), + ); + } +} + +export function getWorkspaceExtensions(workspaceDir: string): Extension[] { + return loadExtensionsFromDir(workspaceDir); +} + +async function copyExtension( + source: string, + destination: string, +): Promise { + 
await fs.promises.cp(source, destination, { recursive: true }); +} + +export async function performWorkspaceExtensionMigration( + extensions: Extension[], +): Promise { + const failedInstallNames: string[] = []; + + for (const extension of extensions) { + try { + const installMetadata: ExtensionInstallMetadata = { + source: extension.path, + type: 'local', + }; + await installExtension(installMetadata); + } catch (_) { + failedInstallNames.push(extension.config.name); + } + } + return failedInstallNames; +} + +export function loadExtensions(workspaceDir: string): Extension[] { + const settings = loadSettings(workspaceDir).merged; + const disabledExtensions = settings.extensions?.disabled ?? []; + const allExtensions = [...loadUserExtensions()]; + + if ( + (isWorkspaceTrusted(settings) ?? true) && + !settings.experimental?.extensionManagement + ) { + allExtensions.push(...getWorkspaceExtensions(workspaceDir)); + } + + const uniqueExtensions = new Map(); + for (const extension of allExtensions) { + if ( + !uniqueExtensions.has(extension.config.name) && + !disabledExtensions.includes(extension.config.name) + ) { + uniqueExtensions.set(extension.config.name, extension); + } + } + + return Array.from(uniqueExtensions.values()); +} + +export function loadUserExtensions(): Extension[] { + const userExtensions = loadExtensionsFromDir(os.homedir()); + + const uniqueExtensions = new Map(); + for (const extension of userExtensions) { + if (!uniqueExtensions.has(extension.config.name)) { + uniqueExtensions.set(extension.config.name, extension); + } + } + + return Array.from(uniqueExtensions.values()); +} + +export function loadExtensionsFromDir(dir: string): Extension[] { + const storage = new Storage(dir); + const extensionsDir = storage.getExtensionsDir(); + if (!fs.existsSync(extensionsDir)) { + return []; + } + + const extensions: Extension[] = []; + for (const subdir of fs.readdirSync(extensionsDir)) { + const extensionDir = path.join(extensionsDir, subdir); + + const 
extension = loadExtension(extensionDir); + if (extension != null) { + extensions.push(extension); + } + } + return extensions; +} + +export function loadExtension(extensionDir: string): Extension | null { + if (!fs.statSync(extensionDir).isDirectory()) { + console.error( + `Warning: unexpected file ${extensionDir} in extensions directory.`, + ); + return null; + } + + const configFilePath = path.join(extensionDir, EXTENSIONS_CONFIG_FILENAME); + if (!fs.existsSync(configFilePath)) { + console.error( + `Warning: extension directory ${extensionDir} does not contain a config file ${configFilePath}.`, + ); + return null; + } + + try { + const configContent = fs.readFileSync(configFilePath, 'utf-8'); + let config = recursivelyHydrateStrings(JSON.parse(configContent), { + extensionPath: extensionDir, + '/': path.sep, + pathSeparator: path.sep, + }) as unknown as ExtensionConfig; + if (!config.name || !config.version) { + console.error( + `Invalid extension config in ${configFilePath}: missing name or version.`, + ); + return null; + } + + config = resolveEnvVarsInObject(config); + + const contextFiles = getContextFileNames(config) + .map((contextFileName) => path.join(extensionDir, contextFileName)) + .filter((contextFilePath) => fs.existsSync(contextFilePath)); + + return { + path: extensionDir, + config, + contextFiles, + installMetadata: loadInstallMetadata(extensionDir), + }; + } catch (e) { + console.error( + `Warning: error parsing extension config in ${configFilePath}: ${getErrorMessage( + e, + )}`, + ); + return null; + } +} + +function loadInstallMetadata( + extensionDir: string, +): ExtensionInstallMetadata | undefined { + const metadataFilePath = path.join(extensionDir, INSTALL_METADATA_FILENAME); + try { + const configContent = fs.readFileSync(metadataFilePath, 'utf-8'); + const metadata = JSON.parse(configContent) as ExtensionInstallMetadata; + return metadata; + } catch (_e) { + return undefined; + } +} + +function getContextFileNames(config: 
ExtensionConfig): string[] { + if (!config.contextFileName) { + return ['GEMINI.md']; + } else if (!Array.isArray(config.contextFileName)) { + return [config.contextFileName]; + } + return config.contextFileName; +} + +/** + * Returns an annotated list of extensions. If an extension is listed in enabledExtensionNames, it will be active. + * If enabledExtensionNames is empty, an extension is active unless it is in list of disabled extensions in settings. + * @param extensions The base list of extensions. + * @param enabledExtensionNames The names of explicitly enabled extensions. + * @param workspaceDir The current workspace directory. + */ +export function annotateActiveExtensions( + extensions: Extension[], + enabledExtensionNames: string[], + workspaceDir: string, +): GeminiCLIExtension[] { + const settings = loadSettings(workspaceDir).merged; + const disabledExtensions = settings.extensions?.disabled ?? []; + + const annotatedExtensions: GeminiCLIExtension[] = []; + + if (enabledExtensionNames.length === 0) { + return extensions.map((extension) => ({ + name: extension.config.name, + version: extension.config.version, + isActive: !disabledExtensions.includes(extension.config.name), + path: extension.path, + })); + } + + const lowerCaseEnabledExtensions = new Set( + enabledExtensionNames.map((e) => e.trim().toLowerCase()), + ); + + if ( + lowerCaseEnabledExtensions.size === 1 && + lowerCaseEnabledExtensions.has('none') + ) { + return extensions.map((extension) => ({ + name: extension.config.name, + version: extension.config.version, + isActive: false, + path: extension.path, + })); + } + + const notFoundNames = new Set(lowerCaseEnabledExtensions); + + for (const extension of extensions) { + const lowerCaseName = extension.config.name.toLowerCase(); + const isActive = lowerCaseEnabledExtensions.has(lowerCaseName); + + if (isActive) { + notFoundNames.delete(lowerCaseName); + } + + annotatedExtensions.push({ + name: extension.config.name, + version: 
extension.config.version, + isActive, + path: extension.path, + }); + } + + for (const requestedName of notFoundNames) { + console.error(`Extension not found: ${requestedName}`); + } + + return annotatedExtensions; +} + +/** + * Clones a Git repository to a specified local path. + * @param gitUrl The Git URL to clone. + * @param destination The destination path to clone the repository to. + */ +async function cloneFromGit( + gitUrl: string, + destination: string, +): Promise { + try { + // TODO(chrstnb): Download the archive instead to avoid unnecessary .git info. + await simpleGit().clone(gitUrl, destination, ['--depth', '1']); + } catch (error) { + throw new Error(`Failed to clone Git repository from ${gitUrl}`, { + cause: error, + }); + } +} + +export async function installExtension( + installMetadata: ExtensionInstallMetadata, + cwd: string = process.cwd(), +): Promise { + const settings = loadSettings(cwd).merged; + if (!isWorkspaceTrusted(settings)) { + throw new Error( + `Could not install extension from untrusted folder at ${installMetadata.source}`, + ); + } + + const extensionsDir = ExtensionStorage.getUserExtensionsDir(); + await fs.promises.mkdir(extensionsDir, { recursive: true }); + + // Convert relative paths to absolute paths for the metadata file. + if ( + installMetadata.type === 'local' && + !path.isAbsolute(installMetadata.source) + ) { + installMetadata.source = path.resolve(cwd, installMetadata.source); + } + + let localSourcePath: string; + let tempDir: string | undefined; + if (installMetadata.type === 'git') { + tempDir = await ExtensionStorage.createTmpDir(); + await cloneFromGit(installMetadata.source, tempDir); + localSourcePath = tempDir; + } else { + localSourcePath = installMetadata.source; + } + let newExtensionName: string | undefined; + try { + const newExtension = loadExtension(localSourcePath); + if (!newExtension) { + throw new Error( + `Invalid extension at ${installMetadata.source}. 
Please make sure it has a valid gemini-extension.json file.`, + ); + } + + // ~/.gemini/extensions/{ExtensionConfig.name}. + newExtensionName = newExtension.config.name; + const extensionStorage = new ExtensionStorage(newExtensionName); + const destinationPath = extensionStorage.getExtensionDir(); + + const installedExtensions = loadUserExtensions(); + if ( + installedExtensions.some( + (installed) => installed.config.name === newExtensionName, + ) + ) { + throw new Error( + `Extension "${newExtensionName}" is already installed. Please uninstall it first.`, + ); + } + + await copyExtension(localSourcePath, destinationPath); + + const metadataString = JSON.stringify(installMetadata, null, 2); + const metadataPath = path.join(destinationPath, INSTALL_METADATA_FILENAME); + await fs.promises.writeFile(metadataPath, metadataString); + } finally { + if (tempDir) { + await fs.promises.rm(tempDir, { recursive: true, force: true }); + } + } + + return newExtensionName; +} + +export async function uninstallExtension( + extensionName: string, + cwd: string = process.cwd(), +): Promise { + const installedExtensions = loadUserExtensions(); + if ( + !installedExtensions.some( + (installed) => installed.config.name === extensionName, + ) + ) { + throw new Error(`Extension "${extensionName}" not found.`); + } + removeFromDisabledExtensions( + extensionName, + [SettingScope.User, SettingScope.Workspace], + cwd, + ); + const storage = new ExtensionStorage(extensionName); + return await fs.promises.rm(storage.getExtensionDir(), { + recursive: true, + force: true, + }); +} + +export function toOutputString(extension: Extension): string { + let output = `${extension.config.name} (${extension.config.version})`; + output += `\n Path: ${extension.path}`; + if (extension.installMetadata) { + output += `\n Source: ${extension.installMetadata.source}`; + } + if (extension.contextFiles.length > 0) { + output += `\n Context files:`; + extension.contextFiles.forEach((contextFile) => { + output 
+= `\n ${contextFile}`; + }); + } + if (extension.config.mcpServers) { + output += `\n MCP servers:`; + Object.keys(extension.config.mcpServers).forEach((key) => { + output += `\n ${key}`; + }); + } + if (extension.config.excludeTools) { + output += `\n Excluded tools:`; + extension.config.excludeTools.forEach((tool) => { + output += `\n ${tool}`; + }); + } + return output; +} + +export async function updateExtensionByName( + extensionName: string, + cwd: string = process.cwd(), +): Promise { + const installedExtensions = loadUserExtensions(); + const extension = installedExtensions.find( + (installed) => installed.config.name === extensionName, + ); + if (!extension) { + throw new Error( + `Extension "${extensionName}" not found. Run gemini extensions list to see available extensions.`, + ); + } + return await updateExtension(extension, cwd); +} + +export async function updateExtension( + extension: Extension, + cwd: string = process.cwd(), +): Promise { + if (!extension.installMetadata) { + throw new Error(`Extension ${extension.config.name} cannot be updated.`); + } + const originalVersion = extension.config.version; + const tempDir = await ExtensionStorage.createTmpDir(); + try { + await copyExtension(extension.path, tempDir); + await uninstallExtension(extension.config.name, cwd); + await installExtension(extension.installMetadata, cwd); + + const updatedExtension = loadExtension(extension.path); + if (!updatedExtension) { + throw new Error('Updated extension not found after installation.'); + } + const updatedVersion = updatedExtension.config.version; + return { + name: extension.config.name, + originalVersion, + updatedVersion, + }; + } catch (e) { + console.error( + `Error updating extension, rolling back. 
${getErrorMessage(e)}`, + ); + await copyExtension(tempDir, extension.path); + throw e; + } finally { + await fs.promises.rm(tempDir, { recursive: true, force: true }); + } +} + +export function disableExtension( + name: string, + scope: SettingScope, + cwd: string = process.cwd(), +) { + if (scope === SettingScope.System || scope === SettingScope.SystemDefaults) { + throw new Error('System and SystemDefaults scopes are not supported.'); + } + const settings = loadSettings(cwd); + const settingsFile = settings.forScope(scope); + const extensionSettings = settingsFile.settings.extensions || { + disabled: [], + }; + const disabledExtensions = extensionSettings.disabled || []; + if (!disabledExtensions.includes(name)) { + disabledExtensions.push(name); + extensionSettings.disabled = disabledExtensions; + settings.setValue(scope, 'extensions', extensionSettings); + } +} + +export function enableExtension(name: string, scopes: SettingScope[]) { + removeFromDisabledExtensions(name, scopes); +} + +/** + * Removes an extension from the list of disabled extensions. + * @param name The name of the extension to remove. + * @param scope The scopes to remove the name from. 
+ */ +function removeFromDisabledExtensions( + name: string, + scopes: SettingScope[], + cwd: string = process.cwd(), +) { + const settings = loadSettings(cwd); + for (const scope of scopes) { + const settingsFile = settings.forScope(scope); + const extensionSettings = settingsFile.settings.extensions || { + disabled: [], + }; + const disabledExtensions = extensionSettings.disabled || []; + extensionSettings.disabled = disabledExtensions.filter( + (extension) => extension !== name, + ); + settings.setValue(scope, 'extensions', extensionSettings); + } +} + +export async function updateAllUpdatableExtensions( + cwd: string = process.cwd(), +): Promise { + const extensions = loadExtensions(cwd).filter( + (extension) => !!extension.installMetadata, + ); + return await Promise.all( + extensions.map((extension) => updateExtension(extension, cwd)), + ); +} diff --git a/projects/gemini-cli/packages/cli/src/config/extensions/variableSchema.ts b/projects/gemini-cli/packages/cli/src/config/extensions/variableSchema.ts new file mode 100644 index 0000000000000000000000000000000000000000..e55f2a52580ff589bc75c0728e06c48d04b30b96 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/extensions/variableSchema.ts @@ -0,0 +1,30 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +export interface VariableDefinition { + type: 'string'; + description: string; + default?: string; + required?: boolean; +} + +export interface VariableSchema { + [key: string]: VariableDefinition; +} + +const PATH_SEPARATOR_DEFINITION = { + type: 'string', + description: 'The path separator.', +} as const; + +export const VARIABLE_SCHEMA = { + extensionPath: { + type: 'string', + description: 'The path of the extension in the filesystem.', + }, + '/': PATH_SEPARATOR_DEFINITION, + pathSeparator: PATH_SEPARATOR_DEFINITION, +} as const; diff --git a/projects/gemini-cli/packages/cli/src/config/extensions/variables.test.ts 
b/projects/gemini-cli/packages/cli/src/config/extensions/variables.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..d2015f4f9e2c3b74aa10044142f43025e95abb75 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/extensions/variables.test.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { expect, describe, it } from 'vitest'; +import { hydrateString } from './variables.js'; + +describe('hydrateString', () => { + it('should replace a single variable', () => { + const context = { + extensionPath: 'path/my-extension', + }; + const result = hydrateString('Hello, ${extensionPath}!', context); + expect(result).toBe('Hello, path/my-extension!'); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/config/extensions/variables.ts b/projects/gemini-cli/packages/cli/src/config/extensions/variables.ts new file mode 100644 index 0000000000000000000000000000000000000000..7c6ef8469242c17e08182d57f755d640736e832f --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/extensions/variables.ts @@ -0,0 +1,65 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { type VariableSchema, VARIABLE_SCHEMA } from './variableSchema.js'; + +export type JsonObject = { [key: string]: JsonValue }; +export type JsonArray = JsonValue[]; +export type JsonValue = + | string + | number + | boolean + | null + | JsonObject + | JsonArray; + +export type VariableContext = { + [key in keyof typeof VARIABLE_SCHEMA]?: string; +}; + +export function validateVariables( + variables: VariableContext, + schema: VariableSchema, +) { + for (const key in schema) { + const definition = schema[key]; + if (definition.required && !variables[key as keyof VariableContext]) { + throw new Error(`Missing required variable: ${key}`); + } + } +} + +export function hydrateString(str: string, context: VariableContext): string { + 
validateVariables(context, VARIABLE_SCHEMA); + const regex = /\${(.*?)}/g; + return str.replace(regex, (match, key) => + context[key as keyof VariableContext] == null + ? match + : (context[key as keyof VariableContext] as string), + ); +} + +export function recursivelyHydrateStrings( + obj: JsonValue, + values: VariableContext, +): JsonValue { + if (typeof obj === 'string') { + return hydrateString(obj, values); + } + if (Array.isArray(obj)) { + return obj.map((item) => recursivelyHydrateStrings(item, values)); + } + if (typeof obj === 'object' && obj !== null) { + const newObj: JsonObject = {}; + for (const key in obj) { + if (Object.prototype.hasOwnProperty.call(obj, key)) { + newObj[key] = recursivelyHydrateStrings(obj[key], values); + } + } + return newObj; + } + return obj; +} diff --git a/projects/gemini-cli/packages/cli/src/config/keyBindings.test.ts b/projects/gemini-cli/packages/cli/src/config/keyBindings.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..1003290b8c95e30dbc6d205f591c4c619d0622ab --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/keyBindings.test.ts @@ -0,0 +1,59 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import type { KeyBindingConfig } from './keyBindings.js'; +import { Command, defaultKeyBindings } from './keyBindings.js'; + +describe('keyBindings config', () => { + describe('defaultKeyBindings', () => { + it('should have bindings for all commands', () => { + const commands = Object.values(Command); + + for (const command of commands) { + expect(defaultKeyBindings[command]).toBeDefined(); + expect(Array.isArray(defaultKeyBindings[command])).toBe(true); + } + }); + + it('should have valid key binding structures', () => { + for (const [_, bindings] of Object.entries(defaultKeyBindings)) { + for (const binding of bindings) { + // Each binding should have either key or sequence, but not both + const 
hasKey = binding.key !== undefined; + const hasSequence = binding.sequence !== undefined; + + expect(hasKey || hasSequence).toBe(true); + expect(hasKey && hasSequence).toBe(false); + + // Modifier properties should be boolean or undefined + if (binding.ctrl !== undefined) { + expect(typeof binding.ctrl).toBe('boolean'); + } + if (binding.shift !== undefined) { + expect(typeof binding.shift).toBe('boolean'); + } + if (binding.command !== undefined) { + expect(typeof binding.command).toBe('boolean'); + } + if (binding.paste !== undefined) { + expect(typeof binding.paste).toBe('boolean'); + } + } + } + }); + + it('should export all required types', () => { + // Basic type checks + expect(typeof Command.HOME).toBe('string'); + expect(typeof Command.END).toBe('string'); + + // Config should be readonly + const config: KeyBindingConfig = defaultKeyBindings; + expect(config[Command.HOME]).toBeDefined(); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/config/keyBindings.ts b/projects/gemini-cli/packages/cli/src/config/keyBindings.ts new file mode 100644 index 0000000000000000000000000000000000000000..ed1301ea070090b1cf140e30f9c27f489186c020 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/keyBindings.ts @@ -0,0 +1,184 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Command enum for all available keyboard shortcuts + */ +export enum Command { + // Basic bindings + RETURN = 'return', + ESCAPE = 'escape', + + // Cursor movement + HOME = 'home', + END = 'end', + + // Text deletion + KILL_LINE_RIGHT = 'killLineRight', + KILL_LINE_LEFT = 'killLineLeft', + CLEAR_INPUT = 'clearInput', + + // Screen control + CLEAR_SCREEN = 'clearScreen', + + // History navigation + HISTORY_UP = 'historyUp', + HISTORY_DOWN = 'historyDown', + NAVIGATION_UP = 'navigationUp', + NAVIGATION_DOWN = 'navigationDown', + + // Auto-completion + ACCEPT_SUGGESTION = 'acceptSuggestion', + COMPLETION_UP = 'completionUp', 
+ COMPLETION_DOWN = 'completionDown', + + // Text input + SUBMIT = 'submit', + NEWLINE = 'newline', + + // External tools + OPEN_EXTERNAL_EDITOR = 'openExternalEditor', + PASTE_CLIPBOARD_IMAGE = 'pasteClipboardImage', + + // App level bindings + SHOW_ERROR_DETAILS = 'showErrorDetails', + TOGGLE_TOOL_DESCRIPTIONS = 'toggleToolDescriptions', + TOGGLE_IDE_CONTEXT_DETAIL = 'toggleIDEContextDetail', + QUIT = 'quit', + EXIT = 'exit', + SHOW_MORE_LINES = 'showMoreLines', + + // Shell commands + REVERSE_SEARCH = 'reverseSearch', + SUBMIT_REVERSE_SEARCH = 'submitReverseSearch', + ACCEPT_SUGGESTION_REVERSE_SEARCH = 'acceptSuggestionReverseSearch', +} + +/** + * Data-driven key binding structure for user configuration + */ +export interface KeyBinding { + /** The key name (e.g., 'a', 'return', 'tab', 'escape') */ + key?: string; + /** The key sequence (e.g., '\x18' for Ctrl+X) - alternative to key name */ + sequence?: string; + /** Control key requirement: true=must be pressed, false=must not be pressed, undefined=ignore */ + ctrl?: boolean; + /** Shift key requirement: true=must be pressed, false=must not be pressed, undefined=ignore */ + shift?: boolean; + /** Command/meta key requirement: true=must be pressed, false=must not be pressed, undefined=ignore */ + command?: boolean; + /** Paste operation requirement: true=must be paste, false=must not be paste, undefined=ignore */ + paste?: boolean; +} + +/** + * Configuration type mapping commands to their key bindings + */ +export type KeyBindingConfig = { + readonly [C in Command]: readonly KeyBinding[]; +}; + +/** + * Default key binding configuration + * Matches the original hard-coded logic exactly + */ +export const defaultKeyBindings: KeyBindingConfig = { + // Basic bindings + [Command.RETURN]: [{ key: 'return' }], + // Original: key.name === 'escape' + [Command.ESCAPE]: [{ key: 'escape' }], + + // Cursor movement + // Original: key.ctrl && key.name === 'a' + [Command.HOME]: [{ key: 'a', ctrl: true }], + // Original: 
key.ctrl && key.name === 'e' + [Command.END]: [{ key: 'e', ctrl: true }], + + // Text deletion + // Original: key.ctrl && key.name === 'k' + [Command.KILL_LINE_RIGHT]: [{ key: 'k', ctrl: true }], + // Original: key.ctrl && key.name === 'u' + [Command.KILL_LINE_LEFT]: [{ key: 'u', ctrl: true }], + // Original: key.ctrl && key.name === 'c' + [Command.CLEAR_INPUT]: [{ key: 'c', ctrl: true }], + + // Screen control + // Original: key.ctrl && key.name === 'l' + [Command.CLEAR_SCREEN]: [{ key: 'l', ctrl: true }], + + // History navigation + // Original: key.ctrl && key.name === 'p' + [Command.HISTORY_UP]: [{ key: 'p', ctrl: true }], + // Original: key.ctrl && key.name === 'n' + [Command.HISTORY_DOWN]: [{ key: 'n', ctrl: true }], + // Original: key.name === 'up' + [Command.NAVIGATION_UP]: [{ key: 'up' }], + // Original: key.name === 'down' + [Command.NAVIGATION_DOWN]: [{ key: 'down' }], + + // Auto-completion + // Original: key.name === 'tab' || (key.name === 'return' && !key.ctrl) + [Command.ACCEPT_SUGGESTION]: [{ key: 'tab' }, { key: 'return', ctrl: false }], + // Completion navigation (arrow or Ctrl+P/N) + [Command.COMPLETION_UP]: [{ key: 'up' }, { key: 'p', ctrl: true }], + [Command.COMPLETION_DOWN]: [{ key: 'down' }, { key: 'n', ctrl: true }], + + // Text input + // Original: key.name === 'return' && !key.ctrl && !key.meta && !key.paste + // Must also exclude shift to allow shift+enter for newline + [Command.SUBMIT]: [ + { + key: 'return', + ctrl: false, + command: false, + paste: false, + shift: false, + }, + ], + // Original: key.name === 'return' && (key.ctrl || key.meta || key.paste) + // Split into multiple data-driven bindings + // Now also includes shift+enter for multi-line input + [Command.NEWLINE]: [ + { key: 'return', ctrl: true }, + { key: 'return', command: true }, + { key: 'return', paste: true }, + { key: 'return', shift: true }, + { key: 'j', ctrl: true }, + ], + + // External tools + // Original: key.ctrl && (key.name === 'x' || key.sequence === 
'\x18') + [Command.OPEN_EXTERNAL_EDITOR]: [ + { key: 'x', ctrl: true }, + { sequence: '\x18', ctrl: true }, + ], + // Original: key.ctrl && key.name === 'v' + [Command.PASTE_CLIPBOARD_IMAGE]: [{ key: 'v', ctrl: true }], + + // App level bindings + // Original: key.ctrl && key.name === 'o' + [Command.SHOW_ERROR_DETAILS]: [{ key: 'o', ctrl: true }], + // Original: key.ctrl && key.name === 't' + [Command.TOGGLE_TOOL_DESCRIPTIONS]: [{ key: 't', ctrl: true }], + // Original: key.ctrl && key.name === 'g' + [Command.TOGGLE_IDE_CONTEXT_DETAIL]: [{ key: 'g', ctrl: true }], + // Original: key.ctrl && (key.name === 'c' || key.name === 'C') + [Command.QUIT]: [{ key: 'c', ctrl: true }], + // Original: key.ctrl && (key.name === 'd' || key.name === 'D') + [Command.EXIT]: [{ key: 'd', ctrl: true }], + // Original: key.ctrl && key.name === 's' + [Command.SHOW_MORE_LINES]: [{ key: 's', ctrl: true }], + + // Shell commands + // Original: key.ctrl && key.name === 'r' + [Command.REVERSE_SEARCH]: [{ key: 'r', ctrl: true }], + // Original: key.name === 'return' && !key.ctrl + // Note: original logic ONLY checked ctrl=false, ignored meta/shift/paste + [Command.SUBMIT_REVERSE_SEARCH]: [{ key: 'return', ctrl: false }], + // Original: key.name === 'tab' + [Command.ACCEPT_SUGGESTION_REVERSE_SEARCH]: [{ key: 'tab' }], +}; diff --git a/projects/gemini-cli/packages/cli/src/config/sandboxConfig.ts b/projects/gemini-cli/packages/cli/src/config/sandboxConfig.ts new file mode 100644 index 0000000000000000000000000000000000000000..8404e589eb02386208793ec522396f046bdb4341 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/sandboxConfig.ts @@ -0,0 +1,105 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { SandboxConfig } from '@google/gemini-cli-core'; +import { FatalSandboxError } from '@google/gemini-cli-core'; +import commandExists from 'command-exists'; +import * as os from 'node:os'; +import { getPackageJson } from 
'../utils/package.js'; +import type { Settings } from './settings.js'; + +// This is a stripped-down version of the CliArgs interface from config.ts +// to avoid circular dependencies. +interface SandboxCliArgs { + sandbox?: boolean | string; + sandboxImage?: string; +} + +const VALID_SANDBOX_COMMANDS: ReadonlyArray = [ + 'docker', + 'podman', + 'sandbox-exec', +]; + +function isSandboxCommand(value: string): value is SandboxConfig['command'] { + return (VALID_SANDBOX_COMMANDS as readonly string[]).includes(value); +} + +function getSandboxCommand( + sandbox?: boolean | string, +): SandboxConfig['command'] | '' { + // If the SANDBOX env var is set, we're already inside the sandbox. + if (process.env['SANDBOX']) { + return ''; + } + + // note environment variable takes precedence over argument (from command line or settings) + const environmentConfiguredSandbox = + process.env['GEMINI_SANDBOX']?.toLowerCase().trim() ?? ''; + sandbox = + environmentConfiguredSandbox?.length > 0 + ? environmentConfiguredSandbox + : sandbox; + if (sandbox === '1' || sandbox === 'true') sandbox = true; + else if (sandbox === '0' || sandbox === 'false' || !sandbox) sandbox = false; + + if (sandbox === false) { + return ''; + } + + if (typeof sandbox === 'string' && sandbox) { + if (!isSandboxCommand(sandbox)) { + throw new FatalSandboxError( + `Invalid sandbox command '${sandbox}'. 
Must be one of ${VALID_SANDBOX_COMMANDS.join( + ', ', + )}`, + ); + } + // confirm that specified command exists + if (commandExists.sync(sandbox)) { + return sandbox; + } + throw new FatalSandboxError( + `Missing sandbox command '${sandbox}' (from GEMINI_SANDBOX)`, + ); + } + + // look for seatbelt, docker, or podman, in that order + // for container-based sandboxing, require sandbox to be enabled explicitly + if (os.platform() === 'darwin' && commandExists.sync('sandbox-exec')) { + return 'sandbox-exec'; + } else if (commandExists.sync('docker') && sandbox === true) { + return 'docker'; + } else if (commandExists.sync('podman') && sandbox === true) { + return 'podman'; + } + + // throw an error if user requested sandbox but no command was found + if (sandbox === true) { + throw new FatalSandboxError( + 'GEMINI_SANDBOX is true but failed to determine command for sandbox; ' + + 'install docker or podman or specify command in GEMINI_SANDBOX', + ); + } + + return ''; +} + +export async function loadSandboxConfig( + settings: Settings, + argv: SandboxCliArgs, +): Promise { + const sandboxOption = argv.sandbox ?? settings.tools?.sandbox; + const command = getSandboxCommand(sandboxOption); + + const packageJson = await getPackageJson(); + const image = + argv.sandboxImage ?? + process.env['GEMINI_SANDBOX_IMAGE'] ?? + packageJson?.config?.sandboxImageUri; + + return command && image ? { command, image } : undefined; +} diff --git a/projects/gemini-cli/packages/cli/src/config/settings.test.ts b/projects/gemini-cli/packages/cli/src/config/settings.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..012f84fe9a81b1da231fc1dc01564bf2a16605f9 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/settings.test.ts @@ -0,0 +1,2448 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/// + +// Mock 'os' first. 
+import * as osActual from 'node:os'; // Import for type info for the mock factory + +vi.mock('os', async (importOriginal) => { + const actualOs = await importOriginal(); + return { + ...actualOs, + homedir: vi.fn(() => '/mock/home/user'), + platform: vi.fn(() => 'linux'), + }; +}); + +// Mock './settings.js' to ensure it uses the mocked 'os.homedir()' for its internal constants. +vi.mock('./settings.js', async (importActual) => { + const originalModule = await importActual(); + return { + __esModule: true, // Ensure correct module shape + ...originalModule, // Re-export all original members + // We are relying on originalModule's USER_SETTINGS_PATH being constructed with mocked os.homedir() + }; +}); + +// Mock trustedFolders +vi.mock('./trustedFolders.js', () => ({ + isWorkspaceTrusted: vi.fn(), +})); + +// NOW import everything else, including the (now effectively re-exported) settings.js +import path, * as pathActual from 'node:path'; // Restored for MOCK_WORKSPACE_SETTINGS_PATH +import { + describe, + it, + expect, + vi, + beforeEach, + afterEach, + type Mocked, + type Mock, +} from 'vitest'; +import * as fs from 'node:fs'; // fs will be mocked separately +import stripJsonComments from 'strip-json-comments'; // Will be mocked separately +import { isWorkspaceTrusted } from './trustedFolders.js'; + +// These imports will get the versions from the vi.mock('./settings.js', ...) factory. +import { + loadSettings, + USER_SETTINGS_PATH, // This IS the mocked path. + getSystemSettingsPath, + getSystemDefaultsPath, + SETTINGS_DIRECTORY_NAME, // This is from the original module, but used by the mock. 
+ migrateSettingsToV1, + type Settings, + loadEnvironment, +} from './settings.js'; +import { GEMINI_DIR } from '@google/gemini-cli-core'; + +const MOCK_WORKSPACE_DIR = '/mock/workspace'; +// Use the (mocked) SETTINGS_DIRECTORY_NAME for consistency +const MOCK_WORKSPACE_SETTINGS_PATH = pathActual.join( + MOCK_WORKSPACE_DIR, + SETTINGS_DIRECTORY_NAME, + 'settings.json', +); + +// A more flexible type for test data that allows arbitrary properties. +type TestSettings = Settings & { [key: string]: unknown }; + +vi.mock('fs', async (importOriginal) => { + // Get all the functions from the real 'fs' module + const actualFs = await importOriginal(); + + return { + ...actualFs, // Keep all the real functions + // Now, just override the ones we need for the test + existsSync: vi.fn(), + readFileSync: vi.fn(), + writeFileSync: vi.fn(), + mkdirSync: vi.fn(), + realpathSync: (p: string) => p, + }; +}); + +vi.mock('strip-json-comments', () => ({ + default: vi.fn((content) => content), +})); + +describe('Settings Loading and Merging', () => { + let mockFsExistsSync: Mocked; + let mockStripJsonComments: Mocked; + let mockFsMkdirSync: Mocked; + + beforeEach(() => { + vi.resetAllMocks(); + + mockFsExistsSync = vi.mocked(fs.existsSync); + mockFsMkdirSync = vi.mocked(fs.mkdirSync); + mockStripJsonComments = vi.mocked(stripJsonComments); + + vi.mocked(osActual.homedir).mockReturnValue('/mock/home/user'); + (mockStripJsonComments as unknown as Mock).mockImplementation( + (jsonString: string) => jsonString, + ); + (mockFsExistsSync as Mock).mockReturnValue(false); + (fs.readFileSync as Mock).mockReturnValue('{}'); // Return valid empty JSON + (mockFsMkdirSync as Mock).mockImplementation(() => undefined); + vi.mocked(isWorkspaceTrusted).mockReturnValue(true); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('loadSettings', () => { + it('should load empty settings if no files exist', () => { + const settings = loadSettings(MOCK_WORKSPACE_DIR); + 
expect(settings.system.settings).toEqual({}); + expect(settings.user.settings).toEqual({}); + expect(settings.workspace.settings).toEqual({}); + expect(settings.merged).toEqual({ + general: {}, + ui: { + customThemes: {}, + }, + mcp: {}, + mcpServers: {}, + context: { + includeDirectories: [], + }, + model: { + chatCompression: {}, + }, + advanced: { + excludedEnvVars: [], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + security: {}, + }); + expect(settings.errors.length).toBe(0); + }); + + it('should load system settings if only system file exists', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === getSystemSettingsPath(), + ); + const systemSettingsContent = { + ui: { + theme: 'system-default', + }, + tools: { + sandbox: false, + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === getSystemSettingsPath()) + return JSON.stringify(systemSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(fs.readFileSync).toHaveBeenCalledWith( + getSystemSettingsPath(), + 'utf-8', + ); + expect(settings.system.settings).toEqual(systemSettingsContent); + expect(settings.user.settings).toEqual({}); + expect(settings.workspace.settings).toEqual({}); + expect(settings.merged).toEqual({ + ...systemSettingsContent, + general: {}, + ui: { + ...systemSettingsContent.ui, + customThemes: {}, + }, + mcp: {}, + mcpServers: {}, + context: { + includeDirectories: [], + }, + model: { + chatCompression: {}, + }, + advanced: { + excludedEnvVars: [], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + security: {}, + }); + }); + + it('should load user settings if only user file exists', () => { + const expectedUserSettingsPath = USER_SETTINGS_PATH; // Use the path actually resolved by the (mocked) module + + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === 
expectedUserSettingsPath, + ); + const userSettingsContent = { + ui: { + theme: 'dark', + }, + context: { + fileName: 'USER_CONTEXT.md', + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === expectedUserSettingsPath) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(fs.readFileSync).toHaveBeenCalledWith( + expectedUserSettingsPath, + 'utf-8', + ); + expect(settings.user.settings).toEqual(userSettingsContent); + expect(settings.workspace.settings).toEqual({}); + expect(settings.merged).toEqual({ + ...userSettingsContent, + general: {}, + ui: { + ...userSettingsContent.ui, + customThemes: {}, + }, + mcp: {}, + mcpServers: {}, + context: { + ...userSettingsContent.context, + includeDirectories: [], + }, + model: { + chatCompression: {}, + }, + advanced: { + excludedEnvVars: [], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + security: {}, + }); + }); + + it('should load workspace settings if only workspace file exists', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH, + ); + const workspaceSettingsContent = { + tools: { + sandbox: true, + }, + context: { + fileName: 'WORKSPACE_CONTEXT.md', + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(fs.readFileSync).toHaveBeenCalledWith( + MOCK_WORKSPACE_SETTINGS_PATH, + 'utf-8', + ); + expect(settings.user.settings).toEqual({}); + expect(settings.workspace.settings).toEqual(workspaceSettingsContent); + expect(settings.merged).toEqual({ + tools: { + sandbox: true, + }, + context: { + fileName: 'WORKSPACE_CONTEXT.md', + includeDirectories: [], + }, + general: {}, + ui: { + 
customThemes: {}, + }, + mcp: {}, + mcpServers: {}, + model: { + chatCompression: {}, + }, + advanced: { + excludedEnvVars: [], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + security: {}, + }); + }); + + it('should merge user and workspace settings, with workspace taking precedence', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + ui: { + theme: 'dark', + }, + tools: { + sandbox: false, + }, + context: { + fileName: 'USER_CONTEXT.md', + }, + }; + const workspaceSettingsContent = { + tools: { + sandbox: true, + core: ['tool1'], + }, + context: { + fileName: 'WORKSPACE_CONTEXT.md', + }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.user.settings).toEqual(userSettingsContent); + expect(settings.workspace.settings).toEqual(workspaceSettingsContent); + expect(settings.merged).toEqual({ + general: {}, + ui: { + theme: 'dark', + customThemes: {}, + }, + tools: { + sandbox: true, + core: ['tool1'], + }, + context: { + fileName: 'WORKSPACE_CONTEXT.md', + includeDirectories: [], + }, + advanced: { + excludedEnvVars: [], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + mcp: {}, + mcpServers: {}, + model: { + chatCompression: {}, + }, + security: {}, + }); + }); + + it('should merge system, user and workspace settings, with system taking precedence over workspace, and workspace over user', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const systemSettingsContent = { + ui: { + theme: 'system-theme', + }, + tools: { + sandbox: false, + }, + mcp: { + allowed: ['server1', 'server2'], + }, + telemetry: { enabled: false }, + }; + const 
userSettingsContent = { + ui: { + theme: 'dark', + }, + tools: { + sandbox: true, + }, + context: { + fileName: 'USER_CONTEXT.md', + }, + }; + const workspaceSettingsContent = { + tools: { + sandbox: false, + core: ['tool1'], + }, + context: { + fileName: 'WORKSPACE_CONTEXT.md', + }, + mcp: { + allowed: ['server1', 'server2', 'server3'], + }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === getSystemSettingsPath()) + return JSON.stringify(systemSettingsContent); + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.system.settings).toEqual(systemSettingsContent); + expect(settings.user.settings).toEqual(userSettingsContent); + expect(settings.workspace.settings).toEqual(workspaceSettingsContent); + expect(settings.merged).toEqual({ + general: {}, + ui: { + theme: 'system-theme', + customThemes: {}, + }, + tools: { + sandbox: false, + }, + telemetry: { enabled: false }, + context: { + fileName: 'WORKSPACE_CONTEXT.md', + includeDirectories: [], + }, + mcp: { + allowed: ['server1', 'server2'], + }, + advanced: { + excludedEnvVars: [], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + mcpServers: {}, + model: { + chatCompression: {}, + }, + security: {}, + }); + }); + + it('should correctly migrate a complex legacy (v1) settings file', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const legacySettingsContent = { + theme: 'legacy-dark', + vimMode: true, + contextFileName: 'LEGACY_CONTEXT.md', + model: 'gemini-pro', + mcpServers: { + 'legacy-server-1': { + command: 'npm', + args: ['run', 'start:server1'], + description: 'Legacy Server 1', + }, + 'legacy-server-2': { + command: 'node', + args: ['server2.js'], + 
description: 'Legacy Server 2', + }, + }, + allowMCPServers: ['legacy-server-1'], + someUnrecognizedSetting: 'should-be-preserved', + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(legacySettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.merged).toEqual({ + ui: { + theme: 'legacy-dark', + customThemes: {}, + }, + general: { + vimMode: true, + }, + context: { + fileName: 'LEGACY_CONTEXT.md', + includeDirectories: [], + }, + model: { + name: 'gemini-pro', + chatCompression: {}, + }, + mcpServers: { + 'legacy-server-1': { + command: 'npm', + args: ['run', 'start:server1'], + description: 'Legacy Server 1', + }, + 'legacy-server-2': { + command: 'node', + args: ['server2.js'], + description: 'Legacy Server 2', + }, + }, + mcp: { + allowed: ['legacy-server-1'], + }, + someUnrecognizedSetting: 'should-be-preserved', + advanced: { + excludedEnvVars: [], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + security: {}, + }); + }); + + it('should rewrite allowedTools to tools.allowed during migration', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const legacySettingsContent = { + allowedTools: ['fs', 'shell'], + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(legacySettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.merged.tools?.allowed).toEqual(['fs', 'shell']); + expect((settings.merged as TestSettings)['allowedTools']).toBeUndefined(); + }); + + it('should correctly merge and migrate legacy array properties from multiple scopes', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const legacyUserSettings = { + includeDirectories: ['/user/dir'], + 
excludeTools: ['user-tool'], + excludedProjectEnvVars: ['USER_VAR'], + }; + const legacyWorkspaceSettings = { + includeDirectories: ['/workspace/dir'], + excludeTools: ['workspace-tool'], + excludedProjectEnvVars: ['WORKSPACE_VAR', 'USER_VAR'], + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(legacyUserSettings); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(legacyWorkspaceSettings); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + // Verify includeDirectories are concatenated + expect(settings.merged.context?.includeDirectories).toEqual([ + '/user/dir', + '/workspace/dir', + ]); + + // Verify excludeTools are overwritten by workspace + expect(settings.merged.tools?.exclude).toEqual(['workspace-tool']); + + // Verify excludedProjectEnvVars are concatenated and de-duped + expect(settings.merged.advanced?.excludedEnvVars).toEqual( + expect.arrayContaining(['USER_VAR', 'WORKSPACE_VAR']), + ); + expect(settings.merged.advanced?.excludedEnvVars).toHaveLength(2); + }); + + it('should merge all settings files with the correct precedence', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const systemDefaultsContent = { + ui: { + theme: 'default-theme', + }, + tools: { + sandbox: true, + }, + telemetry: true, + context: { + includeDirectories: ['/system/defaults/dir'], + }, + }; + const userSettingsContent = { + ui: { + theme: 'user-theme', + }, + context: { + fileName: 'USER_CONTEXT.md', + includeDirectories: ['/user/dir1', '/user/dir2'], + }, + }; + const workspaceSettingsContent = { + tools: { + sandbox: false, + }, + context: { + fileName: 'WORKSPACE_CONTEXT.md', + includeDirectories: ['/workspace/dir'], + }, + }; + const systemSettingsContent = { + ui: { + theme: 'system-theme', + }, + telemetry: false, + context: { + includeDirectories: ['/system/dir'], + }, + }; + + (fs.readFileSync as 
Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === getSystemDefaultsPath()) + return JSON.stringify(systemDefaultsContent); + if (p === getSystemSettingsPath()) + return JSON.stringify(systemSettingsContent); + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.systemDefaults.settings).toEqual(systemDefaultsContent); + expect(settings.system.settings).toEqual(systemSettingsContent); + expect(settings.user.settings).toEqual(userSettingsContent); + expect(settings.workspace.settings).toEqual(workspaceSettingsContent); + expect(settings.merged).toEqual({ + advanced: { + excludedEnvVars: [], + }, + context: { + fileName: 'WORKSPACE_CONTEXT.md', + includeDirectories: [ + '/system/defaults/dir', + '/user/dir1', + '/user/dir2', + '/workspace/dir', + '/system/dir', + ], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + mcp: {}, + mcpServers: {}, + model: { + chatCompression: {}, + }, + security: {}, + telemetry: false, + tools: { + sandbox: false, + }, + general: {}, + ui: { + customThemes: {}, + theme: 'system-theme', + }, + }); + }); + + it('should ignore folderTrust from workspace settings', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + security: { + folderTrust: { + enabled: true, + }, + }, + }; + const workspaceSettingsContent = { + security: { + folderTrust: { + enabled: false, // This should be ignored + }, + }, + }; + const systemSettingsContent = { + // No folderTrust here + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === getSystemSettingsPath()) + return JSON.stringify(systemSettingsContent); + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === 
MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.security?.folderTrust?.enabled).toBe(true); // User setting should be used + }); + + it('should use system folderTrust over user setting', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + security: { + folderTrust: { + enabled: false, + }, + }, + }; + const workspaceSettingsContent = { + security: { + folderTrust: { + enabled: true, // This should be ignored + }, + }, + }; + const systemSettingsContent = { + security: { + folderTrust: { + enabled: true, + }, + }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === getSystemSettingsPath()) + return JSON.stringify(systemSettingsContent); + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.security?.folderTrust?.enabled).toBe(true); // System setting should be used + }); + + it('should handle contextFileName correctly when only in user settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const userSettingsContent = { context: { fileName: 'CUSTOM.md' } }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.context?.fileName).toBe('CUSTOM.md'); + }); + + it('should handle contextFileName correctly when only in workspace settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH, + 
); + const workspaceSettingsContent = { + context: { fileName: 'PROJECT_SPECIFIC.md' }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.context?.fileName).toBe('PROJECT_SPECIFIC.md'); + }); + + it('should handle excludedProjectEnvVars correctly when only in user settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const userSettingsContent = { + general: {}, + advanced: { excludedEnvVars: ['DEBUG', 'NODE_ENV', 'CUSTOM_VAR'] }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.advanced?.excludedEnvVars).toEqual([ + 'DEBUG', + 'NODE_ENV', + 'CUSTOM_VAR', + ]); + }); + + it('should handle excludedProjectEnvVars correctly when only in workspace settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH, + ); + const workspaceSettingsContent = { + general: {}, + advanced: { excludedEnvVars: ['WORKSPACE_DEBUG', 'WORKSPACE_VAR'] }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.advanced?.excludedEnvVars).toEqual([ + 'WORKSPACE_DEBUG', + 'WORKSPACE_VAR', + ]); + }); + + it('should merge excludedProjectEnvVars with workspace taking precedence over user', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + general: {}, 
+ advanced: { excludedEnvVars: ['DEBUG', 'NODE_ENV', 'USER_VAR'] }, + }; + const workspaceSettingsContent = { + general: {}, + advanced: { excludedEnvVars: ['WORKSPACE_DEBUG', 'WORKSPACE_VAR'] }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.user.settings.advanced?.excludedEnvVars).toEqual([ + 'DEBUG', + 'NODE_ENV', + 'USER_VAR', + ]); + expect(settings.workspace.settings.advanced?.excludedEnvVars).toEqual([ + 'WORKSPACE_DEBUG', + 'WORKSPACE_VAR', + ]); + expect(settings.merged.advanced?.excludedEnvVars).toEqual([ + 'DEBUG', + 'NODE_ENV', + 'USER_VAR', + 'WORKSPACE_DEBUG', + 'WORKSPACE_VAR', + ]); + }); + + it('should default contextFileName to undefined if not in any settings file', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { ui: { theme: 'dark' } }; + const workspaceSettingsContent = { tools: { sandbox: true } }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.context?.fileName).toBeUndefined(); + }); + + it('should load telemetry setting from user settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const userSettingsContent = { telemetry: true }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + 
const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.telemetry).toBe(true); + }); + + it('should load telemetry setting from workspace settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH, + ); + const workspaceSettingsContent = { telemetry: false }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.telemetry).toBe(false); + }); + + it('should prioritize workspace telemetry setting over user setting', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { telemetry: true }; + const workspaceSettingsContent = { telemetry: false }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.telemetry).toBe(false); + }); + + it('should have telemetry as undefined if not in any settings file', () => { + (mockFsExistsSync as Mock).mockReturnValue(false); // No settings files exist + (fs.readFileSync as Mock).mockReturnValue('{}'); + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.telemetry).toBeUndefined(); + expect(settings.merged.ui?.customThemes).toEqual({}); + expect(settings.merged.mcpServers).toEqual({}); + }); + + it('should merge MCP servers correctly, with workspace taking precedence', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + mcpServers: { + 'user-server': { + command: 'user-command', + args: ['--user-arg'], + description: 'User MCP 
server', + }, + 'shared-server': { + command: 'user-shared-command', + description: 'User shared server config', + }, + }, + }; + const workspaceSettingsContent = { + mcpServers: { + 'workspace-server': { + command: 'workspace-command', + args: ['--workspace-arg'], + description: 'Workspace MCP server', + }, + 'shared-server': { + command: 'workspace-shared-command', + description: 'Workspace shared server config', + }, + }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.user.settings).toEqual(userSettingsContent); + expect(settings.workspace.settings).toEqual(workspaceSettingsContent); + expect(settings.merged.mcpServers).toEqual({ + 'user-server': { + command: 'user-command', + args: ['--user-arg'], + description: 'User MCP server', + }, + 'workspace-server': { + command: 'workspace-command', + args: ['--workspace-arg'], + description: 'Workspace MCP server', + }, + 'shared-server': { + command: 'workspace-shared-command', + description: 'Workspace shared server config', + }, + }); + }); + + it('should handle MCP servers when only in user settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const userSettingsContent = { + mcpServers: { + 'user-only-server': { + command: 'user-only-command', + description: 'User only server', + }, + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.mcpServers).toEqual({ + 'user-only-server': { + command: 'user-only-command', + 
description: 'User only server', + }, + }); + }); + + it('should handle MCP servers when only in workspace settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH, + ); + const workspaceSettingsContent = { + mcpServers: { + 'workspace-only-server': { + command: 'workspace-only-command', + description: 'Workspace only server', + }, + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.mcpServers).toEqual({ + 'workspace-only-server': { + command: 'workspace-only-command', + description: 'Workspace only server', + }, + }); + }); + + it('should have mcpServers as empty object if not in any settings file', () => { + (mockFsExistsSync as Mock).mockReturnValue(false); // No settings files exist + (fs.readFileSync as Mock).mockReturnValue('{}'); + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.mcpServers).toEqual({}); + }); + + it('should merge MCP servers from system, user, and workspace with system taking precedence', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const systemSettingsContent = { + mcpServers: { + 'shared-server': { + command: 'system-command', + args: ['--system-arg'], + }, + 'system-only-server': { + command: 'system-only-command', + }, + }, + }; + const userSettingsContent = { + mcpServers: { + 'user-server': { + command: 'user-command', + }, + 'shared-server': { + command: 'user-command', + description: 'from user', + }, + }, + }; + const workspaceSettingsContent = { + mcpServers: { + 'workspace-server': { + command: 'workspace-command', + }, + 'shared-server': { + command: 'workspace-command', + args: ['--workspace-arg'], + }, + }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: 
fs.PathOrFileDescriptor) => { + if (p === getSystemSettingsPath()) + return JSON.stringify(systemSettingsContent); + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.merged.mcpServers).toEqual({ + 'user-server': { + command: 'user-command', + }, + 'workspace-server': { + command: 'workspace-command', + }, + 'system-only-server': { + command: 'system-only-command', + }, + 'shared-server': { + command: 'system-command', + args: ['--system-arg'], + }, + }); + }); + + it('should merge mcp allowed/excluded lists with system taking precedence over workspace', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const systemSettingsContent = { + mcp: { + allowed: ['system-allowed'], + }, + }; + const userSettingsContent = { + mcp: { + allowed: ['user-allowed'], + excluded: ['user-excluded'], + }, + }; + const workspaceSettingsContent = { + mcp: { + allowed: ['workspace-allowed'], + excluded: ['workspace-excluded'], + }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === getSystemSettingsPath()) + return JSON.stringify(systemSettingsContent); + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.merged.mcp).toEqual({ + allowed: ['system-allowed'], + excluded: ['workspace-excluded'], + }); + }); + + it('should merge chatCompression settings, with workspace taking precedence', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + general: {}, + model: { chatCompression: { contextPercentageThreshold: 0.5 } }, + }; + const workspaceSettingsContent = 
{ + general: {}, + model: { chatCompression: { contextPercentageThreshold: 0.8 } }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + const e = settings.user.settings.model?.chatCompression; + console.log(e); + + expect(settings.user.settings.model?.chatCompression).toEqual({ + contextPercentageThreshold: 0.5, + }); + expect(settings.workspace.settings.model?.chatCompression).toEqual({ + contextPercentageThreshold: 0.8, + }); + expect(settings.merged.model?.chatCompression).toEqual({ + contextPercentageThreshold: 0.8, + }); + }); + + it('should handle chatCompression when only in user settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const userSettingsContent = { + general: {}, + model: { chatCompression: { contextPercentageThreshold: 0.5 } }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.model?.chatCompression).toEqual({ + contextPercentageThreshold: 0.5, + }); + }); + + it('should have chatCompression as an empty object if not in any settings file', () => { + (mockFsExistsSync as Mock).mockReturnValue(false); // No settings files exist + (fs.readFileSync as Mock).mockReturnValue('{}'); + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.model?.chatCompression).toEqual({}); + }); + + it('should ignore chatCompression if contextPercentageThreshold is invalid', () => { + const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}); + 
(mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const userSettingsContent = { + general: {}, + model: { chatCompression: { contextPercentageThreshold: 1.5 } }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.model?.chatCompression).toEqual({ + contextPercentageThreshold: 1.5, + }); + warnSpy.mockRestore(); + }); + + it('should deep merge chatCompression settings', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + general: {}, + model: { chatCompression: { contextPercentageThreshold: 0.5 } }, + }; + const workspaceSettingsContent = { + general: {}, + model: { chatCompression: {} }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.merged.model?.chatCompression).toEqual({ + contextPercentageThreshold: 0.5, + }); + }); + + it('should merge includeDirectories from all scopes', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const systemSettingsContent = { + context: { includeDirectories: ['/system/dir'] }, + }; + const systemDefaultsContent = { + context: { includeDirectories: ['/system/defaults/dir'] }, + }; + const userSettingsContent = { + context: { includeDirectories: ['/user/dir1', '/user/dir2'] }, + }; + const workspaceSettingsContent = { + context: { includeDirectories: ['/workspace/dir'] }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === getSystemSettingsPath()) + return 
JSON.stringify(systemSettingsContent); + if (p === getSystemDefaultsPath()) + return JSON.stringify(systemDefaultsContent); + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.merged.context?.includeDirectories).toEqual([ + '/system/defaults/dir', + '/user/dir1', + '/user/dir2', + '/workspace/dir', + '/system/dir', + ]); + }); + + it('should handle JSON parsing errors gracefully', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); // Both files "exist" + const invalidJsonContent = 'invalid json'; + const userReadError = new SyntaxError( + "Expected ',' or '}' after property value in JSON at position 10", + ); + const workspaceReadError = new SyntaxError( + 'Unexpected token i in JSON at position 0', + ); + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) { + // Simulate JSON.parse throwing for user settings + vi.spyOn(JSON, 'parse').mockImplementationOnce(() => { + throw userReadError; + }); + return invalidJsonContent; // Content that would cause JSON.parse to throw + } + if (p === MOCK_WORKSPACE_SETTINGS_PATH) { + // Simulate JSON.parse throwing for workspace settings + vi.spyOn(JSON, 'parse').mockImplementationOnce(() => { + throw workspaceReadError; + }); + return invalidJsonContent; + } + return '{}'; // Default for other reads + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + // Check that settings are empty due to parsing errors + expect(settings.user.settings).toEqual({}); + expect(settings.workspace.settings).toEqual({}); + expect(settings.merged).toEqual({ + general: {}, + ui: { + customThemes: {}, + }, + mcp: {}, + mcpServers: {}, + context: { + includeDirectories: [], + }, + model: { + chatCompression: {}, + }, + advanced: { + excludedEnvVars: 
[], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + security: {}, + }); + + // Check that error objects are populated in settings.errors + expect(settings.errors).toBeDefined(); + // Assuming both user and workspace files cause errors and are added in order + expect(settings.errors.length).toEqual(2); + + const userError = settings.errors.find( + (e) => e.path === USER_SETTINGS_PATH, + ); + expect(userError).toBeDefined(); + expect(userError?.message).toBe(userReadError.message); + + const workspaceError = settings.errors.find( + (e) => e.path === MOCK_WORKSPACE_SETTINGS_PATH, + ); + expect(workspaceError).toBeDefined(); + expect(workspaceError?.message).toBe(workspaceReadError.message); + + // Restore JSON.parse mock if it was spied on specifically for this test + vi.restoreAllMocks(); // Or more targeted restore if needed + }); + + it('should resolve environment variables in user settings', () => { + process.env['TEST_API_KEY'] = 'user_api_key_from_env'; + const userSettingsContent: TestSettings = { + apiKey: '$TEST_API_KEY', + someUrl: 'https://test.com/${TEST_API_KEY}', + }; + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect((settings.user.settings as TestSettings)['apiKey']).toBe( + 'user_api_key_from_env', + ); + expect((settings.user.settings as TestSettings)['someUrl']).toBe( + 'https://test.com/user_api_key_from_env', + ); + expect((settings.merged as TestSettings)['apiKey']).toBe( + 'user_api_key_from_env', + ); + delete process.env['TEST_API_KEY']; + }); + + it('should resolve environment variables in workspace settings', () => { + process.env['WORKSPACE_ENDPOINT'] = 'workspace_endpoint_from_env'; + const 
workspaceSettingsContent: TestSettings = { + endpoint: '${WORKSPACE_ENDPOINT}/api', + nested: { value: '$WORKSPACE_ENDPOINT' }, + }; + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH, + ); + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect((settings.workspace.settings as TestSettings)['endpoint']).toBe( + 'workspace_endpoint_from_env/api', + ); + expect( + (settings.workspace.settings as TestSettings)['nested']['value'], + ).toBe('workspace_endpoint_from_env'); + expect((settings.merged as TestSettings)['endpoint']).toBe( + 'workspace_endpoint_from_env/api', + ); + delete process.env['WORKSPACE_ENDPOINT']; + }); + + it('should correctly resolve and merge env variables from different scopes', () => { + process.env['SYSTEM_VAR'] = 'system_value'; + process.env['USER_VAR'] = 'user_value'; + process.env['WORKSPACE_VAR'] = 'workspace_value'; + process.env['SHARED_VAR'] = 'final_value'; + + const systemSettingsContent: TestSettings = { + configValue: '$SHARED_VAR', + systemOnly: '$SYSTEM_VAR', + }; + const userSettingsContent: TestSettings = { + configValue: '$SHARED_VAR', + userOnly: '$USER_VAR', + ui: { + theme: 'dark', + }, + }; + const workspaceSettingsContent: TestSettings = { + configValue: '$SHARED_VAR', + workspaceOnly: '$WORKSPACE_VAR', + ui: { + theme: 'light', + }, + }; + + (mockFsExistsSync as Mock).mockReturnValue(true); + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === getSystemSettingsPath()) { + return JSON.stringify(systemSettingsContent); + } + if (p === USER_SETTINGS_PATH) { + return JSON.stringify(userSettingsContent); + } + if (p === MOCK_WORKSPACE_SETTINGS_PATH) { + return JSON.stringify(workspaceSettingsContent); + } + return '{}'; + }, 
+ ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + // Check resolved values in individual scopes + expect((settings.system.settings as TestSettings)['configValue']).toBe( + 'final_value', + ); + expect((settings.system.settings as TestSettings)['systemOnly']).toBe( + 'system_value', + ); + expect((settings.user.settings as TestSettings)['configValue']).toBe( + 'final_value', + ); + expect((settings.user.settings as TestSettings)['userOnly']).toBe( + 'user_value', + ); + expect((settings.workspace.settings as TestSettings)['configValue']).toBe( + 'final_value', + ); + expect( + (settings.workspace.settings as TestSettings)['workspaceOnly'], + ).toBe('workspace_value'); + + // Check merged values (system > workspace > user) + expect((settings.merged as TestSettings)['configValue']).toBe( + 'final_value', + ); + expect((settings.merged as TestSettings)['systemOnly']).toBe( + 'system_value', + ); + expect((settings.merged as TestSettings)['userOnly']).toBe('user_value'); + expect((settings.merged as TestSettings)['workspaceOnly']).toBe( + 'workspace_value', + ); + expect(settings.merged.ui?.theme).toBe('light'); // workspace overrides user + + delete process.env['SYSTEM_VAR']; + delete process.env['USER_VAR']; + delete process.env['WORKSPACE_VAR']; + delete process.env['SHARED_VAR']; + }); + + it('should correctly merge dnsResolutionOrder with workspace taking precedence', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + advanced: { dnsResolutionOrder: 'ipv4first' }, + }; + const workspaceSettingsContent = { + advanced: { dnsResolutionOrder: 'verbatim' }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + 
expect(settings.merged.advanced?.dnsResolutionOrder).toBe('verbatim'); + }); + + it('should use user dnsResolutionOrder if workspace is not defined', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const userSettingsContent = { + advanced: { dnsResolutionOrder: 'verbatim' }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.advanced?.dnsResolutionOrder).toBe('verbatim'); + }); + + it('should leave unresolved environment variables as is', () => { + const userSettingsContent: TestSettings = { apiKey: '$UNDEFINED_VAR' }; + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect((settings.user.settings as TestSettings)['apiKey']).toBe( + '$UNDEFINED_VAR', + ); + expect((settings.merged as TestSettings)['apiKey']).toBe( + '$UNDEFINED_VAR', + ); + }); + + it('should resolve multiple environment variables in a single string', () => { + process.env['VAR_A'] = 'valueA'; + process.env['VAR_B'] = 'valueB'; + const userSettingsContent: TestSettings = { + path: '/path/$VAR_A/${VAR_B}/end', + }; + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect((settings.user.settings as TestSettings)['path']).toBe( + 
'/path/valueA/valueB/end', + ); + delete process.env['VAR_A']; + delete process.env['VAR_B']; + }); + + it('should resolve environment variables in arrays', () => { + process.env['ITEM_1'] = 'item1_env'; + process.env['ITEM_2'] = 'item2_env'; + const userSettingsContent: TestSettings = { + list: ['$ITEM_1', '${ITEM_2}', 'literal'], + }; + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect((settings.user.settings as TestSettings)['list']).toEqual([ + 'item1_env', + 'item2_env', + 'literal', + ]); + delete process.env['ITEM_1']; + delete process.env['ITEM_2']; + }); + + it('should correctly pass through null, boolean, and number types, and handle undefined properties', () => { + process.env['MY_ENV_STRING'] = 'env_string_value'; + process.env['MY_ENV_STRING_NESTED'] = 'env_string_nested_value'; + + const userSettingsContent: TestSettings = { + nullVal: null, + trueVal: true, + falseVal: false, + numberVal: 123.45, + stringVal: '$MY_ENV_STRING', + nestedObj: { + nestedNull: null, + nestedBool: true, + nestedNum: 0, + nestedString: 'literal', + anotherEnv: '${MY_ENV_STRING_NESTED}', + }, + }; + + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect((settings.user.settings as TestSettings)['nullVal']).toBeNull(); + expect((settings.user.settings as TestSettings)['trueVal']).toBe(true); + expect((settings.user.settings as TestSettings)['falseVal']).toBe(false); + 
expect((settings.user.settings as TestSettings)['numberVal']).toBe( + 123.45, + ); + expect((settings.user.settings as TestSettings)['stringVal']).toBe( + 'env_string_value', + ); + expect( + (settings.user.settings as TestSettings)['undefinedVal'], + ).toBeUndefined(); + + expect( + (settings.user.settings as TestSettings)['nestedObj']['nestedNull'], + ).toBeNull(); + expect( + (settings.user.settings as TestSettings)['nestedObj']['nestedBool'], + ).toBe(true); + expect( + (settings.user.settings as TestSettings)['nestedObj']['nestedNum'], + ).toBe(0); + expect( + (settings.user.settings as TestSettings)['nestedObj']['nestedString'], + ).toBe('literal'); + expect( + (settings.user.settings as TestSettings)['nestedObj']['anotherEnv'], + ).toBe('env_string_nested_value'); + + delete process.env['MY_ENV_STRING']; + delete process.env['MY_ENV_STRING_NESTED']; + }); + + it('should resolve multiple concatenated environment variables in a single string value', () => { + process.env['TEST_HOST'] = 'myhost'; + process.env['TEST_PORT'] = '9090'; + const userSettingsContent: TestSettings = { + serverAddress: '${TEST_HOST}:${TEST_PORT}/api', + }; + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect((settings.user.settings as TestSettings)['serverAddress']).toBe( + 'myhost:9090/api', + ); + + delete process.env['TEST_HOST']; + delete process.env['TEST_PORT']; + }); + + describe('when GEMINI_CLI_SYSTEM_SETTINGS_PATH is set', () => { + const MOCK_ENV_SYSTEM_SETTINGS_PATH = '/mock/env/system/settings.json'; + + beforeEach(() => { + process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH'] = + MOCK_ENV_SYSTEM_SETTINGS_PATH; + }); + + afterEach(() => { + delete 
process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH']; + }); + + it('should load system settings from the path specified in the environment variable', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_ENV_SYSTEM_SETTINGS_PATH, + ); + const systemSettingsContent = { + ui: { theme: 'env-var-theme' }, + tools: { sandbox: true }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_ENV_SYSTEM_SETTINGS_PATH) + return JSON.stringify(systemSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(fs.readFileSync).toHaveBeenCalledWith( + MOCK_ENV_SYSTEM_SETTINGS_PATH, + 'utf-8', + ); + expect(settings.system.path).toBe(MOCK_ENV_SYSTEM_SETTINGS_PATH); + expect(settings.system.settings).toEqual(systemSettingsContent); + expect(settings.merged).toEqual({ + ...systemSettingsContent, + general: {}, + ui: { + ...systemSettingsContent.ui, + customThemes: {}, + }, + mcp: {}, + mcpServers: {}, + context: { + includeDirectories: [], + }, + model: { + chatCompression: {}, + }, + advanced: { + excludedEnvVars: [], + }, + extensions: { + disabled: [], + workspacesWithMigrationNudge: [], + }, + security: {}, + }); + }); + }); + }); + + describe('excludedProjectEnvVars integration', () => { + const originalEnv = { ...process.env }; + + beforeEach(() => { + process.env = { ...originalEnv }; + }); + + afterEach(() => { + process.env = originalEnv; + }); + + it('should exclude DEBUG and DEBUG_MODE from project .env files by default', () => { + // Create a workspace settings file with excludedProjectEnvVars + const workspaceSettingsContent = { + general: {}, + advanced: { excludedEnvVars: ['DEBUG', 'DEBUG_MODE'] }, + }; + + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH, + ); + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_WORKSPACE_SETTINGS_PATH) 
+ return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + // Mock findEnvFile to return a project .env file + const originalFindEnvFile = ( + loadSettings as unknown as { findEnvFile: () => string } + ).findEnvFile; + (loadSettings as unknown as { findEnvFile: () => string }).findEnvFile = + () => '/mock/project/.env'; + + // Mock fs.readFileSync for .env file content + const originalReadFileSync = fs.readFileSync; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === '/mock/project/.env') { + return 'DEBUG=true\nDEBUG_MODE=1\nGEMINI_API_KEY=test-key'; + } + if (p === MOCK_WORKSPACE_SETTINGS_PATH) { + return JSON.stringify(workspaceSettingsContent); + } + return '{}'; + }, + ); + + try { + // This will call loadEnvironment internally with the merged settings + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + // Verify the settings were loaded correctly + expect(settings.merged.advanced?.excludedEnvVars).toEqual([ + 'DEBUG', + 'DEBUG_MODE', + ]); + + // Note: We can't directly test process.env changes here because the mocking + // prevents the actual file system operations, but we can verify the settings + // are correctly merged and passed to loadEnvironment + } finally { + (loadSettings as unknown as { findEnvFile: () => string }).findEnvFile = + originalFindEnvFile; + (fs.readFileSync as Mock).mockImplementation(originalReadFileSync); + } + }); + + it('should respect custom excludedProjectEnvVars from user settings', () => { + const userSettingsContent = { + general: {}, + advanced: { excludedEnvVars: ['NODE_ENV', 'DEBUG'] }, + }; + + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + 
expect(settings.user.settings.advanced?.excludedEnvVars).toEqual([ + 'NODE_ENV', + 'DEBUG', + ]); + expect(settings.merged.advanced?.excludedEnvVars).toEqual([ + 'NODE_ENV', + 'DEBUG', + ]); + }); + + it('should merge excludedProjectEnvVars with workspace taking precedence', () => { + const userSettingsContent = { + general: {}, + advanced: { excludedEnvVars: ['DEBUG', 'NODE_ENV', 'USER_VAR'] }, + }; + const workspaceSettingsContent = { + general: {}, + advanced: { excludedEnvVars: ['WORKSPACE_DEBUG', 'WORKSPACE_VAR'] }, + }; + + (mockFsExistsSync as Mock).mockReturnValue(true); + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.user.settings.advanced?.excludedEnvVars).toEqual([ + 'DEBUG', + 'NODE_ENV', + 'USER_VAR', + ]); + expect(settings.workspace.settings.advanced?.excludedEnvVars).toEqual([ + 'WORKSPACE_DEBUG', + 'WORKSPACE_VAR', + ]); + expect(settings.merged.advanced?.excludedEnvVars).toEqual([ + 'DEBUG', + 'NODE_ENV', + 'USER_VAR', + 'WORKSPACE_DEBUG', + 'WORKSPACE_VAR', + ]); + }); + }); + + describe('with workspace trust', () => { + it('should merge workspace settings when workspace is trusted', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + ui: { theme: 'dark' }, + tools: { sandbox: false }, + }; + const workspaceSettingsContent = { + tools: { sandbox: true }, + context: { fileName: 'WORKSPACE.md' }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = 
loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.tools?.sandbox).toBe(true); + expect(settings.merged.context?.fileName).toBe('WORKSPACE.md'); + expect(settings.merged.ui?.theme).toBe('dark'); + }); + + it('should NOT merge workspace settings when workspace is not trusted', () => { + vi.mocked(isWorkspaceTrusted).mockReturnValue(false); + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + ui: { theme: 'dark' }, + tools: { sandbox: false }, + context: { fileName: 'USER.md' }, + }; + const workspaceSettingsContent = { + tools: { sandbox: true }, + context: { fileName: 'WORKSPACE.md' }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.merged.tools?.sandbox).toBe(false); // User setting + expect(settings.merged.context?.fileName).toBe('USER.md'); // User setting + expect(settings.merged.ui?.theme).toBe('dark'); // User setting + }); + }); + + describe('migrateSettingsToV1', () => { + it('should handle an empty object', () => { + const v2Settings = {}; + const v1Settings = migrateSettingsToV1(v2Settings); + expect(v1Settings).toEqual({}); + }); + + it('should migrate a simple v2 settings object to v1', () => { + const v2Settings = { + general: { + preferredEditor: 'vscode', + vimMode: true, + }, + ui: { + theme: 'dark', + }, + }; + const v1Settings = migrateSettingsToV1(v2Settings); + expect(v1Settings).toEqual({ + preferredEditor: 'vscode', + vimMode: true, + theme: 'dark', + }); + }); + + it('should handle nested properties correctly', () => { + const v2Settings = { + security: { + folderTrust: { + enabled: true, + }, + auth: { + selectedType: 'oauth', + }, + }, + advanced: { + autoConfigureMemory: true, + }, + }; + 
const v1Settings = migrateSettingsToV1(v2Settings); + expect(v1Settings).toEqual({ + folderTrust: true, + selectedAuthType: 'oauth', + autoConfigureMaxOldSpaceSize: true, + }); + }); + + it('should preserve mcpServers at the top level', () => { + const v2Settings = { + general: { + preferredEditor: 'vscode', + }, + mcpServers: { + 'my-server': { + command: 'npm start', + }, + }, + }; + const v1Settings = migrateSettingsToV1(v2Settings); + expect(v1Settings).toEqual({ + preferredEditor: 'vscode', + mcpServers: { + 'my-server': { + command: 'npm start', + }, + }, + }); + }); + + it('should carry over unrecognized top-level properties', () => { + const v2Settings = { + general: { + vimMode: false, + }, + unrecognized: 'value', + another: { + nested: true, + }, + }; + const v1Settings = migrateSettingsToV1(v2Settings); + expect(v1Settings).toEqual({ + vimMode: false, + unrecognized: 'value', + another: { + nested: true, + }, + }); + }); + + it('should handle a complex object with mixed properties', () => { + const v2Settings = { + general: { + disableAutoUpdate: true, + }, + ui: { + hideBanner: true, + customThemes: { + myTheme: {}, + }, + }, + model: { + name: 'gemini-pro', + chatCompression: { + contextPercentageThreshold: 0.5, + }, + }, + mcpServers: { + 'server-1': { + command: 'node server.js', + }, + }, + unrecognized: { + should: 'be-preserved', + }, + }; + const v1Settings = migrateSettingsToV1(v2Settings); + expect(v1Settings).toEqual({ + disableAutoUpdate: true, + hideBanner: true, + customThemes: { + myTheme: {}, + }, + model: 'gemini-pro', + chatCompression: { + contextPercentageThreshold: 0.5, + }, + mcpServers: { + 'server-1': { + command: 'node server.js', + }, + }, + unrecognized: { + should: 'be-preserved', + }, + }); + }); + + it('should not migrate a v1 settings object', () => { + const v1Settings = { + preferredEditor: 'vscode', + vimMode: true, + theme: 'dark', + }; + const migratedSettings = migrateSettingsToV1(v1Settings); + 
expect(migratedSettings).toEqual({ + preferredEditor: 'vscode', + vimMode: true, + theme: 'dark', + }); + }); + + it('should migrate a full v2 settings object to v1', () => { + const v2Settings: TestSettings = { + general: { + preferredEditor: 'code', + vimMode: true, + }, + ui: { + theme: 'dark', + }, + privacy: { + usageStatisticsEnabled: false, + }, + model: { + name: 'gemini-pro', + chatCompression: { + contextPercentageThreshold: 0.8, + }, + }, + context: { + fileName: 'CONTEXT.md', + includeDirectories: ['/src'], + }, + tools: { + sandbox: true, + exclude: ['toolA'], + }, + mcp: { + allowed: ['server1'], + }, + security: { + folderTrust: { + enabled: true, + }, + }, + advanced: { + dnsResolutionOrder: 'ipv4first', + excludedEnvVars: ['SECRET'], + }, + mcpServers: { + 'my-server': { + command: 'npm start', + }, + }, + unrecognizedTopLevel: { + value: 'should be preserved', + }, + }; + + const v1Settings = migrateSettingsToV1(v2Settings); + + expect(v1Settings).toEqual({ + preferredEditor: 'code', + vimMode: true, + theme: 'dark', + usageStatisticsEnabled: false, + model: 'gemini-pro', + chatCompression: { + contextPercentageThreshold: 0.8, + }, + contextFileName: 'CONTEXT.md', + includeDirectories: ['/src'], + sandbox: true, + excludeTools: ['toolA'], + allowMCPServers: ['server1'], + folderTrust: true, + dnsResolutionOrder: 'ipv4first', + excludedProjectEnvVars: ['SECRET'], + mcpServers: { + 'my-server': { + command: 'npm start', + }, + }, + unrecognizedTopLevel: { + value: 'should be preserved', + }, + }); + }); + + it('should handle partial v2 settings', () => { + const v2Settings: TestSettings = { + general: { + vimMode: false, + }, + ui: {}, + model: { + name: 'gemini-1.5-pro', + }, + unrecognized: 'value', + }; + + const v1Settings = migrateSettingsToV1(v2Settings); + + expect(v1Settings).toEqual({ + vimMode: false, + model: 'gemini-1.5-pro', + unrecognized: 'value', + }); + }); + + it('should handle settings with different data types', () => { + const 
v2Settings: TestSettings = { + general: { + vimMode: false, + }, + model: { + maxSessionTurns: 0, + }, + context: { + includeDirectories: [], + }, + security: { + folderTrust: { + enabled: null, + }, + }, + }; + + const v1Settings = migrateSettingsToV1(v2Settings); + + expect(v1Settings).toEqual({ + vimMode: false, + maxSessionTurns: 0, + includeDirectories: [], + folderTrust: null, + }); + }); + + it('should preserve unrecognized top-level keys', () => { + const v2Settings: TestSettings = { + general: { + vimMode: true, + }, + customTopLevel: { + a: 1, + b: [2], + }, + anotherOne: 'hello', + }; + + const v1Settings = migrateSettingsToV1(v2Settings); + + expect(v1Settings).toEqual({ + vimMode: true, + customTopLevel: { + a: 1, + b: [2], + }, + anotherOne: 'hello', + }); + }); + + it('should handle an empty v2 settings object', () => { + const v2Settings = {}; + const v1Settings = migrateSettingsToV1(v2Settings); + expect(v1Settings).toEqual({}); + }); + + it('should correctly handle mcpServers at the top level', () => { + const v2Settings: TestSettings = { + mcpServers: { + serverA: { command: 'a' }, + }, + mcp: { + allowed: ['serverA'], + }, + }; + + const v1Settings = migrateSettingsToV1(v2Settings); + + expect(v1Settings).toEqual({ + mcpServers: { + serverA: { command: 'a' }, + }, + allowMCPServers: ['serverA'], + }); + }); + }); + + describe('loadEnvironment', () => { + function setup({ + isFolderTrustEnabled = true, + isWorkspaceTrustedValue = true, + }) { + delete process.env['TESTTEST']; // reset + const geminiEnvPath = path.resolve(path.join(GEMINI_DIR, '.env')); + + vi.mocked(isWorkspaceTrusted).mockReturnValue(isWorkspaceTrustedValue); + (mockFsExistsSync as Mock).mockImplementation((p: fs.PathLike) => + [USER_SETTINGS_PATH, geminiEnvPath].includes(p.toString()), + ); + const userSettingsContent: Settings = { + ui: { + theme: 'dark', + }, + security: { + folderTrust: { + enabled: isFolderTrustEnabled, + }, + }, + context: { + fileName: 'USER_CONTEXT.md', 
+ }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === geminiEnvPath) return 'TESTTEST=1234'; + return '{}'; + }, + ); + } + + it('sets environment variables from .env files', () => { + setup({ isFolderTrustEnabled: false, isWorkspaceTrustedValue: true }); + loadEnvironment(loadSettings(MOCK_WORKSPACE_DIR).merged); + + expect(process.env['TESTTEST']).toEqual('1234'); + }); + + it('does not load env files from untrusted spaces', () => { + setup({ isFolderTrustEnabled: true, isWorkspaceTrustedValue: false }); + loadEnvironment(loadSettings(MOCK_WORKSPACE_DIR).merged); + + expect(process.env['TESTTEST']).not.toEqual('1234'); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/config/settings.ts b/projects/gemini-cli/packages/cli/src/config/settings.ts new file mode 100644 index 0000000000000000000000000000000000000000..205cfc6c0adcdd3ca79ebdbf618498440fcbe0e4 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/settings.ts @@ -0,0 +1,751 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { homedir, platform } from 'node:os'; +import * as dotenv from 'dotenv'; +import process from 'node:process'; +import { + GEMINI_CONFIG_DIR as GEMINI_DIR, + getErrorMessage, + Storage, +} from '@google/gemini-cli-core'; +import stripJsonComments from 'strip-json-comments'; +import { DefaultLight } from '../ui/themes/default-light.js'; +import { DefaultDark } from '../ui/themes/default.js'; +import { isWorkspaceTrusted } from './trustedFolders.js'; +import type { Settings, MemoryImportFormat } from './settingsSchema.js'; +import { resolveEnvVarsInObject } from '../utils/envVarResolver.js'; +import { mergeWith } from 'lodash-es'; + +export type { Settings, MemoryImportFormat }; + +export const 
SETTINGS_DIRECTORY_NAME = '.gemini'; + +export const USER_SETTINGS_PATH = Storage.getGlobalSettingsPath(); +export const USER_SETTINGS_DIR = path.dirname(USER_SETTINGS_PATH); +export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE']; + +const MIGRATE_V2_OVERWRITE = false; + +// As defined in spec.md +const MIGRATION_MAP: Record = { + preferredEditor: 'general.preferredEditor', + vimMode: 'general.vimMode', + disableAutoUpdate: 'general.disableAutoUpdate', + disableUpdateNag: 'general.disableUpdateNag', + checkpointing: 'general.checkpointing', + theme: 'ui.theme', + customThemes: 'ui.customThemes', + hideWindowTitle: 'ui.hideWindowTitle', + hideTips: 'ui.hideTips', + hideBanner: 'ui.hideBanner', + hideFooter: 'ui.hideFooter', + showMemoryUsage: 'ui.showMemoryUsage', + showLineNumbers: 'ui.showLineNumbers', + showCitations: 'ui.showCitations', + accessibility: 'ui.accessibility', + ideMode: 'ide.enabled', + hasSeenIdeIntegrationNudge: 'ide.hasSeenNudge', + usageStatisticsEnabled: 'privacy.usageStatisticsEnabled', + telemetry: 'telemetry', + model: 'model.name', + maxSessionTurns: 'model.maxSessionTurns', + summarizeToolOutput: 'model.summarizeToolOutput', + chatCompression: 'model.chatCompression', + skipNextSpeakerCheck: 'model.skipNextSpeakerCheck', + contextFileName: 'context.fileName', + memoryImportFormat: 'context.importFormat', + memoryDiscoveryMaxDirs: 'context.discoveryMaxDirs', + includeDirectories: 'context.includeDirectories', + loadMemoryFromIncludeDirectories: 'context.loadFromIncludeDirectories', + fileFiltering: 'context.fileFiltering', + sandbox: 'tools.sandbox', + shouldUseNodePtyShell: 'tools.usePty', + allowedTools: 'tools.allowed', + coreTools: 'tools.core', + excludeTools: 'tools.exclude', + toolDiscoveryCommand: 'tools.discoveryCommand', + toolCallCommand: 'tools.callCommand', + mcpServerCommand: 'mcp.serverCommand', + allowMCPServers: 'mcp.allowed', + excludeMCPServers: 'mcp.excluded', + folderTrustFeature: 
'security.folderTrust.featureEnabled', + folderTrust: 'security.folderTrust.enabled', + selectedAuthType: 'security.auth.selectedType', + useExternalAuth: 'security.auth.useExternal', + autoConfigureMaxOldSpaceSize: 'advanced.autoConfigureMemory', + dnsResolutionOrder: 'advanced.dnsResolutionOrder', + excludedProjectEnvVars: 'advanced.excludedEnvVars', + bugCommand: 'advanced.bugCommand', +}; + +export function getSystemSettingsPath(): string { + if (process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH']) { + return process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH']; + } + if (platform() === 'darwin') { + return '/Library/Application Support/GeminiCli/settings.json'; + } else if (platform() === 'win32') { + return 'C:\\ProgramData\\gemini-cli\\settings.json'; + } else { + return '/etc/gemini-cli/settings.json'; + } +} + +export function getSystemDefaultsPath(): string { + if (process.env['GEMINI_CLI_SYSTEM_DEFAULTS_PATH']) { + return process.env['GEMINI_CLI_SYSTEM_DEFAULTS_PATH']; + } + return path.join( + path.dirname(getSystemSettingsPath()), + 'system-defaults.json', + ); +} + +export type { DnsResolutionOrder } from './settingsSchema.js'; + +export enum SettingScope { + User = 'User', + Workspace = 'Workspace', + System = 'System', + SystemDefaults = 'SystemDefaults', +} + +export interface CheckpointingSettings { + enabled?: boolean; +} + +export interface SummarizeToolOutputSettings { + tokenBudget?: number; +} + +export interface AccessibilitySettings { + disableLoadingPhrases?: boolean; + screenReader?: boolean; +} + +export interface SettingsError { + message: string; + path: string; +} + +export interface SettingsFile { + settings: Settings; + path: string; +} + +function setNestedProperty( + obj: Record, + path: string, + value: unknown, +) { + const keys = path.split('.'); + const lastKey = keys.pop(); + if (!lastKey) return; + + let current: Record = obj; + for (const key of keys) { + if (current[key] === undefined) { + current[key] = {}; + } + const next = 
current[key]; + if (typeof next === 'object' && next !== null) { + current = next as Record; + } else { + // This path is invalid, so we stop. + return; + } + } + current[lastKey] = value; +} + +function needsMigration(settings: Record): boolean { + return !('general' in settings); +} + +function migrateSettingsToV2( + flatSettings: Record, +): Record | null { + if (!needsMigration(flatSettings)) { + return null; + } + + const v2Settings: Record = {}; + const flatKeys = new Set(Object.keys(flatSettings)); + + for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) { + if (flatKeys.has(oldKey)) { + setNestedProperty(v2Settings, newPath, flatSettings[oldKey]); + flatKeys.delete(oldKey); + } + } + + // Preserve mcpServers at the top level + if (flatSettings['mcpServers']) { + v2Settings['mcpServers'] = flatSettings['mcpServers']; + flatKeys.delete('mcpServers'); + } + + // Carry over any unrecognized keys + for (const remainingKey of flatKeys) { + v2Settings[remainingKey] = flatSettings[remainingKey]; + } + + return v2Settings; +} + +function getNestedProperty( + obj: Record, + path: string, +): unknown { + const keys = path.split('.'); + let current: unknown = obj; + for (const key of keys) { + if (typeof current !== 'object' || current === null || !(key in current)) { + return undefined; + } + current = (current as Record)[key]; + } + return current; +} + +const REVERSE_MIGRATION_MAP: Record = Object.fromEntries( + Object.entries(MIGRATION_MAP).map(([key, value]) => [value, key]), +); + +// Dynamically determine the top-level keys from the V2 settings structure. 
+const KNOWN_V2_CONTAINERS = new Set( + Object.values(MIGRATION_MAP).map((path) => path.split('.')[0]), +); + +export function migrateSettingsToV1( + v2Settings: Record, +): Record { + const v1Settings: Record = {}; + const v2Keys = new Set(Object.keys(v2Settings)); + + for (const [newPath, oldKey] of Object.entries(REVERSE_MIGRATION_MAP)) { + const value = getNestedProperty(v2Settings, newPath); + if (value !== undefined) { + v1Settings[oldKey] = value; + v2Keys.delete(newPath.split('.')[0]); + } + } + + // Preserve mcpServers at the top level + if (v2Settings['mcpServers']) { + v1Settings['mcpServers'] = v2Settings['mcpServers']; + v2Keys.delete('mcpServers'); + } + + // Carry over any unrecognized keys + for (const remainingKey of v2Keys) { + const value = v2Settings[remainingKey]; + if (value === undefined) { + continue; + } + + // Don't carry over empty objects that were just containers for migrated settings. + if ( + KNOWN_V2_CONTAINERS.has(remainingKey) && + typeof value === 'object' && + value !== null && + !Array.isArray(value) && + Object.keys(value).length === 0 + ) { + continue; + } + + v1Settings[remainingKey] = value; + } + + return v1Settings; +} + +function mergeSettings( + system: Settings, + systemDefaults: Settings, + user: Settings, + workspace: Settings, + isTrusted: boolean, +): Settings { + const safeWorkspace = isTrusted ? workspace : ({} as Settings); + + // folderTrust is not supported at workspace level. + const { security, ...restOfWorkspace } = safeWorkspace; + const safeWorkspaceWithoutFolderTrust = security + ? { + ...restOfWorkspace, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + security: (({ folderTrust, ...rest }) => rest)(security), + } + : { + ...restOfWorkspace, + security: {}, + }; + + // Settings are merged with the following precedence (last one wins for + // single values): + // 1. System Defaults + // 2. User Settings + // 3. Workspace Settings + // 4. 
System Settings (as overrides) + // + // For properties that are arrays (e.g., includeDirectories), the arrays + // are concatenated. For objects (e.g., customThemes), they are merged. + return { + ...systemDefaults, + ...user, + ...safeWorkspaceWithoutFolderTrust, + ...system, + general: { + ...(systemDefaults.general || {}), + ...(user.general || {}), + ...(safeWorkspaceWithoutFolderTrust.general || {}), + ...(system.general || {}), + }, + ui: { + ...(systemDefaults.ui || {}), + ...(user.ui || {}), + ...(safeWorkspaceWithoutFolderTrust.ui || {}), + ...(system.ui || {}), + customThemes: { + ...(systemDefaults.ui?.customThemes || {}), + ...(user.ui?.customThemes || {}), + ...(safeWorkspaceWithoutFolderTrust.ui?.customThemes || {}), + ...(system.ui?.customThemes || {}), + }, + }, + security: { + ...(systemDefaults.security || {}), + ...(user.security || {}), + ...(safeWorkspaceWithoutFolderTrust.security || {}), + ...(system.security || {}), + }, + mcp: { + ...(systemDefaults.mcp || {}), + ...(user.mcp || {}), + ...(safeWorkspaceWithoutFolderTrust.mcp || {}), + ...(system.mcp || {}), + }, + mcpServers: { + ...(systemDefaults.mcpServers || {}), + ...(user.mcpServers || {}), + ...(safeWorkspaceWithoutFolderTrust.mcpServers || {}), + ...(system.mcpServers || {}), + }, + context: { + ...(systemDefaults.context || {}), + ...(user.context || {}), + ...(safeWorkspaceWithoutFolderTrust.context || {}), + ...(system.context || {}), + includeDirectories: [ + ...(systemDefaults.context?.includeDirectories || []), + ...(user.context?.includeDirectories || []), + ...(safeWorkspaceWithoutFolderTrust.context?.includeDirectories || []), + ...(system.context?.includeDirectories || []), + ], + }, + model: { + ...(systemDefaults.model || {}), + ...(user.model || {}), + ...(safeWorkspaceWithoutFolderTrust.model || {}), + ...(system.model || {}), + chatCompression: { + ...(systemDefaults.model?.chatCompression || {}), + ...(user.model?.chatCompression || {}), + 
...(safeWorkspaceWithoutFolderTrust.model?.chatCompression || {}), + ...(system.model?.chatCompression || {}), + }, + }, + advanced: { + ...(systemDefaults.advanced || {}), + ...(user.advanced || {}), + ...(safeWorkspaceWithoutFolderTrust.advanced || {}), + ...(system.advanced || {}), + excludedEnvVars: [ + ...new Set([ + ...(systemDefaults.advanced?.excludedEnvVars || []), + ...(user.advanced?.excludedEnvVars || []), + ...(safeWorkspaceWithoutFolderTrust.advanced?.excludedEnvVars || []), + ...(system.advanced?.excludedEnvVars || []), + ]), + ], + }, + extensions: { + ...(systemDefaults.extensions || {}), + ...(user.extensions || {}), + ...(safeWorkspaceWithoutFolderTrust.extensions || {}), + ...(system.extensions || {}), + disabled: [ + ...new Set([ + ...(systemDefaults.extensions?.disabled || []), + ...(user.extensions?.disabled || []), + ...(safeWorkspaceWithoutFolderTrust.extensions?.disabled || []), + ...(system.extensions?.disabled || []), + ]), + ], + workspacesWithMigrationNudge: [ + ...new Set([ + ...(systemDefaults.extensions?.workspacesWithMigrationNudge || []), + ...(user.extensions?.workspacesWithMigrationNudge || []), + ...(safeWorkspaceWithoutFolderTrust.extensions + ?.workspacesWithMigrationNudge || []), + ...(system.extensions?.workspacesWithMigrationNudge || []), + ]), + ], + }, + }; +} + +export class LoadedSettings { + constructor( + system: SettingsFile, + systemDefaults: SettingsFile, + user: SettingsFile, + workspace: SettingsFile, + errors: SettingsError[], + isTrusted: boolean, + migratedInMemorScopes: Set, + ) { + this.system = system; + this.systemDefaults = systemDefaults; + this.user = user; + this.workspace = workspace; + this.errors = errors; + this.isTrusted = isTrusted; + this.migratedInMemorScopes = migratedInMemorScopes; + this._merged = this.computeMergedSettings(); + } + + readonly system: SettingsFile; + readonly systemDefaults: SettingsFile; + readonly user: SettingsFile; + readonly workspace: SettingsFile; + readonly errors: 
SettingsError[]; + readonly isTrusted: boolean; + readonly migratedInMemorScopes: Set; + + private _merged: Settings; + + get merged(): Settings { + return this._merged; + } + + private computeMergedSettings(): Settings { + return mergeSettings( + this.system.settings, + this.systemDefaults.settings, + this.user.settings, + this.workspace.settings, + this.isTrusted, + ); + } + + forScope(scope: SettingScope): SettingsFile { + switch (scope) { + case SettingScope.User: + return this.user; + case SettingScope.Workspace: + return this.workspace; + case SettingScope.System: + return this.system; + case SettingScope.SystemDefaults: + return this.systemDefaults; + default: + throw new Error(`Invalid scope: ${scope}`); + } + } + + setValue(scope: SettingScope, key: string, value: unknown): void { + const settingsFile = this.forScope(scope); + setNestedProperty(settingsFile.settings, key, value); + this._merged = this.computeMergedSettings(); + saveSettings(settingsFile); + } +} + +function findEnvFile(startDir: string): string | null { + let currentDir = path.resolve(startDir); + while (true) { + // prefer gemini-specific .env under GEMINI_DIR + const geminiEnvPath = path.join(currentDir, GEMINI_DIR, '.env'); + if (fs.existsSync(geminiEnvPath)) { + return geminiEnvPath; + } + const envPath = path.join(currentDir, '.env'); + if (fs.existsSync(envPath)) { + return envPath; + } + const parentDir = path.dirname(currentDir); + if (parentDir === currentDir || !parentDir) { + // check .env under home as fallback, again preferring gemini-specific .env + const homeGeminiEnvPath = path.join(homedir(), GEMINI_DIR, '.env'); + if (fs.existsSync(homeGeminiEnvPath)) { + return homeGeminiEnvPath; + } + const homeEnvPath = path.join(homedir(), '.env'); + if (fs.existsSync(homeEnvPath)) { + return homeEnvPath; + } + return null; + } + currentDir = parentDir; + } +} + +export function setUpCloudShellEnvironment(envFilePath: string | null): void { + // Special handling for 
GOOGLE_CLOUD_PROJECT in Cloud Shell: + // Because GOOGLE_CLOUD_PROJECT in Cloud Shell tracks the project + // set by the user using "gcloud config set project" we do not want to + // use its value. So, unless the user overrides GOOGLE_CLOUD_PROJECT in + // one of the .env files, we set the Cloud Shell-specific default here. + if (envFilePath && fs.existsSync(envFilePath)) { + const envFileContent = fs.readFileSync(envFilePath); + const parsedEnv = dotenv.parse(envFileContent); + if (parsedEnv['GOOGLE_CLOUD_PROJECT']) { + // .env file takes precedence in Cloud Shell + process.env['GOOGLE_CLOUD_PROJECT'] = parsedEnv['GOOGLE_CLOUD_PROJECT']; + } else { + // If not in .env, set to default and override global + process.env['GOOGLE_CLOUD_PROJECT'] = 'cloudshell-gca'; + } + } else { + // If no .env file, set to default and override global + process.env['GOOGLE_CLOUD_PROJECT'] = 'cloudshell-gca'; + } +} + +export function loadEnvironment(settings: Settings): void { + const envFilePath = findEnvFile(process.cwd()); + + if (!isWorkspaceTrusted(settings)) { + return; + } + + // Cloud Shell environment variable handling + if (process.env['CLOUD_SHELL'] === 'true') { + setUpCloudShellEnvironment(envFilePath); + } + + if (envFilePath) { + // Manually parse and load environment variables to handle exclusions correctly. + // This avoids modifying environment variables that were already set from the shell. + try { + const envFileContent = fs.readFileSync(envFilePath, 'utf-8'); + const parsedEnv = dotenv.parse(envFileContent); + + const excludedVars = + settings?.advanced?.excludedEnvVars || DEFAULT_EXCLUDED_ENV_VARS; + const isProjectEnvFile = !envFilePath.includes(GEMINI_DIR); + + for (const key in parsedEnv) { + if (Object.hasOwn(parsedEnv, key)) { + // If it's a project .env file, skip loading excluded variables. + if (isProjectEnvFile && excludedVars.includes(key)) { + continue; + } + + // Load variable only if it's not already set in the environment. 
+ if (!Object.hasOwn(process.env, key)) { + process.env[key] = parsedEnv[key]; + } + } + } + } catch (_e) { + // Errors are ignored to match the behavior of `dotenv.config({ quiet: true })`. + } + } +} + +/** + * Loads settings from user and workspace directories. + * Project settings override user settings. + */ +export function loadSettings(workspaceDir: string): LoadedSettings { + let systemSettings: Settings = {}; + let systemDefaultSettings: Settings = {}; + let userSettings: Settings = {}; + let workspaceSettings: Settings = {}; + const settingsErrors: SettingsError[] = []; + const systemSettingsPath = getSystemSettingsPath(); + const systemDefaultsPath = getSystemDefaultsPath(); + const migratedInMemorScopes = new Set(); + + // Resolve paths to their canonical representation to handle symlinks + const resolvedWorkspaceDir = path.resolve(workspaceDir); + const resolvedHomeDir = path.resolve(homedir()); + + let realWorkspaceDir = resolvedWorkspaceDir; + try { + // fs.realpathSync gets the "true" path, resolving any symlinks + realWorkspaceDir = fs.realpathSync(resolvedWorkspaceDir); + } catch (_e) { + // This is okay. The path might not exist yet, and that's a valid state. + } + + // We expect homedir to always exist and be resolvable. 
+ const realHomeDir = fs.realpathSync(resolvedHomeDir); + + const workspaceSettingsPath = new Storage( + workspaceDir, + ).getWorkspaceSettingsPath(); + + const loadAndMigrate = (filePath: string, scope: SettingScope): Settings => { + try { + if (fs.existsSync(filePath)) { + const content = fs.readFileSync(filePath, 'utf-8'); + const rawSettings: unknown = JSON.parse(stripJsonComments(content)); + + if ( + typeof rawSettings !== 'object' || + rawSettings === null || + Array.isArray(rawSettings) + ) { + settingsErrors.push({ + message: 'Settings file is not a valid JSON object.', + path: filePath, + }); + return {}; + } + + let settingsObject = rawSettings as Record; + if (needsMigration(settingsObject)) { + const migratedSettings = migrateSettingsToV2(settingsObject); + if (migratedSettings) { + if (MIGRATE_V2_OVERWRITE) { + try { + fs.renameSync(filePath, `${filePath}.orig`); + fs.writeFileSync( + filePath, + JSON.stringify(migratedSettings, null, 2), + 'utf-8', + ); + } catch (e) { + console.error( + `Error migrating settings file on disk: ${getErrorMessage( + e, + )}`, + ); + } + } else { + migratedInMemorScopes.add(scope); + } + settingsObject = migratedSettings; + } + } + return settingsObject as Settings; + } + } catch (error: unknown) { + settingsErrors.push({ + message: getErrorMessage(error), + path: filePath, + }); + } + return {}; + }; + + systemSettings = loadAndMigrate(systemSettingsPath, SettingScope.System); + systemDefaultSettings = loadAndMigrate( + systemDefaultsPath, + SettingScope.SystemDefaults, + ); + userSettings = loadAndMigrate(USER_SETTINGS_PATH, SettingScope.User); + + if (realWorkspaceDir !== realHomeDir) { + workspaceSettings = loadAndMigrate( + workspaceSettingsPath, + SettingScope.Workspace, + ); + } + + // Support legacy theme names + if (userSettings.ui?.theme === 'VS') { + userSettings.ui.theme = DefaultLight.name; + } else if (userSettings.ui?.theme === 'VS2015') { + userSettings.ui.theme = DefaultDark.name; + } + if 
// loadEnvironment depends on settings so we have to create a temp version of
// the settings to avoid a cycle
JSON.stringify(settingsToSave, null, 2), + 'utf-8', + ); + } catch (error) { + console.error('Error saving user settings file:', error); + } +} diff --git a/projects/gemini-cli/packages/cli/src/config/settingsSchema.test.ts b/projects/gemini-cli/packages/cli/src/config/settingsSchema.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..e182e49ef89063497724c2f837d8964d344289d4 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/settingsSchema.test.ts @@ -0,0 +1,308 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import type { Settings } from './settingsSchema.js'; +import { SETTINGS_SCHEMA } from './settingsSchema.js'; + +describe('SettingsSchema', () => { + describe('SETTINGS_SCHEMA', () => { + it('should contain all expected top-level settings', () => { + const expectedSettings = [ + 'mcpServers', + 'general', + 'ui', + 'ide', + 'privacy', + 'telemetry', + 'model', + 'context', + 'tools', + 'mcp', + 'security', + 'advanced', + ]; + + expectedSettings.forEach((setting) => { + expect( + SETTINGS_SCHEMA[setting as keyof typeof SETTINGS_SCHEMA], + ).toBeDefined(); + }); + }); + + it('should have correct structure for each setting', () => { + Object.entries(SETTINGS_SCHEMA).forEach(([_key, definition]) => { + expect(definition).toHaveProperty('type'); + expect(definition).toHaveProperty('label'); + expect(definition).toHaveProperty('category'); + expect(definition).toHaveProperty('requiresRestart'); + expect(definition).toHaveProperty('default'); + expect(typeof definition.type).toBe('string'); + expect(typeof definition.label).toBe('string'); + expect(typeof definition.category).toBe('string'); + expect(typeof definition.requiresRestart).toBe('boolean'); + }); + }); + + it('should have correct nested setting structure', () => { + const nestedSettings = [ + 'general', + 'ui', + 'ide', + 'privacy', + 'model', + 'context', + 'tools', + 
'mcp', + 'security', + 'advanced', + ]; + + nestedSettings.forEach((setting) => { + const definition = SETTINGS_SCHEMA[ + setting as keyof typeof SETTINGS_SCHEMA + ] as (typeof SETTINGS_SCHEMA)[keyof typeof SETTINGS_SCHEMA] & { + properties: unknown; + }; + expect(definition.type).toBe('object'); + expect(definition.properties).toBeDefined(); + expect(typeof definition.properties).toBe('object'); + }); + }); + + it('should have accessibility nested properties', () => { + expect( + SETTINGS_SCHEMA.ui?.properties?.accessibility?.properties, + ).toBeDefined(); + expect( + SETTINGS_SCHEMA.ui?.properties?.accessibility.properties + ?.disableLoadingPhrases.type, + ).toBe('boolean'); + }); + + it('should have checkpointing nested properties', () => { + expect( + SETTINGS_SCHEMA.general?.properties?.checkpointing.properties?.enabled, + ).toBeDefined(); + expect( + SETTINGS_SCHEMA.general?.properties?.checkpointing.properties?.enabled + .type, + ).toBe('boolean'); + }); + + it('should have fileFiltering nested properties', () => { + expect( + SETTINGS_SCHEMA.context.properties.fileFiltering.properties + ?.respectGitIgnore, + ).toBeDefined(); + expect( + SETTINGS_SCHEMA.context.properties.fileFiltering.properties + ?.respectGeminiIgnore, + ).toBeDefined(); + expect( + SETTINGS_SCHEMA.context.properties.fileFiltering.properties + ?.enableRecursiveFileSearch, + ).toBeDefined(); + }); + + it('should have unique categories', () => { + const categories = new Set(); + + // Collect categories from top-level settings + Object.values(SETTINGS_SCHEMA).forEach((definition) => { + categories.add(definition.category); + // Also collect from nested properties + const defWithProps = definition as typeof definition & { + properties?: Record; + }; + if (defWithProps.properties) { + Object.values(defWithProps.properties).forEach( + (nestedDef: unknown) => { + const nestedDefTyped = nestedDef as { category?: string }; + if (nestedDefTyped.category) { + categories.add(nestedDefTyped.category); 
+ } + }, + ); + } + }); + + expect(categories.size).toBeGreaterThan(0); + expect(categories).toContain('General'); + expect(categories).toContain('UI'); + expect(categories).toContain('Advanced'); + }); + + it('should have consistent default values for boolean settings', () => { + const checkBooleanDefaults = (schema: Record) => { + Object.entries(schema).forEach( + ([_key, definition]: [string, unknown]) => { + const def = definition as { + type?: string; + default?: unknown; + properties?: Record; + }; + if (def.type === 'boolean') { + // Boolean settings can have boolean or undefined defaults (for optional settings) + expect(['boolean', 'undefined']).toContain(typeof def.default); + } + if (def.properties) { + checkBooleanDefaults(def.properties); + } + }, + ); + }; + + checkBooleanDefaults(SETTINGS_SCHEMA as Record); + }); + + it('should have showInDialog property configured', () => { + // Check that user-facing settings are marked for dialog display + expect(SETTINGS_SCHEMA.ui.properties.showMemoryUsage.showInDialog).toBe( + true, + ); + expect(SETTINGS_SCHEMA.general.properties.vimMode.showInDialog).toBe( + true, + ); + expect(SETTINGS_SCHEMA.ide.properties.enabled.showInDialog).toBe(true); + expect( + SETTINGS_SCHEMA.general.properties.disableAutoUpdate.showInDialog, + ).toBe(true); + expect(SETTINGS_SCHEMA.ui.properties.hideWindowTitle.showInDialog).toBe( + true, + ); + expect(SETTINGS_SCHEMA.ui.properties.hideTips.showInDialog).toBe(true); + expect(SETTINGS_SCHEMA.ui.properties.hideBanner.showInDialog).toBe(true); + expect( + SETTINGS_SCHEMA.privacy.properties.usageStatisticsEnabled.showInDialog, + ).toBe(false); + + // Check that advanced settings are hidden from dialog + expect(SETTINGS_SCHEMA.security.properties.auth.showInDialog).toBe(false); + expect(SETTINGS_SCHEMA.tools.properties.core.showInDialog).toBe(false); + expect(SETTINGS_SCHEMA.mcpServers.showInDialog).toBe(false); + expect(SETTINGS_SCHEMA.telemetry.showInDialog).toBe(false); + + // Check 
that some settings are appropriately hidden + expect(SETTINGS_SCHEMA.ui.properties.theme.showInDialog).toBe(false); // Changed to false + expect(SETTINGS_SCHEMA.ui.properties.customThemes.showInDialog).toBe( + false, + ); // Managed via theme editor + expect( + SETTINGS_SCHEMA.general.properties.checkpointing.showInDialog, + ).toBe(false); // Experimental feature + expect(SETTINGS_SCHEMA.ui.properties.accessibility.showInDialog).toBe( + false, + ); // Changed to false + expect( + SETTINGS_SCHEMA.context.properties.fileFiltering.showInDialog, + ).toBe(false); // Changed to false + expect( + SETTINGS_SCHEMA.general.properties.preferredEditor.showInDialog, + ).toBe(false); // Changed to false + expect( + SETTINGS_SCHEMA.advanced.properties.autoConfigureMemory.showInDialog, + ).toBe(false); + }); + + it('should infer Settings type correctly', () => { + // This test ensures that the Settings type is properly inferred from the schema + const settings: Settings = { + ui: { + theme: 'dark', + }, + context: { + includeDirectories: ['/path/to/dir'], + loadMemoryFromIncludeDirectories: true, + }, + }; + + // TypeScript should not complain about these properties + expect(settings.ui?.theme).toBe('dark'); + expect(settings.context?.includeDirectories).toEqual(['/path/to/dir']); + expect(settings.context?.loadMemoryFromIncludeDirectories).toBe(true); + }); + + it('should have includeDirectories setting in schema', () => { + expect( + SETTINGS_SCHEMA.context?.properties.includeDirectories, + ).toBeDefined(); + expect(SETTINGS_SCHEMA.context?.properties.includeDirectories.type).toBe( + 'array', + ); + expect( + SETTINGS_SCHEMA.context?.properties.includeDirectories.category, + ).toBe('Context'); + expect( + SETTINGS_SCHEMA.context?.properties.includeDirectories.default, + ).toEqual([]); + }); + + it('should have loadMemoryFromIncludeDirectories setting in schema', () => { + expect( + SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories, + ).toBeDefined(); + expect( 
+ SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories + .type, + ).toBe('boolean'); + expect( + SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories + .category, + ).toBe('Context'); + expect( + SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories + .default, + ).toBe(false); + }); + + it('should have folderTrustFeature setting in schema', () => { + expect( + SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled, + ).toBeDefined(); + expect( + SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled.type, + ).toBe('boolean'); + expect( + SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled + .category, + ).toBe('Security'); + expect( + SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled + .default, + ).toBe(false); + expect( + SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled + .showInDialog, + ).toBe(true); + }); + + it('should have debugKeystrokeLogging setting in schema', () => { + expect( + SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging, + ).toBeDefined(); + expect( + SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.type, + ).toBe('boolean'); + expect( + SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.category, + ).toBe('General'); + expect( + SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.default, + ).toBe(false); + expect( + SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging + .requiresRestart, + ).toBe(false); + expect( + SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.showInDialog, + ).toBe(true); + expect( + SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.description, + ).toBe('Enable debug logging of keystrokes to the console.'); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/config/settingsSchema.ts b/projects/gemini-cli/packages/cli/src/config/settingsSchema.ts new file mode 100644 index 
0000000000000000000000000000000000000000..f884a332afa7154b49a7d7ca286c96fa129c6988 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/settingsSchema.ts @@ -0,0 +1,804 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { + MCPServerConfig, + BugCommandSettings, + TelemetrySettings, + AuthType, + ChatCompressionSettings, +} from '@google/gemini-cli-core'; +import type { CustomTheme } from '../ui/themes/theme.js'; + +export interface SettingDefinition { + type: 'boolean' | 'string' | 'number' | 'array' | 'object'; + label: string; + category: string; + requiresRestart: boolean; + default: boolean | string | number | string[] | object | undefined; + description?: string; + parentKey?: string; + childKey?: string; + key?: string; + properties?: SettingsSchema; + showInDialog?: boolean; +} + +export interface SettingsSchema { + [key: string]: SettingDefinition; +} + +export type MemoryImportFormat = 'tree' | 'flat'; +export type DnsResolutionOrder = 'ipv4first' | 'verbatim'; + +/** + * The canonical schema for all settings. + * The structure of this object defines the structure of the `Settings` type. + * `as const` is crucial for TypeScript to infer the most specific types possible. 
+ */ +export const SETTINGS_SCHEMA = { + // Maintained for compatibility/criticality + mcpServers: { + type: 'object', + label: 'MCP Servers', + category: 'Advanced', + requiresRestart: true, + default: {} as Record, + description: 'Configuration for MCP servers.', + showInDialog: false, + }, + + general: { + type: 'object', + label: 'General', + category: 'General', + requiresRestart: false, + default: {}, + description: 'General application settings.', + showInDialog: false, + properties: { + preferredEditor: { + type: 'string', + label: 'Preferred Editor', + category: 'General', + requiresRestart: false, + default: undefined as string | undefined, + description: 'The preferred editor to open files in.', + showInDialog: false, + }, + vimMode: { + type: 'boolean', + label: 'Vim Mode', + category: 'General', + requiresRestart: false, + default: false, + description: 'Enable Vim keybindings', + showInDialog: true, + }, + disableAutoUpdate: { + type: 'boolean', + label: 'Disable Auto Update', + category: 'General', + requiresRestart: false, + default: false, + description: 'Disable automatic updates', + showInDialog: true, + }, + disableUpdateNag: { + type: 'boolean', + label: 'Disable Update Nag', + category: 'General', + requiresRestart: false, + default: false, + description: 'Disable update notification prompts.', + showInDialog: false, + }, + checkpointing: { + type: 'object', + label: 'Checkpointing', + category: 'General', + requiresRestart: true, + default: {}, + description: 'Session checkpointing settings.', + showInDialog: false, + properties: { + enabled: { + type: 'boolean', + label: 'Enable Checkpointing', + category: 'General', + requiresRestart: true, + default: false, + description: 'Enable session checkpointing for recovery', + showInDialog: false, + }, + }, + }, + enablePromptCompletion: { + type: 'boolean', + label: 'Enable Prompt Completion', + category: 'General', + requiresRestart: true, + default: false, + description: + 'Enable AI-powered 
prompt completion suggestions while typing.', + showInDialog: true, + }, + debugKeystrokeLogging: { + type: 'boolean', + label: 'Debug Keystroke Logging', + category: 'General', + requiresRestart: false, + default: false, + description: 'Enable debug logging of keystrokes to the console.', + showInDialog: true, + }, + }, + }, + + ui: { + type: 'object', + label: 'UI', + category: 'UI', + requiresRestart: false, + default: {}, + description: 'User interface settings.', + showInDialog: false, + properties: { + theme: { + type: 'string', + label: 'Theme', + category: 'UI', + requiresRestart: false, + default: undefined as string | undefined, + description: 'The color theme for the UI.', + showInDialog: false, + }, + customThemes: { + type: 'object', + label: 'Custom Themes', + category: 'UI', + requiresRestart: false, + default: {} as Record, + description: 'Custom theme definitions.', + showInDialog: false, + }, + hideWindowTitle: { + type: 'boolean', + label: 'Hide Window Title', + category: 'UI', + requiresRestart: true, + default: false, + description: 'Hide the window title bar', + showInDialog: true, + }, + hideTips: { + type: 'boolean', + label: 'Hide Tips', + category: 'UI', + requiresRestart: false, + default: false, + description: 'Hide helpful tips in the UI', + showInDialog: true, + }, + hideBanner: { + type: 'boolean', + label: 'Hide Banner', + category: 'UI', + requiresRestart: false, + default: false, + description: 'Hide the application banner', + showInDialog: true, + }, + hideFooter: { + type: 'boolean', + label: 'Hide Footer', + category: 'UI', + requiresRestart: false, + default: false, + description: 'Hide the footer from the UI', + showInDialog: true, + }, + showMemoryUsage: { + type: 'boolean', + label: 'Show Memory Usage', + category: 'UI', + requiresRestart: false, + default: false, + description: 'Display memory usage information in the UI', + showInDialog: true, + }, + showLineNumbers: { + type: 'boolean', + label: 'Show Line Numbers', + 
category: 'UI', + requiresRestart: false, + default: false, + description: 'Show line numbers in the chat.', + showInDialog: true, + }, + showCitations: { + type: 'boolean', + label: 'Show Citations', + category: 'UI', + requiresRestart: false, + default: false, + description: 'Show citations for generated text in the chat.', + showInDialog: true, + }, + accessibility: { + type: 'object', + label: 'Accessibility', + category: 'UI', + requiresRestart: true, + default: {}, + description: 'Accessibility settings.', + showInDialog: false, + properties: { + disableLoadingPhrases: { + type: 'boolean', + label: 'Disable Loading Phrases', + category: 'UI', + requiresRestart: true, + default: false, + description: 'Disable loading phrases for accessibility', + showInDialog: true, + }, + screenReader: { + type: 'boolean', + label: 'Screen Reader Mode', + category: 'UI', + requiresRestart: true, + default: false, + description: + 'Render output in plain-text to be more screen reader accessible', + showInDialog: true, + }, + }, + }, + }, + }, + + ide: { + type: 'object', + label: 'IDE', + category: 'IDE', + requiresRestart: true, + default: {}, + description: 'IDE integration settings.', + showInDialog: false, + properties: { + enabled: { + type: 'boolean', + label: 'IDE Mode', + category: 'IDE', + requiresRestart: true, + default: false, + description: 'Enable IDE integration mode', + showInDialog: true, + }, + hasSeenNudge: { + type: 'boolean', + label: 'Has Seen IDE Integration Nudge', + category: 'IDE', + requiresRestart: false, + default: false, + description: 'Whether the user has seen the IDE integration nudge.', + showInDialog: false, + }, + }, + }, + + privacy: { + type: 'object', + label: 'Privacy', + category: 'Privacy', + requiresRestart: true, + default: {}, + description: 'Privacy-related settings.', + showInDialog: false, + properties: { + usageStatisticsEnabled: { + type: 'boolean', + label: 'Enable Usage Statistics', + category: 'Privacy', + requiresRestart: 
true, + default: true, + description: 'Enable collection of usage statistics', + showInDialog: false, + }, + }, + }, + + telemetry: { + type: 'object', + label: 'Telemetry', + category: 'Advanced', + requiresRestart: true, + default: undefined as TelemetrySettings | undefined, + description: 'Telemetry configuration.', + showInDialog: false, + }, + + model: { + type: 'object', + label: 'Model', + category: 'Model', + requiresRestart: false, + default: {}, + description: 'Settings related to the generative model.', + showInDialog: false, + properties: { + name: { + type: 'string', + label: 'Model', + category: 'Model', + requiresRestart: false, + default: undefined as string | undefined, + description: 'The Gemini model to use for conversations.', + showInDialog: false, + }, + maxSessionTurns: { + type: 'number', + label: 'Max Session Turns', + category: 'Model', + requiresRestart: false, + default: -1, + description: + 'Maximum number of user/model/tool turns to keep in a session. -1 means unlimited.', + showInDialog: true, + }, + summarizeToolOutput: { + type: 'object', + label: 'Summarize Tool Output', + category: 'Model', + requiresRestart: false, + default: undefined as + | Record + | undefined, + description: 'Settings for summarizing tool output.', + showInDialog: false, + }, + chatCompression: { + type: 'object', + label: 'Chat Compression', + category: 'Model', + requiresRestart: false, + default: undefined as ChatCompressionSettings | undefined, + description: 'Chat compression settings.', + showInDialog: false, + }, + skipNextSpeakerCheck: { + type: 'boolean', + label: 'Skip Next Speaker Check', + category: 'Model', + requiresRestart: false, + default: false, + description: 'Skip the next speaker check.', + showInDialog: true, + }, + }, + }, + + context: { + type: 'object', + label: 'Context', + category: 'Context', + requiresRestart: false, + default: {}, + description: 'Settings for managing context provided to the model.', + showInDialog: false, + 
properties: { + fileName: { + type: 'object', + label: 'Context File Name', + category: 'Context', + requiresRestart: false, + default: undefined as string | string[] | undefined, + description: 'The name of the context file.', + showInDialog: false, + }, + importFormat: { + type: 'string', + label: 'Memory Import Format', + category: 'Context', + requiresRestart: false, + default: undefined as MemoryImportFormat | undefined, + description: 'The format to use when importing memory.', + showInDialog: false, + }, + discoveryMaxDirs: { + type: 'number', + label: 'Memory Discovery Max Dirs', + category: 'Context', + requiresRestart: false, + default: 200, + description: 'Maximum number of directories to search for memory.', + showInDialog: true, + }, + includeDirectories: { + type: 'array', + label: 'Include Directories', + category: 'Context', + requiresRestart: false, + default: [] as string[], + description: + 'Additional directories to include in the workspace context. Missing directories will be skipped with a warning.', + showInDialog: false, + }, + loadMemoryFromIncludeDirectories: { + type: 'boolean', + label: 'Load Memory From Include Directories', + category: 'Context', + requiresRestart: false, + default: false, + description: 'Whether to load memory files from include directories.', + showInDialog: true, + }, + fileFiltering: { + type: 'object', + label: 'File Filtering', + category: 'Context', + requiresRestart: true, + default: {}, + description: 'Settings for git-aware file filtering.', + showInDialog: false, + properties: { + respectGitIgnore: { + type: 'boolean', + label: 'Respect .gitignore', + category: 'Context', + requiresRestart: true, + default: true, + description: 'Respect .gitignore files when searching', + showInDialog: true, + }, + respectGeminiIgnore: { + type: 'boolean', + label: 'Respect .geminiignore', + category: 'Context', + requiresRestart: true, + default: true, + description: 'Respect .geminiignore files when searching', + 
showInDialog: true, + }, + enableRecursiveFileSearch: { + type: 'boolean', + label: 'Enable Recursive File Search', + category: 'Context', + requiresRestart: true, + default: true, + description: 'Enable recursive file search functionality', + showInDialog: true, + }, + disableFuzzySearch: { + type: 'boolean', + label: 'Disable Fuzzy Search', + category: 'Context', + requiresRestart: true, + default: false, + description: 'Disable fuzzy search when searching for files.', + showInDialog: true, + }, + }, + }, + }, + }, + + tools: { + type: 'object', + label: 'Tools', + category: 'Tools', + requiresRestart: true, + default: {}, + description: 'Settings for built-in and custom tools.', + showInDialog: false, + properties: { + sandbox: { + type: 'object', + label: 'Sandbox', + category: 'Tools', + requiresRestart: true, + default: undefined as boolean | string | undefined, + description: + 'Sandbox execution environment (can be a boolean or a path string).', + showInDialog: false, + }, + usePty: { + type: 'boolean', + label: 'Use node-pty for Shell Execution', + category: 'Tools', + requiresRestart: true, + default: false, + description: + 'Use node-pty for shell command execution. 
Fallback to child_process still applies.', + showInDialog: true, + }, + core: { + type: 'array', + label: 'Core Tools', + category: 'Tools', + requiresRestart: true, + default: undefined as string[] | undefined, + description: 'Paths to core tool definitions.', + showInDialog: false, + }, + allowed: { + type: 'array', + label: 'Allowed Tools', + category: 'Advanced', + requiresRestart: true, + default: undefined as string[] | undefined, + description: + 'A list of tool names that will bypass the confirmation dialog.', + showInDialog: false, + }, + exclude: { + type: 'array', + label: 'Exclude Tools', + category: 'Tools', + requiresRestart: true, + default: undefined as string[] | undefined, + description: 'Tool names to exclude from discovery.', + showInDialog: false, + }, + discoveryCommand: { + type: 'string', + label: 'Tool Discovery Command', + category: 'Tools', + requiresRestart: true, + default: undefined as string | undefined, + description: 'Command to run for tool discovery.', + showInDialog: false, + }, + callCommand: { + type: 'string', + label: 'Tool Call Command', + category: 'Tools', + requiresRestart: true, + default: undefined as string | undefined, + description: 'Command to run for tool calls.', + showInDialog: false, + }, + useRipgrep: { + type: 'boolean', + label: 'Use Ripgrep', + category: 'Tools', + requiresRestart: false, + default: false, + description: + 'Use ripgrep for file content search instead of the fallback implementation. 
Provides faster search performance.', + showInDialog: true, + }, + }, + }, + + mcp: { + type: 'object', + label: 'MCP', + category: 'MCP', + requiresRestart: true, + default: {}, + description: 'Settings for Model Context Protocol (MCP) servers.', + showInDialog: false, + properties: { + serverCommand: { + type: 'string', + label: 'MCP Server Command', + category: 'MCP', + requiresRestart: true, + default: undefined as string | undefined, + description: 'Command to start an MCP server.', + showInDialog: false, + }, + allowed: { + type: 'array', + label: 'Allow MCP Servers', + category: 'MCP', + requiresRestart: true, + default: undefined as string[] | undefined, + description: 'A whitelist of MCP servers to allow.', + showInDialog: false, + }, + excluded: { + type: 'array', + label: 'Exclude MCP Servers', + category: 'MCP', + requiresRestart: true, + default: undefined as string[] | undefined, + description: 'A blacklist of MCP servers to exclude.', + showInDialog: false, + }, + }, + }, + useSmartEdit: { + type: 'boolean', + label: 'Use Smart Edit', + category: 'Advanced', + requiresRestart: false, + default: false, + description: 'Enable the smart-edit tool instead of the replace tool.', + showInDialog: false, + }, + security: { + type: 'object', + label: 'Security', + category: 'Security', + requiresRestart: true, + default: {}, + description: 'Security-related settings.', + showInDialog: false, + properties: { + folderTrust: { + type: 'object', + label: 'Folder Trust', + category: 'Security', + requiresRestart: false, + default: {}, + description: 'Settings for folder trust.', + showInDialog: false, + properties: { + featureEnabled: { + type: 'boolean', + label: 'Folder Trust Feature', + category: 'Security', + requiresRestart: true, + default: false, + description: 'Enable folder trust feature for enhanced security.', + showInDialog: true, + }, + enabled: { + type: 'boolean', + label: 'Folder Trust', + category: 'Security', + requiresRestart: true, + default: 
false, + description: 'Setting to track whether Folder trust is enabled.', + showInDialog: true, + }, + }, + }, + auth: { + type: 'object', + label: 'Authentication', + category: 'Security', + requiresRestart: true, + default: {}, + description: 'Authentication settings.', + showInDialog: false, + properties: { + selectedType: { + type: 'string', + label: 'Selected Auth Type', + category: 'Security', + requiresRestart: true, + default: undefined as AuthType | undefined, + description: 'The currently selected authentication type.', + showInDialog: false, + }, + useExternal: { + type: 'boolean', + label: 'Use External Auth', + category: 'Security', + requiresRestart: true, + default: undefined as boolean | undefined, + description: 'Whether to use an external authentication flow.', + showInDialog: false, + }, + }, + }, + }, + }, + + advanced: { + type: 'object', + label: 'Advanced', + category: 'Advanced', + requiresRestart: true, + default: {}, + description: 'Advanced settings for power users.', + showInDialog: false, + properties: { + autoConfigureMemory: { + type: 'boolean', + label: 'Auto Configure Max Old Space Size', + category: 'Advanced', + requiresRestart: true, + default: false, + description: 'Automatically configure Node.js memory limits', + showInDialog: false, + }, + dnsResolutionOrder: { + type: 'string', + label: 'DNS Resolution Order', + category: 'Advanced', + requiresRestart: true, + default: undefined as DnsResolutionOrder | undefined, + description: 'The DNS resolution order.', + showInDialog: false, + }, + excludedEnvVars: { + type: 'array', + label: 'Excluded Project Environment Variables', + category: 'Advanced', + requiresRestart: false, + default: ['DEBUG', 'DEBUG_MODE'] as string[], + description: 'Environment variables to exclude from project context.', + showInDialog: false, + }, + bugCommand: { + type: 'object', + label: 'Bug Command', + category: 'Advanced', + requiresRestart: false, + default: undefined as BugCommandSettings | 
undefined, + description: 'Configuration for the bug report command.', + showInDialog: false, + }, + }, + }, + + experimental: { + type: 'object', + label: 'Experimental', + category: 'Experimental', + requiresRestart: true, + default: {}, + description: 'Setting to enable experimental features', + showInDialog: false, + properties: { + extensionManagement: { + type: 'boolean', + label: 'Extension Management', + category: 'Experimental', + requiresRestart: true, + default: false, + description: 'Enable extension management features.', + showInDialog: false, + }, + }, + }, + + extensions: { + type: 'object', + label: 'Extensions', + category: 'Extensions', + requiresRestart: true, + default: {}, + description: 'Settings for extensions.', + showInDialog: false, + properties: { + disabled: { + type: 'array', + label: 'Disabled Extensions', + category: 'Extensions', + requiresRestart: true, + default: [] as string[], + description: 'List of disabled extensions.', + showInDialog: false, + }, + workspacesWithMigrationNudge: { + type: 'array', + label: 'Workspaces with Migration Nudge', + category: 'Extensions', + requiresRestart: false, + default: [] as string[], + description: + 'List of workspaces for which the migration nudge has been shown.', + showInDialog: false, + }, + }, + }, +} as const; + +type InferSettings = { + -readonly [K in keyof T]?: T[K] extends { properties: SettingsSchema } + ? InferSettings + : T[K]['default'] extends boolean + ? 
boolean + : T[K]['default']; +}; + +export type Settings = InferSettings; diff --git a/projects/gemini-cli/packages/cli/src/config/trustedFolders.test.ts b/projects/gemini-cli/packages/cli/src/config/trustedFolders.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..bf03682f59578ee02ed8733dcf1169eb94cf96c2 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/trustedFolders.test.ts @@ -0,0 +1,258 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Mock 'os' first. +import * as osActual from 'node:os'; +vi.mock('os', async (importOriginal) => { + const actualOs = await importOriginal(); + return { + ...actualOs, + homedir: vi.fn(() => '/mock/home/user'), + platform: vi.fn(() => 'linux'), + }; +}); + +import { + describe, + it, + expect, + vi, + beforeEach, + afterEach, + type Mocked, + type Mock, +} from 'vitest'; +import * as fs from 'node:fs'; +import stripJsonComments from 'strip-json-comments'; +import * as path from 'node:path'; + +import { + loadTrustedFolders, + USER_TRUSTED_FOLDERS_PATH, + TrustLevel, + isWorkspaceTrusted, +} from './trustedFolders.js'; +import type { Settings } from './settings.js'; + +vi.mock('fs', async (importOriginal) => { + const actualFs = await importOriginal(); + return { + ...actualFs, + existsSync: vi.fn(), + readFileSync: vi.fn(), + writeFileSync: vi.fn(), + mkdirSync: vi.fn(), + }; +}); + +vi.mock('strip-json-comments', () => ({ + default: vi.fn((content) => content), +})); + +describe('Trusted Folders Loading', () => { + let mockFsExistsSync: Mocked; + let mockStripJsonComments: Mocked; + let mockFsWriteFileSync: Mocked; + + beforeEach(() => { + vi.resetAllMocks(); + mockFsExistsSync = vi.mocked(fs.existsSync); + mockStripJsonComments = vi.mocked(stripJsonComments); + mockFsWriteFileSync = vi.mocked(fs.writeFileSync); + vi.mocked(osActual.homedir).mockReturnValue('/mock/home/user'); + (mockStripJsonComments as unknown as 
Mock).mockImplementation( + (jsonString: string) => jsonString, + ); + (mockFsExistsSync as Mock).mockReturnValue(false); + (fs.readFileSync as Mock).mockReturnValue('{}'); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should load empty rules if no files exist', () => { + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([]); + expect(errors).toEqual([]); + }); + + describe('isPathTrusted', () => { + function setup({ config = {} as Record } = {}) { + (mockFsExistsSync as Mock).mockImplementation( + (p) => p === USER_TRUSTED_FOLDERS_PATH, + ); + (fs.readFileSync as Mock).mockImplementation((p) => { + if (p === USER_TRUSTED_FOLDERS_PATH) return JSON.stringify(config); + return '{}'; + }); + + const folders = loadTrustedFolders(); + + return { folders }; + } + + it('provides a method to determine if a path is trusted', () => { + const { folders } = setup({ + config: { + './myfolder': TrustLevel.TRUST_FOLDER, + '/trustedparent/trustme': TrustLevel.TRUST_PARENT, + '/user/folder': TrustLevel.TRUST_FOLDER, + '/secret': TrustLevel.DO_NOT_TRUST, + '/secret/publickeys': TrustLevel.TRUST_FOLDER, + }, + }); + expect(folders.isPathTrusted('/secret')).toBe(false); + expect(folders.isPathTrusted('/user/folder')).toBe(true); + expect(folders.isPathTrusted('/secret/publickeys/public.pem')).toBe(true); + expect(folders.isPathTrusted('/user/folder/harhar')).toBe(true); + expect(folders.isPathTrusted('myfolder/somefile.jpg')).toBe(true); + expect(folders.isPathTrusted('/trustedparent/someotherfolder')).toBe( + true, + ); + expect(folders.isPathTrusted('/trustedparent/trustme')).toBe(true); + + // No explicit rule covers this file + expect(folders.isPathTrusted('/secret/bankaccounts.json')).toBe( + undefined, + ); + expect(folders.isPathTrusted('/secret/mine/privatekey.pem')).toBe( + undefined, + ); + expect(folders.isPathTrusted('/user/someotherfolder')).toBe(undefined); + }); + }); + + it('should load user rules if only user file exists', () 
=> { + const userPath = USER_TRUSTED_FOLDERS_PATH; + (mockFsExistsSync as Mock).mockImplementation((p) => p === userPath); + const userContent = { + '/user/folder': TrustLevel.TRUST_FOLDER, + }; + (fs.readFileSync as Mock).mockImplementation((p) => { + if (p === userPath) return JSON.stringify(userContent); + return '{}'; + }); + + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([ + { path: '/user/folder', trustLevel: TrustLevel.TRUST_FOLDER }, + ]); + expect(errors).toEqual([]); + }); + + it('should handle JSON parsing errors gracefully', () => { + const userPath = USER_TRUSTED_FOLDERS_PATH; + (mockFsExistsSync as Mock).mockImplementation((p) => p === userPath); + (fs.readFileSync as Mock).mockImplementation((p) => { + if (p === userPath) return 'invalid json'; + return '{}'; + }); + + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([]); + expect(errors.length).toBe(1); + expect(errors[0].path).toBe(userPath); + expect(errors[0].message).toContain('Unexpected token'); + }); + + it('setValue should update the user config and save it', () => { + const loadedFolders = loadTrustedFolders(); + loadedFolders.setValue('/new/path', TrustLevel.TRUST_FOLDER); + + expect(loadedFolders.user.config['/new/path']).toBe( + TrustLevel.TRUST_FOLDER, + ); + expect(mockFsWriteFileSync).toHaveBeenCalledWith( + USER_TRUSTED_FOLDERS_PATH, + JSON.stringify({ '/new/path': TrustLevel.TRUST_FOLDER }, null, 2), + 'utf-8', + ); + }); +}); + +describe('isWorkspaceTrusted', () => { + let mockCwd: string; + const mockRules: Record = {}; + const mockSettings: Settings = { + security: { + folderTrust: { + featureEnabled: true, + enabled: true, + }, + }, + }; + + beforeEach(() => { + vi.spyOn(process, 'cwd').mockImplementation(() => mockCwd); + vi.spyOn(fs, 'readFileSync').mockImplementation((p) => { + if (p === USER_TRUSTED_FOLDERS_PATH) { + return JSON.stringify(mockRules); + } + return '{}'; + }); + vi.spyOn(fs, 'existsSync').mockImplementation( + 
(p) => p === USER_TRUSTED_FOLDERS_PATH, + ); + }); + + afterEach(() => { + vi.restoreAllMocks(); + // Clear the object + Object.keys(mockRules).forEach((key) => delete mockRules[key]); + }); + + it('should return true for a directly trusted folder', () => { + mockCwd = '/home/user/projectA'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted(mockSettings)).toBe(true); + }); + + it('should return true for a child of a trusted folder', () => { + mockCwd = '/home/user/projectA/src'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted(mockSettings)).toBe(true); + }); + + it('should return true for a child of a trusted parent folder', () => { + mockCwd = '/home/user/projectB'; + mockRules['/home/user/projectB/somefile.txt'] = TrustLevel.TRUST_PARENT; + expect(isWorkspaceTrusted(mockSettings)).toBe(true); + }); + + it('should return false for a directly untrusted folder', () => { + mockCwd = '/home/user/untrusted'; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted(mockSettings)).toBe(false); + }); + + it('should return undefined for a child of an untrusted folder', () => { + mockCwd = '/home/user/untrusted/src'; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted(mockSettings)).toBeUndefined(); + }); + + it('should return undefined when no rules match', () => { + mockCwd = '/home/user/other'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted(mockSettings)).toBeUndefined(); + }); + + it('should prioritize trust over distrust', () => { + mockCwd = '/home/user/projectA/untrusted'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + mockRules['/home/user/projectA/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted(mockSettings)).toBe(true); + }); + + it('should handle path normalization', () 
=> { + mockCwd = '/home/user/projectA'; + mockRules[`/home/user/../user/${path.basename('/home/user/projectA')}`] = + TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted(mockSettings)).toBe(true); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/config/trustedFolders.ts b/projects/gemini-cli/packages/cli/src/config/trustedFolders.ts new file mode 100644 index 0000000000000000000000000000000000000000..faec621d5fc363d042dfb3a2c8891297218b8f10 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/config/trustedFolders.ts @@ -0,0 +1,180 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { homedir } from 'node:os'; +import { getErrorMessage, isWithinRoot } from '@google/gemini-cli-core'; +import type { Settings } from './settings.js'; +import stripJsonComments from 'strip-json-comments'; + +export const TRUSTED_FOLDERS_FILENAME = 'trustedFolders.json'; +export const SETTINGS_DIRECTORY_NAME = '.gemini'; +export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME); +export const USER_TRUSTED_FOLDERS_PATH = path.join( + USER_SETTINGS_DIR, + TRUSTED_FOLDERS_FILENAME, +); + +export enum TrustLevel { + TRUST_FOLDER = 'TRUST_FOLDER', + TRUST_PARENT = 'TRUST_PARENT', + DO_NOT_TRUST = 'DO_NOT_TRUST', +} + +export interface TrustRule { + path: string; + trustLevel: TrustLevel; +} + +export interface TrustedFoldersError { + message: string; + path: string; +} + +export interface TrustedFoldersFile { + config: Record; + path: string; +} + +export class LoadedTrustedFolders { + constructor( + readonly user: TrustedFoldersFile, + readonly errors: TrustedFoldersError[], + ) {} + + get rules(): TrustRule[] { + return Object.entries(this.user.config).map(([path, trustLevel]) => ({ + path, + trustLevel, + })); + } + + /** + * Returns true or false if the path should be "trusted". 
This function + * should only be invoked when the folder trust setting is active. + * + * @param location path + * @returns + */ + isPathTrusted(location: string): boolean | undefined { + const trustedPaths: string[] = []; + const untrustedPaths: string[] = []; + + for (const rule of this.rules) { + switch (rule.trustLevel) { + case TrustLevel.TRUST_FOLDER: + trustedPaths.push(rule.path); + break; + case TrustLevel.TRUST_PARENT: + trustedPaths.push(path.dirname(rule.path)); + break; + case TrustLevel.DO_NOT_TRUST: + untrustedPaths.push(rule.path); + break; + default: + // Do nothing for unknown trust levels. + break; + } + } + + for (const trustedPath of trustedPaths) { + if (isWithinRoot(location, trustedPath)) { + return true; + } + } + + for (const untrustedPath of untrustedPaths) { + if (path.normalize(location) === path.normalize(untrustedPath)) { + return false; + } + } + + return undefined; + } + + setValue(path: string, trustLevel: TrustLevel): void { + this.user.config[path] = trustLevel; + saveTrustedFolders(this.user); + } +} + +export function loadTrustedFolders(): LoadedTrustedFolders { + const errors: TrustedFoldersError[] = []; + const userConfig: Record = {}; + + const userPath = USER_TRUSTED_FOLDERS_PATH; + + // Load user trusted folders + try { + if (fs.existsSync(userPath)) { + const content = fs.readFileSync(userPath, 'utf-8'); + const parsed = JSON.parse(stripJsonComments(content)) as Record< + string, + TrustLevel + >; + if (parsed) { + Object.assign(userConfig, parsed); + } + } + } catch (error: unknown) { + errors.push({ + message: getErrorMessage(error), + path: userPath, + }); + } + + return new LoadedTrustedFolders( + { path: userPath, config: userConfig }, + errors, + ); +} + +export function saveTrustedFolders( + trustedFoldersFile: TrustedFoldersFile, +): void { + try { + // Ensure the directory exists + const dirPath = path.dirname(trustedFoldersFile.path); + if (!fs.existsSync(dirPath)) { + fs.mkdirSync(dirPath, { recursive: true }); 
+ } + + fs.writeFileSync( + trustedFoldersFile.path, + JSON.stringify(trustedFoldersFile.config, null, 2), + 'utf-8', + ); + } catch (error) { + console.error('Error saving trusted folders file:', error); + } +} + +/** Is folder trust feature enabled per the current applied settings */ +export function isFolderTrustEnabled(settings: Settings): boolean { + const folderTrustFeature = + settings.security?.folderTrust?.featureEnabled ?? false; + const folderTrustSetting = settings.security?.folderTrust?.enabled ?? false; + return folderTrustFeature && folderTrustSetting; +} + +export function isWorkspaceTrusted(settings: Settings): boolean | undefined { + if (!isFolderTrustEnabled(settings)) { + return true; + } + + const folders = loadTrustedFolders(); + + if (folders.errors.length > 0) { + for (const error of folders.errors) { + console.error( + `Error loading trusted folders config from ${error.path}: ${error.message}`, + ); + } + } + + return folders.isPathTrusted(process.cwd()); +} diff --git a/projects/gemini-cli/packages/cli/src/gemini.test.tsx b/projects/gemini-cli/packages/cli/src/gemini.test.tsx new file mode 100644 index 0000000000000000000000000000000000000000..5b7cd7edcceadc2a4c6866ab21c77eed051eb204 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/gemini.test.tsx @@ -0,0 +1,332 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { + main, + setupUnhandledRejectionHandler, + validateDnsResolutionOrder, + startInteractiveUI, +} from './gemini.js'; +import type { SettingsFile } from './config/settings.js'; +import { LoadedSettings, loadSettings } from './config/settings.js'; +import { appEvents, AppEvent } from './utils/events.js'; +import type { Config } from '@google/gemini-cli-core'; +import { FatalConfigError } from '@google/gemini-cli-core'; + +// Custom error to identify mock process.exit calls +class 
MockProcessExitError extends Error { + constructor(readonly code?: string | number | null | undefined) { + super('PROCESS_EXIT_MOCKED'); + this.name = 'MockProcessExitError'; + } +} + +// Mock dependencies +vi.mock('./config/settings.js', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + loadSettings: vi.fn(), + }; +}); + +vi.mock('./config/config.js', () => ({ + loadCliConfig: vi.fn().mockResolvedValue({ + config: { + getSandbox: vi.fn(() => false), + getQuestion: vi.fn(() => ''), + }, + modelWasSwitched: false, + originalModelBeforeSwitch: null, + finalModel: 'test-model', + }), +})); + +vi.mock('read-package-up', () => ({ + readPackageUp: vi.fn().mockResolvedValue({ + packageJson: { name: 'test-pkg', version: 'test-version' }, + path: '/fake/path/package.json', + }), +})); + +vi.mock('update-notifier', () => ({ + default: vi.fn(() => ({ + notify: vi.fn(), + })), +})); + +vi.mock('./utils/events.js', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + appEvents: { + emit: vi.fn(), + }, + }; +}); + +vi.mock('./utils/sandbox.js', () => ({ + sandbox_command: vi.fn(() => ''), // Default to no sandbox command + start_sandbox: vi.fn(() => Promise.resolve()), // Mock as an async function that resolves +})); + +describe('gemini.tsx main function', () => { + let loadSettingsMock: ReturnType>; + let originalEnvGeminiSandbox: string | undefined; + let originalEnvSandbox: string | undefined; + let initialUnhandledRejectionListeners: NodeJS.UnhandledRejectionListener[] = + []; + + const processExitSpy = vi + .spyOn(process, 'exit') + .mockImplementation((code) => { + throw new MockProcessExitError(code); + }); + + beforeEach(() => { + loadSettingsMock = vi.mocked(loadSettings); + + // Store and clear sandbox-related env variables to ensure a consistent test environment + originalEnvGeminiSandbox = process.env['GEMINI_SANDBOX']; + originalEnvSandbox = process.env['SANDBOX']; + delete 
process.env['GEMINI_SANDBOX']; + delete process.env['SANDBOX']; + + initialUnhandledRejectionListeners = + process.listeners('unhandledRejection'); + }); + + afterEach(() => { + // Restore original env variables + if (originalEnvGeminiSandbox !== undefined) { + process.env['GEMINI_SANDBOX'] = originalEnvGeminiSandbox; + } else { + delete process.env['GEMINI_SANDBOX']; + } + if (originalEnvSandbox !== undefined) { + process.env['SANDBOX'] = originalEnvSandbox; + } else { + delete process.env['SANDBOX']; + } + + const currentListeners = process.listeners('unhandledRejection'); + const addedListener = currentListeners.find( + (listener) => !initialUnhandledRejectionListeners.includes(listener), + ); + + if (addedListener) { + process.removeListener('unhandledRejection', addedListener); + } + vi.restoreAllMocks(); + }); + + it('should throw InvalidConfigurationError if settings have errors', async () => { + const settingsError = { + message: 'Test settings error', + path: '/test/settings.json', + }; + const userSettingsFile: SettingsFile = { + path: '/user/settings.json', + settings: {}, + }; + const workspaceSettingsFile: SettingsFile = { + path: '/workspace/.gemini/settings.json', + settings: {}, + }; + const systemSettingsFile: SettingsFile = { + path: '/system/settings.json', + settings: {}, + }; + const systemDefaultsFile: SettingsFile = { + path: '/system/system-defaults.json', + settings: {}, + }; + const mockLoadedSettings = new LoadedSettings( + systemSettingsFile, + systemDefaultsFile, + userSettingsFile, + workspaceSettingsFile, + [settingsError], + true, + new Set(), + ); + + loadSettingsMock.mockReturnValue(mockLoadedSettings); + + await expect(main()).rejects.toThrow(FatalConfigError); + }); + + it('should log unhandled promise rejections and open debug console on first error', async () => { + const appEventsMock = vi.mocked(appEvents); + const rejectionError = new Error('Test unhandled rejection'); + + setupUnhandledRejectionHandler(); + // Simulate an 
unhandled rejection. + // We are not using Promise.reject here as vitest will catch it. + // Instead we will dispatch the event manually. + process.emit('unhandledRejection', rejectionError, Promise.resolve()); + + // We need to wait for the rejection handler to be called. + await new Promise(process.nextTick); + + expect(appEventsMock.emit).toHaveBeenCalledWith(AppEvent.OpenDebugConsole); + expect(appEventsMock.emit).toHaveBeenCalledWith( + AppEvent.LogError, + expect.stringContaining('Unhandled Promise Rejection'), + ); + expect(appEventsMock.emit).toHaveBeenCalledWith( + AppEvent.LogError, + expect.stringContaining('Please file a bug report using the /bug tool.'), + ); + + // Simulate a second rejection + const secondRejectionError = new Error('Second test unhandled rejection'); + process.emit('unhandledRejection', secondRejectionError, Promise.resolve()); + await new Promise(process.nextTick); + + // Ensure emit was only called once for OpenDebugConsole + const openDebugConsoleCalls = appEventsMock.emit.mock.calls.filter( + (call) => call[0] === AppEvent.OpenDebugConsole, + ); + expect(openDebugConsoleCalls.length).toBe(1); + + // Avoid the process.exit error from being thrown. 
+ processExitSpy.mockRestore(); + }); +}); + +describe('validateDnsResolutionOrder', () => { + let consoleWarnSpy: ReturnType; + + beforeEach(() => { + consoleWarnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}); + }); + + afterEach(() => { + consoleWarnSpy.mockRestore(); + }); + + it('should return "ipv4first" when the input is "ipv4first"', () => { + expect(validateDnsResolutionOrder('ipv4first')).toBe('ipv4first'); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + }); + + it('should return "verbatim" when the input is "verbatim"', () => { + expect(validateDnsResolutionOrder('verbatim')).toBe('verbatim'); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + }); + + it('should return the default "ipv4first" when the input is undefined', () => { + expect(validateDnsResolutionOrder(undefined)).toBe('ipv4first'); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + }); + + it('should return the default "ipv4first" and log a warning for an invalid string', () => { + expect(validateDnsResolutionOrder('invalid-value')).toBe('ipv4first'); + expect(consoleWarnSpy).toHaveBeenCalledOnce(); + expect(consoleWarnSpy).toHaveBeenCalledWith( + 'Invalid value for dnsResolutionOrder in settings: "invalid-value". 
Using default "ipv4first".', + ); + }); +}); + +describe('startInteractiveUI', () => { + // Mock dependencies + const mockConfig = { + getProjectRoot: () => '/root', + getScreenReader: () => false, + } as Config; + const mockSettings = { + merged: { + ui: { + hideWindowTitle: false, + }, + }, + } as LoadedSettings; + const mockStartupWarnings = ['warning1']; + const mockWorkspaceRoot = '/root'; + + vi.mock('./utils/version.js', () => ({ + getCliVersion: vi.fn(() => Promise.resolve('1.0.0')), + })); + + vi.mock('./ui/utils/kittyProtocolDetector.js', () => ({ + detectAndEnableKittyProtocol: vi.fn(() => Promise.resolve()), + })); + + vi.mock('./ui/utils/updateCheck.js', () => ({ + checkForUpdates: vi.fn(() => Promise.resolve(null)), + })); + + vi.mock('./utils/cleanup.js', () => ({ + cleanupCheckpoints: vi.fn(() => Promise.resolve()), + registerCleanup: vi.fn(), + })); + + vi.mock('ink', () => ({ + render: vi.fn().mockReturnValue({ unmount: vi.fn() }), + })); + + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('should render the UI with proper React context and exitOnCtrlC disabled', async () => { + const { render } = await import('ink'); + const renderSpy = vi.mocked(render); + + await startInteractiveUI( + mockConfig, + mockSettings, + mockStartupWarnings, + mockWorkspaceRoot, + ); + + // Verify render was called with correct options + expect(renderSpy).toHaveBeenCalledTimes(1); + const [reactElement, options] = renderSpy.mock.calls[0]; + + // Verify render options + expect(options).toEqual({ + exitOnCtrlC: false, + isScreenReaderEnabled: false, + }); + + // Verify React element structure is valid (but don't deep dive into JSX internals) + expect(reactElement).toBeDefined(); + }); + + it('should perform all startup tasks in correct order', async () => { + const { getCliVersion } = await import('./utils/version.js'); + const { detectAndEnableKittyProtocol } = await import( + './ui/utils/kittyProtocolDetector.js' + ); + const { checkForUpdates } = await 
import('./ui/utils/updateCheck.js'); + const { registerCleanup } = await import('./utils/cleanup.js'); + + await startInteractiveUI( + mockConfig, + mockSettings, + mockStartupWarnings, + mockWorkspaceRoot, + ); + + // Verify all startup tasks were called + expect(getCliVersion).toHaveBeenCalledTimes(1); + expect(detectAndEnableKittyProtocol).toHaveBeenCalledTimes(1); + expect(registerCleanup).toHaveBeenCalledTimes(1); + + // Verify cleanup handler is registered with unmount function + const cleanupFn = vi.mocked(registerCleanup).mock.calls[0][0]; + expect(typeof cleanupFn).toBe('function'); + + // checkForUpdates should be called asynchronously (not waited for) + // We need a small delay to let it execute + await new Promise((resolve) => setTimeout(resolve, 0)); + expect(checkForUpdates).toHaveBeenCalledTimes(1); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/gemini.tsx b/projects/gemini-cli/packages/cli/src/gemini.tsx new file mode 100644 index 0000000000000000000000000000000000000000..51291d6600eb15e2b44274238af9e373cf5815d1 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/gemini.tsx @@ -0,0 +1,465 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import React, { useState, useEffect } from 'react'; +import { render, Box, Text } from 'ink'; +import Spinner from 'ink-spinner'; +import { AppWrapper } from './ui/App.js'; +import { loadCliConfig, parseArguments } from './config/config.js'; +import { readStdin } from './utils/readStdin.js'; +import { basename } from 'node:path'; +import v8 from 'node:v8'; +import os from 'node:os'; +import dns from 'node:dns'; +import { spawn } from 'node:child_process'; +import { start_sandbox } from './utils/sandbox.js'; +import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js'; +import { loadSettings, SettingScope } from './config/settings.js'; +import { themeManager } from './ui/themes/theme-manager.js'; +import { getStartupWarnings } from 
'./utils/startupWarnings.js'; +import { getUserStartupWarnings } from './utils/userStartupWarnings.js'; +import { ConsolePatcher } from './ui/utils/ConsolePatcher.js'; +import { runNonInteractive } from './nonInteractiveCli.js'; +import { loadExtensions } from './config/extension.js'; +import { + cleanupCheckpoints, + registerCleanup, + runExitCleanup, +} from './utils/cleanup.js'; +import { getCliVersion } from './utils/version.js'; +import type { Config } from '@google/gemini-cli-core'; +import { + sessionId, + logUserPrompt, + AuthType, + getOauthClient, + logIdeConnection, + IdeConnectionEvent, + IdeConnectionType, + FatalConfigError, + uiTelemetryService, +} from '@google/gemini-cli-core'; +import { validateAuthMethod } from './config/auth.js'; +import { setMaxSizedBoxDebugging } from './ui/components/shared/MaxSizedBox.js'; +import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js'; +import { detectAndEnableKittyProtocol } from './ui/utils/kittyProtocolDetector.js'; +import { checkForUpdates } from './ui/utils/updateCheck.js'; +import { handleAutoUpdate } from './utils/handleAutoUpdate.js'; +import { appEvents, AppEvent } from './utils/events.js'; +import { SettingsContext } from './ui/contexts/SettingsContext.js'; +import { writeFileSync } from 'node:fs'; + +export function validateDnsResolutionOrder( + order: string | undefined, +): DnsResolutionOrder { + const defaultValue: DnsResolutionOrder = 'ipv4first'; + if (order === undefined) { + return defaultValue; + } + if (order === 'ipv4first' || order === 'verbatim') { + return order; + } + // We don't want to throw here, just warn and use the default. + console.warn( + `Invalid value for dnsResolutionOrder in settings: "${order}". 
Using default "${defaultValue}".`, + ); + return defaultValue; +} + +function getNodeMemoryArgs(config: Config): string[] { + const totalMemoryMB = os.totalmem() / (1024 * 1024); + const heapStats = v8.getHeapStatistics(); + const currentMaxOldSpaceSizeMb = Math.floor( + heapStats.heap_size_limit / 1024 / 1024, + ); + + // Set target to 50% of total memory + const targetMaxOldSpaceSizeInMB = Math.floor(totalMemoryMB * 0.5); + if (config.getDebugMode()) { + console.debug( + `Current heap size ${currentMaxOldSpaceSizeMb.toFixed(2)} MB`, + ); + } + + if (process.env['GEMINI_CLI_NO_RELAUNCH']) { + return []; + } + + if (targetMaxOldSpaceSizeInMB > currentMaxOldSpaceSizeMb) { + if (config.getDebugMode()) { + console.debug( + `Need to relaunch with more memory: ${targetMaxOldSpaceSizeInMB.toFixed(2)} MB`, + ); + } + return [`--max-old-space-size=${targetMaxOldSpaceSizeInMB}`]; + } + + return []; +} + +async function relaunchWithAdditionalArgs(additionalArgs: string[]) { + const nodeArgs = [...additionalArgs, ...process.argv.slice(1)]; + const newEnv = { ...process.env, GEMINI_CLI_NO_RELAUNCH: 'true' }; + + const child = spawn(process.execPath, nodeArgs, { + stdio: 'inherit', + env: newEnv, + }); + + await new Promise((resolve) => child.on('close', resolve)); + process.exit(0); +} + +const InitializingComponent = ({ initialTotal }: { initialTotal: number }) => { + const [total, setTotal] = useState(initialTotal); + const [connected, setConnected] = useState(0); + + useEffect(() => { + const onStart = ({ count }: { count: number }) => setTotal(count); + const onChange = () => { + setConnected((val) => val + 1); + }; + + appEvents.on('mcp-servers-discovery-start', onStart); + appEvents.on('mcp-server-connected', onChange); + appEvents.on('mcp-server-error', onChange); + + return () => { + appEvents.off('mcp-servers-discovery-start', onStart); + appEvents.off('mcp-server-connected', onChange); + appEvents.off('mcp-server-error', onChange); + }; + }, []); + + const message = 
`Connecting to MCP servers... (${connected}/${total})`; + + return ( + + + {message} + + + ); +}; + +import { runZedIntegration } from './zed-integration/zedIntegration.js'; + +export function setupUnhandledRejectionHandler() { + let unhandledRejectionOccurred = false; + process.on('unhandledRejection', (reason, _promise) => { + const errorMessage = `========================================= +This is an unexpected error. Please file a bug report using the /bug tool. +CRITICAL: Unhandled Promise Rejection! +========================================= +Reason: ${reason}${ + reason instanceof Error && reason.stack + ? ` +Stack trace: +${reason.stack}` + : '' + }`; + appEvents.emit(AppEvent.LogError, errorMessage); + if (!unhandledRejectionOccurred) { + unhandledRejectionOccurred = true; + appEvents.emit(AppEvent.OpenDebugConsole); + } + }); +} + +export async function startInteractiveUI( + config: Config, + settings: LoadedSettings, + startupWarnings: string[], + workspaceRoot: string, +) { + const version = await getCliVersion(); + // Detect and enable Kitty keyboard protocol once at startup + await detectAndEnableKittyProtocol(); + setWindowTitle(basename(workspaceRoot), settings); + const instance = render( + + + + + , + { exitOnCtrlC: false, isScreenReaderEnabled: config.getScreenReader() }, + ); + + checkForUpdates() + .then((info) => { + handleAutoUpdate(info, settings, config.getProjectRoot()); + }) + .catch((err) => { + // Silently ignore update check errors. 
+ if (config.getDebugMode()) { + console.error('Update check failed:', err); + } + }); + + registerCleanup(() => instance.unmount()); +} + +export async function main() { + setupUnhandledRejectionHandler(); + const workspaceRoot = process.cwd(); + const settings = loadSettings(workspaceRoot); + + await cleanupCheckpoints(); + if (settings.errors.length > 0) { + const errorMessages = settings.errors.map( + (error) => `Error in ${error.path}: ${error.message}`, + ); + throw new FatalConfigError( + `${errorMessages.join('\n')}\nPlease fix the configuration file(s) and try again.`, + ); + } + + const argv = await parseArguments(settings.merged); + const extensions = loadExtensions(workspaceRoot); + const config = await loadCliConfig( + settings.merged, + extensions, + sessionId, + argv, + ); + + if (argv.sessionSummary) { + registerCleanup(() => { + const metrics = uiTelemetryService.getMetrics(); + writeFileSync( + argv.sessionSummary!, + JSON.stringify({ sessionMetrics: metrics }, null, 2), + ); + }); + } + + const consolePatcher = new ConsolePatcher({ + stderr: true, + debugMode: config.getDebugMode(), + }); + consolePatcher.patch(); + registerCleanup(consolePatcher.cleanup); + + dns.setDefaultResultOrder( + validateDnsResolutionOrder(settings.merged.advanced?.dnsResolutionOrder), + ); + + if (argv.promptInteractive && !process.stdin.isTTY) { + console.error( + 'Error: The --prompt-interactive flag is not supported when piping input from stdin.', + ); + process.exit(1); + } + + if (config.getListExtensions()) { + console.log('Installed extensions:'); + for (const extension of extensions) { + console.log(`- ${extension.config.name}`); + } + process.exit(0); + } + + // Set a default auth type if one isn't set. 
+ if (!settings.merged.security?.auth?.selectedType) { + if (process.env['CLOUD_SHELL'] === 'true') { + settings.setValue( + SettingScope.User, + 'selectedAuthType', + AuthType.CLOUD_SHELL, + ); + } + } + + setMaxSizedBoxDebugging(config.getDebugMode()); + + const mcpServers = config.getMcpServers(); + const mcpServersCount = mcpServers ? Object.keys(mcpServers).length : 0; + + let spinnerInstance; + if (config.isInteractive() && mcpServersCount > 0) { + spinnerInstance = render( + , + ); + } + + await config.initialize(); + + if (spinnerInstance) { + // Small UX detail to show the completion message for a bit before unmounting. + await new Promise((f) => setTimeout(f, 100)); + spinnerInstance.clear(); + spinnerInstance.unmount(); + } + + if (config.getIdeMode()) { + await config.getIdeClient().connect(); + logIdeConnection(config, new IdeConnectionEvent(IdeConnectionType.START)); + } + + // Load custom themes from settings + themeManager.loadCustomThemes(settings.merged.ui?.customThemes); + + if (settings.merged.ui?.theme) { + if (!themeManager.setActiveTheme(settings.merged.ui?.theme)) { + // If the theme is not found during initial load, log a warning and continue. + // The useThemeCommand hook in App.tsx will handle opening the dialog. + console.warn(`Warning: Theme "${settings.merged.ui?.theme}" not found.`); + } + } + + // hop into sandbox if we are outside and sandboxing is enabled + if (!process.env['SANDBOX']) { + const memoryArgs = settings.merged.advanced?.autoConfigureMemory + ? getNodeMemoryArgs(config) + : []; + const sandboxConfig = config.getSandbox(); + if (sandboxConfig) { + if ( + settings.merged.security?.auth?.selectedType && + !settings.merged.security?.auth?.useExternal + ) { + // Validate authentication here because the sandbox will interfere with the Oauth2 web redirect. 
+ try { + const err = validateAuthMethod( + settings.merged.security.auth.selectedType, + ); + if (err) { + throw new Error(err); + } + await config.refreshAuth(settings.merged.security.auth.selectedType); + } catch (err) { + console.error('Error authenticating:', err); + process.exit(1); + } + } + let stdinData = ''; + if (!process.stdin.isTTY) { + stdinData = await readStdin(); + } + + // This function is a copy of the one from sandbox.ts + // It is moved here to decouple sandbox.ts from the CLI's argument structure. + const injectStdinIntoArgs = ( + args: string[], + stdinData?: string, + ): string[] => { + const finalArgs = [...args]; + if (stdinData) { + const promptIndex = finalArgs.findIndex( + (arg) => arg === '--prompt' || arg === '-p', + ); + if (promptIndex > -1 && finalArgs.length > promptIndex + 1) { + // If there's a prompt argument, prepend stdin to it + finalArgs[promptIndex + 1] = + `${stdinData}\n\n${finalArgs[promptIndex + 1]}`; + } else { + // If there's no prompt argument, add stdin as the prompt + finalArgs.push('--prompt', stdinData); + } + } + return finalArgs; + }; + + const sandboxArgs = injectStdinIntoArgs(process.argv, stdinData); + + await start_sandbox(sandboxConfig, memoryArgs, config, sandboxArgs); + process.exit(0); + } else { + // Not in a sandbox and not entering one, so relaunch with additional + // arguments to control memory usage if needed. + if (memoryArgs.length > 0) { + await relaunchWithAdditionalArgs(memoryArgs); + process.exit(0); + } + } + } + + if ( + settings.merged.security?.auth?.selectedType === + AuthType.LOGIN_WITH_GOOGLE && + config.isBrowserLaunchSuppressed() + ) { + // Do oauth before app renders to make copying the link possible. 
+ await getOauthClient(settings.merged.security.auth.selectedType, config); + } + + if (config.getExperimentalZedIntegration()) { + return runZedIntegration(config, settings, extensions, argv); + } + + let input = config.getQuestion(); + const startupWarnings = [ + ...(await getStartupWarnings()), + ...(await getUserStartupWarnings(workspaceRoot)), + ]; + + // Render UI, passing necessary config values. Check that there is no command line question. + if (config.isInteractive()) { + await startInteractiveUI(config, settings, startupWarnings, workspaceRoot); + return; + } + // If not a TTY, read from stdin + // This is for cases where the user pipes input directly into the command + if (!process.stdin.isTTY) { + const stdinData = await readStdin(); + if (stdinData) { + input = `${stdinData}\n\n${input}`; + } + } + if (!input) { + console.error( + `No input provided via stdin. Input can be provided by piping data into gemini or using the --prompt option.`, + ); + process.exit(1); + } + + const prompt_id = Math.random().toString(16).slice(2); + logUserPrompt(config, { + 'event.name': 'user_prompt', + 'event.timestamp': new Date().toISOString(), + prompt: input, + prompt_id, + auth_type: config.getContentGeneratorConfig()?.authType, + prompt_length: input.length, + }); + + const nonInteractiveConfig = await validateNonInteractiveAuth( + settings.merged.security?.auth?.selectedType, + settings.merged.security?.auth?.useExternal, + config, + ); + + if (config.getDebugMode()) { + console.log('Session ID: %s', sessionId); + } + + await runNonInteractive(nonInteractiveConfig, input, prompt_id); + // Call cleanup before process.exit, which causes cleanup to not run + await runExitCleanup(); + process.exit(0); +} + +function setWindowTitle(title: string, settings: LoadedSettings) { + if (!settings.merged.ui?.hideWindowTitle) { + const windowTitle = ( + process.env['CLI_TITLE'] || `Gemini - ${title}` + ).replace( + // eslint-disable-next-line no-control-regex + 
/[\x00-\x1F\x7F]/g, + '', + ); + process.stdout.write(`\x1b]2;${windowTitle}\x07`); + + process.on('exit', () => { + process.stdout.write(`\x1b]2;\x07`); + }); + } +} diff --git a/projects/gemini-cli/packages/cli/src/nonInteractiveCli.test.ts b/projects/gemini-cli/packages/cli/src/nonInteractiveCli.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..7de83930829fd465727101a86453beaae8fa5e2d --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/nonInteractiveCli.test.ts @@ -0,0 +1,325 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { + Config, + ToolRegistry, + ServerGeminiStreamEvent, +} from '@google/gemini-cli-core'; +import { + executeToolCall, + ToolErrorType, + shutdownTelemetry, + GeminiEventType, +} from '@google/gemini-cli-core'; +import type { Part } from '@google/genai'; +import { runNonInteractive } from './nonInteractiveCli.js'; +import { vi } from 'vitest'; + +// Mock core modules +vi.mock('./ui/hooks/atCommandProcessor.js'); +vi.mock('@google/gemini-cli-core', async (importOriginal) => { + const original = + await importOriginal(); + return { + ...original, + executeToolCall: vi.fn(), + shutdownTelemetry: vi.fn(), + isTelemetrySdkInitialized: vi.fn().mockReturnValue(true), + }; +}); + +describe('runNonInteractive', () => { + let mockConfig: Config; + let mockToolRegistry: ToolRegistry; + let mockCoreExecuteToolCall: vi.Mock; + let mockShutdownTelemetry: vi.Mock; + let consoleErrorSpy: vi.SpyInstance; + let processStdoutSpy: vi.SpyInstance; + let mockGeminiClient: { + sendMessageStream: vi.Mock; + }; + + beforeEach(async () => { + mockCoreExecuteToolCall = vi.mocked(executeToolCall); + mockShutdownTelemetry = vi.mocked(shutdownTelemetry); + + consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + processStdoutSpy = vi + .spyOn(process.stdout, 'write') + .mockImplementation(() => true); + + mockToolRegistry = { + getTool: vi.fn(), + 
getFunctionDeclarations: vi.fn().mockReturnValue([]), + } as unknown as ToolRegistry; + + mockGeminiClient = { + sendMessageStream: vi.fn(), + }; + + mockConfig = { + initialize: vi.fn().mockResolvedValue(undefined), + getGeminiClient: vi.fn().mockReturnValue(mockGeminiClient), + getToolRegistry: vi.fn().mockReturnValue(mockToolRegistry), + getMaxSessionTurns: vi.fn().mockReturnValue(10), + getIdeMode: vi.fn().mockReturnValue(false), + getFullContext: vi.fn().mockReturnValue(false), + getContentGeneratorConfig: vi.fn().mockReturnValue({}), + getDebugMode: vi.fn().mockReturnValue(false), + } as unknown as Config; + + const { handleAtCommand } = await import( + './ui/hooks/atCommandProcessor.js' + ); + vi.mocked(handleAtCommand).mockImplementation(async ({ query }) => ({ + processedQuery: [{ text: query }], + shouldProceed: true, + })); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + async function* createStreamFromEvents( + events: ServerGeminiStreamEvent[], + ): AsyncGenerator { + for (const event of events) { + yield event; + } + } + + it('should process input and write text output', async () => { + const events: ServerGeminiStreamEvent[] = [ + { type: GeminiEventType.Content, value: 'Hello' }, + { type: GeminiEventType.Content, value: ' World' }, + ]; + mockGeminiClient.sendMessageStream.mockReturnValue( + createStreamFromEvents(events), + ); + + await runNonInteractive(mockConfig, 'Test input', 'prompt-id-1'); + + expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith( + [{ text: 'Test input' }], + expect.any(AbortSignal), + 'prompt-id-1', + ); + expect(processStdoutSpy).toHaveBeenCalledWith('Hello'); + expect(processStdoutSpy).toHaveBeenCalledWith(' World'); + expect(processStdoutSpy).toHaveBeenCalledWith('\n'); + expect(mockShutdownTelemetry).toHaveBeenCalled(); + }); + + it('should handle a single tool call and respond', async () => { + const toolCallEvent: ServerGeminiStreamEvent = { + type: GeminiEventType.ToolCallRequest, + value: { 
+ callId: 'tool-1', + name: 'testTool', + args: { arg1: 'value1' }, + isClientInitiated: false, + prompt_id: 'prompt-id-2', + }, + }; + const toolResponse: Part[] = [{ text: 'Tool response' }]; + mockCoreExecuteToolCall.mockResolvedValue({ responseParts: toolResponse }); + + const firstCallEvents: ServerGeminiStreamEvent[] = [toolCallEvent]; + const secondCallEvents: ServerGeminiStreamEvent[] = [ + { type: GeminiEventType.Content, value: 'Final answer' }, + ]; + + mockGeminiClient.sendMessageStream + .mockReturnValueOnce(createStreamFromEvents(firstCallEvents)) + .mockReturnValueOnce(createStreamFromEvents(secondCallEvents)); + + await runNonInteractive(mockConfig, 'Use a tool', 'prompt-id-2'); + + expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2); + expect(mockCoreExecuteToolCall).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ name: 'testTool' }), + expect.any(AbortSignal), + ); + expect(mockGeminiClient.sendMessageStream).toHaveBeenNthCalledWith( + 2, + [{ text: 'Tool response' }], + expect.any(AbortSignal), + 'prompt-id-2', + ); + expect(processStdoutSpy).toHaveBeenCalledWith('Final answer'); + expect(processStdoutSpy).toHaveBeenCalledWith('\n'); + }); + + it('should handle error during tool execution and should send error back to the model', async () => { + const toolCallEvent: ServerGeminiStreamEvent = { + type: GeminiEventType.ToolCallRequest, + value: { + callId: 'tool-1', + name: 'errorTool', + args: {}, + isClientInitiated: false, + prompt_id: 'prompt-id-3', + }, + }; + mockCoreExecuteToolCall.mockResolvedValue({ + error: new Error('Execution failed'), + errorType: ToolErrorType.EXECUTION_FAILED, + responseParts: [ + { + functionResponse: { + name: 'errorTool', + response: { + output: 'Error: Execution failed', + }, + }, + }, + ], + resultDisplay: 'Execution failed', + }); + const finalResponse: ServerGeminiStreamEvent[] = [ + { + type: GeminiEventType.Content, + value: 'Sorry, let me try again.', + }, + ]; + 
mockGeminiClient.sendMessageStream + .mockReturnValueOnce(createStreamFromEvents([toolCallEvent])) + .mockReturnValueOnce(createStreamFromEvents(finalResponse)); + + await runNonInteractive(mockConfig, 'Trigger tool error', 'prompt-id-3'); + + expect(mockCoreExecuteToolCall).toHaveBeenCalled(); + expect(consoleErrorSpy).toHaveBeenCalledWith( + 'Error executing tool errorTool: Execution failed', + ); + expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2); + expect(mockGeminiClient.sendMessageStream).toHaveBeenNthCalledWith( + 2, + [ + { + functionResponse: { + name: 'errorTool', + response: { + output: 'Error: Execution failed', + }, + }, + }, + ], + expect.any(AbortSignal), + 'prompt-id-3', + ); + expect(processStdoutSpy).toHaveBeenCalledWith('Sorry, let me try again.'); + }); + + it('should exit with error if sendMessageStream throws initially', async () => { + const apiError = new Error('API connection failed'); + mockGeminiClient.sendMessageStream.mockImplementation(() => { + throw apiError; + }); + + await expect( + runNonInteractive(mockConfig, 'Initial fail', 'prompt-id-4'), + ).rejects.toThrow(apiError); + }); + + it('should not exit if a tool is not found, and should send error back to model', async () => { + const toolCallEvent: ServerGeminiStreamEvent = { + type: GeminiEventType.ToolCallRequest, + value: { + callId: 'tool-1', + name: 'nonexistentTool', + args: {}, + isClientInitiated: false, + prompt_id: 'prompt-id-5', + }, + }; + mockCoreExecuteToolCall.mockResolvedValue({ + error: new Error('Tool "nonexistentTool" not found in registry.'), + resultDisplay: 'Tool "nonexistentTool" not found in registry.', + }); + const finalResponse: ServerGeminiStreamEvent[] = [ + { + type: GeminiEventType.Content, + value: "Sorry, I can't find that tool.", + }, + ]; + + mockGeminiClient.sendMessageStream + .mockReturnValueOnce(createStreamFromEvents([toolCallEvent])) + .mockReturnValueOnce(createStreamFromEvents(finalResponse)); + + await 
runNonInteractive( + mockConfig, + 'Trigger tool not found', + 'prompt-id-5', + ); + + expect(mockCoreExecuteToolCall).toHaveBeenCalled(); + expect(consoleErrorSpy).toHaveBeenCalledWith( + 'Error executing tool nonexistentTool: Tool "nonexistentTool" not found in registry.', + ); + expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2); + expect(processStdoutSpy).toHaveBeenCalledWith( + "Sorry, I can't find that tool.", + ); + }); + + it('should exit when max session turns are exceeded', async () => { + vi.mocked(mockConfig.getMaxSessionTurns).mockReturnValue(0); + await expect( + runNonInteractive(mockConfig, 'Trigger loop', 'prompt-id-6'), + ).rejects.toThrow( + 'Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.', + ); + }); + + it('should preprocess @include commands before sending to the model', async () => { + // 1. Mock the imported atCommandProcessor + const { handleAtCommand } = await import( + './ui/hooks/atCommandProcessor.js' + ); + const mockHandleAtCommand = vi.mocked(handleAtCommand); + + // 2. Define the raw input and the expected processed output + const rawInput = 'Summarize @file.txt'; + const processedParts: Part[] = [ + { text: 'Summarize @file.txt' }, + { text: '\n--- Content from referenced files ---\n' }, + { text: 'This is the content of the file.' }, + { text: '\n--- End of content ---' }, + ]; + + // 3. Setup the mock to return the processed parts + mockHandleAtCommand.mockResolvedValue({ + processedQuery: processedParts, + shouldProceed: true, + }); + + // Mock a simple stream response from the Gemini client + const events: ServerGeminiStreamEvent[] = [ + { type: GeminiEventType.Content, value: 'Summary complete.' }, + ]; + mockGeminiClient.sendMessageStream.mockReturnValue( + createStreamFromEvents(events), + ); + + // 4. Run the non-interactive mode with the raw input + await runNonInteractive(mockConfig, rawInput, 'prompt-id-7'); + + // 5. 
Assert that sendMessageStream was called with the PROCESSED parts, not the raw input + expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith( + processedParts, + expect.any(AbortSignal), + 'prompt-id-7', + ); + + // 6. Assert the final output is correct + expect(processStdoutSpy).toHaveBeenCalledWith('Summary complete.'); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/nonInteractiveCli.ts b/projects/gemini-cli/packages/cli/src/nonInteractiveCli.ts new file mode 100644 index 0000000000000000000000000000000000000000..73e8ae2371139e4753dca925e33487a76e501a56 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/nonInteractiveCli.ts @@ -0,0 +1,138 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { Config, ToolCallRequestInfo } from '@google/gemini-cli-core'; +import { + executeToolCall, + shutdownTelemetry, + isTelemetrySdkInitialized, + GeminiEventType, + parseAndFormatApiError, + FatalInputError, + FatalTurnLimitedError, +} from '@google/gemini-cli-core'; +import type { Content, Part } from '@google/genai'; + +import { ConsolePatcher } from './ui/utils/ConsolePatcher.js'; +import { handleAtCommand } from './ui/hooks/atCommandProcessor.js'; + +export async function runNonInteractive( + config: Config, + input: string, + prompt_id: string, +): Promise { + const consolePatcher = new ConsolePatcher({ + stderr: true, + debugMode: config.getDebugMode(), + }); + + try { + consolePatcher.patch(); + // Handle EPIPE errors when the output is piped to a command that closes early. + process.stdout.on('error', (err: NodeJS.ErrnoException) => { + if (err.code === 'EPIPE') { + // Exit gracefully if the pipe is closed. 
+ process.exit(0); + } + }); + + const geminiClient = config.getGeminiClient(); + + const abortController = new AbortController(); + + const { processedQuery, shouldProceed } = await handleAtCommand({ + query: input, + config, + addItem: (_item, _timestamp) => 0, + onDebugMessage: () => {}, + messageId: Date.now(), + signal: abortController.signal, + }); + + if (!shouldProceed || !processedQuery) { + // An error occurred during @include processing (e.g., file not found). + // The error message is already logged by handleAtCommand. + throw new FatalInputError( + 'Exiting due to an error processing the @ command.', + ); + } + + let currentMessages: Content[] = [ + { role: 'user', parts: processedQuery as Part[] }, + ]; + + let turnCount = 0; + while (true) { + turnCount++; + if ( + config.getMaxSessionTurns() >= 0 && + turnCount > config.getMaxSessionTurns() + ) { + throw new FatalTurnLimitedError( + 'Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.', + ); + } + const toolCallRequests: ToolCallRequestInfo[] = []; + + const responseStream = geminiClient.sendMessageStream( + currentMessages[0]?.parts || [], + abortController.signal, + prompt_id, + ); + + for await (const event of responseStream) { + if (abortController.signal.aborted) { + console.error('Operation cancelled.'); + return; + } + + if (event.type === GeminiEventType.Content) { + process.stdout.write(event.value); + } else if (event.type === GeminiEventType.ToolCallRequest) { + toolCallRequests.push(event.value); + } + } + + if (toolCallRequests.length > 0) { + const toolResponseParts: Part[] = []; + for (const requestInfo of toolCallRequests) { + const toolResponse = await executeToolCall( + config, + requestInfo, + abortController.signal, + ); + + if (toolResponse.error) { + console.error( + `Error executing tool ${requestInfo.name}: ${toolResponse.resultDisplay || toolResponse.error.message}`, + ); + } + + if 
(toolResponse.responseParts) { + toolResponseParts.push(...toolResponse.responseParts); + } + } + currentMessages = [{ role: 'user', parts: toolResponseParts }]; + } else { + process.stdout.write('\n'); // Ensure a final newline + return; + } + } + } catch (error) { + console.error( + parseAndFormatApiError( + error, + config.getContentGeneratorConfig()?.authType, + ), + ); + throw error; + } finally { + consolePatcher.cleanup(); + if (isTelemetrySdkInitialized()) { + await shutdownTelemetry(config); + } + } +} diff --git a/projects/gemini-cli/packages/cli/src/patches/is-in-ci.ts b/projects/gemini-cli/packages/cli/src/patches/is-in-ci.ts new file mode 100644 index 0000000000000000000000000000000000000000..a37c8678a6e667f5a0c031da7f18ec8017341782 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/patches/is-in-ci.ts @@ -0,0 +1,17 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// This is a replacement for the `is-in-ci` package that always returns false. +// We are doing this to avoid the issue where `ink` does not render the UI +// when it detects that it is running in a CI environment. +// This is safe because `ink` (and thus `is-in-ci`) is only used in the +// interactive code path of the CLI. +// See issue #1563 for more details. 
+ +const isInCi = false; + +// eslint-disable-next-line import/no-default-export +export default isInCi; diff --git a/projects/gemini-cli/packages/cli/src/services/BuiltinCommandLoader.test.ts b/projects/gemini-cli/packages/cli/src/services/BuiltinCommandLoader.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..0ae31e10aee10667e7c12ca42cf6126deeb1e541 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/BuiltinCommandLoader.test.ts @@ -0,0 +1,127 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +vi.mock('../ui/commands/aboutCommand.js', async () => { + const { CommandKind } = await import('../ui/commands/types.js'); + return { + aboutCommand: { + name: 'about', + description: 'About the CLI', + kind: CommandKind.BUILT_IN, + }, + }; +}); + +vi.mock('../ui/commands/ideCommand.js', () => ({ ideCommand: vi.fn() })); +vi.mock('../ui/commands/restoreCommand.js', () => ({ + restoreCommand: vi.fn(), +})); + +import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest'; +import { BuiltinCommandLoader } from './BuiltinCommandLoader.js'; +import type { Config } from '@google/gemini-cli-core'; +import { CommandKind } from '../ui/commands/types.js'; + +import { ideCommand } from '../ui/commands/ideCommand.js'; +import { restoreCommand } from '../ui/commands/restoreCommand.js'; + +vi.mock('../ui/commands/authCommand.js', () => ({ authCommand: {} })); +vi.mock('../ui/commands/bugCommand.js', () => ({ bugCommand: {} })); +vi.mock('../ui/commands/chatCommand.js', () => ({ chatCommand: {} })); +vi.mock('../ui/commands/clearCommand.js', () => ({ clearCommand: {} })); +vi.mock('../ui/commands/compressCommand.js', () => ({ compressCommand: {} })); +vi.mock('../ui/commands/corgiCommand.js', () => ({ corgiCommand: {} })); +vi.mock('../ui/commands/docsCommand.js', () => ({ docsCommand: {} })); +vi.mock('../ui/commands/editorCommand.js', () => ({ editorCommand: {} })); 
+vi.mock('../ui/commands/extensionsCommand.js', () => ({ + extensionsCommand: {}, +})); +vi.mock('../ui/commands/helpCommand.js', () => ({ helpCommand: {} })); +vi.mock('../ui/commands/memoryCommand.js', () => ({ memoryCommand: {} })); +vi.mock('../ui/commands/privacyCommand.js', () => ({ privacyCommand: {} })); +vi.mock('../ui/commands/quitCommand.js', () => ({ quitCommand: {} })); +vi.mock('../ui/commands/statsCommand.js', () => ({ statsCommand: {} })); +vi.mock('../ui/commands/themeCommand.js', () => ({ themeCommand: {} })); +vi.mock('../ui/commands/toolsCommand.js', () => ({ toolsCommand: {} })); +vi.mock('../ui/commands/mcpCommand.js', () => ({ + mcpCommand: { + name: 'mcp', + description: 'MCP command', + kind: 'BUILT_IN', + }, +})); + +describe('BuiltinCommandLoader', () => { + let mockConfig: Config; + + const ideCommandMock = ideCommand as Mock; + const restoreCommandMock = restoreCommand as Mock; + + beforeEach(() => { + vi.clearAllMocks(); + mockConfig = { some: 'config' } as unknown as Config; + + ideCommandMock.mockReturnValue({ + name: 'ide', + description: 'IDE command', + kind: CommandKind.BUILT_IN, + }); + restoreCommandMock.mockReturnValue({ + name: 'restore', + description: 'Restore command', + kind: CommandKind.BUILT_IN, + }); + }); + + it('should correctly pass the config object to command factory functions', async () => { + const loader = new BuiltinCommandLoader(mockConfig); + await loader.loadCommands(new AbortController().signal); + + expect(ideCommandMock).toHaveBeenCalledTimes(1); + expect(ideCommandMock).toHaveBeenCalledWith(mockConfig); + expect(restoreCommandMock).toHaveBeenCalledTimes(1); + expect(restoreCommandMock).toHaveBeenCalledWith(mockConfig); + }); + + it('should filter out null command definitions returned by factories', async () => { + // Override the mock's behavior for this specific test. 
+ ideCommandMock.mockReturnValue(null); + const loader = new BuiltinCommandLoader(mockConfig); + const commands = await loader.loadCommands(new AbortController().signal); + + // The 'ide' command should be filtered out. + const ideCmd = commands.find((c) => c.name === 'ide'); + expect(ideCmd).toBeUndefined(); + + // Other commands should still be present. + const aboutCmd = commands.find((c) => c.name === 'about'); + expect(aboutCmd).toBeDefined(); + }); + + it('should handle a null config gracefully when calling factories', async () => { + const loader = new BuiltinCommandLoader(null); + await loader.loadCommands(new AbortController().signal); + expect(ideCommandMock).toHaveBeenCalledTimes(1); + expect(ideCommandMock).toHaveBeenCalledWith(null); + expect(restoreCommandMock).toHaveBeenCalledTimes(1); + expect(restoreCommandMock).toHaveBeenCalledWith(null); + }); + + it('should return a list of all loaded commands', async () => { + const loader = new BuiltinCommandLoader(mockConfig); + const commands = await loader.loadCommands(new AbortController().signal); + + const aboutCmd = commands.find((c) => c.name === 'about'); + expect(aboutCmd).toBeDefined(); + expect(aboutCmd?.kind).toBe(CommandKind.BUILT_IN); + + const ideCmd = commands.find((c) => c.name === 'ide'); + expect(ideCmd).toBeDefined(); + + const mcpCmd = commands.find((c) => c.name === 'mcp'); + expect(mcpCmd).toBeDefined(); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/services/BuiltinCommandLoader.ts b/projects/gemini-cli/packages/cli/src/services/BuiltinCommandLoader.ts new file mode 100644 index 0000000000000000000000000000000000000000..9dafcd205bc67ad23c87c2c19c09da3e2f98a9b0 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/BuiltinCommandLoader.ts @@ -0,0 +1,85 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { ICommandLoader } from './types.js'; +import type { SlashCommand } from '../ui/commands/types.js'; 
+import type { Config } from '@google/gemini-cli-core'; +import { aboutCommand } from '../ui/commands/aboutCommand.js'; +import { authCommand } from '../ui/commands/authCommand.js'; +import { bugCommand } from '../ui/commands/bugCommand.js'; +import { chatCommand } from '../ui/commands/chatCommand.js'; +import { clearCommand } from '../ui/commands/clearCommand.js'; +import { compressCommand } from '../ui/commands/compressCommand.js'; +import { copyCommand } from '../ui/commands/copyCommand.js'; +import { corgiCommand } from '../ui/commands/corgiCommand.js'; +import { docsCommand } from '../ui/commands/docsCommand.js'; +import { directoryCommand } from '../ui/commands/directoryCommand.js'; +import { editorCommand } from '../ui/commands/editorCommand.js'; +import { extensionsCommand } from '../ui/commands/extensionsCommand.js'; +import { helpCommand } from '../ui/commands/helpCommand.js'; +import { ideCommand } from '../ui/commands/ideCommand.js'; +import { initCommand } from '../ui/commands/initCommand.js'; +import { mcpCommand } from '../ui/commands/mcpCommand.js'; +import { memoryCommand } from '../ui/commands/memoryCommand.js'; +import { privacyCommand } from '../ui/commands/privacyCommand.js'; +import { quitCommand } from '../ui/commands/quitCommand.js'; +import { restoreCommand } from '../ui/commands/restoreCommand.js'; +import { statsCommand } from '../ui/commands/statsCommand.js'; +import { themeCommand } from '../ui/commands/themeCommand.js'; +import { toolsCommand } from '../ui/commands/toolsCommand.js'; +import { settingsCommand } from '../ui/commands/settingsCommand.js'; +import { vimCommand } from '../ui/commands/vimCommand.js'; +import { setupGithubCommand } from '../ui/commands/setupGithubCommand.js'; +import { terminalSetupCommand } from '../ui/commands/terminalSetupCommand.js'; + +/** + * Loads the core, hard-coded slash commands that are an integral part + * of the Gemini CLI application. 
+ */ +export class BuiltinCommandLoader implements ICommandLoader { + constructor(private config: Config | null) {} + + /** + * Gathers all raw built-in command definitions, injects dependencies where + * needed (e.g., config) and filters out any that are not available. + * + * @param _signal An AbortSignal (unused for this synchronous loader). + * @returns A promise that resolves to an array of `SlashCommand` objects. + */ + async loadCommands(_signal: AbortSignal): Promise { + const allDefinitions: Array = [ + aboutCommand, + authCommand, + bugCommand, + chatCommand, + clearCommand, + compressCommand, + copyCommand, + corgiCommand, + docsCommand, + directoryCommand, + editorCommand, + extensionsCommand, + helpCommand, + ideCommand(this.config), + initCommand, + mcpCommand, + memoryCommand, + privacyCommand, + quitCommand, + restoreCommand(this.config), + statsCommand, + themeCommand, + toolsCommand, + settingsCommand, + vimCommand, + setupGithubCommand, + terminalSetupCommand, + ]; + + return allDefinitions.filter((cmd): cmd is SlashCommand => cmd !== null); + } +} diff --git a/projects/gemini-cli/packages/cli/src/services/CommandService.test.ts b/projects/gemini-cli/packages/cli/src/services/CommandService.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..e2d5b9f585d6132a2a19a0b7478fb799a4a01112 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/CommandService.test.ts @@ -0,0 +1,352 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { CommandService } from './CommandService.js'; +import { type ICommandLoader } from './types.js'; +import { CommandKind, type SlashCommand } from '../ui/commands/types.js'; + +const createMockCommand = (name: string, kind: CommandKind): SlashCommand => ({ + name, + description: `Description for ${name}`, + kind, + action: vi.fn(), +}); + +const mockCommandA = 
createMockCommand('command-a', CommandKind.BUILT_IN); +const mockCommandB = createMockCommand('command-b', CommandKind.BUILT_IN); +const mockCommandC = createMockCommand('command-c', CommandKind.FILE); +const mockCommandB_Override = createMockCommand('command-b', CommandKind.FILE); + +class MockCommandLoader implements ICommandLoader { + private commandsToLoad: SlashCommand[]; + + constructor(commandsToLoad: SlashCommand[]) { + this.commandsToLoad = commandsToLoad; + } + + loadCommands = vi.fn( + async (): Promise => Promise.resolve(this.commandsToLoad), + ); +} + +describe('CommandService', () => { + beforeEach(() => { + vi.spyOn(console, 'debug').mockImplementation(() => {}); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should load commands from a single loader', async () => { + const mockLoader = new MockCommandLoader([mockCommandA, mockCommandB]); + const service = await CommandService.create( + [mockLoader], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + expect(mockLoader.loadCommands).toHaveBeenCalledTimes(1); + expect(commands).toHaveLength(2); + expect(commands).toEqual( + expect.arrayContaining([mockCommandA, mockCommandB]), + ); + }); + + it('should aggregate commands from multiple loaders', async () => { + const loader1 = new MockCommandLoader([mockCommandA]); + const loader2 = new MockCommandLoader([mockCommandC]); + const service = await CommandService.create( + [loader1, loader2], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + expect(loader1.loadCommands).toHaveBeenCalledTimes(1); + expect(loader2.loadCommands).toHaveBeenCalledTimes(1); + expect(commands).toHaveLength(2); + expect(commands).toEqual( + expect.arrayContaining([mockCommandA, mockCommandC]), + ); + }); + + it('should override commands from earlier loaders with those from later loaders', async () => { + const loader1 = new MockCommandLoader([mockCommandA, mockCommandB]); + const loader2 = new 
MockCommandLoader([ + mockCommandB_Override, + mockCommandC, + ]); + const service = await CommandService.create( + [loader1, loader2], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + expect(commands).toHaveLength(3); // Should be A, C, and the overridden B. + + // The final list should contain the override from the *last* loader. + const commandB = commands.find((cmd) => cmd.name === 'command-b'); + expect(commandB).toBeDefined(); + expect(commandB?.kind).toBe(CommandKind.FILE); // Verify it's the overridden version. + expect(commandB).toEqual(mockCommandB_Override); + + // Ensure the other commands are still present. + expect(commands).toEqual( + expect.arrayContaining([ + mockCommandA, + mockCommandC, + mockCommandB_Override, + ]), + ); + }); + + it('should handle loaders that return an empty array of commands gracefully', async () => { + const loader1 = new MockCommandLoader([mockCommandA]); + const emptyLoader = new MockCommandLoader([]); + const loader3 = new MockCommandLoader([mockCommandB]); + const service = await CommandService.create( + [loader1, emptyLoader, loader3], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + expect(emptyLoader.loadCommands).toHaveBeenCalledTimes(1); + expect(commands).toHaveLength(2); + expect(commands).toEqual( + expect.arrayContaining([mockCommandA, mockCommandB]), + ); + }); + + it('should load commands from successful loaders even if one fails', async () => { + const successfulLoader = new MockCommandLoader([mockCommandA]); + const failingLoader = new MockCommandLoader([]); + const error = new Error('Loader failed'); + vi.spyOn(failingLoader, 'loadCommands').mockRejectedValue(error); + + const service = await CommandService.create( + [successfulLoader, failingLoader], + new AbortController().signal, + ); + + const commands = service.getCommands(); + expect(commands).toHaveLength(1); + expect(commands).toEqual([mockCommandA]); + 
expect(console.debug).toHaveBeenCalledWith( + 'A command loader failed:', + error, + ); + }); + + it('getCommands should return a readonly array that cannot be mutated', async () => { + const service = await CommandService.create( + [new MockCommandLoader([mockCommandA])], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + // Expect it to throw a TypeError at runtime because the array is frozen. + expect(() => { + // @ts-expect-error - Testing immutability is intentional here. + commands.push(mockCommandB); + }).toThrow(); + + // Verify the original array was not mutated. + expect(service.getCommands()).toHaveLength(1); + }); + + it('should pass the abort signal to all loaders', async () => { + const controller = new AbortController(); + const signal = controller.signal; + + const loader1 = new MockCommandLoader([mockCommandA]); + const loader2 = new MockCommandLoader([mockCommandB]); + + await CommandService.create([loader1, loader2], signal); + + expect(loader1.loadCommands).toHaveBeenCalledTimes(1); + expect(loader1.loadCommands).toHaveBeenCalledWith(signal); + expect(loader2.loadCommands).toHaveBeenCalledTimes(1); + expect(loader2.loadCommands).toHaveBeenCalledWith(signal); + }); + + it('should rename extension commands when they conflict', async () => { + const builtinCommand = createMockCommand('deploy', CommandKind.BUILT_IN); + const userCommand = createMockCommand('sync', CommandKind.FILE); + const extensionCommand1 = { + ...createMockCommand('deploy', CommandKind.FILE), + extensionName: 'firebase', + description: '[firebase] Deploy to Firebase', + }; + const extensionCommand2 = { + ...createMockCommand('sync', CommandKind.FILE), + extensionName: 'git-helper', + description: '[git-helper] Sync with remote', + }; + + const mockLoader1 = new MockCommandLoader([builtinCommand]); + const mockLoader2 = new MockCommandLoader([ + userCommand, + extensionCommand1, + extensionCommand2, + ]); + + const service = await 
CommandService.create( + [mockLoader1, mockLoader2], + new AbortController().signal, + ); + + const commands = service.getCommands(); + expect(commands).toHaveLength(4); + + // Built-in command keeps original name + const deployBuiltin = commands.find( + (cmd) => cmd.name === 'deploy' && !cmd.extensionName, + ); + expect(deployBuiltin).toBeDefined(); + expect(deployBuiltin?.kind).toBe(CommandKind.BUILT_IN); + + // Extension command conflicting with built-in gets renamed + const deployExtension = commands.find( + (cmd) => cmd.name === 'firebase.deploy', + ); + expect(deployExtension).toBeDefined(); + expect(deployExtension?.extensionName).toBe('firebase'); + + // User command keeps original name + const syncUser = commands.find( + (cmd) => cmd.name === 'sync' && !cmd.extensionName, + ); + expect(syncUser).toBeDefined(); + expect(syncUser?.kind).toBe(CommandKind.FILE); + + // Extension command conflicting with user command gets renamed + const syncExtension = commands.find( + (cmd) => cmd.name === 'git-helper.sync', + ); + expect(syncExtension).toBeDefined(); + expect(syncExtension?.extensionName).toBe('git-helper'); + }); + + it('should handle user/project command override correctly', async () => { + const builtinCommand = createMockCommand('help', CommandKind.BUILT_IN); + const userCommand = createMockCommand('help', CommandKind.FILE); + const projectCommand = createMockCommand('deploy', CommandKind.FILE); + const userDeployCommand = createMockCommand('deploy', CommandKind.FILE); + + const mockLoader1 = new MockCommandLoader([builtinCommand]); + const mockLoader2 = new MockCommandLoader([ + userCommand, + userDeployCommand, + projectCommand, + ]); + + const service = await CommandService.create( + [mockLoader1, mockLoader2], + new AbortController().signal, + ); + + const commands = service.getCommands(); + expect(commands).toHaveLength(2); + + // User command overrides built-in + const helpCommand = commands.find((cmd) => cmd.name === 'help'); + 
expect(helpCommand).toBeDefined(); + expect(helpCommand?.kind).toBe(CommandKind.FILE); + + // Project command overrides user command (last wins) + const deployCommand = commands.find((cmd) => cmd.name === 'deploy'); + expect(deployCommand).toBeDefined(); + expect(deployCommand?.kind).toBe(CommandKind.FILE); + }); + + it('should handle secondary conflicts when renaming extension commands', async () => { + // User has both /deploy and /gcp.deploy commands + const userCommand1 = createMockCommand('deploy', CommandKind.FILE); + const userCommand2 = createMockCommand('gcp.deploy', CommandKind.FILE); + + // Extension also has a deploy command that will conflict with user's /deploy + const extensionCommand = { + ...createMockCommand('deploy', CommandKind.FILE), + extensionName: 'gcp', + description: '[gcp] Deploy to Google Cloud', + }; + + const mockLoader = new MockCommandLoader([ + userCommand1, + userCommand2, + extensionCommand, + ]); + + const service = await CommandService.create( + [mockLoader], + new AbortController().signal, + ); + + const commands = service.getCommands(); + expect(commands).toHaveLength(3); + + // Original user command keeps its name + const deployUser = commands.find( + (cmd) => cmd.name === 'deploy' && !cmd.extensionName, + ); + expect(deployUser).toBeDefined(); + + // User's dot notation command keeps its name + const gcpDeployUser = commands.find( + (cmd) => cmd.name === 'gcp.deploy' && !cmd.extensionName, + ); + expect(gcpDeployUser).toBeDefined(); + + // Extension command gets renamed with suffix due to secondary conflict + const deployExtension = commands.find( + (cmd) => cmd.name === 'gcp.deploy1' && cmd.extensionName === 'gcp', + ); + expect(deployExtension).toBeDefined(); + expect(deployExtension?.description).toBe('[gcp] Deploy to Google Cloud'); + }); + + it('should handle multiple secondary conflicts with incrementing suffixes', async () => { + // User has /deploy, /gcp.deploy, and /gcp.deploy1 + const userCommand1 = 
createMockCommand('deploy', CommandKind.FILE); + const userCommand2 = createMockCommand('gcp.deploy', CommandKind.FILE); + const userCommand3 = createMockCommand('gcp.deploy1', CommandKind.FILE); + + // Extension has a deploy command + const extensionCommand = { + ...createMockCommand('deploy', CommandKind.FILE), + extensionName: 'gcp', + description: '[gcp] Deploy to Google Cloud', + }; + + const mockLoader = new MockCommandLoader([ + userCommand1, + userCommand2, + userCommand3, + extensionCommand, + ]); + + const service = await CommandService.create( + [mockLoader], + new AbortController().signal, + ); + + const commands = service.getCommands(); + expect(commands).toHaveLength(4); + + // Extension command gets renamed with suffix 2 due to multiple conflicts + const deployExtension = commands.find( + (cmd) => cmd.name === 'gcp.deploy2' && cmd.extensionName === 'gcp', + ); + expect(deployExtension).toBeDefined(); + expect(deployExtension?.description).toBe('[gcp] Deploy to Google Cloud'); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/services/CommandService.ts b/projects/gemini-cli/packages/cli/src/services/CommandService.ts new file mode 100644 index 0000000000000000000000000000000000000000..5f1e09d50dbaff2811a9d7705f9798273a643496 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/CommandService.ts @@ -0,0 +1,103 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { SlashCommand } from '../ui/commands/types.js'; +import type { ICommandLoader } from './types.js'; + +/** + * Orchestrates the discovery and loading of all slash commands for the CLI. + * + * This service operates on a provider-based loader pattern. It is initialized + * with an array of `ICommandLoader` instances, each responsible for fetching + * commands from a specific source (e.g., built-in code, local files). 
+ * + * The CommandService is responsible for invoking these loaders, aggregating their + * results, and resolving any name conflicts. This architecture allows the command + * system to be extended with new sources without modifying the service itself. + */ +export class CommandService { + /** + * Private constructor to enforce the use of the async factory. + * @param commands A readonly array of the fully loaded and de-duplicated commands. + */ + private constructor(private readonly commands: readonly SlashCommand[]) {} + + /** + * Asynchronously creates and initializes a new CommandService instance. + * + * This factory method orchestrates the entire command loading process. It + * runs all provided loaders in parallel, aggregates their results, handles + * name conflicts for extension commands by renaming them, and then returns a + * fully constructed `CommandService` instance. + * + * Conflict resolution: + * - Extension commands that conflict with existing commands are renamed to + * `extensionName.commandName` + * - Non-extension commands (built-in, user, project) override earlier commands + * with the same name based on loader order + * + * @param loaders An array of objects that conform to the `ICommandLoader` + * interface. Built-in commands should come first, followed by FileCommandLoader. + * @param signal An AbortSignal to cancel the loading process. + * @returns A promise that resolves to a new, fully initialized `CommandService` instance. 
+ */ + static async create( + loaders: ICommandLoader[], + signal: AbortSignal, + ): Promise { + const results = await Promise.allSettled( + loaders.map((loader) => loader.loadCommands(signal)), + ); + + const allCommands: SlashCommand[] = []; + for (const result of results) { + if (result.status === 'fulfilled') { + allCommands.push(...result.value); + } else { + console.debug('A command loader failed:', result.reason); + } + } + + const commandMap = new Map(); + for (const cmd of allCommands) { + let finalName = cmd.name; + + // Extension commands get renamed if they conflict with existing commands + if (cmd.extensionName && commandMap.has(cmd.name)) { + let renamedName = `${cmd.extensionName}.${cmd.name}`; + let suffix = 1; + + // Keep trying until we find a name that doesn't conflict + while (commandMap.has(renamedName)) { + renamedName = `${cmd.extensionName}.${cmd.name}${suffix}`; + suffix++; + } + + finalName = renamedName; + } + + commandMap.set(finalName, { + ...cmd, + name: finalName, + }); + } + + const finalCommands = Object.freeze(Array.from(commandMap.values())); + return new CommandService(finalCommands); + } + + /** + * Retrieves the currently loaded and de-duplicated list of slash commands. + * + * This method is a safe accessor for the service's state. It returns a + * readonly array, preventing consumers from modifying the service's internal state. + * + * @returns A readonly, unified array of available `SlashCommand` objects. 
+ */ + getCommands(): readonly SlashCommand[] { + return this.commands; + } +} diff --git a/projects/gemini-cli/packages/cli/src/services/FileCommandLoader.test.ts b/projects/gemini-cli/packages/cli/src/services/FileCommandLoader.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..5b4c91cd9394e50456958b5a8f9e2f606c70c6be --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/FileCommandLoader.test.ts @@ -0,0 +1,1230 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as path from 'node:path'; +import type { Config } from '@google/gemini-cli-core'; +import { Storage } from '@google/gemini-cli-core'; +import mock from 'mock-fs'; +import { FileCommandLoader } from './FileCommandLoader.js'; +import { assert, vi } from 'vitest'; +import { createMockCommandContext } from '../test-utils/mockCommandContext.js'; +import { + SHELL_INJECTION_TRIGGER, + SHORTHAND_ARGS_PLACEHOLDER, + type PromptPipelineContent, +} from './prompt-processors/types.js'; +import { + ConfirmationRequiredError, + ShellProcessor, +} from './prompt-processors/shellProcessor.js'; +import { DefaultArgumentProcessor } from './prompt-processors/argumentProcessor.js'; +import type { CommandContext } from '../ui/commands/types.js'; +import { AtFileProcessor } from './prompt-processors/atFileProcessor.js'; + +const mockShellProcess = vi.hoisted(() => vi.fn()); +const mockAtFileProcess = vi.hoisted(() => vi.fn()); +vi.mock('./prompt-processors/atFileProcessor.js', () => ({ + AtFileProcessor: vi.fn().mockImplementation(() => ({ + process: mockAtFileProcess, + })), +})); +vi.mock('./prompt-processors/shellProcessor.js', () => ({ + ShellProcessor: vi.fn().mockImplementation(() => ({ + process: mockShellProcess, + })), + ConfirmationRequiredError: class extends Error { + constructor( + message: string, + public commandsToConfirm: string[], + ) { + super(message); + this.name = 'ConfirmationRequiredError'; + } + }, +})); + 
+vi.mock('./prompt-processors/argumentProcessor.js', async (importOriginal) => { + const original = + await importOriginal< + typeof import('./prompt-processors/argumentProcessor.js') + >(); + return { + DefaultArgumentProcessor: vi + .fn() + .mockImplementation(() => new original.DefaultArgumentProcessor()), + }; +}); +vi.mock('@google/gemini-cli-core', async (importOriginal) => { + const original = + await importOriginal(); + return { + ...original, + Storage: original.Storage, + isCommandAllowed: vi.fn(), + ShellExecutionService: { + execute: vi.fn(), + }, + }; +}); + +describe('FileCommandLoader', () => { + const signal: AbortSignal = new AbortController().signal; + + beforeEach(() => { + vi.clearAllMocks(); + mockShellProcess.mockImplementation( + (prompt: PromptPipelineContent, context: CommandContext) => { + const userArgsRaw = context?.invocation?.args || ''; + // This is a simplified mock. A real implementation would need to iterate + // through all parts and process only the text parts. + const firstTextPart = prompt.find( + (p) => typeof p === 'string' || 'text' in p, + ); + let textContent = ''; + if (typeof firstTextPart === 'string') { + textContent = firstTextPart; + } else if (firstTextPart && 'text' in firstTextPart) { + textContent = firstTextPart.text ?? 
''; + } + + const processedText = textContent.replaceAll( + SHORTHAND_ARGS_PLACEHOLDER, + userArgsRaw, + ); + return Promise.resolve([{ text: processedText }]); + }, + ); + mockAtFileProcess.mockImplementation(async (prompt: string) => prompt); + }); + + afterEach(() => { + mock.restore(); + }); + + it('loads a single command from a file', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.toml': 'prompt = "This is a test prompt"', + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('test'); + + const result = await command.action?.( + createMockCommandContext({ + invocation: { + raw: '/test', + name: 'test', + args: '', + }, + }), + '', + ); + if (result?.type === 'submit_prompt') { + expect(result.content).toEqual([{ text: 'This is a test prompt' }]); + } else { + assert.fail('Incorrect action type'); + } + }); + + // Symlink creation on Windows requires special permissions that are not + // available in the standard CI environment. Therefore, we skip these tests + // on Windows to prevent CI failures. The core functionality is still + // validated on Linux and macOS. + const itif = (condition: boolean) => (condition ? 
it : it.skip); + + itif(process.platform !== 'win32')( + 'loads commands from a symlinked directory', + async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + const realCommandsDir = '/real/commands'; + mock({ + [realCommandsDir]: { + 'test.toml': 'prompt = "This is a test prompt"', + }, + // Symlink the user commands directory to the real one + [userCommandsDir]: mock.symlink({ + path: realCommandsDir, + }), + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('test'); + }, + ); + + itif(process.platform !== 'win32')( + 'loads commands from a symlinked subdirectory', + async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + const realNamespacedDir = '/real/namespaced-commands'; + mock({ + [userCommandsDir]: { + namespaced: mock.symlink({ + path: realNamespacedDir, + }), + }, + [realNamespacedDir]: { + 'my-test.toml': 'prompt = "This is a test prompt"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('namespaced:my-test'); + }, + ); + + it('loads multiple commands', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test1.toml': 'prompt = "Prompt 1"', + 'test2.toml': 'prompt = "Prompt 2"', + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(2); + }); + + it('creates deeply nested namespaces correctly', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + + mock({ + [userCommandsDir]: { + gcp: { + pipelines: { + 'run.toml': 'prompt = "run pipeline"', + }, 
+ }, + }, + }); + const mockConfig = { + getProjectRoot: vi.fn(() => '/path/to/project'), + getExtensions: vi.fn(() => []), + getFolderTrustFeature: vi.fn(() => false), + getFolderTrust: vi.fn(() => false), + } as unknown as Config; + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + expect(commands).toHaveLength(1); + expect(commands[0]!.name).toBe('gcp:pipelines:run'); + }); + + it('creates namespaces from nested directories', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + git: { + 'commit.toml': 'prompt = "git commit prompt"', + }, + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('git:commit'); + }); + + it('returns both user and project commands in order', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + const projectCommandsDir = new Storage( + process.cwd(), + ).getProjectCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.toml': 'prompt = "User prompt"', + }, + [projectCommandsDir]: { + 'test.toml': 'prompt = "Project prompt"', + }, + }); + + const mockConfig = { + getProjectRoot: vi.fn(() => process.cwd()), + getExtensions: vi.fn(() => []), + getFolderTrustFeature: vi.fn(() => false), + getFolderTrust: vi.fn(() => false), + } as unknown as Config; + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(2); + const userResult = await commands[0].action?.( + createMockCommandContext({ + invocation: { + raw: '/test', + name: 'test', + args: '', + }, + }), + '', + ); + if (userResult?.type === 'submit_prompt') { + expect(userResult.content).toEqual([{ text: 'User prompt' }]); + } else { + assert.fail('Incorrect action type for user command'); + } + 
const projectResult = await commands[1].action?.( + createMockCommandContext({ + invocation: { + raw: '/test', + name: 'test', + args: '', + }, + }), + '', + ); + if (projectResult?.type === 'submit_prompt') { + expect(projectResult.content).toEqual([{ text: 'Project prompt' }]); + } else { + assert.fail('Incorrect action type for project command'); + } + }); + + it('ignores files with TOML syntax errors', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'invalid.toml': 'this is not valid toml', + 'good.toml': 'prompt = "This one is fine"', + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + expect(commands[0].name).toBe('good'); + }); + + it('ignores files that are semantically invalid (missing prompt)', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'no_prompt.toml': 'description = "This file is missing a prompt"', + 'good.toml': 'prompt = "This one is fine"', + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + expect(commands[0].name).toBe('good'); + }); + + it('handles filename edge cases correctly', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.v1.toml': 'prompt = "Test prompt"', + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('test.v1'); + }); + + it('handles file system errors gracefully', async () => { + mock({}); // Mock an empty file system + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + expect(commands).toHaveLength(0); + }); + + it('uses a default description if not 
provided', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.toml': 'prompt = "Test prompt"', + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.description).toBe('Custom command from test.toml'); + }); + + it('uses the provided description', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.toml': 'prompt = "Test prompt"\ndescription = "My test command"', + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.description).toBe('My test command'); + }); + + it('should sanitize colons in filenames to prevent namespace conflicts', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'legacy:command.toml': 'prompt = "This is a legacy command"', + }, + }); + + const loader = new FileCommandLoader(null); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + + // Verify that the ':' in the filename was replaced with an '_' + expect(command.name).toBe('legacy_command'); + }); + + describe('Processor Instantiation Logic', () => { + it('instantiates only DefaultArgumentProcessor if no {{args}} or !{} are present', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'simple.toml': `prompt = "Just a regular prompt"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).not.toHaveBeenCalled(); + expect(DefaultArgumentProcessor).toHaveBeenCalledTimes(1); + }); + + it('instantiates only 
ShellProcessor if {{args}} is present (but not !{})', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'args.toml': `prompt = "Prompt with {{args}}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).toHaveBeenCalledTimes(1); + expect(DefaultArgumentProcessor).not.toHaveBeenCalled(); + }); + + it('instantiates ShellProcessor and DefaultArgumentProcessor if !{} is present (but not {{args}})', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Prompt with !{cmd}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).toHaveBeenCalledTimes(1); + expect(DefaultArgumentProcessor).toHaveBeenCalledTimes(1); + }); + + it('instantiates only ShellProcessor if both {{args}} and !{} are present', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'both.toml': `prompt = "Prompt with {{args}} and !{cmd}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).toHaveBeenCalledTimes(1); + expect(DefaultArgumentProcessor).not.toHaveBeenCalled(); + }); + + it('instantiates AtFileProcessor and DefaultArgumentProcessor if @{} is present', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'at-file.toml': `prompt = "Context: @{./my-file.txt}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(AtFileProcessor).toHaveBeenCalledTimes(1); + expect(ShellProcessor).not.toHaveBeenCalled(); + expect(DefaultArgumentProcessor).toHaveBeenCalledTimes(1); + }); + + it('instantiates ShellProcessor and AtFileProcessor 
if !{} and @{} are present', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shell-and-at.toml': `prompt = "Run !{cmd} with @{file.txt}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).toHaveBeenCalledTimes(1); + expect(AtFileProcessor).toHaveBeenCalledTimes(1); + expect(DefaultArgumentProcessor).toHaveBeenCalledTimes(1); // because no {{args}} + }); + + it('instantiates only ShellProcessor and AtFileProcessor if {{args}} and @{} are present', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'args-and-at.toml': `prompt = "Run {{args}} with @{file.txt}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).toHaveBeenCalledTimes(1); + expect(AtFileProcessor).toHaveBeenCalledTimes(1); + expect(DefaultArgumentProcessor).not.toHaveBeenCalled(); + }); + }); + + describe('Extension Command Loading', () => { + it('loads commands from active extensions', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + const projectCommandsDir = new Storage( + process.cwd(), + ).getProjectCommandsDir(); + const extensionDir = path.join( + process.cwd(), + '.gemini/extensions/test-ext', + ); + + mock({ + [userCommandsDir]: { + 'user.toml': 'prompt = "User command"', + }, + [projectCommandsDir]: { + 'project.toml': 'prompt = "Project command"', + }, + [extensionDir]: { + 'gemini-extension.json': JSON.stringify({ + name: 'test-ext', + version: '1.0.0', + }), + commands: { + 'ext.toml': 'prompt = "Extension command"', + }, + }, + }); + + const mockConfig = { + getProjectRoot: vi.fn(() => process.cwd()), + getExtensions: vi.fn(() => [ + { + name: 'test-ext', + version: '1.0.0', + isActive: true, + path: extensionDir, + }, + ]), + getFolderTrustFeature: vi.fn(() => false), + 
getFolderTrust: vi.fn(() => false), + } as unknown as Config; + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(3); + const commandNames = commands.map((cmd) => cmd.name); + expect(commandNames).toEqual(['user', 'project', 'ext']); + + const extCommand = commands.find((cmd) => cmd.name === 'ext'); + expect(extCommand?.extensionName).toBe('test-ext'); + expect(extCommand?.description).toMatch(/^\[test-ext\]/); + }); + + it('extension commands have extensionName metadata for conflict resolution', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + const projectCommandsDir = new Storage( + process.cwd(), + ).getProjectCommandsDir(); + const extensionDir = path.join( + process.cwd(), + '.gemini/extensions/test-ext', + ); + + mock({ + [extensionDir]: { + 'gemini-extension.json': JSON.stringify({ + name: 'test-ext', + version: '1.0.0', + }), + commands: { + 'deploy.toml': 'prompt = "Extension deploy command"', + }, + }, + [userCommandsDir]: { + 'deploy.toml': 'prompt = "User deploy command"', + }, + [projectCommandsDir]: { + 'deploy.toml': 'prompt = "Project deploy command"', + }, + }); + + const mockConfig = { + getProjectRoot: vi.fn(() => process.cwd()), + getExtensions: vi.fn(() => [ + { + name: 'test-ext', + version: '1.0.0', + isActive: true, + path: extensionDir, + }, + ]), + getFolderTrustFeature: vi.fn(() => false), + getFolderTrust: vi.fn(() => false), + } as unknown as Config; + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + + // Return all commands, even duplicates + expect(commands).toHaveLength(3); + + expect(commands[0].name).toBe('deploy'); + expect(commands[0].extensionName).toBeUndefined(); + const result0 = await commands[0].action?.( + createMockCommandContext({ + invocation: { + raw: '/deploy', + name: 'deploy', + args: '', + }, + }), + '', + ); + 
expect(result0?.type).toBe('submit_prompt'); + if (result0?.type === 'submit_prompt') { + expect(result0.content).toEqual([{ text: 'User deploy command' }]); + } + + expect(commands[1].name).toBe('deploy'); + expect(commands[1].extensionName).toBeUndefined(); + const result1 = await commands[1].action?.( + createMockCommandContext({ + invocation: { + raw: '/deploy', + name: 'deploy', + args: '', + }, + }), + '', + ); + expect(result1?.type).toBe('submit_prompt'); + if (result1?.type === 'submit_prompt') { + expect(result1.content).toEqual([{ text: 'Project deploy command' }]); + } + + expect(commands[2].name).toBe('deploy'); + expect(commands[2].extensionName).toBe('test-ext'); + expect(commands[2].description).toMatch(/^\[test-ext\]/); + const result2 = await commands[2].action?.( + createMockCommandContext({ + invocation: { + raw: '/deploy', + name: 'deploy', + args: '', + }, + }), + '', + ); + expect(result2?.type).toBe('submit_prompt'); + if (result2?.type === 'submit_prompt') { + expect(result2.content).toEqual([{ text: 'Extension deploy command' }]); + } + }); + + it('only loads commands from active extensions', async () => { + const extensionDir1 = path.join( + process.cwd(), + '.gemini/extensions/active-ext', + ); + const extensionDir2 = path.join( + process.cwd(), + '.gemini/extensions/inactive-ext', + ); + + mock({ + [extensionDir1]: { + 'gemini-extension.json': JSON.stringify({ + name: 'active-ext', + version: '1.0.0', + }), + commands: { + 'active.toml': 'prompt = "Active extension command"', + }, + }, + [extensionDir2]: { + 'gemini-extension.json': JSON.stringify({ + name: 'inactive-ext', + version: '1.0.0', + }), + commands: { + 'inactive.toml': 'prompt = "Inactive extension command"', + }, + }, + }); + + const mockConfig = { + getProjectRoot: vi.fn(() => process.cwd()), + getExtensions: vi.fn(() => [ + { + name: 'active-ext', + version: '1.0.0', + isActive: true, + path: extensionDir1, + }, + { + name: 'inactive-ext', + version: '1.0.0', + isActive: 
false, + path: extensionDir2, + }, + ]), + getFolderTrustFeature: vi.fn(() => false), + getFolderTrust: vi.fn(() => false), + } as unknown as Config; + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + expect(commands[0].name).toBe('active'); + expect(commands[0].extensionName).toBe('active-ext'); + expect(commands[0].description).toMatch(/^\[active-ext\]/); + }); + + it('handles missing extension commands directory gracefully', async () => { + const extensionDir = path.join( + process.cwd(), + '.gemini/extensions/no-commands', + ); + + mock({ + [extensionDir]: { + 'gemini-extension.json': JSON.stringify({ + name: 'no-commands', + version: '1.0.0', + }), + // No commands directory + }, + }); + + const mockConfig = { + getProjectRoot: vi.fn(() => process.cwd()), + getExtensions: vi.fn(() => [ + { + name: 'no-commands', + version: '1.0.0', + isActive: true, + path: extensionDir, + }, + ]), + getFolderTrustFeature: vi.fn(() => false), + getFolderTrust: vi.fn(() => false), + } as unknown as Config; + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + expect(commands).toHaveLength(0); + }); + + it('handles nested command structure in extensions', async () => { + const extensionDir = path.join(process.cwd(), '.gemini/extensions/a'); + + mock({ + [extensionDir]: { + 'gemini-extension.json': JSON.stringify({ + name: 'a', + version: '1.0.0', + }), + commands: { + b: { + 'c.toml': 'prompt = "Nested command from extension a"', + d: { + 'e.toml': 'prompt = "Deeply nested command"', + }, + }, + 'simple.toml': 'prompt = "Simple command"', + }, + }, + }); + + const mockConfig = { + getProjectRoot: vi.fn(() => process.cwd()), + getExtensions: vi.fn(() => [ + { name: 'a', version: '1.0.0', isActive: true, path: extensionDir }, + ]), + getFolderTrustFeature: vi.fn(() => false), + getFolderTrust: vi.fn(() => false), + } as unknown as 
Config; + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(3); + + const commandNames = commands.map((cmd) => cmd.name).sort(); + expect(commandNames).toEqual(['b:c', 'b:d:e', 'simple']); + + const nestedCmd = commands.find((cmd) => cmd.name === 'b:c'); + expect(nestedCmd?.extensionName).toBe('a'); + expect(nestedCmd?.description).toMatch(/^\[a\]/); + expect(nestedCmd).toBeDefined(); + const result = await nestedCmd!.action?.( + createMockCommandContext({ + invocation: { + raw: '/b:c', + name: 'b:c', + args: '', + }, + }), + '', + ); + if (result?.type === 'submit_prompt') { + expect(result.content).toEqual([ + { text: 'Nested command from extension a' }, + ]); + } else { + assert.fail('Incorrect action type'); + } + }); + }); + + describe('Argument Handling Integration (via ShellProcessor)', () => { + it('correctly processes a command with {{args}}', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shorthand.toml': + 'prompt = "The user wants to: {{args}}"\ndescription = "Shorthand test"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'shorthand'); + expect(command).toBeDefined(); + + const result = await command!.action?.( + createMockCommandContext({ + invocation: { + raw: '/shorthand do something cool', + name: 'shorthand', + args: 'do something cool', + }, + }), + 'do something cool', + ); + expect(result?.type).toBe('submit_prompt'); + if (result?.type === 'submit_prompt') { + expect(result.content).toEqual([ + { text: 'The user wants to: do something cool' }, + ]); + } + }); + }); + + describe('Default Argument Processor Integration', () => { + it('correctly processes a command without {{args}}', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + 
[userCommandsDir]: { + 'model_led.toml': + 'prompt = "This is the instruction."\ndescription = "Default processor test"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'model_led'); + expect(command).toBeDefined(); + + const result = await command!.action?.( + createMockCommandContext({ + invocation: { + raw: '/model_led 1.2.0 added "a feature"', + name: 'model_led', + args: '1.2.0 added "a feature"', + }, + }), + '1.2.0 added "a feature"', + ); + expect(result?.type).toBe('submit_prompt'); + if (result?.type === 'submit_prompt') { + const expectedContent = + 'This is the instruction.\n\n/model_led 1.2.0 added "a feature"'; + expect(result.content).toEqual([{ text: expectedContent }]); + } + }); + }); + + describe('Shell Processor Integration', () => { + it('instantiates ShellProcessor if {{args}} is present (even without shell trigger)', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'args_only.toml': `prompt = "Hello {{args}}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).toHaveBeenCalledWith('args_only'); + }); + it('instantiates ShellProcessor if the trigger is present', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Run this: ${SHELL_INJECTION_TRIGGER}echo hello}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).toHaveBeenCalledWith('shell'); + }); + + it('does not instantiate ShellProcessor if no triggers ({{args}} or !{}) are present', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'regular.toml': `prompt = "Just a regular prompt"`, + }, 
+ }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).not.toHaveBeenCalled(); + }); + + it('returns a "submit_prompt" action if shell processing succeeds', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Run !{echo 'hello'}"`, + }, + }); + mockShellProcess.mockResolvedValue([{ text: 'Run hello' }]); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'shell'); + expect(command).toBeDefined(); + + const result = await command!.action!( + createMockCommandContext({ + invocation: { raw: '/shell', name: 'shell', args: '' }, + }), + '', + ); + + expect(result?.type).toBe('submit_prompt'); + if (result?.type === 'submit_prompt') { + expect(result.content).toEqual([{ text: 'Run hello' }]); + } + }); + + it('returns a "confirm_shell_commands" action if shell processing requires it', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + const rawInvocation = '/shell rm -rf /'; + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Run !{rm -rf /}"`, + }, + }); + + // Mock the processor to throw the specific error + const error = new ConfirmationRequiredError('Confirmation needed', [ + 'rm -rf /', + ]); + mockShellProcess.mockRejectedValue(error); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'shell'); + expect(command).toBeDefined(); + + const result = await command!.action!( + createMockCommandContext({ + invocation: { raw: rawInvocation, name: 'shell', args: 'rm -rf /' }, + }), + 'rm -rf /', + ); + + expect(result?.type).toBe('confirm_shell_commands'); + if (result?.type === 'confirm_shell_commands') { + 
expect(result.commandsToConfirm).toEqual(['rm -rf /']); + expect(result.originalInvocation.raw).toBe(rawInvocation); + } + }); + + it('re-throws other errors from the processor', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Run !{something}"`, + }, + }); + + const genericError = new Error('Something else went wrong'); + mockShellProcess.mockRejectedValue(genericError); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'shell'); + expect(command).toBeDefined(); + + await expect( + command!.action!( + createMockCommandContext({ + invocation: { raw: '/shell', name: 'shell', args: '' }, + }), + '', + ), + ).rejects.toThrow('Something else went wrong'); + }); + it('assembles the processor pipeline in the correct order (AtFile -> Shell -> Default)', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + // This prompt uses !{}, @{}, but NOT {{args}}, so all processors should be active. + 'pipeline.toml': ` + prompt = "Shell says: !{echo foo}. 
File says: @{./bar.txt}" + `, + }, + './bar.txt': 'bar content', + }); + + const defaultProcessMock = vi + .fn() + .mockImplementation((p: PromptPipelineContent) => + Promise.resolve([ + { text: `${(p[0] as { text: string }).text}-default-processed` }, + ]), + ); + + mockShellProcess.mockImplementation((p: PromptPipelineContent) => + Promise.resolve([ + { text: `${(p[0] as { text: string }).text}-shell-processed` }, + ]), + ); + + mockAtFileProcess.mockImplementation((p: PromptPipelineContent) => + Promise.resolve([ + { text: `${(p[0] as { text: string }).text}-at-file-processed` }, + ]), + ); + + vi.mocked(DefaultArgumentProcessor).mockImplementation( + () => + ({ + process: defaultProcessMock, + }) as unknown as DefaultArgumentProcessor, + ); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'pipeline'); + expect(command).toBeDefined(); + + const result = await command!.action!( + createMockCommandContext({ + invocation: { + raw: '/pipeline baz', + name: 'pipeline', + args: 'baz', + }, + }), + 'baz', + ); + + expect(mockAtFileProcess.mock.invocationCallOrder[0]).toBeLessThan( + mockShellProcess.mock.invocationCallOrder[0], + ); + expect(mockShellProcess.mock.invocationCallOrder[0]).toBeLessThan( + defaultProcessMock.mock.invocationCallOrder[0], + ); + + // Verify the flow of the prompt through the processors + // 1. AtFile processor runs first + expect(mockAtFileProcess).toHaveBeenCalledWith( + [{ text: expect.stringContaining('@{./bar.txt}') }], + expect.any(Object), + ); + // 2. Shell processor runs second + expect(mockShellProcess).toHaveBeenCalledWith( + [{ text: expect.stringContaining('-at-file-processed') }], + expect.any(Object), + ); + // 3. 
Default processor runs third + expect(defaultProcessMock).toHaveBeenCalledWith( + [{ text: expect.stringContaining('-shell-processed') }], + expect.any(Object), + ); + + if (result?.type === 'submit_prompt') { + const contentAsArray = Array.isArray(result.content) + ? result.content + : [result.content]; + expect(contentAsArray.length).toBeGreaterThan(0); + const firstPart = contentAsArray[0]; + + if (typeof firstPart === 'object' && firstPart && 'text' in firstPart) { + expect(firstPart.text).toContain( + '-at-file-processed-shell-processed-default-processed', + ); + } else { + assert.fail( + 'First part of content is not a text part or is a string', + ); + } + } else { + assert.fail('Incorrect action type'); + } + }); + }); + + describe('@-file Processor Integration', () => { + it('correctly processes a command with @{file}', async () => { + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'at-file.toml': + 'prompt = "Context from file: @{./test.txt}"\ndescription = "@-file test"', + }, + './test.txt': 'file content', + }); + + mockAtFileProcess.mockImplementation( + async (prompt: PromptPipelineContent) => { + // A simplified mock of AtFileProcessor's behavior + const textContent = (prompt[0] as { text: string }).text; + if (textContent.includes('@{./test.txt}')) { + return [ + { + text: textContent.replace('@{./test.txt}', 'file content'), + }, + ]; + } + return prompt; + }, + ); + + // Prevent default processor from interfering + vi.mocked(DefaultArgumentProcessor).mockImplementation( + () => + ({ + process: (p: PromptPipelineContent) => Promise.resolve(p), + }) as unknown as DefaultArgumentProcessor, + ); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'at-file'); + expect(command).toBeDefined(); + + const result = await command!.action?.( + createMockCommandContext({ + invocation: { + raw: 
'/at-file', + name: 'at-file', + args: '', + }, + }), + '', + ); + expect(result?.type).toBe('submit_prompt'); + if (result?.type === 'submit_prompt') { + expect(result.content).toEqual([ + { text: 'Context from file: file content' }, + ]); + } + }); + }); + + describe('with folder trust enabled', () => { + it('loads multiple commands', async () => { + const mockConfig = { + getProjectRoot: vi.fn(() => '/path/to/project'), + getExtensions: vi.fn(() => []), + getFolderTrustFeature: vi.fn(() => true), + getFolderTrust: vi.fn(() => true), + } as unknown as Config; + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test1.toml': 'prompt = "Prompt 1"', + 'test2.toml': 'prompt = "Prompt 2"', + }, + }); + + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(2); + }); + + it('does not load when folder is not trusted', async () => { + const mockConfig = { + getProjectRoot: vi.fn(() => '/path/to/project'), + getExtensions: vi.fn(() => []), + getFolderTrustFeature: vi.fn(() => true), + getFolderTrust: vi.fn(() => false), + } as unknown as Config; + const userCommandsDir = Storage.getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test1.toml': 'prompt = "Prompt 1"', + 'test2.toml': 'prompt = "Prompt 2"', + }, + }); + + const loader = new FileCommandLoader(mockConfig); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(0); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/services/FileCommandLoader.ts b/projects/gemini-cli/packages/cli/src/services/FileCommandLoader.ts new file mode 100644 index 0000000000000000000000000000000000000000..a78c372aede5039831b40a3d851a47b350ee5da7 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/FileCommandLoader.ts @@ -0,0 +1,315 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { promises as 
fs } from 'node:fs'; +import path from 'node:path'; +import toml from '@iarna/toml'; +import { glob } from 'glob'; +import { z } from 'zod'; +import type { Config } from '@google/gemini-cli-core'; +import { Storage } from '@google/gemini-cli-core'; +import type { ICommandLoader } from './types.js'; +import type { + CommandContext, + SlashCommand, + SlashCommandActionReturn, +} from '../ui/commands/types.js'; +import { CommandKind } from '../ui/commands/types.js'; +import { DefaultArgumentProcessor } from './prompt-processors/argumentProcessor.js'; +import type { + IPromptProcessor, + PromptPipelineContent, +} from './prompt-processors/types.js'; +import { + SHORTHAND_ARGS_PLACEHOLDER, + SHELL_INJECTION_TRIGGER, + AT_FILE_INJECTION_TRIGGER, +} from './prompt-processors/types.js'; +import { + ConfirmationRequiredError, + ShellProcessor, +} from './prompt-processors/shellProcessor.js'; +import { AtFileProcessor } from './prompt-processors/atFileProcessor.js'; + +interface CommandDirectory { + path: string; + extensionName?: string; +} + +/** + * Defines the Zod schema for a command definition file. This serves as the + * single source of truth for both validation and type inference. + */ +const TomlCommandDefSchema = z.object({ + prompt: z.string({ + required_error: "The 'prompt' field is required.", + invalid_type_error: "The 'prompt' field must be a string.", + }), + description: z.string().optional(), +}); + +/** + * Discovers and loads custom slash commands from .toml files in both the + * user's global config directory and the current project's directory. + * + * This loader is responsible for: + * - Recursively scanning command directories. + * - Parsing and validating TOML files. + * - Adapting valid definitions into executable SlashCommand objects. + * - Handling file system errors and malformed files gracefully. 
+ */ +export class FileCommandLoader implements ICommandLoader { + private readonly projectRoot: string; + private readonly folderTrustEnabled: boolean; + private readonly folderTrust: boolean; + + constructor(private readonly config: Config | null) { + this.folderTrustEnabled = !!config?.getFolderTrustFeature(); + this.folderTrust = !!config?.getFolderTrust(); + this.projectRoot = config?.getProjectRoot() || process.cwd(); + } + + /** + * Loads all commands from user, project, and extension directories. + * Returns commands in order: user → project → extensions (alphabetically). + * + * Order is important for conflict resolution in CommandService: + * - User/project commands (without extensionName) use "last wins" strategy + * - Extension commands (with extensionName) get renamed if conflicts exist + * + * @param signal An AbortSignal to cancel the loading process. + * @returns A promise that resolves to an array of all loaded SlashCommands. + */ + async loadCommands(signal: AbortSignal): Promise { + const allCommands: SlashCommand[] = []; + const globOptions = { + nodir: true, + dot: true, + signal, + follow: true, + }; + + // Load commands from each directory + const commandDirs = this.getCommandDirectories(); + for (const dirInfo of commandDirs) { + try { + const files = await glob('**/*.toml', { + ...globOptions, + cwd: dirInfo.path, + }); + + if (this.folderTrustEnabled && !this.folderTrust) { + return []; + } + + const commandPromises = files.map((file) => + this.parseAndAdaptFile( + path.join(dirInfo.path, file), + dirInfo.path, + dirInfo.extensionName, + ), + ); + + const commands = (await Promise.all(commandPromises)).filter( + (cmd): cmd is SlashCommand => cmd !== null, + ); + + // Add all commands without deduplication + allCommands.push(...commands); + } catch (error) { + if ((error as NodeJS.ErrnoException).code !== 'ENOENT') { + console.error( + `[FileCommandLoader] Error loading commands from ${dirInfo.path}:`, + error, + ); + } + } + } + + return 
allCommands; + } + + /** + * Get all command directories in order for loading. + * User commands → Project commands → Extension commands + * This order ensures extension commands can detect all conflicts. + */ + private getCommandDirectories(): CommandDirectory[] { + const dirs: CommandDirectory[] = []; + + const storage = this.config?.storage ?? new Storage(this.projectRoot); + + // 1. User commands + dirs.push({ path: Storage.getUserCommandsDir() }); + + // 2. Project commands (override user commands) + dirs.push({ path: storage.getProjectCommandsDir() }); + + // 3. Extension commands (processed last to detect all conflicts) + if (this.config) { + const activeExtensions = this.config + .getExtensions() + .filter((ext) => ext.isActive) + .sort((a, b) => a.name.localeCompare(b.name)); // Sort alphabetically for deterministic loading + + const extensionCommandDirs = activeExtensions.map((ext) => ({ + path: path.join(ext.path, 'commands'), + extensionName: ext.name, + })); + + dirs.push(...extensionCommandDirs); + } + + return dirs; + } + + /** + * Parses a single .toml file and transforms it into a SlashCommand object. + * @param filePath The absolute path to the .toml file. + * @param baseDir The root command directory for name calculation. + * @param extensionName Optional extension name to prefix commands with. + * @returns A promise resolving to a SlashCommand, or null if the file is invalid. + */ + private async parseAndAdaptFile( + filePath: string, + baseDir: string, + extensionName?: string, + ): Promise { + let fileContent: string; + try { + fileContent = await fs.readFile(filePath, 'utf-8'); + } catch (error: unknown) { + console.error( + `[FileCommandLoader] Failed to read file ${filePath}:`, + error instanceof Error ? 
error.message : String(error), + ); + return null; + } + + let parsed: unknown; + try { + parsed = toml.parse(fileContent); + } catch (error: unknown) { + console.error( + `[FileCommandLoader] Failed to parse TOML file ${filePath}:`, + error instanceof Error ? error.message : String(error), + ); + return null; + } + + const validationResult = TomlCommandDefSchema.safeParse(parsed); + + if (!validationResult.success) { + console.error( + `[FileCommandLoader] Skipping invalid command file: ${filePath}. Validation errors:`, + validationResult.error.flatten(), + ); + return null; + } + + const validDef = validationResult.data; + + const relativePathWithExt = path.relative(baseDir, filePath); + const relativePath = relativePathWithExt.substring( + 0, + relativePathWithExt.length - 5, // length of '.toml' + ); + const baseCommandName = relativePath + .split(path.sep) + // Sanitize each path segment to prevent ambiguity. Since ':' is our + // namespace separator, we replace any literal colons in filenames + // with underscores to avoid naming conflicts. + .map((segment) => segment.replaceAll(':', '_')) + .join(':'); + + // Add extension name tag for extension commands + const defaultDescription = `Custom command from ${path.basename(filePath)}`; + let description = validDef.description || defaultDescription; + if (extensionName) { + description = `[${extensionName}] ${description}`; + } + + const processors: IPromptProcessor[] = []; + const usesArgs = validDef.prompt.includes(SHORTHAND_ARGS_PLACEHOLDER); + const usesShellInjection = validDef.prompt.includes( + SHELL_INJECTION_TRIGGER, + ); + const usesAtFileInjection = validDef.prompt.includes( + AT_FILE_INJECTION_TRIGGER, + ); + + // 1. @-File Injection (Security First). + // This runs first to ensure we're not executing shell commands that + // could dynamically generate malicious @-paths. + if (usesAtFileInjection) { + processors.push(new AtFileProcessor(baseCommandName)); + } + + // 2. Argument and Shell Injection. 
+ // This runs after file content has been safely injected. + if (usesShellInjection || usesArgs) { + processors.push(new ShellProcessor(baseCommandName)); + } + + // 3. Default Argument Handling. + // Appends the raw invocation if no explicit {{args}} are used. + if (!usesArgs) { + processors.push(new DefaultArgumentProcessor()); + } + + return { + name: baseCommandName, + description, + kind: CommandKind.FILE, + extensionName, + action: async ( + context: CommandContext, + _args: string, + ): Promise => { + if (!context.invocation) { + console.error( + `[FileCommandLoader] Critical error: Command '${baseCommandName}' was executed without invocation context.`, + ); + return { + type: 'submit_prompt', + content: [{ text: validDef.prompt }], // Fallback to unprocessed prompt + }; + } + + try { + let processedContent: PromptPipelineContent = [ + { text: validDef.prompt }, + ]; + for (const processor of processors) { + processedContent = await processor.process( + processedContent, + context, + ); + } + + return { + type: 'submit_prompt', + content: processedContent, + }; + } catch (e) { + // Check if it's our specific error type + if (e instanceof ConfirmationRequiredError) { + // Halt and request confirmation from the UI layer. + return { + type: 'confirm_shell_commands', + commandsToConfirm: e.commandsToConfirm, + originalInvocation: { + raw: context.invocation.raw, + }, + }; + } + // Re-throw other errors to be handled by the global error handler. 
+ throw e; + } + }, + }; + } +} diff --git a/projects/gemini-cli/packages/cli/src/services/McpPromptLoader.test.ts b/projects/gemini-cli/packages/cli/src/services/McpPromptLoader.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..609e553362dc8b1afaa3ffa628ed1e759347524a --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/McpPromptLoader.test.ts @@ -0,0 +1,128 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { McpPromptLoader } from './McpPromptLoader.js'; +import type { Config } from '@google/gemini-cli-core'; +import type { PromptArgument } from '@modelcontextprotocol/sdk/types.js'; +import { describe, it, expect } from 'vitest'; + +describe('McpPromptLoader', () => { + const mockConfig = {} as Config; + + describe('parseArgs', () => { + it('should handle multi-word positional arguments', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [ + { name: 'arg1', required: true }, + { name: 'arg2', required: true }, + ]; + const userArgs = 'hello world'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ arg1: 'hello', arg2: 'world' }); + }); + + it('should handle quoted multi-word positional arguments', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [ + { name: 'arg1', required: true }, + { name: 'arg2', required: true }, + ]; + const userArgs = '"hello world" foo'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ arg1: 'hello world', arg2: 'foo' }); + }); + + it('should handle a single positional argument with multiple words', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [{ name: 'arg1', required: true }]; + const userArgs = 'hello world'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ arg1: 'hello world' }); + }); + + 
it('should handle escaped quotes in positional arguments', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [{ name: 'arg1', required: true }]; + const userArgs = '"hello \\"world\\""'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ arg1: 'hello "world"' }); + }); + + it('should handle escaped backslashes in positional arguments', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [{ name: 'arg1', required: true }]; + const userArgs = '"hello\\\\world"'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ arg1: 'hello\\world' }); + }); + + it('should handle named args followed by positional args', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [ + { name: 'named', required: true }, + { name: 'pos', required: true }, + ]; + const userArgs = '--named="value" positional'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ named: 'value', pos: 'positional' }); + }); + + it('should handle positional args followed by named args', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [ + { name: 'pos', required: true }, + { name: 'named', required: true }, + ]; + const userArgs = 'positional --named="value"'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ pos: 'positional', named: 'value' }); + }); + + it('should handle positional args interspersed with named args', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [ + { name: 'pos1', required: true }, + { name: 'named', required: true }, + { name: 'pos2', required: true }, + ]; + const userArgs = 'p1 --named="value" p2'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ pos1: 'p1', named: 'value', pos2: 'p2' }); + }); + + 
it('should treat an escaped quote at the start as a literal', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [ + { name: 'arg1', required: true }, + { name: 'arg2', required: true }, + ]; + const userArgs = '\\"hello world'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ arg1: '"hello', arg2: 'world' }); + }); + + it('should handle a complex mix of args', () => { + const loader = new McpPromptLoader(mockConfig); + const promptArgs: PromptArgument[] = [ + { name: 'pos1', required: true }, + { name: 'named1', required: true }, + { name: 'pos2', required: true }, + { name: 'named2', required: true }, + { name: 'pos3', required: true }, + ]; + const userArgs = + 'p1 --named1="value 1" "p2 has spaces" --named2=value2 "p3 \\"with quotes\\""'; + const result = loader.parseArgs(userArgs, promptArgs); + expect(result).toEqual({ + pos1: 'p1', + named1: 'value 1', + pos2: 'p2 has spaces', + named2: 'value2', + pos3: 'p3 "with quotes"', + }); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/services/McpPromptLoader.ts b/projects/gemini-cli/packages/cli/src/services/McpPromptLoader.ts new file mode 100644 index 0000000000000000000000000000000000000000..c24e7b5122f3f79e8e7fd630ec608f280c4bdc24 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/McpPromptLoader.ts @@ -0,0 +1,253 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { Config } from '@google/gemini-cli-core'; +import { getErrorMessage, getMCPServerPrompts } from '@google/gemini-cli-core'; +import type { + CommandContext, + SlashCommand, + SlashCommandActionReturn, +} from '../ui/commands/types.js'; +import { CommandKind } from '../ui/commands/types.js'; +import type { ICommandLoader } from './types.js'; +import type { PromptArgument } from '@modelcontextprotocol/sdk/types.js'; + +/** + * Discovers and loads executable slash commands from 
prompts exposed by + * Model-Context-Protocol (MCP) servers. + */ +export class McpPromptLoader implements ICommandLoader { + constructor(private readonly config: Config | null) {} + + /** + * Loads all available prompts from all configured MCP servers and adapts + * them into executable SlashCommand objects. + * + * @param _signal An AbortSignal (unused for this synchronous loader). + * @returns A promise that resolves to an array of loaded SlashCommands. + */ + loadCommands(_signal: AbortSignal): Promise { + const promptCommands: SlashCommand[] = []; + if (!this.config) { + return Promise.resolve([]); + } + const mcpServers = this.config.getMcpServers() || {}; + for (const serverName in mcpServers) { + const prompts = getMCPServerPrompts(this.config, serverName) || []; + for (const prompt of prompts) { + const commandName = `${prompt.name}`; + const newPromptCommand: SlashCommand = { + name: commandName, + description: prompt.description || `Invoke prompt ${prompt.name}`, + kind: CommandKind.MCP_PROMPT, + subCommands: [ + { + name: 'help', + description: 'Show help for this prompt', + kind: CommandKind.MCP_PROMPT, + action: async (): Promise => { + if (!prompt.arguments || prompt.arguments.length === 0) { + return { + type: 'message', + messageType: 'info', + content: `Prompt "${prompt.name}" has no arguments.`, + }; + } + + let helpMessage = `Arguments for "${prompt.name}":\n\n`; + if (prompt.arguments && prompt.arguments.length > 0) { + helpMessage += `You can provide arguments by name (e.g., --argName="value") or by position.\n\n`; + helpMessage += `e.g., ${prompt.name} ${prompt.arguments?.map((_) => `"foo"`)} is equivalent to ${prompt.name} ${prompt.arguments?.map((arg) => `--${arg.name}="foo"`)}\n\n`; + } + for (const arg of prompt.arguments) { + helpMessage += ` --${arg.name}\n`; + if (arg.description) { + helpMessage += ` ${arg.description}\n`; + } + helpMessage += ` (required: ${ + arg.required ? 
'yes' : 'no' + })\n\n`; + } + return { + type: 'message', + messageType: 'info', + content: helpMessage, + }; + }, + }, + ], + action: async ( + context: CommandContext, + args: string, + ): Promise => { + if (!this.config) { + return { + type: 'message', + messageType: 'error', + content: 'Config not loaded.', + }; + } + + const promptInputs = this.parseArgs(args, prompt.arguments); + if (promptInputs instanceof Error) { + return { + type: 'message', + messageType: 'error', + content: promptInputs.message, + }; + } + + try { + const mcpServers = this.config.getMcpServers() || {}; + const mcpServerConfig = mcpServers[serverName]; + if (!mcpServerConfig) { + return { + type: 'message', + messageType: 'error', + content: `MCP server config not found for '${serverName}'.`, + }; + } + const result = await prompt.invoke(promptInputs); + + if (result['error']) { + return { + type: 'message', + messageType: 'error', + content: `Error invoking prompt: ${result['error']}`, + }; + } + + if (!result.messages?.[0]?.content?.['text']) { + return { + type: 'message', + messageType: 'error', + content: + 'Received an empty or invalid prompt response from the server.', + }; + } + + return { + type: 'submit_prompt', + content: JSON.stringify(result.messages[0].content.text), + }; + } catch (error) { + return { + type: 'message', + messageType: 'error', + content: `Error: ${getErrorMessage(error)}`, + }; + } + }, + completion: async (_: CommandContext, partialArg: string) => { + if (!prompt || !prompt.arguments) { + return []; + } + + const suggestions: string[] = []; + const usedArgNames = new Set( + (partialArg.match(/--([^=]+)/g) || []).map((s) => s.substring(2)), + ); + + for (const arg of prompt.arguments) { + if (!usedArgNames.has(arg.name)) { + suggestions.push(`--${arg.name}=""`); + } + } + + return suggestions; + }, + }; + promptCommands.push(newPromptCommand); + } + } + return Promise.resolve(promptCommands); + } + + /** + * Parses the `userArgs` string representing the 
prompt arguments (all the text + * after the command) into a record matching the shape of the `promptArgs`. + * + * @param userArgs + * @param promptArgs + * @returns A record of the parsed arguments + * @visibleForTesting + */ + parseArgs( + userArgs: string, + promptArgs: PromptArgument[] | undefined, + ): Record | Error { + const argValues: { [key: string]: string } = {}; + const promptInputs: Record = {}; + + // arg parsing: --key="value" or --key=value + const namedArgRegex = /--([^=]+)=(?:"((?:\\.|[^"\\])*)"|([^ ]+))/g; + let match; + let lastIndex = 0; + const positionalParts: string[] = []; + + while ((match = namedArgRegex.exec(userArgs)) !== null) { + const key = match[1]; + // Extract the quoted or unquoted argument and remove escape chars. + const value = (match[2] ?? match[3]).replace(/\\(.)/g, '$1'); + argValues[key] = value; + // Capture text between matches as potential positional args + if (match.index > lastIndex) { + positionalParts.push(userArgs.substring(lastIndex, match.index)); + } + lastIndex = namedArgRegex.lastIndex; + } + + // Capture any remaining text after the last named arg + if (lastIndex < userArgs.length) { + positionalParts.push(userArgs.substring(lastIndex)); + } + + const positionalArgsString = positionalParts.join('').trim(); + // extracts either quoted strings or non-quoted sequences of non-space characters. + const positionalArgRegex = /(?:"((?:\\.|[^"\\])*)"|([^ ]+))/g; + const positionalArgs: string[] = []; + while ((match = positionalArgRegex.exec(positionalArgsString)) !== null) { + // Extract the quoted or unquoted argument and remove escape chars. + positionalArgs.push((match[1] ?? 
match[2]).replace(/\\(.)/g, '$1')); + } + + if (!promptArgs) { + return promptInputs; + } + for (const arg of promptArgs) { + if (argValues[arg.name]) { + promptInputs[arg.name] = argValues[arg.name]; + } + } + + const unfilledArgs = promptArgs.filter( + (arg) => arg.required && !promptInputs[arg.name], + ); + + if (unfilledArgs.length === 1) { + // If we have only one unfilled arg, we don't require quotes we just + // join all the given arguments together as if they were quoted. + promptInputs[unfilledArgs[0].name] = positionalArgs.join(' '); + } else { + const missingArgs: string[] = []; + for (let i = 0; i < unfilledArgs.length; i++) { + if (positionalArgs.length > i) { + promptInputs[unfilledArgs[i].name] = positionalArgs[i]; + } else { + missingArgs.push(unfilledArgs[i].name); + } + } + if (missingArgs.length > 0) { + const missingArgNames = missingArgs + .map((name) => `--${name}`) + .join(', '); + return new Error(`Missing required argument(s): ${missingArgNames}`); + } + } + + return promptInputs; + } +} diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts b/projects/gemini-cli/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..80bde1287d1a0845fc79a3ff7a1d4db69d3b6c41 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts @@ -0,0 +1,43 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { DefaultArgumentProcessor } from './argumentProcessor.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { describe, it, expect } from 'vitest'; + +describe('Argument Processors', () => { + describe('DefaultArgumentProcessor', () => { + const processor = new DefaultArgumentProcessor(); + + it('should append the full command if args are provided', async () => { + const prompt = [{ text: 
'Parse the command.' }]; + const context = createMockCommandContext({ + invocation: { + raw: '/mycommand arg1 "arg two"', + name: 'mycommand', + args: 'arg1 "arg two"', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toEqual([ + { text: 'Parse the command.\n\n/mycommand arg1 "arg two"' }, + ]); + }); + + it('should NOT append the full command if no args are provided', async () => { + const prompt = [{ text: 'Parse the command.' }]; + const context = createMockCommandContext({ + invocation: { + raw: '/mycommand', + name: 'mycommand', + args: '', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toEqual([{ text: 'Parse the command.' }]); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/argumentProcessor.ts b/projects/gemini-cli/packages/cli/src/services/prompt-processors/argumentProcessor.ts new file mode 100644 index 0000000000000000000000000000000000000000..b5e5b38a0c8bbf3e0094564e507591fe043f4369 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/argumentProcessor.ts @@ -0,0 +1,27 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { appendToLastTextPart } from '@google/gemini-cli-core'; +import type { IPromptProcessor, PromptPipelineContent } from './types.js'; +import type { CommandContext } from '../../ui/commands/types.js'; + +/** + * Appends the user's full command invocation to the prompt if arguments are + * provided, allowing the model to perform its own argument parsing. + * + * This processor is only used if the prompt does NOT contain {{args}}. 
+ */ +export class DefaultArgumentProcessor implements IPromptProcessor { + async process( + prompt: PromptPipelineContent, + context: CommandContext, + ): Promise { + if (context.invocation?.args) { + return appendToLastTextPart(prompt, context.invocation.raw); + } + return prompt; + } +} diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/atFileProcessor.test.ts b/projects/gemini-cli/packages/cli/src/services/prompt-processors/atFileProcessor.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..3f49248169c440580687a97a10443a94cd62d541 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/atFileProcessor.test.ts @@ -0,0 +1,221 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { type CommandContext } from '../../ui/commands/types.js'; +import { AtFileProcessor } from './atFileProcessor.js'; +import { MessageType } from '../../ui/types.js'; +import type { Config } from '@google/gemini-cli-core'; +import type { PartUnion } from '@google/genai'; + +// Mock the core dependency +const mockReadPathFromWorkspace = vi.hoisted(() => vi.fn()); +vi.mock('@google/gemini-cli-core', async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + readPathFromWorkspace: mockReadPathFromWorkspace, + }; +}); + +describe('AtFileProcessor', () => { + let context: CommandContext; + let mockConfig: Config; + + beforeEach(() => { + vi.clearAllMocks(); + + mockConfig = { + // The processor only passes the config through, so we don't need a full mock. + } as unknown as Config; + + context = createMockCommandContext({ + services: { + config: mockConfig, + }, + }); + + // Default mock success behavior: return content wrapped in a text part. 
+ mockReadPathFromWorkspace.mockImplementation( + async (path: string): Promise => [ + { text: `content of ${path}` }, + ], + ); + }); + + it('should not change the prompt if no @{ trigger is present', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [{ text: 'This is a simple prompt.' }]; + const result = await processor.process(prompt, context); + expect(result).toEqual(prompt); + expect(mockReadPathFromWorkspace).not.toHaveBeenCalled(); + }); + + it('should not change the prompt if config service is missing', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [{ text: 'Analyze @{file.txt}' }]; + const contextWithoutConfig = createMockCommandContext({ + services: { + config: null, + }, + }); + const result = await processor.process(prompt, contextWithoutConfig); + expect(result).toEqual(prompt); + expect(mockReadPathFromWorkspace).not.toHaveBeenCalled(); + }); + + describe('Parsing Logic', () => { + it('should replace a single valid @{path/to/file.txt} placeholder', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [ + { text: 'Analyze this file: @{path/to/file.txt}' }, + ]; + const result = await processor.process(prompt, context); + expect(mockReadPathFromWorkspace).toHaveBeenCalledWith( + 'path/to/file.txt', + mockConfig, + ); + expect(result).toEqual([ + { text: 'Analyze this file: ' }, + { text: 'content of path/to/file.txt' }, + ]); + }); + + it('should replace multiple different @{...} placeholders', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [ + { text: 'Compare @{file1.js} with @{file2.js}' }, + ]; + const result = await processor.process(prompt, context); + expect(mockReadPathFromWorkspace).toHaveBeenCalledTimes(2); + expect(mockReadPathFromWorkspace).toHaveBeenCalledWith( + 'file1.js', + mockConfig, + ); + expect(mockReadPathFromWorkspace).toHaveBeenCalledWith( + 'file2.js', + mockConfig, + ); + 
expect(result).toEqual([ + { text: 'Compare ' }, + { text: 'content of file1.js' }, + { text: ' with ' }, + { text: 'content of file2.js' }, + ]); + }); + + it('should handle placeholders at the beginning, middle, and end', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [ + { text: '@{start.txt} in the @{middle.txt} and @{end.txt}' }, + ]; + const result = await processor.process(prompt, context); + expect(result).toEqual([ + { text: 'content of start.txt' }, + { text: ' in the ' }, + { text: 'content of middle.txt' }, + { text: ' and ' }, + { text: 'content of end.txt' }, + ]); + }); + + it('should correctly parse paths that contain balanced braces', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [ + { text: 'Analyze @{path/with/{braces}/file.txt}' }, + ]; + const result = await processor.process(prompt, context); + expect(mockReadPathFromWorkspace).toHaveBeenCalledWith( + 'path/with/{braces}/file.txt', + mockConfig, + ); + expect(result).toEqual([ + { text: 'Analyze ' }, + { text: 'content of path/with/{braces}/file.txt' }, + ]); + }); + + it('should throw an error if the prompt contains an unclosed trigger', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [{ text: 'Hello @{world' }]; + // The new parser throws an error for unclosed injections. 
+ await expect(processor.process(prompt, context)).rejects.toThrow( + /Unclosed injection/, + ); + }); + }); + + describe('Integration and Error Handling', () => { + it('should leave the placeholder unmodified if readPathFromWorkspace throws', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [ + { text: 'Analyze @{not-found.txt} and @{good-file.txt}' }, + ]; + mockReadPathFromWorkspace.mockImplementation(async (path: string) => { + if (path === 'not-found.txt') { + throw new Error('File not found'); + } + return [{ text: `content of ${path}` }]; + }); + + const result = await processor.process(prompt, context); + expect(result).toEqual([ + { text: 'Analyze ' }, + { text: '@{not-found.txt}' }, // Placeholder is preserved as a text part + { text: ' and ' }, + { text: 'content of good-file.txt' }, + ]); + }); + }); + + describe('UI Feedback', () => { + it('should call ui.addItem with an ERROR on failure', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [{ text: 'Analyze @{bad-file.txt}' }]; + mockReadPathFromWorkspace.mockRejectedValue(new Error('Access denied')); + + await processor.process(prompt, context); + + expect(context.ui.addItem).toHaveBeenCalledTimes(1); + expect(context.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.ERROR, + text: "Failed to inject content for '@{bad-file.txt}': Access denied", + }, + expect.any(Number), + ); + }); + + it('should call ui.addItem with a WARNING if the file was ignored', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [{ text: 'Analyze @{ignored.txt}' }]; + // Simulate an ignored file by returning an empty array. + mockReadPathFromWorkspace.mockResolvedValue([]); + + const result = await processor.process(prompt, context); + + // The placeholder should be removed, resulting in only the prefix. 
+ expect(result).toEqual([{ text: 'Analyze ' }]); + + expect(context.ui.addItem).toHaveBeenCalledTimes(1); + expect(context.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.INFO, + text: "File '@{ignored.txt}' was ignored by .gitignore or .geminiignore and was not included in the prompt.", + }, + expect.any(Number), + ); + }); + + it('should NOT call ui.addItem on success', async () => { + const processor = new AtFileProcessor(); + const prompt: PartUnion[] = [{ text: 'Analyze @{good-file.txt}' }]; + await processor.process(prompt, context); + expect(context.ui.addItem).not.toHaveBeenCalled(); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/atFileProcessor.ts b/projects/gemini-cli/packages/cli/src/services/prompt-processors/atFileProcessor.ts new file mode 100644 index 0000000000000000000000000000000000000000..bff19fb63e4f0cdd07873bc8deb4dc311ee437ee --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/atFileProcessor.ts @@ -0,0 +1,96 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + flatMapTextParts, + readPathFromWorkspace, +} from '@google/gemini-cli-core'; +import type { CommandContext } from '../../ui/commands/types.js'; +import { MessageType } from '../../ui/types.js'; +import { + AT_FILE_INJECTION_TRIGGER, + type IPromptProcessor, + type PromptPipelineContent, +} from './types.js'; +import { extractInjections } from './injectionParser.js'; + +export class AtFileProcessor implements IPromptProcessor { + constructor(private readonly commandName?: string) {} + + async process( + input: PromptPipelineContent, + context: CommandContext, + ): Promise { + const config = context.services.config; + if (!config) { + return input; + } + + return flatMapTextParts(input, async (text) => { + if (!text.includes(AT_FILE_INJECTION_TRIGGER)) { + return [{ text }]; + } + + const injections = extractInjections( + text, + 
AT_FILE_INJECTION_TRIGGER, + this.commandName, + ); + if (injections.length === 0) { + return [{ text }]; + } + + const output: PromptPipelineContent = []; + let lastIndex = 0; + + for (const injection of injections) { + const prefix = text.substring(lastIndex, injection.startIndex); + if (prefix) { + output.push({ text: prefix }); + } + + const pathStr = injection.content; + try { + const fileContentParts = await readPathFromWorkspace(pathStr, config); + if (fileContentParts.length === 0) { + const uiMessage = `File '@{${pathStr}}' was ignored by .gitignore or .geminiignore and was not included in the prompt.`; + context.ui.addItem( + { type: MessageType.INFO, text: uiMessage }, + Date.now(), + ); + } + output.push(...fileContentParts); + } catch (error) { + const message = + error instanceof Error ? error.message : String(error); + const uiMessage = `Failed to inject content for '@{${pathStr}}': ${message}`; + + console.error( + `[AtFileProcessor] ${uiMessage}. Leaving placeholder in prompt.`, + ); + context.ui.addItem( + { type: MessageType.ERROR, text: uiMessage }, + Date.now(), + ); + + const placeholder = text.substring( + injection.startIndex, + injection.endIndex, + ); + output.push({ text: placeholder }); + } + lastIndex = injection.endIndex; + } + + const suffix = text.substring(lastIndex); + if (suffix) { + output.push({ text: suffix }); + } + + return output; + }); + } +} diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/injectionParser.test.ts b/projects/gemini-cli/packages/cli/src/services/prompt-processors/injectionParser.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..5ce0f8f79f7e60014f2d483f6c13cd5127d1dc82 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/injectionParser.test.ts @@ -0,0 +1,223 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { 
extractInjections } from './injectionParser.js'; + +describe('extractInjections', () => { + const SHELL_TRIGGER = '!{'; + const AT_FILE_TRIGGER = '@{'; + + describe('Basic Functionality', () => { + it('should return an empty array if no trigger is present', () => { + const prompt = 'This is a simple prompt without injections.'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toEqual([]); + }); + + it('should extract a single, simple injection', () => { + const prompt = 'Run this command: !{ls -la}'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toEqual([ + { + content: 'ls -la', + startIndex: 18, + endIndex: 27, + }, + ]); + }); + + it('should extract multiple injections', () => { + const prompt = 'First: !{cmd1}, Second: !{cmd2}'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toHaveLength(2); + expect(result[0]).toEqual({ + content: 'cmd1', + startIndex: 7, + endIndex: 14, + }); + expect(result[1]).toEqual({ + content: 'cmd2', + startIndex: 24, + endIndex: 31, + }); + }); + + it('should handle different triggers (e.g., @{)', () => { + const prompt = 'Read this file: @{path/to/file.txt}'; + const result = extractInjections(prompt, AT_FILE_TRIGGER); + expect(result).toEqual([ + { + content: 'path/to/file.txt', + startIndex: 16, + endIndex: 35, + }, + ]); + }); + }); + + describe('Positioning and Edge Cases', () => { + it('should handle injections at the start and end of the prompt', () => { + const prompt = '!{start} middle text !{end}'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toHaveLength(2); + expect(result[0]).toEqual({ + content: 'start', + startIndex: 0, + endIndex: 8, + }); + expect(result[1]).toEqual({ + content: 'end', + startIndex: 21, + endIndex: 27, + }); + }); + + it('should handle adjacent injections', () => { + const prompt = '!{A}!{B}'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toHaveLength(2); + 
expect(result[0]).toEqual({ content: 'A', startIndex: 0, endIndex: 4 }); + expect(result[1]).toEqual({ content: 'B', startIndex: 4, endIndex: 8 }); + }); + + it('should handle empty injections', () => { + const prompt = 'Empty: !{}'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toEqual([ + { + content: '', + startIndex: 7, + endIndex: 10, + }, + ]); + }); + + it('should trim whitespace within the content', () => { + const prompt = '!{ \n command with space \t }'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toEqual([ + { + content: 'command with space', + startIndex: 0, + endIndex: 29, + }, + ]); + }); + + it('should ignore similar patterns that are not the exact trigger', () => { + const prompt = 'Not a trigger: !(cmd) or {cmd} or ! {cmd}'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toEqual([]); + }); + + it('should ignore extra closing braces before the trigger', () => { + const prompt = 'Ignore this } then !{run}'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toEqual([ + { + content: 'run', + startIndex: 19, + endIndex: 25, + }, + ]); + }); + + it('should stop parsing at the first balanced closing brace (non-greedy)', () => { + // This tests that the parser doesn't greedily consume extra closing braces + const prompt = 'Run !{ls -l}} extra braces'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toEqual([ + { + content: 'ls -l', + startIndex: 4, + endIndex: 12, + }, + ]); + }); + }); + + describe('Nested Braces (Balanced)', () => { + it('should correctly parse content with simple nested braces (e.g., JSON)', () => { + const prompt = `Send JSON: !{curl -d '{"key": "value"}'}`; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toHaveLength(1); + expect(result[0].content).toBe(`curl -d '{"key": "value"}'`); + }); + + it('should correctly parse content with shell constructs (e.g., awk)', 
() => { + const prompt = `Process text: !{awk '{print $1}' file.txt}`; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toHaveLength(1); + expect(result[0].content).toBe(`awk '{print $1}' file.txt`); + }); + + it('should correctly parse multiple levels of nesting', () => { + const prompt = `!{level1 {level2 {level3}} suffix}`; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toHaveLength(1); + expect(result[0].content).toBe(`level1 {level2 {level3}} suffix`); + expect(result[0].endIndex).toBe(prompt.length); + }); + + it('should correctly parse paths containing balanced braces', () => { + const prompt = 'Analyze @{path/with/{braces}/file.txt}'; + const result = extractInjections(prompt, AT_FILE_TRIGGER); + expect(result).toHaveLength(1); + expect(result[0].content).toBe('path/with/{braces}/file.txt'); + }); + + it('should correctly handle an injection containing the trigger itself', () => { + // This works because the parser counts braces, it doesn't look for the trigger again until the current one is closed. 
+ const prompt = '!{echo "The trigger is !{ confusing }"}'; + const expectedContent = 'echo "The trigger is !{ confusing }"'; + const result = extractInjections(prompt, SHELL_TRIGGER); + expect(result).toHaveLength(1); + expect(result[0].content).toBe(expectedContent); + }); + }); + + describe('Error Handling (Unbalanced/Unclosed)', () => { + it('should throw an error for a simple unclosed injection', () => { + const prompt = 'This prompt has !{an unclosed trigger'; + expect(() => extractInjections(prompt, SHELL_TRIGGER)).toThrow( + /Invalid syntax: Unclosed injection starting at index 16 \('!{'\)/, + ); + }); + + it('should throw an error if the prompt ends inside a nested block', () => { + const prompt = 'This fails: !{outer {inner'; + expect(() => extractInjections(prompt, SHELL_TRIGGER)).toThrow( + /Invalid syntax: Unclosed injection starting at index 12 \('!{'\)/, + ); + }); + + it('should include the context name in the error message if provided', () => { + const prompt = 'Failing !{command'; + const contextName = 'test-command'; + expect(() => + extractInjections(prompt, SHELL_TRIGGER, contextName), + ).toThrow( + /Invalid syntax in command 'test-command': Unclosed injection starting at index 8/, + ); + }); + + it('should throw if content contains unbalanced braces (e.g., missing closing)', () => { + // This is functionally the same as an unclosed injection from the parser's perspective. 
+ const prompt = 'Analyze @{path/with/braces{example.txt}'; + expect(() => extractInjections(prompt, AT_FILE_TRIGGER)).toThrow( + /Invalid syntax: Unclosed injection starting at index 8 \('@{'\)/, + ); + }); + + it('should clearly state that unbalanced braces in content are not supported in the error', () => { + const prompt = 'Analyze @{path/with/braces{example.txt}'; + expect(() => extractInjections(prompt, AT_FILE_TRIGGER)).toThrow( + /Paths or commands with unbalanced braces are not supported directly/, + ); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/injectionParser.ts b/projects/gemini-cli/packages/cli/src/services/prompt-processors/injectionParser.ts new file mode 100644 index 0000000000000000000000000000000000000000..52d3226db8fd8dbb5351997c2ff27717bd1a6bec --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/injectionParser.ts @@ -0,0 +1,89 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Represents a single detected injection site in a prompt string. + */ +export interface Injection { + /** The content extracted from within the braces (e.g., the command or path), trimmed. */ + content: string; + /** The starting index of the injection (inclusive, points to the start of the trigger). */ + startIndex: number; + /** The ending index of the injection (exclusive, points after the closing '}'). */ + endIndex: number; +} + +/** + * Iteratively parses a prompt string to extract injections (e.g., !{...} or @{...}), + * correctly handling nested braces within the content. + * + * This parser relies on simple brace counting and does not support escaping. + * + * @param prompt The prompt string to parse. + * @param trigger The opening trigger sequence (e.g., '!{', '@{'). + * @param contextName Optional context name (e.g., command name) for error messages. + * @returns An array of extracted Injection objects. 
+ * @throws Error if an unclosed injection is found. + */ +export function extractInjections( + prompt: string, + trigger: string, + contextName?: string, +): Injection[] { + const injections: Injection[] = []; + let index = 0; + + while (index < prompt.length) { + const startIndex = prompt.indexOf(trigger, index); + + if (startIndex === -1) { + break; + } + + let currentIndex = startIndex + trigger.length; + let braceCount = 1; + let foundEnd = false; + + while (currentIndex < prompt.length) { + const char = prompt[currentIndex]; + + if (char === '{') { + braceCount++; + } else if (char === '}') { + braceCount--; + if (braceCount === 0) { + const injectionContent = prompt.substring( + startIndex + trigger.length, + currentIndex, + ); + const endIndex = currentIndex + 1; + + injections.push({ + content: injectionContent.trim(), + startIndex, + endIndex, + }); + + index = endIndex; + foundEnd = true; + break; + } + } + currentIndex++; + } + + // Check if the inner loop finished without finding the closing brace. + if (!foundEnd) { + const contextInfo = contextName ? ` in command '${contextName}'` : ''; + // Enforce strict parsing (Comment 1) and clarify limitations (Comment 2). + throw new Error( + `Invalid syntax${contextInfo}: Unclosed injection starting at index ${startIndex} ('${trigger}'). Ensure braces are balanced. 
Paths or commands with unbalanced braces are not supported directly.`, + ); + } + } + + return injections; +} diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/shellProcessor.test.ts b/projects/gemini-cli/packages/cli/src/services/prompt-processors/shellProcessor.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..3b2418e136f8ea2a3c88e0c017f64604f20aab11 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/shellProcessor.test.ts @@ -0,0 +1,703 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest'; +import { ConfirmationRequiredError, ShellProcessor } from './shellProcessor.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import type { CommandContext } from '../../ui/commands/types.js'; +import type { Config } from '@google/gemini-cli-core'; +import { ApprovalMode } from '@google/gemini-cli-core'; +import os from 'node:os'; +import { quote } from 'shell-quote'; +import { createPartFromText } from '@google/genai'; +import type { PromptPipelineContent } from './types.js'; + +// Helper function to determine the expected escaped string based on the current OS, +// mirroring the logic in the actual `escapeShellArg` implementation. 
+function getExpectedEscapedArgForPlatform(arg: string): string { + if (os.platform() === 'win32') { + const comSpec = (process.env['ComSpec'] || 'cmd.exe').toLowerCase(); + const isPowerShell = + comSpec.endsWith('powershell.exe') || comSpec.endsWith('pwsh.exe'); + + if (isPowerShell) { + return `'${arg.replace(/'/g, "''")}'`; + } else { + return `"${arg.replace(/"/g, '""')}"`; + } + } else { + return quote([arg]); + } +} + +// Helper to create PromptPipelineContent +function createPromptPipelineContent(text: string): PromptPipelineContent { + return [createPartFromText(text)]; +} + +const mockCheckCommandPermissions = vi.hoisted(() => vi.fn()); +const mockShellExecute = vi.hoisted(() => vi.fn()); + +vi.mock('@google/gemini-cli-core', async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + checkCommandPermissions: mockCheckCommandPermissions, + ShellExecutionService: { + execute: mockShellExecute, + }, + }; +}); + +const SUCCESS_RESULT = { + output: 'default shell output', + exitCode: 0, + error: null, + aborted: false, + signal: null, +}; + +describe('ShellProcessor', () => { + let context: CommandContext; + let mockConfig: Partial; + + beforeEach(() => { + vi.clearAllMocks(); + + mockConfig = { + getTargetDir: vi.fn().mockReturnValue('/test/dir'), + getApprovalMode: vi.fn().mockReturnValue(ApprovalMode.DEFAULT), + getShouldUseNodePtyShell: vi.fn().mockReturnValue(false), + }; + + context = createMockCommandContext({ + invocation: { + raw: '/cmd default args', + name: 'cmd', + args: 'default args', + }, + services: { + config: mockConfig as Config, + }, + session: { + sessionShellAllowlist: new Set(), + }, + }); + + mockShellExecute.mockReturnValue({ + result: Promise.resolve(SUCCESS_RESULT), + }); + + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + }); + + it('should throw an error if config is missing', async () => { + const processor = new 
ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent('!{ls}'); + const contextWithoutConfig = createMockCommandContext({ + services: { + config: null, + }, + }); + + await expect( + processor.process(prompt, contextWithoutConfig), + ).rejects.toThrow(/Security configuration not loaded/); + }); + + it('should not change the prompt if no shell injections are present', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'This is a simple prompt with no injections.', + ); + const result = await processor.process(prompt, context); + expect(result).toEqual(prompt); + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should process a single valid shell injection if allowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'The current status is: !{git status}', + ); + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'On branch main' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'git status', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledWith( + 'git status', + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + expect(result).toEqual([{ text: 'The current status is: On branch main' }]); + }); + + it('should process multiple valid shell injections if all are allowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + '!{git status} in !{pwd}', + ); + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + 
disallowedCommands: [], + }); + + mockShellExecute + .mockReturnValueOnce({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + output: 'On branch main', + }), + }) + .mockReturnValueOnce({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: '/usr/home' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledTimes(2); + expect(mockShellExecute).toHaveBeenCalledTimes(2); + expect(result).toEqual([{ text: 'On branch main in /usr/home' }]); + }); + + it('should throw ConfirmationRequiredError if a command is not allowed in default mode', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Do something dangerous: !{rm -rf /}', + ); + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['rm -rf /'], + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + ConfirmationRequiredError, + ); + }); + + it('should NOT throw ConfirmationRequiredError if a command is not allowed but approval mode is YOLO', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Do something dangerous: !{rm -rf /}', + ); + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['rm -rf /'], + }); + // Override the approval mode for this test + (mockConfig.getApprovalMode as Mock).mockReturnValue(ApprovalMode.YOLO); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'deleted' }), + }); + + const result = await processor.process(prompt, context); + + // It should proceed with execution + expect(mockShellExecute).toHaveBeenCalledWith( + 'rm -rf /', + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + expect(result).toEqual([{ text: 'Do something dangerous: deleted' }]); + }); + + it('should 
still throw an error for a hard-denied command even in YOLO mode', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Do something forbidden: !{reboot}', + ); + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['reboot'], + isHardDenial: true, // This is the key difference + blockReason: 'System commands are blocked', + }); + // Set approval mode to YOLO + (mockConfig.getApprovalMode as Mock).mockReturnValue(ApprovalMode.YOLO); + + await expect(processor.process(prompt, context)).rejects.toThrow( + /Blocked command: "reboot". Reason: System commands are blocked/, + ); + + // Ensure it never tried to execute + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should throw ConfirmationRequiredError with the correct command', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Do something dangerous: !{rm -rf /}', + ); + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['rm -rf /'], + }); + + try { + await processor.process(prompt, context); + // Fail if it doesn't throw + expect(true).toBe(false); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['rm -rf /']); + } + } + + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should throw ConfirmationRequiredError with multiple commands if multiple are disallowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + '!{cmd1} and !{cmd2}', + ); + mockCheckCommandPermissions.mockImplementation((cmd) => { + if (cmd === 'cmd1') { + return { allAllowed: false, disallowedCommands: ['cmd1'] }; + } + if (cmd === 'cmd2') { + return { allAllowed: false, 
disallowedCommands: ['cmd2'] }; + } + return { allAllowed: true, disallowedCommands: [] }; + }); + + try { + await processor.process(prompt, context); + // Fail if it doesn't throw + expect(true).toBe(false); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['cmd1', 'cmd2']); + } + } + }); + + it('should not execute any commands if at least one requires confirmation', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'First: !{echo "hello"}, Second: !{rm -rf /}', + ); + + mockCheckCommandPermissions.mockImplementation((cmd) => { + if (cmd.includes('rm')) { + return { allAllowed: false, disallowedCommands: [cmd] }; + } + return { allAllowed: true, disallowedCommands: [] }; + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + ConfirmationRequiredError, + ); + + // Ensure no commands were executed because the pipeline was halted. + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should only request confirmation for disallowed commands in a mixed prompt', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Allowed: !{ls -l}, Disallowed: !{rm -rf /}', + ); + + mockCheckCommandPermissions.mockImplementation((cmd) => ({ + allAllowed: !cmd.includes('rm'), + disallowedCommands: cmd.includes('rm') ? 
[cmd] : [], + })); + + try { + await processor.process(prompt, context); + expect.fail('Should have thrown ConfirmationRequiredError'); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['rm -rf /']); + } + } + }); + + it('should execute all commands if they are on the session allowlist', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Run !{cmd1} and !{cmd2}', + ); + + // Add commands to the session allowlist + context.session.sessionShellAllowlist = new Set(['cmd1', 'cmd2']); + + // checkCommandPermissions should now pass for these + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + + mockShellExecute + .mockReturnValueOnce({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'output1' }), + }) + .mockReturnValueOnce({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'output2' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'cmd1', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'cmd2', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledTimes(2); + expect(result).toEqual([{ text: 'Run output1 and output2' }]); + }); + + it('should trim whitespace from the command inside the injection before interpolation', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Files: !{ ls {{args}} -l }', + ); + + const rawArgs = context.invocation!.args; + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + + const expectedCommand = `ls ${expectedEscapedArgs} -l`; + + 
mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'total 0' }), + }); + + await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + expectedCommand, + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + }); + + it('should handle an empty command inside the injection gracefully (skips execution)', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = + createPromptPipelineContent('This is weird: !{}'); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).not.toHaveBeenCalled(); + expect(mockShellExecute).not.toHaveBeenCalled(); + + // It replaces !{} with an empty string. 
+ expect(result).toEqual([{ text: 'This is weird: ' }]); + }); + + describe('Error Reporting', () => { + it('should append exit code and command name on failure', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = + createPromptPipelineContent('!{cmd}'); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + output: 'some error output', + stderr: '', + exitCode: 1, + }), + }); + + const result = await processor.process(prompt, context); + + expect(result).toEqual([ + { + text: "some error output\n[Shell command 'cmd' exited with code 1]", + }, + ]); + }); + + it('should append signal info and command name if terminated by signal', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = + createPromptPipelineContent('!{cmd}'); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + output: 'output', + stderr: '', + exitCode: null, + signal: 'SIGTERM', + }), + }); + + const result = await processor.process(prompt, context); + + expect(result).toEqual([ + { + text: "output\n[Shell command 'cmd' terminated by signal SIGTERM]", + }, + ]); + }); + + it('should throw a detailed error if the shell fails to spawn', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = + createPromptPipelineContent('!{bad-command}'); + const spawnError = new Error('spawn EACCES'); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + stdout: '', + stderr: '', + exitCode: null, + error: spawnError, + aborted: false, + }), + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + "Failed to start shell command in 'test-command': spawn EACCES. 
Command: bad-command", + ); + }); + + it('should report abort status with command name if aborted', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + '!{long-running-command}', + ); + const spawnError = new Error('Aborted'); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + output: 'partial output', + stderr: '', + exitCode: null, + error: spawnError, + aborted: true, // Key difference + }), + }); + + const result = await processor.process(prompt, context); + expect(result).toEqual([ + { + text: "partial output\n[Shell command 'long-running-command' aborted]", + }, + ]); + }); + }); + + describe('Context-Aware Argument Interpolation ({{args}})', () => { + const rawArgs = 'user input'; + + beforeEach(() => { + // Update context for these tests to use specific arguments + context.invocation!.args = rawArgs; + }); + + it('should perform raw replacement if no shell injections are present (optimization path)', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'The user said: {{args}}', + ); + + const result = await processor.process(prompt, context); + + expect(result).toEqual([{ text: `The user said: ${rawArgs}` }]); + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should perform raw replacement outside !{} blocks', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Outside: {{args}}. Inside: !{echo "hello"}', + ); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'hello' }), + }); + + const result = await processor.process(prompt, context); + + expect(result).toEqual([{ text: `Outside: ${rawArgs}. 
Inside: hello` }]); + }); + + it('should perform escaped replacement inside !{} blocks', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Command: !{grep {{args}} file.txt}', + ); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'match found' }), + }); + + const result = await processor.process(prompt, context); + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + const expectedCommand = `grep ${expectedEscapedArgs} file.txt`; + + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + + expect(result).toEqual([{ text: 'Command: match found' }]); + }); + + it('should handle both raw (outside) and escaped (inside) injection simultaneously', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'User "({{args}})" requested search: !{search {{args}}}', + ); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'results' }), + }); + + const result = await processor.process(prompt, context); + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + const expectedCommand = `search ${expectedEscapedArgs}`; + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + + expect(result).toEqual([ + { text: `User "(${rawArgs})" requested search: results` }, + ]); + }); + + it('should perform security checks on the final, resolved (escaped) command', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = + createPromptPipelineContent('!{rm {{args}}}'); + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + const 
expectedResolvedCommand = `rm ${expectedEscapedArgs}`; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: [expectedResolvedCommand], + isHardDenial: false, + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + ConfirmationRequiredError, + ); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + expectedResolvedCommand, + expect.any(Object), + context.session.sessionShellAllowlist, + ); + }); + + it('should report the resolved command if a hard denial occurs', async () => { + const processor = new ShellProcessor('test-command'); + const prompt: PromptPipelineContent = + createPromptPipelineContent('!{rm {{args}}}'); + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + const expectedResolvedCommand = `rm ${expectedEscapedArgs}`; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: [expectedResolvedCommand], + isHardDenial: true, + blockReason: 'It is forbidden.', + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + `Blocked command: "${expectedResolvedCommand}". 
Reason: It is forbidden.`, + ); + }); + }); + describe('Real-World Escaping Scenarios', () => { + it('should correctly handle multiline arguments', async () => { + const processor = new ShellProcessor('test-command'); + const multilineArgs = 'first line\nsecond line'; + context.invocation!.args = multilineArgs; + const prompt: PromptPipelineContent = createPromptPipelineContent( + 'Commit message: !{git commit -m {{args}}}', + ); + + const expectedEscapedArgs = + getExpectedEscapedArgForPlatform(multilineArgs); + const expectedCommand = `git commit -m ${expectedEscapedArgs}`; + + await processor.process(prompt, context); + + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + }); + + it.each([ + { name: 'spaces', input: 'file with spaces.txt' }, + { name: 'double quotes', input: 'a "quoted" string' }, + { name: 'single quotes', input: "it's a string" }, + { name: 'command substitution (backticks)', input: '`reboot`' }, + { name: 'command substitution (dollar)', input: '$(reboot)' }, + { name: 'variable expansion', input: '$HOME' }, + { name: 'command chaining (semicolon)', input: 'a; reboot' }, + { name: 'command chaining (ampersand)', input: 'a && reboot' }, + ])('should safely escape args containing $name', async ({ input }) => { + const processor = new ShellProcessor('test-command'); + context.invocation!.args = input; + const prompt: PromptPipelineContent = + createPromptPipelineContent('!{echo {{args}}}'); + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(input); + const expectedCommand = `echo ${expectedEscapedArgs}`; + + await processor.process(prompt, context); + + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/shellProcessor.ts 
b/projects/gemini-cli/packages/cli/src/services/prompt-processors/shellProcessor.ts new file mode 100644 index 0000000000000000000000000000000000000000..d45e8f8a113129ffc2d5e4dbb3e0b109085fb214 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/shellProcessor.ts @@ -0,0 +1,207 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + ApprovalMode, + checkCommandPermissions, + escapeShellArg, + getShellConfiguration, + ShellExecutionService, + flatMapTextParts, +} from '@google/gemini-cli-core'; + +import type { CommandContext } from '../../ui/commands/types.js'; +import type { IPromptProcessor, PromptPipelineContent } from './types.js'; +import { + SHELL_INJECTION_TRIGGER, + SHORTHAND_ARGS_PLACEHOLDER, +} from './types.js'; +import { extractInjections, type Injection } from './injectionParser.js'; + +export class ConfirmationRequiredError extends Error { + constructor( + message: string, + public commandsToConfirm: string[], + ) { + super(message); + this.name = 'ConfirmationRequiredError'; + } +} + +/** + * Represents a single detected shell injection site in the prompt, + * after resolution of arguments. Extends the base Injection interface. + */ +interface ResolvedShellInjection extends Injection { + /** The command after {{args}} has been escaped and substituted. */ + resolvedCommand?: string; +} + +/** + * Handles prompt interpolation, including shell command execution (`!{...}`) + * and context-aware argument injection (`{{args}}`). + * + * This processor ensures that: + * 1. `{{args}}` outside `!{...}` are replaced with raw input. + * 2. `{{args}}` inside `!{...}` are replaced with shell-escaped input. + * 3. Shell commands are executed securely after argument substitution. + * 4. Parsing correctly handles nested braces. 
+ */ +export class ShellProcessor implements IPromptProcessor { + constructor(private readonly commandName: string) {} + + async process( + prompt: PromptPipelineContent, + context: CommandContext, + ): Promise { + return flatMapTextParts(prompt, (text) => + this.processString(text, context), + ); + } + + private async processString( + prompt: string, + context: CommandContext, + ): Promise { + const userArgsRaw = context.invocation?.args || ''; + + if (!prompt.includes(SHELL_INJECTION_TRIGGER)) { + return [ + { text: prompt.replaceAll(SHORTHAND_ARGS_PLACEHOLDER, userArgsRaw) }, + ]; + } + + const config = context.services.config; + if (!config) { + throw new Error( + `Security configuration not loaded. Cannot verify shell command permissions for '${this.commandName}'. Aborting.`, + ); + } + const { sessionShellAllowlist } = context.session; + + const injections = extractInjections( + prompt, + SHELL_INJECTION_TRIGGER, + this.commandName, + ); + + // If extractInjections found no closed blocks (and didn't throw), treat as raw. + if (injections.length === 0) { + return [ + { text: prompt.replaceAll(SHORTHAND_ARGS_PLACEHOLDER, userArgsRaw) }, + ]; + } + + const { shell } = getShellConfiguration(); + const userArgsEscaped = escapeShellArg(userArgsRaw, shell); + + const resolvedInjections: ResolvedShellInjection[] = injections.map( + (injection) => { + const command = injection.content; + + if (command === '') { + return { ...injection, resolvedCommand: undefined }; + } + + const resolvedCommand = command.replaceAll( + SHORTHAND_ARGS_PLACEHOLDER, + userArgsEscaped, + ); + return { ...injection, resolvedCommand }; + }, + ); + + const commandsToConfirm = new Set(); + for (const injection of resolvedInjections) { + const command = injection.resolvedCommand; + + if (!command) continue; + + // Security check on the final, escaped command string. 
+ const { allAllowed, disallowedCommands, blockReason, isHardDenial } = + checkCommandPermissions(command, config, sessionShellAllowlist); + + if (!allAllowed) { + if (isHardDenial) { + throw new Error( + `${this.commandName} cannot be run. Blocked command: "${command}". Reason: ${blockReason || 'Blocked by configuration.'}`, + ); + } + + // If not a hard denial, respect YOLO mode and auto-approve. + if (config.getApprovalMode() !== ApprovalMode.YOLO) { + disallowedCommands.forEach((uc) => commandsToConfirm.add(uc)); + } + } + } + + // Handle confirmation requirements. + if (commandsToConfirm.size > 0) { + throw new ConfirmationRequiredError( + 'Shell command confirmation required', + Array.from(commandsToConfirm), + ); + } + + let processedPrompt = ''; + let lastIndex = 0; + + for (const injection of resolvedInjections) { + // Append the text segment BEFORE the injection, substituting {{args}} with RAW input. + const segment = prompt.substring(lastIndex, injection.startIndex); + processedPrompt += segment.replaceAll( + SHORTHAND_ARGS_PLACEHOLDER, + userArgsRaw, + ); + + // Execute the resolved command (which already has ESCAPED input). + if (injection.resolvedCommand) { + const { result } = await ShellExecutionService.execute( + injection.resolvedCommand, + config.getTargetDir(), + () => {}, + new AbortController().signal, + config.getShouldUseNodePtyShell(), + ); + + const executionResult = await result; + + // Handle Spawn Errors + if (executionResult.error && !executionResult.aborted) { + throw new Error( + `Failed to start shell command in '${this.commandName}': ${executionResult.error.message}. Command: ${injection.resolvedCommand}`, + ); + } + + // Append the output, making stderr explicit for the model. + processedPrompt += executionResult.output; + + // Append a status message if the command did not succeed. 
+ if (executionResult.aborted) { + processedPrompt += `\n[Shell command '${injection.resolvedCommand}' aborted]`; + } else if ( + executionResult.exitCode !== 0 && + executionResult.exitCode !== null + ) { + processedPrompt += `\n[Shell command '${injection.resolvedCommand}' exited with code ${executionResult.exitCode}]`; + } else if (executionResult.signal !== null) { + processedPrompt += `\n[Shell command '${injection.resolvedCommand}' terminated by signal ${executionResult.signal}]`; + } + } + + lastIndex = injection.endIndex; + } + + // Append the remaining text AFTER the last injection, substituting {{args}} with RAW input. + const finalSegment = prompt.substring(lastIndex); + processedPrompt += finalSegment.replaceAll( + SHORTHAND_ARGS_PLACEHOLDER, + userArgsRaw, + ); + + return [{ text: processedPrompt }]; + } +} diff --git a/projects/gemini-cli/packages/cli/src/services/prompt-processors/types.ts b/projects/gemini-cli/packages/cli/src/services/prompt-processors/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..c687657443fe038e648fc9bf791b655b7c196e3a --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/prompt-processors/types.ts @@ -0,0 +1,54 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { CommandContext } from '../../ui/commands/types.js'; +import type { PartUnion } from '@google/genai'; + +/** + * Defines the input/output type for prompt processors. + */ +export type PromptPipelineContent = PartUnion[]; + +/** + * Defines the interface for a prompt processor, a module that can transform + * a prompt string before it is sent to the model. Processors are chained + * together to create a processing pipeline. + */ +export interface IPromptProcessor { + /** + * Processes a prompt input (which may contain text and multi-modal parts), + * applying a specific transformation as part of a pipeline. + * + * @param prompt The current state of the prompt string. 
This may have been + * modified by previous processors in the pipeline. + * @param context The full command context, providing access to invocation + * details (like `context.invocation.raw` and `context.invocation.args`), + * application services, and UI handlers. + * @returns A promise that resolves to the transformed prompt string, which + * will be passed to the next processor or, if it's the last one, sent to the model. + */ + process( + prompt: PromptPipelineContent, + context: CommandContext, + ): Promise; +} + +/** + * The placeholder string for shorthand argument injection in custom commands. + * When used outside of !{...}, arguments are injected raw. + * When used inside !{...}, arguments are shell-escaped. + */ +export const SHORTHAND_ARGS_PLACEHOLDER = '{{args}}'; + +/** + * The trigger string for shell command injection in custom commands. + */ +export const SHELL_INJECTION_TRIGGER = '!{'; + +/** + * The trigger string for at file injection in custom commands. + */ +export const AT_FILE_INJECTION_TRIGGER = '@{'; diff --git a/projects/gemini-cli/packages/cli/src/services/types.ts b/projects/gemini-cli/packages/cli/src/services/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..13a87687ee4a0aff423d68943b6babcc796c4545 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/services/types.ts @@ -0,0 +1,24 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { SlashCommand } from '../ui/commands/types.js'; + +/** + * Defines the contract for any class that can load and provide slash commands. + * This allows the CommandService to be extended with new command sources + * (e.g., file-based, remote APIs) without modification. + * + * Loaders should receive any necessary dependencies (like Config) via their + * constructor. + */ +export interface ICommandLoader { + /** + * Discovers and returns a list of slash commands from the loader's source. 
+ * @param signal An AbortSignal to allow cancellation. + * @returns A promise that resolves to an array of SlashCommand objects. + */ + loadCommands(signal: AbortSignal): Promise; +} diff --git a/projects/gemini-cli/packages/cli/src/test-utils/customMatchers.ts b/projects/gemini-cli/packages/cli/src/test-utils/customMatchers.ts new file mode 100644 index 0000000000000000000000000000000000000000..2a1b275ad2b5caf5ce76ba8fea073388b930134c --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/test-utils/customMatchers.ts @@ -0,0 +1,66 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/// + +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { Assertion } from 'vitest'; +import { expect } from 'vitest'; +import type { TextBuffer } from '../ui/components/shared/text-buffer.js'; + +// RegExp to detect invalid characters: backspace, and ANSI escape codes +// eslint-disable-next-line no-control-regex +const invalidCharsRegex = /[\b\x1b]/; + +function toHaveOnlyValidCharacters(this: Assertion, buffer: TextBuffer) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const { isNot } = this as any; + let pass = true; + const invalidLines: Array<{ line: number; content: string }> = []; + + for (let i = 0; i < buffer.lines.length; i++) { + const line = buffer.lines[i]; + if (line.includes('\n')) { + pass = false; + invalidLines.push({ line: i, content: line }); + break; // Fail fast on newlines + } + if (invalidCharsRegex.test(line)) { + pass = false; + invalidLines.push({ line: i, content: line }); + } + } + + return { + pass, + message: () => + `Expected buffer ${isNot ? 
'not ' : ''}to have only valid characters, but found invalid characters in lines:\n${invalidLines + .map((l) => ` [${l.line}]: "${l.content}"`) /* This line was changed */ + .join('\n')}`, + actual: buffer.lines, + expected: 'Lines with no line breaks, backspaces, or escape codes.', + }; +} + +expect.extend({ + toHaveOnlyValidCharacters, + // eslint-disable-next-line @typescript-eslint/no-explicit-any +} as any); + +// Extend Vitest's `expect` interface with the custom matcher's type definition. +declare module 'vitest' { + interface Assertion { + toHaveOnlyValidCharacters(): T; + } + interface AsymmetricMatchersContaining { + toHaveOnlyValidCharacters(): void; + } +} diff --git a/projects/gemini-cli/packages/cli/src/test-utils/mockCommandContext.test.ts b/projects/gemini-cli/packages/cli/src/test-utils/mockCommandContext.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..310bf748645270564d8f8832d3ea68921386cd28 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/test-utils/mockCommandContext.test.ts @@ -0,0 +1,62 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect } from 'vitest'; +import { createMockCommandContext } from './mockCommandContext.js'; + +describe('createMockCommandContext', () => { + it('should return a valid CommandContext object with default mocks', () => { + const context = createMockCommandContext(); + + // Just a few spot checks to ensure the structure is correct + // and functions are mocks. 
+ expect(context).toBeDefined(); + expect(context.ui.addItem).toBeInstanceOf(Function); + expect(vi.isMockFunction(context.ui.addItem)).toBe(true); + }); + + it('should apply top-level overrides correctly', () => { + const mockClear = vi.fn(); + const overrides = { + ui: { + clear: mockClear, + }, + }; + + const context = createMockCommandContext(overrides); + + // Call the function to see if the override was used + context.ui.clear(); + + // Assert that our specific mock was called, not the default + expect(mockClear).toHaveBeenCalled(); + // And that other defaults are still in place + expect(vi.isMockFunction(context.ui.addItem)).toBe(true); + }); + + it('should apply deeply nested overrides correctly', () => { + // This is the most important test for factory's logic. + const mockConfig = { + getProjectRoot: () => '/test/project', + getModel: () => 'gemini-pro', + }; + + const overrides = { + services: { + config: mockConfig, + }, + }; + + const context = createMockCommandContext(overrides); + + expect(context.services.config).toBeDefined(); + expect(context.services.config?.getModel()).toBe('gemini-pro'); + expect(context.services.config?.getProjectRoot()).toBe('/test/project'); + + // Verify a default property on the same nested object is still there + expect(context.services.logger).toBeDefined(); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/test-utils/mockCommandContext.ts b/projects/gemini-cli/packages/cli/src/test-utils/mockCommandContext.ts new file mode 100644 index 0000000000000000000000000000000000000000..648e3c8fd62c969a7b9b6d8d7501ed115011700b --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/test-utils/mockCommandContext.ts @@ -0,0 +1,104 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi } from 'vitest'; +import type { CommandContext } from '../ui/commands/types.js'; +import type { LoadedSettings } from '../config/settings.js'; +import type { GitService } from 
'@google/gemini-cli-core'; +import type { SessionStatsState } from '../ui/contexts/SessionContext.js'; + +// A utility type to make all properties of an object, and its nested objects, partial. +type DeepPartial = T extends object + ? { + [P in keyof T]?: DeepPartial; + } + : T; + +/** + * Creates a deep, fully-typed mock of the CommandContext for use in tests. + * All functions are pre-mocked with `vi.fn()`. + * + * @param overrides - A deep partial object to override any default mock values. + * @returns A complete, mocked CommandContext object. + */ +export const createMockCommandContext = ( + overrides: DeepPartial = {}, +): CommandContext => { + const defaultMocks: CommandContext = { + invocation: { + raw: '', + name: '', + args: '', + }, + services: { + config: null, + settings: { merged: {} } as LoadedSettings, + git: undefined as GitService | undefined, + logger: { + log: vi.fn(), + logMessage: vi.fn(), + saveCheckpoint: vi.fn(), + loadCheckpoint: vi.fn().mockResolvedValue([]), + // eslint-disable-next-line @typescript-eslint/no-explicit-any + } as any, // Cast because Logger is a class. 
+ }, + ui: { + addItem: vi.fn(), + clear: vi.fn(), + setDebugMessage: vi.fn(), + pendingItem: null, + setPendingItem: vi.fn(), + loadHistory: vi.fn(), + toggleCorgiMode: vi.fn(), + toggleVimEnabled: vi.fn(), + // eslint-disable-next-line @typescript-eslint/no-explicit-any + } as any, + session: { + sessionShellAllowlist: new Set(), + stats: { + sessionStartTime: new Date(), + lastPromptTokenCount: 0, + metrics: { + models: {}, + tools: { + totalCalls: 0, + totalSuccess: 0, + totalFail: 0, + totalDurationMs: 0, + totalDecisions: { accept: 0, reject: 0, modify: 0 }, + byName: {}, + }, + }, + } as SessionStatsState, + }, + }; + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const merge = (target: any, source: any): any => { + const output = { ...target }; + + for (const key in source) { + if (Object.prototype.hasOwnProperty.call(source, key)) { + const sourceValue = source[key]; + const targetValue = output[key]; + + if ( + // We only want to recursively merge plain objects + Object.prototype.toString.call(sourceValue) === '[object Object]' && + Object.prototype.toString.call(targetValue) === '[object Object]' + ) { + output[key] = merge(targetValue, sourceValue); + } else { + // If not, we do a direct assignment. This preserves Date objects and others. 
+ output[key] = sourceValue; + } + } + } + return output; + }; + + return merge(defaultMocks, overrides); +}; diff --git a/projects/gemini-cli/packages/cli/src/test-utils/render.tsx b/projects/gemini-cli/packages/cli/src/test-utils/render.tsx new file mode 100644 index 0000000000000000000000000000000000000000..0aff7c744911d1b6ad472263bedc441814610a6f --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/test-utils/render.tsx @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { render } from 'ink-testing-library'; +import type React from 'react'; +import { KeypressProvider } from '../ui/contexts/KeypressContext.js'; + +export const renderWithProviders = ( + component: React.ReactElement, +): ReturnType => + render( + + {component} + , + ); diff --git a/projects/gemini-cli/packages/cli/src/ui/App.test.tsx b/projects/gemini-cli/packages/cli/src/ui/App.test.tsx new file mode 100644 index 0000000000000000000000000000000000000000..7fffa157434be7c75a94f3cdab1512fc42e23d31 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/ui/App.test.tsx @@ -0,0 +1,1689 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { Mock } from 'vitest'; +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { waitFor } from '@testing-library/react'; +import { renderWithProviders } from '../test-utils/render.js'; +import { AppWrapper as App } from './App.js'; +import type { + AccessibilitySettings, + MCPServerConfig, + ToolRegistry, + SandboxConfig, + GeminiClient, + AuthType, +} from '@google/gemini-cli-core'; +import { + ApprovalMode, + ideContext, + Config as ServerConfig, +} from '@google/gemini-cli-core'; +import type { SettingsFile, Settings } from '../config/settings.js'; +import { LoadedSettings } from '../config/settings.js'; +import process from 'node:process'; +import { useGeminiStream } from './hooks/useGeminiStream.js'; 
+import { useConsoleMessages } from './hooks/useConsoleMessages.js'; +import type { ConsoleMessageItem } from './types.js'; +import { StreamingState } from './types.js'; +import { Tips } from './components/Tips.js'; +import type { UpdateObject } from './utils/updateCheck.js'; +import { checkForUpdates } from './utils/updateCheck.js'; +import { EventEmitter } from 'node:events'; +import { updateEventEmitter } from '../utils/updateEventEmitter.js'; +import * as auth from '../config/auth.js'; +import * as useTerminalSize from './hooks/useTerminalSize.js'; + +// Define a more complete mock server config based on actual Config +interface MockServerConfig { + apiKey: string; + model: string; + sandbox?: SandboxConfig; + targetDir: string; + debugMode: boolean; + question?: string; + fullContext: boolean; + coreTools?: string[]; + toolDiscoveryCommand?: string; + toolCallCommand?: string; + mcpServerCommand?: string; + mcpServers?: Record; // Use imported MCPServerConfig + userAgent: string; + userMemory: string; + geminiMdFileCount: number; + approvalMode: ApprovalMode; + vertexai?: boolean; + showMemoryUsage?: boolean; + accessibility?: AccessibilitySettings; + embeddingModel: string; + + getApiKey: Mock<() => string>; + getModel: Mock<() => string>; + getSandbox: Mock<() => SandboxConfig | undefined>; + getTargetDir: Mock<() => string>; + getToolRegistry: Mock<() => ToolRegistry>; // Use imported ToolRegistry type + getDebugMode: Mock<() => boolean>; + getQuestion: Mock<() => string | undefined>; + getFullContext: Mock<() => boolean>; + getCoreTools: Mock<() => string[] | undefined>; + getToolDiscoveryCommand: Mock<() => string | undefined>; + getToolCallCommand: Mock<() => string | undefined>; + getMcpServerCommand: Mock<() => string | undefined>; + getMcpServers: Mock<() => Record | undefined>; + getExtensions: Mock< + () => Array<{ name: string; version: string; isActive: boolean }> + >; + getBlockedMcpServers: Mock< + () => Array<{ name: string; extensionName: 
string }> + >; + getUserAgent: Mock<() => string>; + getUserMemory: Mock<() => string>; + setUserMemory: Mock<(newUserMemory: string) => void>; + getGeminiMdFileCount: Mock<() => number>; + setGeminiMdFileCount: Mock<(count: number) => void>; + getApprovalMode: Mock<() => ApprovalMode>; + setApprovalMode: Mock<(skip: ApprovalMode) => void>; + getVertexAI: Mock<() => boolean | undefined>; + getShowMemoryUsage: Mock<() => boolean>; + getAccessibility: Mock<() => AccessibilitySettings>; + getProjectRoot: Mock<() => string | undefined>; + getAllGeminiMdFilenames: Mock<() => string[]>; + getGeminiClient: Mock<() => GeminiClient | undefined>; + getUserTier: Mock<() => Promise>; + getIdeClient: Mock<() => { getCurrentIde: Mock<() => string | undefined> }>; + getScreenReader: Mock<() => boolean>; +} + +// Mock @google/gemini-cli-core and its Config class +vi.mock('@google/gemini-cli-core', async (importOriginal) => { + const actualCore = + await importOriginal(); + const ConfigClassMock = vi + .fn() + .mockImplementation((optionsPassedToConstructor) => { + const opts = { ...optionsPassedToConstructor }; // Clone + // Basic mock structure, will be extended by the instance in tests + return { + apiKey: opts.apiKey || 'test-key', + model: opts.model || 'test-model-in-mock-factory', + sandbox: opts.sandbox, + targetDir: opts.targetDir || '/test/dir', + debugMode: opts.debugMode || false, + question: opts.question, + fullContext: opts.fullContext ?? false, + coreTools: opts.coreTools, + toolDiscoveryCommand: opts.toolDiscoveryCommand, + toolCallCommand: opts.toolCallCommand, + mcpServerCommand: opts.mcpServerCommand, + mcpServers: opts.mcpServers, + userAgent: opts.userAgent || 'test-agent', + userMemory: opts.userMemory || '', + geminiMdFileCount: opts.geminiMdFileCount || 0, + approvalMode: opts.approvalMode ?? ApprovalMode.DEFAULT, + vertexai: opts.vertexai, + showMemoryUsage: opts.showMemoryUsage ?? false, + accessibility: opts.accessibility ?? 
{}, + embeddingModel: opts.embeddingModel || 'test-embedding-model', + + getApiKey: vi.fn(() => opts.apiKey || 'test-key'), + getModel: vi.fn(() => opts.model || 'test-model-in-mock-factory'), + getSandbox: vi.fn(() => opts.sandbox), + getTargetDir: vi.fn(() => opts.targetDir || '/test/dir'), + getToolRegistry: vi.fn(() => ({}) as ToolRegistry), // Simple mock + getDebugMode: vi.fn(() => opts.debugMode || false), + getQuestion: vi.fn(() => opts.question), + getFullContext: vi.fn(() => opts.fullContext ?? false), + getCoreTools: vi.fn(() => opts.coreTools), + getToolDiscoveryCommand: vi.fn(() => opts.toolDiscoveryCommand), + getToolCallCommand: vi.fn(() => opts.toolCallCommand), + getMcpServerCommand: vi.fn(() => opts.mcpServerCommand), + getMcpServers: vi.fn(() => opts.mcpServers), + getPromptRegistry: vi.fn(), + getExtensions: vi.fn(() => []), + getBlockedMcpServers: vi.fn(() => []), + getUserAgent: vi.fn(() => opts.userAgent || 'test-agent'), + getUserMemory: vi.fn(() => opts.userMemory || ''), + setUserMemory: vi.fn(), + getGeminiMdFileCount: vi.fn(() => opts.geminiMdFileCount || 0), + setGeminiMdFileCount: vi.fn(), + getApprovalMode: vi.fn(() => opts.approvalMode ?? ApprovalMode.DEFAULT), + setApprovalMode: vi.fn(), + getVertexAI: vi.fn(() => opts.vertexai), + getShowMemoryUsage: vi.fn(() => opts.showMemoryUsage ?? false), + getAccessibility: vi.fn(() => opts.accessibility ?? {}), + getProjectRoot: vi.fn(() => opts.targetDir), + getEnablePromptCompletion: vi.fn(() => false), + getGeminiClient: vi.fn(() => ({ + getUserTier: vi.fn(), + })), + getCheckpointingEnabled: vi.fn(() => opts.checkpointing ?? 
true), + getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md']), + setFlashFallbackHandler: vi.fn(), + getSessionId: vi.fn(() => 'test-session-id'), + getUserTier: vi.fn().mockResolvedValue(undefined), + getIdeMode: vi.fn(() => true), + getWorkspaceContext: vi.fn(() => ({ + getDirectories: vi.fn(() => []), + })), + getIdeClient: vi.fn(() => ({ + getCurrentIde: vi.fn(() => 'vscode'), + getDetectedIdeDisplayName: vi.fn(() => 'VSCode'), + addStatusChangeListener: vi.fn(), + removeStatusChangeListener: vi.fn(), + getConnectionStatus: vi.fn(() => 'connected'), + })), + isTrustedFolder: vi.fn(() => true), + getScreenReader: vi.fn(() => false), + getFolderTrustFeature: vi.fn(() => false), + getFolderTrust: vi.fn(() => false), + }; + }); + + const ideContextMock = { + getIdeContext: vi.fn(), + subscribeToIdeContext: vi.fn(() => vi.fn()), // subscribe returns an unsubscribe function + }; + + return { + ...actualCore, + Config: ConfigClassMock, + MCPServerConfig: actualCore.MCPServerConfig, + getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md']), + ideContext: ideContextMock, + isGitRepository: vi.fn(), + }; +}); + +// Mock heavy dependencies or those with side effects +vi.mock('./hooks/useGeminiStream', () => ({ + useGeminiStream: vi.fn(() => ({ + streamingState: 'Idle', + submitQuery: vi.fn(), + initError: null, + pendingHistoryItems: [], + thought: null, + })), +})); + +vi.mock('./hooks/useAuthCommand', () => ({ + useAuthCommand: vi.fn(() => ({ + isAuthDialogOpen: false, + openAuthDialog: vi.fn(), + handleAuthSelect: vi.fn(), + handleAuthHighlight: vi.fn(), + isAuthenticating: false, + cancelAuthentication: vi.fn(), + })), +})); + +vi.mock('./hooks/useFolderTrust', () => ({ + useFolderTrust: vi.fn(() => ({ + isFolderTrustDialogOpen: false, + handleFolderTrustSelect: vi.fn(), + isRestarting: false, + })), +})); + +vi.mock('./hooks/useLogger', () => ({ + useLogger: vi.fn(() => ({ + getPreviousUserMessages: vi.fn().mockResolvedValue([]), + })), +})); + 
+vi.mock('./hooks/useInputHistoryStore.js', () => ({ + useInputHistoryStore: vi.fn(() => ({ + inputHistory: [], + addInput: vi.fn(), + initializeFromLogger: vi.fn(), + })), +})); + +vi.mock('./hooks/useConsoleMessages.js', () => ({ + useConsoleMessages: vi.fn(() => ({ + consoleMessages: [], + handleNewMessage: vi.fn(), + clearConsoleMessages: vi.fn(), + })), +})); + +vi.mock('../config/config.js', async (importOriginal) => { + const actual = await importOriginal(); + return { + // @ts-expect-error - this is fine + ...actual, + loadHierarchicalGeminiMemory: vi + .fn() + .mockResolvedValue({ memoryContent: '', fileCount: 0 }), + }; +}); + +vi.mock('./components/Tips.js', () => ({ + Tips: vi.fn(() => null), +})); + +vi.mock('./components/Header.js', () => ({ + Header: vi.fn(() => null), +})); + +vi.mock('./utils/updateCheck.js', () => ({ + checkForUpdates: vi.fn(), +})); + +vi.mock('../config/auth.js', () => ({ + validateAuthMethod: vi.fn(), +})); + +vi.mock('../hooks/useTerminalSize.js', () => ({ + useTerminalSize: vi.fn(), +})); + +const mockedCheckForUpdates = vi.mocked(checkForUpdates); +const { isGitRepository: mockedIsGitRepository } = vi.mocked( + await import('@google/gemini-cli-core'), +); + +vi.mock('node:child_process'); + +describe('App UI', () => { + let mockConfig: MockServerConfig; + let mockSettings: LoadedSettings; + let mockVersion: string; + let currentUnmount: (() => void) | undefined; + + const createMockSettings = ( + settings: { + system?: Partial; + user?: Partial; + workspace?: Partial; + } = {}, + ): LoadedSettings => { + const systemSettingsFile: SettingsFile = { + path: '/system/settings.json', + settings: settings.system || {}, + }; + const systemDefaultsFile: SettingsFile = { + path: '/system/system-defaults.json', + settings: {}, + }; + const userSettingsFile: SettingsFile = { + path: '/user/settings.json', + settings: settings.user || {}, + }; + const workspaceSettingsFile: SettingsFile = { + path: '/workspace/.gemini/settings.json', + 
settings: settings.workspace || {}, + }; + return new LoadedSettings( + systemSettingsFile, + systemDefaultsFile, + userSettingsFile, + workspaceSettingsFile, + [], + true, + new Set(), + ); + }; + + beforeEach(() => { + vi.spyOn(useTerminalSize, 'useTerminalSize').mockReturnValue({ + columns: 120, + rows: 24, + }); + + const ServerConfigMocked = vi.mocked(ServerConfig, true); + mockConfig = new ServerConfigMocked({ + embeddingModel: 'test-embedding-model', + sandbox: undefined, + targetDir: '/test/dir', + debugMode: false, + userMemory: '', + geminiMdFileCount: 0, + showMemoryUsage: false, + sessionId: 'test-session-id', + cwd: '/tmp', + model: 'model', + }) as unknown as MockServerConfig; + mockVersion = '0.0.0-test'; + + // Ensure the getShowMemoryUsage mock function is specifically set up if not covered by constructor mock + if (!mockConfig.getShowMemoryUsage) { + mockConfig.getShowMemoryUsage = vi.fn(() => false); + } + mockConfig.getShowMemoryUsage.mockReturnValue(false); // Default for most tests + + // Ensure a theme is set so the theme dialog does not appear. 
+ mockSettings = createMockSettings({ workspace: { theme: 'Default' } }); + + // Ensure getWorkspaceContext is available if not added by the constructor + if (!mockConfig.getWorkspaceContext) { + mockConfig.getWorkspaceContext = vi.fn(() => ({ + getDirectories: vi.fn(() => ['/test/dir']), + })); + } + vi.mocked(ideContext.getIdeContext).mockReturnValue(undefined); + }); + + afterEach(() => { + if (currentUnmount) { + currentUnmount(); + currentUnmount = undefined; + } + vi.clearAllMocks(); // Clear mocks after each test + }); + + describe('handleAutoUpdate', () => { + let spawnEmitter: EventEmitter; + + beforeEach(async () => { + const { spawn } = await import('node:child_process'); + spawnEmitter = new EventEmitter(); + spawnEmitter.stdout = new EventEmitter(); + spawnEmitter.stderr = new EventEmitter(); + (spawn as vi.Mock).mockReturnValue(spawnEmitter); + }); + + afterEach(() => { + delete process.env.GEMINI_CLI_DISABLE_AUTOUPDATER; + }); + + it('should not start the update process when running from git', async () => { + mockedIsGitRepository.mockResolvedValue(true); + const info: UpdateObject = { + update: { + name: '@google/gemini-cli', + latest: '1.1.0', + current: '1.0.0', + }, + message: 'Gemini CLI update available!', + }; + mockedCheckForUpdates.mockResolvedValue(info); + const { spawn } = await import('node:child_process'); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // Wait for any potential async operations to complete + await waitFor(() => { + expect(spawn).not.toHaveBeenCalled(); + }); + }); + + it('should show a success message when update succeeds', async () => { + mockedIsGitRepository.mockResolvedValue(false); + const info: UpdateObject = { + update: { + name: '@google/gemini-cli', + latest: '1.1.0', + current: '1.0.0', + }, + message: 'Update available', + }; + mockedCheckForUpdates.mockResolvedValue(info); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + 
updateEventEmitter.emit('update-success', info); + + // Wait for the success message to appear + await waitFor(() => { + expect(lastFrame()).toContain( + 'Update successful! The new version will be used on your next run.', + ); + }); + }); + + it('should show an error message when update fails', async () => { + mockedIsGitRepository.mockResolvedValue(false); + const info: UpdateObject = { + update: { + name: '@google/gemini-cli', + latest: '1.1.0', + current: '1.0.0', + }, + message: 'Update available', + }; + mockedCheckForUpdates.mockResolvedValue(info); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + updateEventEmitter.emit('update-failed', info); + + // Wait for the error message to appear + await waitFor(() => { + expect(lastFrame()).toContain( + 'Automatic update failed. Please try updating manually', + ); + }); + }); + + it('should show an error message when spawn fails', async () => { + mockedIsGitRepository.mockResolvedValue(false); + const info: UpdateObject = { + update: { + name: '@google/gemini-cli', + latest: '1.1.0', + current: '1.0.0', + }, + message: 'Update available', + }; + mockedCheckForUpdates.mockResolvedValue(info); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // We are testing the App's reaction to an `update-failed` event, + // which is what should be emitted when a spawn error occurs elsewhere. + updateEventEmitter.emit('update-failed', info); + + // Wait for the error message to appear + await waitFor(() => { + expect(lastFrame()).toContain( + 'Automatic update failed. 
Please try updating manually', + ); + }); + }); + + it('should not auto-update if GEMINI_CLI_DISABLE_AUTOUPDATER is true', async () => { + mockedIsGitRepository.mockResolvedValue(false); + process.env.GEMINI_CLI_DISABLE_AUTOUPDATER = 'true'; + const info: UpdateObject = { + update: { + name: '@google/gemini-cli', + latest: '1.1.0', + current: '1.0.0', + }, + message: 'Update available', + }; + mockedCheckForUpdates.mockResolvedValue(info); + const { spawn } = await import('node:child_process'); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // Wait for any potential async operations to complete + await waitFor(() => { + expect(spawn).not.toHaveBeenCalled(); + }); + }); + }); + + it('should display active file when available', async () => { + vi.mocked(ideContext.getIdeContext).mockReturnValue({ + workspaceState: { + openFiles: [ + { + path: '/path/to/my-file.ts', + isActive: true, + selectedText: 'hello', + timestamp: 0, + }, + ], + }, + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('1 open file (ctrl+g to view)'); + }); + + it('should not display any files when not available', async () => { + vi.mocked(ideContext.getIdeContext).mockReturnValue({ + workspaceState: { + openFiles: [], + }, + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).not.toContain('Open File'); + }); + + it('should display active file and other open files', async () => { + vi.mocked(ideContext.getIdeContext).mockReturnValue({ + workspaceState: { + openFiles: [ + { + path: '/path/to/my-file.ts', + isActive: true, + selectedText: 'hello', + timestamp: 0, + }, + { + path: '/path/to/another-file.ts', + isActive: false, + timestamp: 1, + }, + { + path: '/path/to/third-file.ts', + isActive: false, + timestamp: 2, + }, + ], + }, + }); + + const { lastFrame, 
unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('3 open files (ctrl+g to view)'); + }); + + it('should display active file and other context', async () => { + vi.mocked(ideContext.getIdeContext).mockReturnValue({ + workspaceState: { + openFiles: [ + { + path: '/path/to/my-file.ts', + isActive: true, + selectedText: 'hello', + timestamp: 0, + }, + ], + }, + }); + mockConfig.getGeminiMdFileCount.mockReturnValue(1); + mockConfig.getAllGeminiMdFilenames.mockReturnValue(['GEMINI.md']); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain( + 'Using: 1 open file (ctrl+g to view) | 1 GEMINI.md file', + ); + }); + + it('should display default "GEMINI.md" in footer when contextFileName is not set and count is 1', async () => { + mockConfig.getGeminiMdFileCount.mockReturnValue(1); + mockConfig.getAllGeminiMdFilenames.mockReturnValue(['GEMINI.md']); + // For this test, ensure showMemoryUsage is false or debugMode is false if it relies on that + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); // Wait for any async updates + expect(lastFrame()).toContain('Using: 1 GEMINI.md file'); + }); + + it('should display default "GEMINI.md" with plural when contextFileName is not set and count is > 1', async () => { + mockConfig.getGeminiMdFileCount.mockReturnValue(2); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([ + 'GEMINI.md', + 'GEMINI.md', + ]); + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('Using: 2 GEMINI.md files'); 
+ }); + + it('should display custom contextFileName in footer when set and count is 1', async () => { + mockSettings = createMockSettings({ + workspace: { + context: { fileName: 'AGENTS.md' }, + ui: { theme: 'Default' }, + }, + }); + mockConfig.getGeminiMdFileCount.mockReturnValue(1); + mockConfig.getAllGeminiMdFilenames.mockReturnValue(['AGENTS.md']); + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('Using: 1 AGENTS.md file'); + }); + + it('should display a generic message when multiple context files with different names are provided', async () => { + mockSettings = createMockSettings({ + workspace: { + context: { fileName: ['AGENTS.md', 'CONTEXT.md'] }, + ui: { theme: 'Default' }, + }, + }); + mockConfig.getGeminiMdFileCount.mockReturnValue(2); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([ + 'AGENTS.md', + 'CONTEXT.md', + ]); + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('Using: 2 context files'); + }); + + it('should display custom contextFileName with plural when set and count is > 1', async () => { + mockSettings = createMockSettings({ + workspace: { + context: { fileName: 'MY_NOTES.TXT' }, + ui: { theme: 'Default' }, + }, + }); + mockConfig.getGeminiMdFileCount.mockReturnValue(3); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([ + 'MY_NOTES.TXT', + 'MY_NOTES.TXT', + 'MY_NOTES.TXT', + ]); + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + 
expect(lastFrame()).toContain('Using: 3 MY_NOTES.TXT files'); + }); + + it('should not display context file message if count is 0, even if contextFileName is set', async () => { + mockSettings = createMockSettings({ + workspace: { + context: { fileName: 'ANY_FILE.MD' }, + ui: { theme: 'Default' }, + }, + }); + mockConfig.getGeminiMdFileCount.mockReturnValue(0); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([]); + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).not.toContain('ANY_FILE.MD'); + }); + + it('should display GEMINI.md and MCP server count when both are present', async () => { + mockConfig.getGeminiMdFileCount.mockReturnValue(2); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([ + 'GEMINI.md', + 'GEMINI.md', + ]); + mockConfig.getMcpServers.mockReturnValue({ + server1: {} as MCPServerConfig, + }); + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('1 MCP server'); + }); + + it('should display only MCP server count when GEMINI.md count is 0', async () => { + mockConfig.getGeminiMdFileCount.mockReturnValue(0); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([]); + mockConfig.getMcpServers.mockReturnValue({ + server1: {} as MCPServerConfig, + server2: {} as MCPServerConfig, + }); + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('Using: 2 MCP servers (ctrl+t to view)'); + }); + + it('should display Tips component by default', async () => { + 
const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(vi.mocked(Tips)).toHaveBeenCalled(); + }); + + it('should not display Tips component when hideTips is true', async () => { + mockSettings = createMockSettings({ + workspace: { + ui: { hideTips: true }, + }, + }); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(vi.mocked(Tips)).not.toHaveBeenCalled(); + }); + + it('should display Header component by default', async () => { + const { Header } = await import('./components/Header.js'); + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(vi.mocked(Header)).toHaveBeenCalled(); + }); + + it('should not display Header component when hideBanner is true', async () => { + const { Header } = await import('./components/Header.js'); + mockSettings = createMockSettings({ + user: { ui: { hideBanner: true } }, + }); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(vi.mocked(Header)).not.toHaveBeenCalled(); + }); + + it('should display Footer component by default', async () => { + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + // Footer should render - look for target directory which is always shown + expect(lastFrame()).toContain('/test/dir'); + }); + + it('should not display Footer component when hideFooter is true', async () => { + mockSettings = createMockSettings({ + user: { ui: { hideFooter: true } }, + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + // Footer should not render - target directory should not appear + expect(lastFrame()).not.toContain('/test/dir'); + }); + + it('should show footer if system says show, but workspace and user settings say hide', async () => 
{ + mockSettings = createMockSettings({ + system: { ui: { hideFooter: false } }, + user: { ui: { hideFooter: true } }, + workspace: { ui: { hideFooter: true } }, + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + // Footer should render because system overrides - look for target directory + expect(lastFrame()).toContain('/test/dir'); + }); + + it('should show tips if system says show, but workspace and user settings say hide', async () => { + mockSettings = createMockSettings({ + system: { ui: { hideTips: false } }, + user: { ui: { hideTips: true } }, + workspace: { ui: { hideTips: true } }, + }); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(vi.mocked(Tips)).toHaveBeenCalled(); + }); + + describe('when no theme is set', () => { + let originalNoColor: string | undefined; + + beforeEach(() => { + originalNoColor = process.env.NO_COLOR; + // Ensure no theme is set for these tests + mockSettings = createMockSettings({}); + mockConfig.getDebugMode.mockReturnValue(false); + mockConfig.getShowMemoryUsage.mockReturnValue(false); + }); + + afterEach(() => { + process.env.NO_COLOR = originalNoColor; + }); + + it('should display theme dialog if NO_COLOR is not set', async () => { + delete process.env.NO_COLOR; + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + expect(lastFrame()).toContain("I'm Feeling Lucky (esc to cancel"); + }); + + it('should display a message if NO_COLOR is set', async () => { + process.env.NO_COLOR = 'true'; + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + expect(lastFrame()).toContain("I'm Feeling Lucky (esc to cancel"); + expect(lastFrame()).not.toContain('Select Theme'); + }); + }); + + it('should render the initial UI correctly', () => { + const { lastFrame, unmount } = renderWithProviders( + , + ); + 
currentUnmount = unmount; + expect(lastFrame()).toMatchSnapshot(); + }); + + it('should render correctly with the prompt input box', () => { + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Idle, + submitQuery: vi.fn(), + initError: null, + pendingHistoryItems: [], + thought: null, + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + expect(lastFrame()).toMatchSnapshot(); + }); + + describe('with initial prompt from --prompt-interactive', () => { + it('should submit the initial prompt automatically', async () => { + const mockSubmitQuery = vi.fn(); + + mockConfig.getQuestion = vi.fn(() => 'hello from prompt-interactive'); + + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Idle, + submitQuery: mockSubmitQuery, + initError: null, + pendingHistoryItems: [], + thought: null, + }); + + mockConfig.getGeminiClient.mockReturnValue({ + isInitialized: vi.fn(() => true), + getUserTier: vi.fn(), + } as unknown as GeminiClient); + + const { unmount, rerender } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // Force a re-render to trigger useEffect + rerender( + , + ); + + await new Promise((resolve) => setTimeout(resolve, 0)); + + expect(mockSubmitQuery).toHaveBeenCalledWith( + 'hello from prompt-interactive', + ); + }); + }); + + describe('errorCount', () => { + it('should correctly sum the counts of error messages', async () => { + const mockConsoleMessages: ConsoleMessageItem[] = [ + { type: 'error', content: 'First error', count: 1 }, + { type: 'log', content: 'some log', count: 1 }, + { type: 'error', content: 'Second error', count: 3 }, + { type: 'warn', content: 'a warning', count: 1 }, + { type: 'error', content: 'Third error', count: 1 }, + ]; + + vi.mocked(useConsoleMessages).mockReturnValue({ + consoleMessages: mockConsoleMessages, + handleNewMessage: vi.fn(), + clearConsoleMessages: vi.fn(), + }); + + const { lastFrame, unmount } = 
renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + + // Total error count should be 1 + 3 + 1 = 5 + expect(lastFrame()).toContain('5 errors'); + }); + }); + + describe('auth validation', () => { + it('should call validateAuthMethod when useExternalAuth is false', async () => { + const validateAuthMethodSpy = vi.spyOn(auth, 'validateAuthMethod'); + mockSettings = createMockSettings({ + workspace: { + security: { + auth: { + selectedType: 'USE_GEMINI' as AuthType, + useExternal: false, + }, + }, + ui: { theme: 'Default' }, + }, + }); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + expect(validateAuthMethodSpy).toHaveBeenCalledWith('USE_GEMINI'); + }); + + it('should NOT call validateAuthMethod when useExternalAuth is true', async () => { + const validateAuthMethodSpy = vi.spyOn(auth, 'validateAuthMethod'); + mockSettings = createMockSettings({ + workspace: { + security: { + auth: { + selectedType: 'USE_GEMINI' as AuthType, + useExternal: true, + }, + }, + ui: { theme: 'Default' }, + }, + }); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + expect(validateAuthMethodSpy).not.toHaveBeenCalled(); + }); + }); + + describe('when in a narrow terminal', () => { + it('should render with a column layout', () => { + vi.spyOn(useTerminalSize, 'useTerminalSize').mockReturnValue({ + columns: 60, + rows: 24, + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + expect(lastFrame()).toMatchSnapshot(); + }); + }); + + describe('NO_COLOR smoke test', () => { + let originalNoColor: string | undefined; + + beforeEach(() => { + originalNoColor = process.env.NO_COLOR; + }); + + afterEach(() => { + process.env.NO_COLOR = originalNoColor; + }); + + it('should render without errors when NO_COLOR is set', async () => { + process.env.NO_COLOR = 'true'; + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = 
unmount; + + expect(lastFrame()).toBeTruthy(); + expect(lastFrame()).toContain('Type your message or @path/to/file'); + }); + }); + + describe('FolderTrustDialog', () => { + it('should display the folder trust dialog when isFolderTrustDialogOpen is true', async () => { + const { useFolderTrust } = await import('./hooks/useFolderTrust.js'); + vi.mocked(useFolderTrust).mockReturnValue({ + isFolderTrustDialogOpen: true, + handleFolderTrustSelect: vi.fn(), + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('Do you trust this folder?'); + }); + + it('should display the folder trust dialog when the feature is enabled but the folder is not trusted', async () => { + const { useFolderTrust } = await import('./hooks/useFolderTrust.js'); + vi.mocked(useFolderTrust).mockReturnValue({ + isFolderTrustDialogOpen: true, + handleFolderTrustSelect: vi.fn(), + }); + mockConfig.isTrustedFolder.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('Do you trust this folder?'); + }); + + it('should not display the folder trust dialog when the feature is disabled', async () => { + const { useFolderTrust } = await import('./hooks/useFolderTrust.js'); + vi.mocked(useFolderTrust).mockReturnValue({ + isFolderTrustDialogOpen: false, + handleFolderTrustSelect: vi.fn(), + }); + mockConfig.isTrustedFolder.mockReturnValue(false); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).not.toContain('Do you trust this folder?'); + }); + }); + + describe('Message Queuing', () => { + let mockSubmitQuery: typeof vi.fn; + + beforeEach(() => { + mockSubmitQuery = vi.fn(); + vi.useFakeTimers(); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it('should queue messages when 
handleFinalSubmit is called during streaming', () => { + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Responding, + submitQuery: mockSubmitQuery, + initError: null, + pendingHistoryItems: [], + thought: null, + }); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // The message should not be sent immediately during streaming + expect(mockSubmitQuery).not.toHaveBeenCalled(); + }); + + it('should auto-send queued messages when transitioning from Responding to Idle', async () => { + const mockSubmitQueryFn = vi.fn(); + + // Start with Responding state + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Responding, + submitQuery: mockSubmitQueryFn, + initError: null, + pendingHistoryItems: [], + thought: null, + }); + + const { unmount, rerender } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // Simulate the hook returning Idle state (streaming completed) + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Idle, + submitQuery: mockSubmitQueryFn, + initError: null, + pendingHistoryItems: [], + thought: null, + }); + + // Rerender to trigger the useEffect with new state + rerender( + , + ); + + // The effect uses setTimeout(100ms) before sending + await vi.advanceTimersByTimeAsync(100); + + // Note: In the actual implementation, messages would be queued first + // This test verifies the auto-send mechanism works when state transitions + }); + + it('should display queued messages with dimmed color', () => { + // This test would require being able to simulate handleFinalSubmit + // and then checking the rendered output for the queued messages + // with the ▸ prefix and dimColor styling + + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Responding, + submitQuery: mockSubmitQuery, + initError: null, + pendingHistoryItems: [], + thought: 'Processing...', + }); + + const { unmount, lastFrame } = 
renderWithProviders( + , + ); + currentUnmount = unmount; + + // The actual queued messages display is tested visually + // since we need to trigger handleFinalSubmit which is internal + const output = lastFrame(); + expect(output).toBeDefined(); + }); + + it('should clear message queue after sending', async () => { + const mockSubmitQueryFn = vi.fn(); + + // Start with idle to allow message queue to process + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Idle, + submitQuery: mockSubmitQueryFn, + initError: null, + pendingHistoryItems: [], + thought: null, + }); + + const { unmount, lastFrame } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // After sending, the queue should be cleared + // This is handled internally by setMessageQueue([]) in the useEffect + await vi.advanceTimersByTimeAsync(100); + + // Verify the component renders without errors + expect(lastFrame()).toBeDefined(); + }); + + it('should handle empty messages by filtering them out', () => { + // The handleFinalSubmit function trims and checks if length > 0 + // before adding to queue, so empty messages are filtered + + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Idle, + submitQuery: mockSubmitQuery, + initError: null, + pendingHistoryItems: [], + thought: null, + }); + + const { unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // Empty or whitespace-only messages won't be added to queue + // This is enforced by the trimmedValue.length > 0 check + expect(mockSubmitQuery).not.toHaveBeenCalled(); + }); + + it('should combine multiple queued messages with double newlines', async () => { + // This test verifies that when multiple messages are queued, + // they are combined with '\n\n' as the separator + + const mockSubmitQueryFn = vi.fn(); + + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Idle, + submitQuery: mockSubmitQueryFn, + initError: null, + 
pendingHistoryItems: [], + thought: null, + }); + + const { unmount, lastFrame } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // The combining logic uses messageQueue.join('\n\n') + // This is tested by the implementation in the useEffect + await vi.advanceTimersByTimeAsync(100); + + expect(lastFrame()).toBeDefined(); + }); + + it('should limit displayed messages to MAX_DISPLAYED_QUEUED_MESSAGES', () => { + // This test verifies the display logic handles multiple messages correctly + // by checking that the MAX_DISPLAYED_QUEUED_MESSAGES constant is respected + + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Responding, + submitQuery: mockSubmitQuery, + initError: null, + pendingHistoryItems: [], + thought: 'Processing...', + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + const output = lastFrame(); + + // Verify the display logic exists and can handle multiple messages + // The actual queue behavior is tested in the useMessageQueue hook tests + expect(output).toBeDefined(); + + // Check that the component renders without errors when there are messages to display + expect(output).not.toContain('Error'); + }); + + it('should render message queue display without errors', () => { + // Test that the message queue display logic renders correctly + // This verifies the UI changes for performance improvements work + + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Responding, + submitQuery: mockSubmitQuery, + initError: null, + pendingHistoryItems: [], + thought: 'Processing...', + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + const output = lastFrame(); + + // Verify component renders without errors + expect(output).toBeDefined(); + expect(output).not.toContain('Error'); + + // Verify the component structure is intact (loading indicator should be present) + 
expect(output).toContain('esc to cancel'); + }); + }); + + describe('debug keystroke logging', () => { + let consoleLogSpy: ReturnType; + + beforeEach(() => { + consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + }); + + afterEach(() => { + consoleLogSpy.mockRestore(); + }); + + it('should pass debugKeystrokeLogging setting to KeypressProvider', () => { + const mockSettingsWithDebug = createMockSettings({ + workspace: { + ui: { theme: 'Default' }, + advanced: { debugKeystrokeLogging: true }, + }, + }); + + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + const output = lastFrame(); + + expect(output).toBeDefined(); + expect(mockSettingsWithDebug.merged.advanced?.debugKeystrokeLogging).toBe( + true, + ); + }); + + it('should use default false value when debugKeystrokeLogging is not set', () => { + const { lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + const output = lastFrame(); + + expect(output).toBeDefined(); + expect( + mockSettings.merged.advanced?.debugKeystrokeLogging, + ).toBeUndefined(); + }); + }); + + describe('Ctrl+C behavior', () => { + it('should call cancel but only clear the prompt when a tool is executing', async () => { + const mockCancel = vi.fn(); + let onCancelSubmitCallback = () => {}; + + // Simulate a tool in the "Executing" state. 
+ vi.mocked(useGeminiStream).mockImplementation( + ( + _client, + _history, + _addItem, + _config, + _settings, + _onDebugMessage, + _handleSlashCommand, + _shellModeActive, + _getPreferredEditor, + _onAuthError, + _performMemoryRefresh, + _modelSwitchedFromQuotaError, + _setModelSwitchedFromQuotaError, + _onEditorClose, + onCancelSubmit, // Capture the cancel callback from App.tsx + ) => { + onCancelSubmitCallback = onCancelSubmit; + return { + streamingState: StreamingState.Responding, + submitQuery: vi.fn(), + initError: null, + pendingHistoryItems: [ + { + type: 'tool_group', + tools: [ + { + name: 'test_tool', + status: 'Executing', + result: '', + args: {}, + }, + ], + }, + ], + thought: null, + cancelOngoingRequest: () => { + mockCancel(); + onCancelSubmitCallback(); // <--- This is the key change + }, + }; + }, + ); + + const { stdin, lastFrame, unmount } = renderWithProviders( + , + ); + currentUnmount = unmount; + + // Simulate user typing something into the prompt while a tool is running. + stdin.write('some text'); + await new Promise((resolve) => setTimeout(resolve, 100)); + + // Verify the text is in the prompt. + expect(lastFrame()).toContain('some text'); + + // Simulate Ctrl+C. + stdin.write('\x03'); + await new Promise((resolve) => setTimeout(resolve, 100)); + + // The main cancellation handler SHOULD be called. + expect(mockCancel).toHaveBeenCalled(); + + // The prompt should now be empty as a result of the cancellation handler's logic. + // We can't directly test the buffer's state, but we can see the rendered output. 
+ await waitFor(() => { + expect(lastFrame()).not.toContain('some text'); + }); + }); + }); +}); diff --git a/projects/gemini-cli/packages/cli/src/ui/App.tsx b/projects/gemini-cli/packages/cli/src/ui/App.tsx new file mode 100644 index 0000000000000000000000000000000000000000..2783942c2f0408bf0636e2c1a47c45c3a71247b2 --- /dev/null +++ b/projects/gemini-cli/packages/cli/src/ui/App.tsx @@ -0,0 +1,1389 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { useCallback, useEffect, useMemo, useState, useRef } from 'react'; +import { + Box, + type DOMElement, + measureElement, + Static, + Text, + useStdin, + useStdout, +} from 'ink'; +import { + StreamingState, + type HistoryItem, + MessageType, + ToolCallStatus, + type HistoryItemWithoutId, +} from './types.js'; +import { useTerminalSize } from './hooks/useTerminalSize.js'; +import { useGeminiStream } from './hooks/useGeminiStream.js'; +import { useLoadingIndicator } from './hooks/useLoadingIndicator.js'; +import { useThemeCommand } from './hooks/useThemeCommand.js'; +import { useAuthCommand } from './hooks/useAuthCommand.js'; +import { useFolderTrust } from './hooks/useFolderTrust.js'; +import { useEditorSettings } from './hooks/useEditorSettings.js'; +import { useSlashCommandProcessor } from './hooks/slashCommandProcessor.js'; +import { useAutoAcceptIndicator } from './hooks/useAutoAcceptIndicator.js'; +import { useMessageQueue } from './hooks/useMessageQueue.js'; +import { useConsoleMessages } from './hooks/useConsoleMessages.js'; +import { Header } from './components/Header.js'; +import { LoadingIndicator } from './components/LoadingIndicator.js'; +import { AutoAcceptIndicator } from './components/AutoAcceptIndicator.js'; +import { ShellModeIndicator } from './components/ShellModeIndicator.js'; +import { InputPrompt } from './components/InputPrompt.js'; +import { Footer } from './components/Footer.js'; +import { ThemeDialog } from './components/ThemeDialog.js'; 
+import { AuthDialog } from './components/AuthDialog.js'; +import { AuthInProgress } from './components/AuthInProgress.js'; +import { EditorSettingsDialog } from './components/EditorSettingsDialog.js'; +import { FolderTrustDialog } from './components/FolderTrustDialog.js'; +import { ShellConfirmationDialog } from './components/ShellConfirmationDialog.js'; +import { RadioButtonSelect } from './components/shared/RadioButtonSelect.js'; +import { Colors } from './colors.js'; +import { loadHierarchicalGeminiMemory } from '../config/config.js'; +import type { LoadedSettings } from '../config/settings.js'; +import { SettingScope } from '../config/settings.js'; +import { Tips } from './components/Tips.js'; +import { ConsolePatcher } from './utils/ConsolePatcher.js'; +import { registerCleanup } from '../utils/cleanup.js'; +import { DetailedMessagesDisplay } from './components/DetailedMessagesDisplay.js'; +import { HistoryItemDisplay } from './components/HistoryItemDisplay.js'; +import { ContextSummaryDisplay } from './components/ContextSummaryDisplay.js'; +import { useHistory } from './hooks/useHistoryManager.js'; +import { useInputHistoryStore } from './hooks/useInputHistoryStore.js'; +import process from 'node:process'; +import type { EditorType, Config, IdeContext } from '@google/gemini-cli-core'; +import { + ApprovalMode, + getAllGeminiMdFilenames, + isEditorAvailable, + getErrorMessage, + AuthType, + logFlashFallback, + FlashFallbackEvent, + ideContext, + isProQuotaExceededError, + isGenericQuotaExceededError, + UserTierId, + DEFAULT_GEMINI_FLASH_MODEL, +} from '@google/gemini-cli-core'; +import type { IdeIntegrationNudgeResult } from './IdeIntegrationNudge.js'; +import { IdeIntegrationNudge } from './IdeIntegrationNudge.js'; +import { validateAuthMethod } from '../config/auth.js'; +import { useLogger } from './hooks/useLogger.js'; +import { StreamingContext } from './contexts/StreamingContext.js'; +import { + SessionStatsProvider, + useSessionStats, +} from 
'./contexts/SessionContext.js'; +import { useGitBranchName } from './hooks/useGitBranchName.js'; +import { useFocus } from './hooks/useFocus.js'; +import { useBracketedPaste } from './hooks/useBracketedPaste.js'; +import { useTextBuffer } from './components/shared/text-buffer.js'; +import { useVimMode, VimModeProvider } from './contexts/VimModeContext.js'; +import { useVim } from './hooks/vim.js'; +import type { Key } from './hooks/useKeypress.js'; +import { useKeypress } from './hooks/useKeypress.js'; +import { KeypressProvider } from './contexts/KeypressContext.js'; +import { useKittyKeyboardProtocol } from './hooks/useKittyKeyboardProtocol.js'; +import { keyMatchers, Command } from './keyMatchers.js'; +import * as fs from 'node:fs'; +import { UpdateNotification } from './components/UpdateNotification.js'; +import type { UpdateObject } from './utils/updateCheck.js'; +import ansiEscapes from 'ansi-escapes'; +import { OverflowProvider } from './contexts/OverflowContext.js'; +import { ShowMoreLines } from './components/ShowMoreLines.js'; +import { PrivacyNotice } from './privacy/PrivacyNotice.js'; +import { useSettingsCommand } from './hooks/useSettingsCommand.js'; +import { SettingsDialog } from './components/SettingsDialog.js'; +import { ProQuotaDialog } from './components/ProQuotaDialog.js'; +import { setUpdateHandler } from '../utils/handleAutoUpdate.js'; +import { appEvents, AppEvent } from '../utils/events.js'; +import { isNarrowWidth } from './utils/isNarrowWidth.js'; +import { useWorkspaceMigration } from './hooks/useWorkspaceMigration.js'; +import { WorkspaceMigrationDialog } from './components/WorkspaceMigrationDialog.js'; +import { isWorkspaceTrusted } from '../config/trustedFolders.js'; + +const CTRL_EXIT_PROMPT_DURATION_MS = 1000; +// Maximum number of queued messages to display in UI to prevent performance issues +const MAX_DISPLAYED_QUEUED_MESSAGES = 3; + +interface AppProps { + config: Config; + settings: LoadedSettings; + startupWarnings?: string[]; 
+ version: string; +} + +function isToolExecuting(pendingHistoryItems: HistoryItemWithoutId[]) { + return pendingHistoryItems.some((item) => { + if (item && item.type === 'tool_group') { + return item.tools.some( + (tool) => ToolCallStatus.Executing === tool.status, + ); + } + return false; + }); +} + +export const AppWrapper = (props: AppProps) => { + const kittyProtocolStatus = useKittyKeyboardProtocol(); + return ( + + + + + + + + ); +}; + +const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { + const isFocused = useFocus(); + useBracketedPaste(); + const [updateInfo, setUpdateInfo] = useState(null); + const { stdout } = useStdout(); + const nightly = version.includes('nightly'); + const { history, addItem, clearItems, loadHistory } = useHistory(); + + const [idePromptAnswered, setIdePromptAnswered] = useState(false); + const currentIDE = config.getIdeClient().getCurrentIde(); + useEffect(() => { + registerCleanup(() => config.getIdeClient().disconnect()); + }, [config]); + const shouldShowIdePrompt = + currentIDE && + !config.getIdeMode() && + !settings.merged.ide?.hasSeenNudge && + !idePromptAnswered; + + useEffect(() => { + const cleanup = setUpdateHandler(addItem, setUpdateInfo); + return cleanup; + }, [addItem]); + + const { + consoleMessages, + handleNewMessage, + clearConsoleMessages: clearConsoleMessagesState, + } = useConsoleMessages(); + + useEffect(() => { + const consolePatcher = new ConsolePatcher({ + onNewMessage: handleNewMessage, + debugMode: config.getDebugMode(), + }); + consolePatcher.patch(); + registerCleanup(consolePatcher.cleanup); + }, [handleNewMessage, config]); + + const { stats: sessionStats } = useSessionStats(); + const [staticNeedsRefresh, setStaticNeedsRefresh] = useState(false); + const [staticKey, setStaticKey] = useState(0); + const refreshStatic = useCallback(() => { + stdout.write(ansiEscapes.clearTerminal); + setStaticKey((prev) => prev + 1); + }, [setStaticKey, stdout]); + + const 
[geminiMdFileCount, setGeminiMdFileCount] = useState(0); + const [debugMessage, setDebugMessage] = useState(''); + const [themeError, setThemeError] = useState(null); + const [authError, setAuthError] = useState(null); + const [editorError, setEditorError] = useState(null); + const [footerHeight, setFooterHeight] = useState(0); + const [corgiMode, setCorgiMode] = useState(false); + const [isTrustedFolderState, setIsTrustedFolder] = useState( + isWorkspaceTrusted(settings.merged), + ); + const [currentModel, setCurrentModel] = useState(config.getModel()); + const [shellModeActive, setShellModeActive] = useState(false); + const [showErrorDetails, setShowErrorDetails] = useState(false); + const [showToolDescriptions, setShowToolDescriptions] = + useState(false); + + const [ctrlCPressedOnce, setCtrlCPressedOnce] = useState(false); + const [quittingMessages, setQuittingMessages] = useState< + HistoryItem[] | null + >(null); + const ctrlCTimerRef = useRef(null); + const [ctrlDPressedOnce, setCtrlDPressedOnce] = useState(false); + const ctrlDTimerRef = useRef(null); + const [constrainHeight, setConstrainHeight] = useState(true); + const [showPrivacyNotice, setShowPrivacyNotice] = useState(false); + const [modelSwitchedFromQuotaError, setModelSwitchedFromQuotaError] = + useState(false); + const [userTier, setUserTier] = useState(undefined); + const [ideContextState, setIdeContextState] = useState< + IdeContext | undefined + >(); + const [showEscapePrompt, setShowEscapePrompt] = useState(false); + const [isProcessing, setIsProcessing] = useState(false); + + const { + showWorkspaceMigrationDialog, + workspaceExtensions, + onWorkspaceMigrationDialogOpen, + onWorkspaceMigrationDialogClose, + } = useWorkspaceMigration(settings); + + const [isProQuotaDialogOpen, setIsProQuotaDialogOpen] = useState(false); + const [proQuotaDialogResolver, setProQuotaDialogResolver] = useState< + ((value: boolean) => void) | null + >(null); + + useEffect(() => { + const unsubscribe = 
ideContext.subscribeToIdeContext(setIdeContextState); + // Set the initial value + setIdeContextState(ideContext.getIdeContext()); + return unsubscribe; + }, []); + + useEffect(() => { + const openDebugConsole = () => { + setShowErrorDetails(true); + setConstrainHeight(false); // Make sure the user sees the full message. + }; + appEvents.on(AppEvent.OpenDebugConsole, openDebugConsole); + + const logErrorHandler = (errorMessage: unknown) => { + handleNewMessage({ + type: 'error', + content: String(errorMessage), + count: 1, + }); + }; + appEvents.on(AppEvent.LogError, logErrorHandler); + + return () => { + appEvents.off(AppEvent.OpenDebugConsole, openDebugConsole); + appEvents.off(AppEvent.LogError, logErrorHandler); + }; + }, [handleNewMessage]); + + const openPrivacyNotice = useCallback(() => { + setShowPrivacyNotice(true); + }, []); + + const handleEscapePromptChange = useCallback((showPrompt: boolean) => { + setShowEscapePrompt(showPrompt); + }, []); + + const initialPromptSubmitted = useRef(false); + + const errorCount = useMemo( + () => + consoleMessages + .filter((msg) => msg.type === 'error') + .reduce((total, msg) => total + msg.count, 0), + [consoleMessages], + ); + + const { + isThemeDialogOpen, + openThemeDialog, + handleThemeSelect, + handleThemeHighlight, + } = useThemeCommand(settings, setThemeError, addItem); + + const { isSettingsDialogOpen, openSettingsDialog, closeSettingsDialog } = + useSettingsCommand(); + + const { isFolderTrustDialogOpen, handleFolderTrustSelect, isRestarting } = + useFolderTrust(settings, setIsTrustedFolder); + + const { + isAuthDialogOpen, + openAuthDialog, + handleAuthSelect, + isAuthenticating, + cancelAuthentication, + } = useAuthCommand(settings, setAuthError, config); + + useEffect(() => { + if ( + settings.merged.security?.auth?.selectedType && + !settings.merged.security?.auth?.useExternal + ) { + const error = validateAuthMethod( + settings.merged.security.auth.selectedType, + ); + if (error) { + setAuthError(error); 
+ openAuthDialog(); + } + } + }, [ + settings.merged.security?.auth?.selectedType, + settings.merged.security?.auth?.useExternal, + openAuthDialog, + setAuthError, + ]); + + // Sync user tier from config when authentication changes + useEffect(() => { + // Only sync when not currently authenticating + if (!isAuthenticating) { + setUserTier(config.getGeminiClient()?.getUserTier()); + } + }, [config, isAuthenticating]); + + const { + isEditorDialogOpen, + openEditorDialog, + handleEditorSelect, + exitEditorDialog, + } = useEditorSettings(settings, setEditorError, addItem); + + const toggleCorgiMode = useCallback(() => { + setCorgiMode((prev) => !prev); + }, []); + + const performMemoryRefresh = useCallback(async () => { + addItem( + { + type: MessageType.INFO, + text: 'Refreshing hierarchical memory (GEMINI.md or other context files)...', + }, + Date.now(), + ); + try { + const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory( + process.cwd(), + settings.merged.context?.loadMemoryFromIncludeDirectories + ? config.getWorkspaceContext().getDirectories() + : [], + config.getDebugMode(), + config.getFileService(), + settings.merged, + config.getExtensionContextFilePaths(), + config.getFolderTrust(), + settings.merged.context?.importFormat || 'tree', // Use setting or default to 'tree' + config.getFileFilteringOptions(), + ); + + config.setUserMemory(memoryContent); + config.setGeminiMdFileCount(fileCount); + setGeminiMdFileCount(fileCount); + + addItem( + { + type: MessageType.INFO, + text: `Memory refreshed successfully. ${memoryContent.length > 0 ? 
`Loaded ${memoryContent.length} characters from ${fileCount} file(s).` : 'No memory content found.'}`, + }, + Date.now(), + ); + if (config.getDebugMode()) { + console.log( + `[DEBUG] Refreshed memory content in config: ${memoryContent.substring(0, 200)}...`, + ); + } + } catch (error) { + const errorMessage = getErrorMessage(error); + addItem( + { + type: MessageType.ERROR, + text: `Error refreshing memory: ${errorMessage}`, + }, + Date.now(), + ); + console.error('Error refreshing memory:', error); + } + }, [config, addItem, settings.merged]); + + // Watch for model changes (e.g., from Flash fallback) + useEffect(() => { + const checkModelChange = () => { + const configModel = config.getModel(); + if (configModel !== currentModel) { + setCurrentModel(configModel); + } + }; + + // Check immediately and then periodically + checkModelChange(); + const interval = setInterval(checkModelChange, 1000); // Check every second + + return () => clearInterval(interval); + }, [config, currentModel]); + + // Set up Flash fallback handler + useEffect(() => { + const flashFallbackHandler = async ( + currentModel: string, + fallbackModel: string, + error?: unknown, + ): Promise => { + // Check if we've already switched to the fallback model + if (config.isInFallbackMode()) { + // If we're already in fallback mode, don't show the dialog again + return false; + } + + let message: string; + + if ( + config.getContentGeneratorConfig().authType === + AuthType.LOGIN_WITH_GOOGLE + ) { + // Use actual user tier if available; otherwise, default to FREE tier behavior (safe default) + const isPaidTier = + userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD; + + // Check if this is a Pro quota exceeded error + if (error && isProQuotaExceededError(error)) { + if (isPaidTier) { + message = `⚡ You have reached your daily ${currentModel} quota limit. +⚡ You can choose to authenticate with a paid API key or continue with the fallback model. 
+⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`; + } else { + message = `⚡ You have reached your daily ${currentModel} quota limit. +⚡ You can choose to authenticate with a paid API key or continue with the fallback model. +⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist +⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key +⚡ You can switch authentication methods by typing /auth`; + } + } else if (error && isGenericQuotaExceededError(error)) { + if (isPaidTier) { + message = `⚡ You have reached your daily quota limit. +⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session. +⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`; + } else { + message = `⚡ You have reached your daily quota limit. +⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session. +⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist +⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key +⚡ You can switch authentication methods by typing /auth`; + } + } else { + if (isPaidTier) { + // Default fallback message for other cases (like consecutive 429s) + message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session. 
+⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${currentModel} quota limit +⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`; + } else { + // Default fallback message for other cases (like consecutive 429s) + message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session. +⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${currentModel} quota limit +⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist +⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key +⚡ You can switch authentication methods by typing /auth`; + } + } + + // Add message to UI history + addItem( + { + type: MessageType.INFO, + text: message, + }, + Date.now(), + ); + + // For Pro quota errors, show the dialog and wait for user's choice + if (error && isProQuotaExceededError(error)) { + // Set the flag to prevent tool continuation + setModelSwitchedFromQuotaError(true); + // Set global quota error flag to prevent Flash model calls + config.setQuotaErrorOccurred(true); + + // Show the ProQuotaDialog and wait for user's choice + const shouldContinueWithFallback = await new Promise( + (resolve) => { + setIsProQuotaDialogOpen(true); + setProQuotaDialogResolver(() => resolve); + }, + ); + + // If user chose to continue with fallback, we don't need to stop the current prompt + if (shouldContinueWithFallback) { + // Switch to fallback model for future use + config.setModel(fallbackModel); + config.setFallbackMode(true); + logFlashFallback( + config, + new FlashFallbackEvent( + config.getContentGeneratorConfig().authType!, + ), + ); + 
return true; // Continue with current prompt using fallback model + } + + // If user chose to authenticate, stop current prompt + return false; + } + + // For other quota errors, automatically switch to fallback model + // Set the flag to prevent tool continuation + setModelSwitchedFromQuotaError(true); + // Set global quota error flag to prevent Flash model calls + config.setQuotaErrorOccurred(true); + } + + // Switch model for future use but return false to stop current retry + config.setModel(fallbackModel); + config.setFallbackMode(true); + logFlashFallback( + config, + new FlashFallbackEvent(config.getContentGeneratorConfig().authType!), + ); + return false; // Don't continue with current prompt + }; + + config.setFlashFallbackHandler(flashFallbackHandler); + }, [config, addItem, userTier]); + + // Terminal and UI setup + const { rows: terminalHeight, columns: terminalWidth } = useTerminalSize(); + const isNarrow = isNarrowWidth(terminalWidth); + const { stdin, setRawMode } = useStdin(); + const isInitialMount = useRef(true); + + const widthFraction = 0.9; + const inputWidth = Math.max( + 20, + Math.floor(terminalWidth * widthFraction) - 3, + ); + const suggestionsWidth = Math.max(20, Math.floor(terminalWidth * 0.8)); + + // Utility callbacks + const isValidPath = useCallback((filePath: string): boolean => { + try { + return fs.existsSync(filePath) && fs.statSync(filePath).isFile(); + } catch (_e) { + return false; + } + }, []); + + const getPreferredEditor = useCallback(() => { + const editorType = settings.merged.general?.preferredEditor; + const isValidEditor = isEditorAvailable(editorType); + if (!isValidEditor) { + openEditorDialog(); + return; + } + return editorType as EditorType; + }, [settings, openEditorDialog]); + + const onAuthError = useCallback(() => { + setAuthError('reauth required'); + openAuthDialog(); + }, [openAuthDialog, setAuthError]); + + // Core hooks and processors + const { + vimEnabled: vimModeEnabled, + vimMode, + toggleVimEnabled, 
+ } = useVimMode(); + + const { + handleSlashCommand, + slashCommands, + pendingHistoryItems: pendingSlashCommandHistoryItems, + commandContext, + shellConfirmationRequest, + confirmationRequest, + } = useSlashCommandProcessor( + config, + settings, + addItem, + clearItems, + loadHistory, + refreshStatic, + setDebugMessage, + openThemeDialog, + openAuthDialog, + openEditorDialog, + toggleCorgiMode, + setQuittingMessages, + openPrivacyNotice, + openSettingsDialog, + toggleVimEnabled, + setIsProcessing, + setGeminiMdFileCount, + ); + + const buffer = useTextBuffer({ + initialText: '', + viewport: { height: 10, width: inputWidth }, + stdin, + setRawMode, + isValidPath, + shellModeActive, + }); + + // Independent input history management (unaffected by /clear) + const inputHistoryStore = useInputHistoryStore(); + + // Stable reference for cancel handler to avoid circular dependency + const cancelHandlerRef = useRef<() => void>(() => {}); + + const { + streamingState, + submitQuery, + initError, + pendingHistoryItems: pendingGeminiHistoryItems, + thought, + cancelOngoingRequest, + } = useGeminiStream( + config.getGeminiClient(), + history, + addItem, + config, + settings, + setDebugMessage, + handleSlashCommand, + shellModeActive, + getPreferredEditor, + onAuthError, + performMemoryRefresh, + modelSwitchedFromQuotaError, + setModelSwitchedFromQuotaError, + refreshStatic, + () => cancelHandlerRef.current(), + ); + + const pendingHistoryItems = useMemo( + () => [...pendingSlashCommandHistoryItems, ...pendingGeminiHistoryItems], + [pendingSlashCommandHistoryItems, pendingGeminiHistoryItems], + ); + + // Message queue for handling input during streaming + const { messageQueue, addMessage, clearQueue, getQueuedMessagesText } = + useMessageQueue({ + streamingState, + submitQuery, + }); + + // Update the cancel handler with message queue support + cancelHandlerRef.current = useCallback(() => { + if (isToolExecuting(pendingHistoryItems)) { + buffer.setText(''); // Just clear 
the prompt + return; + } + + const lastUserMessage = inputHistoryStore.inputHistory.at(-1); + let textToSet = lastUserMessage || ''; + + // Append queued messages if any exist + const queuedText = getQueuedMessagesText(); + if (queuedText) { + textToSet = textToSet ? `${textToSet}\n\n${queuedText}` : queuedText; + clearQueue(); + } + + if (textToSet) { + buffer.setText(textToSet); + } + }, [ + buffer, + inputHistoryStore.inputHistory, + getQueuedMessagesText, + clearQueue, + pendingHistoryItems, + ]); + + // Input handling - queue messages for processing + const handleFinalSubmit = useCallback( + (submittedValue: string) => { + const trimmedValue = submittedValue.trim(); + if (trimmedValue.length > 0) { + // Add to independent input history + inputHistoryStore.addInput(trimmedValue); + } + // Always add to message queue + addMessage(submittedValue); + }, + [addMessage, inputHistoryStore], + ); + + const handleIdePromptComplete = useCallback( + (result: IdeIntegrationNudgeResult) => { + if (result.userSelection === 'yes') { + if (result.isExtensionPreInstalled) { + handleSlashCommand('/ide enable'); + } else { + handleSlashCommand('/ide install'); + } + settings.setValue( + SettingScope.User, + 'hasSeenIdeIntegrationNudge', + true, + ); + } else if (result.userSelection === 'dismiss') { + settings.setValue( + SettingScope.User, + 'hasSeenIdeIntegrationNudge', + true, + ); + } + setIdePromptAnswered(true); + }, + [handleSlashCommand, settings], + ); + + const { handleInput: vimHandleInput } = useVim(buffer, handleFinalSubmit); + + const { elapsedTime, currentLoadingPhrase } = + useLoadingIndicator(streamingState); + const showAutoAcceptIndicator = useAutoAcceptIndicator({ config, addItem }); + + const handleExit = useCallback( + ( + pressedOnce: boolean, + setPressedOnce: (value: boolean) => void, + timerRef: ReturnType>, + ) => { + if (pressedOnce) { + if (timerRef.current) { + clearTimeout(timerRef.current); + } + // Directly invoke the central command handler. 
+ handleSlashCommand('/quit'); + } else { + setPressedOnce(true); + timerRef.current = setTimeout(() => { + setPressedOnce(false); + timerRef.current = null; + }, CTRL_EXIT_PROMPT_DURATION_MS); + } + }, + [handleSlashCommand], + ); + + const handleGlobalKeypress = useCallback( + (key: Key) => { + // Debug log keystrokes if enabled + if (settings.merged.general?.debugKeystrokeLogging) { + console.log('[DEBUG] Keystroke:', JSON.stringify(key)); + } + + let enteringConstrainHeightMode = false; + if (!constrainHeight) { + enteringConstrainHeightMode = true; + setConstrainHeight(true); + } + + if (keyMatchers[Command.SHOW_ERROR_DETAILS](key)) { + setShowErrorDetails((prev) => !prev); + } else if (keyMatchers[Command.TOGGLE_TOOL_DESCRIPTIONS](key)) { + const newValue = !showToolDescriptions; + setShowToolDescriptions(newValue); + + const mcpServers = config.getMcpServers(); + if (Object.keys(mcpServers || {}).length > 0) { + handleSlashCommand(newValue ? '/mcp desc' : '/mcp nodesc'); + } + } else if ( + keyMatchers[Command.TOGGLE_IDE_CONTEXT_DETAIL](key) && + config.getIdeMode() && + ideContextState + ) { + // Show IDE status when in IDE mode and context is available. + handleSlashCommand('/ide status'); + } else if (keyMatchers[Command.QUIT](key)) { + // When authenticating, let AuthInProgress component handle Ctrl+C. 
+ if (isAuthenticating) { + return; + } + if (!ctrlCPressedOnce) { + cancelOngoingRequest?.(); + } + handleExit(ctrlCPressedOnce, setCtrlCPressedOnce, ctrlCTimerRef); + } else if (keyMatchers[Command.EXIT](key)) { + if (buffer.text.length > 0) { + return; + } + handleExit(ctrlDPressedOnce, setCtrlDPressedOnce, ctrlDTimerRef); + } else if ( + keyMatchers[Command.SHOW_MORE_LINES](key) && + !enteringConstrainHeightMode + ) { + setConstrainHeight(false); + } + }, + [ + constrainHeight, + setConstrainHeight, + setShowErrorDetails, + showToolDescriptions, + setShowToolDescriptions, + config, + ideContextState, + handleExit, + ctrlCPressedOnce, + setCtrlCPressedOnce, + ctrlCTimerRef, + buffer.text.length, + ctrlDPressedOnce, + setCtrlDPressedOnce, + ctrlDTimerRef, + handleSlashCommand, + isAuthenticating, + cancelOngoingRequest, + settings.merged.general?.debugKeystrokeLogging, + ], + ); + + useKeypress(handleGlobalKeypress, { + isActive: true, + }); + + useEffect(() => { + if (config) { + setGeminiMdFileCount(config.getGeminiMdFileCount()); + } + }, [config, config.getGeminiMdFileCount]); + + const logger = useLogger(config.storage); + + // Initialize independent input history from logger + useEffect(() => { + inputHistoryStore.initializeFromLogger(logger); + }, [logger, inputHistoryStore]); + + const isInputActive = + (streamingState === StreamingState.Idle || + streamingState === StreamingState.Responding) && + !initError && + !isProcessing && + !isProQuotaDialogOpen; + + const handleClearScreen = useCallback(() => { + clearItems(); + clearConsoleMessagesState(); + console.clear(); + refreshStatic(); + }, [clearItems, clearConsoleMessagesState, refreshStatic]); + + const mainControlsRef = useRef(null); + const pendingHistoryItemRef = useRef(null); + + useEffect(() => { + if (mainControlsRef.current) { + const fullFooterMeasurement = measureElement(mainControlsRef.current); + setFooterHeight(fullFooterMeasurement.height); + } + }, [terminalHeight, consoleMessages, 
showErrorDetails]); + + const staticExtraHeight = /* margins and padding */ 3; + const availableTerminalHeight = useMemo( + () => terminalHeight - footerHeight - staticExtraHeight, + [terminalHeight, footerHeight], + ); + + useEffect(() => { + // skip refreshing Static during first mount + if (isInitialMount.current) { + isInitialMount.current = false; + return; + } + + // debounce so it doesn't fire up too often during resize + const handler = setTimeout(() => { + setStaticNeedsRefresh(false); + refreshStatic(); + }, 300); + + return () => { + clearTimeout(handler); + }; + }, [terminalWidth, terminalHeight, refreshStatic]); + + useEffect(() => { + if (streamingState === StreamingState.Idle && staticNeedsRefresh) { + setStaticNeedsRefresh(false); + refreshStatic(); + } + }, [streamingState, refreshStatic, staticNeedsRefresh]); + + const filteredConsoleMessages = useMemo(() => { + if (config.getDebugMode()) { + return consoleMessages; + } + return consoleMessages.filter((msg) => msg.type !== 'debug'); + }, [consoleMessages, config]); + + const branchName = useGitBranchName(config.getTargetDir()); + + const contextFileNames = useMemo(() => { + const fromSettings = settings.merged.context?.fileName; + if (fromSettings) { + return Array.isArray(fromSettings) ? 
fromSettings : [fromSettings]; + } + return getAllGeminiMdFilenames(); + }, [settings.merged.context?.fileName]); + + const initialPrompt = useMemo(() => config.getQuestion(), [config]); + const geminiClient = config.getGeminiClient(); + + useEffect(() => { + if ( + initialPrompt && + !initialPromptSubmitted.current && + !isAuthenticating && + !isAuthDialogOpen && + !isThemeDialogOpen && + !isEditorDialogOpen && + !showPrivacyNotice && + geminiClient?.isInitialized?.() + ) { + submitQuery(initialPrompt); + initialPromptSubmitted.current = true; + } + }, [ + initialPrompt, + submitQuery, + isAuthenticating, + isAuthDialogOpen, + isThemeDialogOpen, + isEditorDialogOpen, + showPrivacyNotice, + geminiClient, + ]); + + if (quittingMessages) { + return ( + + {quittingMessages.map((item) => ( + + ))} + + ); + } + + const mainAreaWidth = Math.floor(terminalWidth * 0.9); + const debugConsoleMaxHeight = Math.floor(Math.max(terminalHeight * 0.2, 5)); + // Arbitrary threshold to ensure that items in the static area are large + // enough but not too large to make the terminal hard to use. + const staticAreaMaxItemHeight = Math.max(terminalHeight * 4, 100); + const placeholder = vimModeEnabled + ? " Press 'i' for INSERT mode and 'Esc' for NORMAL mode." + : ' Type your message or @path/to/file'; + + return ( + + + {/* + * The Static component is an Ink intrinsic in which there can only be 1 per application. + * Because of this restriction we're hacking it slightly by having a 'header' item here to + * ensure that it's statically rendered. + * + * Background on the Static Item: Anything in the Static component is written a single time + * to the console. Think of it like doing a console.log and then never using ANSI codes to + * clear that content ever again. Effectively it has a moving frame that every time new static + * content is set it'll flush content to the terminal and move the area which it's "clearing" + * down a notch. 
Without Static the area which gets erased and redrawn continuously grows. + */} + + {!( + settings.merged.ui?.hideBanner || config.getScreenReader() + ) &&
} + {!(settings.merged.ui?.hideTips || config.getScreenReader()) && ( + + )} + , + ...history.map((h) => ( + + )), + ]} + > + {(item) => item} + + + + {pendingHistoryItems.map((item, i) => ( + + ))} + + + + + + {/* Move UpdateNotification to render update notification above input area */} + {updateInfo && } + {startupWarnings.length > 0 && ( + + {startupWarnings.map((warning, index) => ( + + {warning} + + ))} + + )} + {showWorkspaceMigrationDialog ? ( + + ) : shouldShowIdePrompt && currentIDE ? ( + + ) : isProQuotaDialogOpen ? ( + { + setIsProQuotaDialogOpen(false); + if (!proQuotaDialogResolver) return; + + const resolveValue = choice !== 'auth'; + proQuotaDialogResolver(resolveValue); + setProQuotaDialogResolver(null); + + if (choice === 'auth') { + openAuthDialog(); + } else { + addItem( + { + type: MessageType.INFO, + text: 'Switched to fallback model. Tip: Press Ctrl+P to recall your previous prompt and submit it again if you wish.', + }, + Date.now(), + ); + } + }} + /> + ) : isFolderTrustDialogOpen ? ( + + ) : shellConfirmationRequest ? ( + + ) : confirmationRequest ? ( + + {confirmationRequest.prompt} + + { + confirmationRequest.onConfirm(value); + }} + /> + + + ) : isThemeDialogOpen ? ( + + {themeError && ( + + {themeError} + + )} + + + ) : isSettingsDialogOpen ? ( + + closeSettingsDialog()} + onRestartRequest={() => process.exit(0)} + /> + + ) : isAuthenticating ? ( + <> + { + setAuthError('Authentication timed out. Please try again.'); + cancelAuthentication(); + openAuthDialog(); + }} + /> + {showErrorDetails && ( + + + + + + + )} + + ) : isAuthDialogOpen ? ( + + + + ) : isEditorDialogOpen ? ( + + {editorError && ( + + {editorError} + + )} + + + ) : showPrivacyNotice ? 
( + setShowPrivacyNotice(false)} + config={config} + /> + ) : ( + <> + + + {/* Display queued messages below loading indicator */} + {messageQueue.length > 0 && ( + + {messageQueue + .slice(0, MAX_DISPLAYED_QUEUED_MESSAGES) + .map((message, index) => { + // Ensure multi-line messages are collapsed for the preview. + // Replace all whitespace (including newlines) with a single space. + const preview = message.replace(/\s+/g, ' '); + + return ( + // Ensure the Box takes full width so truncation calculates correctly + + {/* Use wrap="truncate" to ensure it fits the terminal width and doesn't wrap */} + + {preview} + + + ); + })} + {messageQueue.length > MAX_DISPLAYED_QUEUED_MESSAGES && ( + + + ... (+ + {messageQueue.length - MAX_DISPLAYED_QUEUED_MESSAGES} + more) + + + )} + + )} + + + + {process.env['GEMINI_SYSTEM_MD'] && ( + |⌐■_■| + )} + {ctrlCPressedOnce ? ( + + Press Ctrl+C again to exit. + + ) : ctrlDPressedOnce ? ( + + Press Ctrl+D again to exit. + + ) : showEscapePrompt ? ( + Press Esc again to clear. + ) : ( + + )} + + + {showAutoAcceptIndicator !== ApprovalMode.DEFAULT && + !shellModeActive && ( + + )} + {shellModeActive && } + + + + {showErrorDetails && ( + + + + + + + )} + + {isInputActive && ( + + )} + + )} + + {initError && streamingState !== StreamingState.Responding && ( + + {history.find( + (item) => + item.type === 'error' && item.text?.includes(initError), + )?.text ? ( + + { + history.find( + (item) => + item.type === 'error' && item.text?.includes(initError), + )?.text + } + + ) : ( + <> + + Initialization Error: {initError} + + + {' '} + Please check API key and configuration. + + + )} + + )} + {!settings.merged.ui?.hideFooter && ( +