Add files using upload-large-folder tool
Browse files- projects/ui/qwen-code/packages/core/src/code_assist/converter.test.ts +353 -0
- projects/ui/qwen-code/packages/core/src/code_assist/converter.ts +243 -0
- projects/ui/qwen-code/packages/core/src/code_assist/oauth2.test.ts +590 -0
- projects/ui/qwen-code/packages/core/src/code_assist/oauth2.ts +447 -0
- projects/ui/qwen-code/packages/core/src/code_assist/server.test.ts +218 -0
- projects/ui/qwen-code/packages/core/src/code_assist/server.ts +221 -0
- projects/ui/qwen-code/packages/core/src/code_assist/setup.test.ts +223 -0
- projects/ui/qwen-code/packages/core/src/code_assist/setup.ts +124 -0
- projects/ui/qwen-code/packages/core/src/code_assist/types.ts +185 -0
- projects/ui/qwen-code/packages/core/src/config/config.test.ts +592 -0
- projects/ui/qwen-code/packages/core/src/config/config.ts +919 -0
- projects/ui/qwen-code/packages/core/src/config/flashFallback.test.ts +102 -0
- projects/ui/qwen-code/packages/core/src/config/models.ts +15 -0
- projects/ui/qwen-code/packages/core/src/core/client.test.ts +2176 -0
- projects/ui/qwen-code/packages/core/src/core/client.ts +1001 -0
- projects/ui/qwen-code/packages/core/src/core/contentGenerator.test.ts +190 -0
- projects/ui/qwen-code/packages/core/src/core/contentGenerator.ts +245 -0
- projects/ui/qwen-code/packages/core/src/core/coreToolScheduler.test.ts +961 -0
- projects/ui/qwen-code/packages/core/src/core/coreToolScheduler.ts +966 -0
- projects/ui/qwen-code/packages/core/src/core/geminiChat.test.ts +890 -0
projects/ui/qwen-code/packages/core/src/code_assist/converter.test.ts
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { describe, it, expect } from 'vitest';
|
| 8 |
+
import {
|
| 9 |
+
toGenerateContentRequest,
|
| 10 |
+
fromGenerateContentResponse,
|
| 11 |
+
CaGenerateContentResponse,
|
| 12 |
+
toContents,
|
| 13 |
+
} from './converter.js';
|
| 14 |
+
import {
|
| 15 |
+
ContentListUnion,
|
| 16 |
+
GenerateContentParameters,
|
| 17 |
+
GenerateContentResponse,
|
| 18 |
+
FinishReason,
|
| 19 |
+
BlockedReason,
|
| 20 |
+
} from '@google/genai';
|
| 21 |
+
|
| 22 |
+
describe('converter', () => {
|
| 23 |
+
describe('toCodeAssistRequest', () => {
|
| 24 |
+
it('should convert a simple request with project', () => {
|
| 25 |
+
const genaiReq: GenerateContentParameters = {
|
| 26 |
+
model: 'gemini-pro',
|
| 27 |
+
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
| 28 |
+
};
|
| 29 |
+
const codeAssistReq = toGenerateContentRequest(
|
| 30 |
+
genaiReq,
|
| 31 |
+
'my-prompt',
|
| 32 |
+
'my-project',
|
| 33 |
+
'my-session',
|
| 34 |
+
);
|
| 35 |
+
expect(codeAssistReq).toEqual({
|
| 36 |
+
model: 'gemini-pro',
|
| 37 |
+
project: 'my-project',
|
| 38 |
+
request: {
|
| 39 |
+
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
| 40 |
+
systemInstruction: undefined,
|
| 41 |
+
cachedContent: undefined,
|
| 42 |
+
tools: undefined,
|
| 43 |
+
toolConfig: undefined,
|
| 44 |
+
labels: undefined,
|
| 45 |
+
safetySettings: undefined,
|
| 46 |
+
generationConfig: undefined,
|
| 47 |
+
session_id: 'my-session',
|
| 48 |
+
},
|
| 49 |
+
user_prompt_id: 'my-prompt',
|
| 50 |
+
});
|
| 51 |
+
});
|
| 52 |
+
|
| 53 |
+
it('should convert a request without a project', () => {
|
| 54 |
+
const genaiReq: GenerateContentParameters = {
|
| 55 |
+
model: 'gemini-pro',
|
| 56 |
+
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
| 57 |
+
};
|
| 58 |
+
const codeAssistReq = toGenerateContentRequest(
|
| 59 |
+
genaiReq,
|
| 60 |
+
'my-prompt',
|
| 61 |
+
undefined,
|
| 62 |
+
'my-session',
|
| 63 |
+
);
|
| 64 |
+
expect(codeAssistReq).toEqual({
|
| 65 |
+
model: 'gemini-pro',
|
| 66 |
+
project: undefined,
|
| 67 |
+
request: {
|
| 68 |
+
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
| 69 |
+
systemInstruction: undefined,
|
| 70 |
+
cachedContent: undefined,
|
| 71 |
+
tools: undefined,
|
| 72 |
+
toolConfig: undefined,
|
| 73 |
+
labels: undefined,
|
| 74 |
+
safetySettings: undefined,
|
| 75 |
+
generationConfig: undefined,
|
| 76 |
+
session_id: 'my-session',
|
| 77 |
+
},
|
| 78 |
+
user_prompt_id: 'my-prompt',
|
| 79 |
+
});
|
| 80 |
+
});
|
| 81 |
+
|
| 82 |
+
it('should convert a request with sessionId', () => {
|
| 83 |
+
const genaiReq: GenerateContentParameters = {
|
| 84 |
+
model: 'gemini-pro',
|
| 85 |
+
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
| 86 |
+
};
|
| 87 |
+
const codeAssistReq = toGenerateContentRequest(
|
| 88 |
+
genaiReq,
|
| 89 |
+
'my-prompt',
|
| 90 |
+
'my-project',
|
| 91 |
+
'session-123',
|
| 92 |
+
);
|
| 93 |
+
expect(codeAssistReq).toEqual({
|
| 94 |
+
model: 'gemini-pro',
|
| 95 |
+
project: 'my-project',
|
| 96 |
+
request: {
|
| 97 |
+
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
| 98 |
+
systemInstruction: undefined,
|
| 99 |
+
cachedContent: undefined,
|
| 100 |
+
tools: undefined,
|
| 101 |
+
toolConfig: undefined,
|
| 102 |
+
labels: undefined,
|
| 103 |
+
safetySettings: undefined,
|
| 104 |
+
generationConfig: undefined,
|
| 105 |
+
session_id: 'session-123',
|
| 106 |
+
},
|
| 107 |
+
user_prompt_id: 'my-prompt',
|
| 108 |
+
});
|
| 109 |
+
});
|
| 110 |
+
|
| 111 |
+
it('should handle string content', () => {
|
| 112 |
+
const genaiReq: GenerateContentParameters = {
|
| 113 |
+
model: 'gemini-pro',
|
| 114 |
+
contents: 'Hello',
|
| 115 |
+
};
|
| 116 |
+
const codeAssistReq = toGenerateContentRequest(
|
| 117 |
+
genaiReq,
|
| 118 |
+
'my-prompt',
|
| 119 |
+
'my-project',
|
| 120 |
+
'my-session',
|
| 121 |
+
);
|
| 122 |
+
expect(codeAssistReq.request.contents).toEqual([
|
| 123 |
+
{ role: 'user', parts: [{ text: 'Hello' }] },
|
| 124 |
+
]);
|
| 125 |
+
});
|
| 126 |
+
|
| 127 |
+
it('should handle Part[] content', () => {
|
| 128 |
+
const genaiReq: GenerateContentParameters = {
|
| 129 |
+
model: 'gemini-pro',
|
| 130 |
+
contents: [{ text: 'Hello' }, { text: 'World' }],
|
| 131 |
+
};
|
| 132 |
+
const codeAssistReq = toGenerateContentRequest(
|
| 133 |
+
genaiReq,
|
| 134 |
+
'my-prompt',
|
| 135 |
+
'my-project',
|
| 136 |
+
'my-session',
|
| 137 |
+
);
|
| 138 |
+
expect(codeAssistReq.request.contents).toEqual([
|
| 139 |
+
{ role: 'user', parts: [{ text: 'Hello' }] },
|
| 140 |
+
{ role: 'user', parts: [{ text: 'World' }] },
|
| 141 |
+
]);
|
| 142 |
+
});
|
| 143 |
+
|
| 144 |
+
it('should handle system instructions', () => {
|
| 145 |
+
const genaiReq: GenerateContentParameters = {
|
| 146 |
+
model: 'gemini-pro',
|
| 147 |
+
contents: 'Hello',
|
| 148 |
+
config: {
|
| 149 |
+
systemInstruction: 'You are a helpful assistant.',
|
| 150 |
+
},
|
| 151 |
+
};
|
| 152 |
+
const codeAssistReq = toGenerateContentRequest(
|
| 153 |
+
genaiReq,
|
| 154 |
+
'my-prompt',
|
| 155 |
+
'my-project',
|
| 156 |
+
'my-session',
|
| 157 |
+
);
|
| 158 |
+
expect(codeAssistReq.request.systemInstruction).toEqual({
|
| 159 |
+
role: 'user',
|
| 160 |
+
parts: [{ text: 'You are a helpful assistant.' }],
|
| 161 |
+
});
|
| 162 |
+
});
|
| 163 |
+
|
| 164 |
+
it('should handle generation config', () => {
|
| 165 |
+
const genaiReq: GenerateContentParameters = {
|
| 166 |
+
model: 'gemini-pro',
|
| 167 |
+
contents: 'Hello',
|
| 168 |
+
config: {
|
| 169 |
+
temperature: 0.8,
|
| 170 |
+
topK: 40,
|
| 171 |
+
},
|
| 172 |
+
};
|
| 173 |
+
const codeAssistReq = toGenerateContentRequest(
|
| 174 |
+
genaiReq,
|
| 175 |
+
'my-prompt',
|
| 176 |
+
'my-project',
|
| 177 |
+
'my-session',
|
| 178 |
+
);
|
| 179 |
+
expect(codeAssistReq.request.generationConfig).toEqual({
|
| 180 |
+
temperature: 0.8,
|
| 181 |
+
topK: 40,
|
| 182 |
+
});
|
| 183 |
+
});
|
| 184 |
+
|
| 185 |
+
it('should handle all generation config fields', () => {
|
| 186 |
+
const genaiReq: GenerateContentParameters = {
|
| 187 |
+
model: 'gemini-pro',
|
| 188 |
+
contents: 'Hello',
|
| 189 |
+
config: {
|
| 190 |
+
temperature: 0.1,
|
| 191 |
+
topP: 0.2,
|
| 192 |
+
topK: 3,
|
| 193 |
+
candidateCount: 4,
|
| 194 |
+
maxOutputTokens: 5,
|
| 195 |
+
stopSequences: ['a'],
|
| 196 |
+
responseLogprobs: true,
|
| 197 |
+
logprobs: 6,
|
| 198 |
+
presencePenalty: 0.7,
|
| 199 |
+
frequencyPenalty: 0.8,
|
| 200 |
+
seed: 9,
|
| 201 |
+
responseMimeType: 'application/json',
|
| 202 |
+
},
|
| 203 |
+
};
|
| 204 |
+
const codeAssistReq = toGenerateContentRequest(
|
| 205 |
+
genaiReq,
|
| 206 |
+
'my-prompt',
|
| 207 |
+
'my-project',
|
| 208 |
+
'my-session',
|
| 209 |
+
);
|
| 210 |
+
expect(codeAssistReq.request.generationConfig).toEqual({
|
| 211 |
+
temperature: 0.1,
|
| 212 |
+
topP: 0.2,
|
| 213 |
+
topK: 3,
|
| 214 |
+
candidateCount: 4,
|
| 215 |
+
maxOutputTokens: 5,
|
| 216 |
+
stopSequences: ['a'],
|
| 217 |
+
responseLogprobs: true,
|
| 218 |
+
logprobs: 6,
|
| 219 |
+
presencePenalty: 0.7,
|
| 220 |
+
frequencyPenalty: 0.8,
|
| 221 |
+
seed: 9,
|
| 222 |
+
responseMimeType: 'application/json',
|
| 223 |
+
});
|
| 224 |
+
});
|
| 225 |
+
});
|
| 226 |
+
|
| 227 |
+
describe('fromCodeAssistResponse', () => {
|
| 228 |
+
it('should convert a simple response', () => {
|
| 229 |
+
const codeAssistRes: CaGenerateContentResponse = {
|
| 230 |
+
response: {
|
| 231 |
+
candidates: [
|
| 232 |
+
{
|
| 233 |
+
index: 0,
|
| 234 |
+
content: {
|
| 235 |
+
role: 'model',
|
| 236 |
+
parts: [{ text: 'Hi there!' }],
|
| 237 |
+
},
|
| 238 |
+
finishReason: FinishReason.STOP,
|
| 239 |
+
safetyRatings: [],
|
| 240 |
+
},
|
| 241 |
+
],
|
| 242 |
+
},
|
| 243 |
+
};
|
| 244 |
+
const genaiRes = fromGenerateContentResponse(codeAssistRes);
|
| 245 |
+
expect(genaiRes).toBeInstanceOf(GenerateContentResponse);
|
| 246 |
+
expect(genaiRes.candidates).toEqual(codeAssistRes.response.candidates);
|
| 247 |
+
});
|
| 248 |
+
|
| 249 |
+
it('should handle prompt feedback and usage metadata', () => {
|
| 250 |
+
const codeAssistRes: CaGenerateContentResponse = {
|
| 251 |
+
response: {
|
| 252 |
+
candidates: [],
|
| 253 |
+
promptFeedback: {
|
| 254 |
+
blockReason: BlockedReason.SAFETY,
|
| 255 |
+
safetyRatings: [],
|
| 256 |
+
},
|
| 257 |
+
usageMetadata: {
|
| 258 |
+
promptTokenCount: 10,
|
| 259 |
+
candidatesTokenCount: 20,
|
| 260 |
+
totalTokenCount: 30,
|
| 261 |
+
},
|
| 262 |
+
},
|
| 263 |
+
};
|
| 264 |
+
const genaiRes = fromGenerateContentResponse(codeAssistRes);
|
| 265 |
+
expect(genaiRes.promptFeedback).toEqual(
|
| 266 |
+
codeAssistRes.response.promptFeedback,
|
| 267 |
+
);
|
| 268 |
+
expect(genaiRes.usageMetadata).toEqual(
|
| 269 |
+
codeAssistRes.response.usageMetadata,
|
| 270 |
+
);
|
| 271 |
+
});
|
| 272 |
+
|
| 273 |
+
it('should handle automatic function calling history', () => {
|
| 274 |
+
const codeAssistRes: CaGenerateContentResponse = {
|
| 275 |
+
response: {
|
| 276 |
+
candidates: [],
|
| 277 |
+
automaticFunctionCallingHistory: [
|
| 278 |
+
{
|
| 279 |
+
role: 'model',
|
| 280 |
+
parts: [
|
| 281 |
+
{
|
| 282 |
+
functionCall: {
|
| 283 |
+
name: 'test_function',
|
| 284 |
+
args: {
|
| 285 |
+
foo: 'bar',
|
| 286 |
+
},
|
| 287 |
+
},
|
| 288 |
+
},
|
| 289 |
+
],
|
| 290 |
+
},
|
| 291 |
+
],
|
| 292 |
+
},
|
| 293 |
+
};
|
| 294 |
+
const genaiRes = fromGenerateContentResponse(codeAssistRes);
|
| 295 |
+
expect(genaiRes.automaticFunctionCallingHistory).toEqual(
|
| 296 |
+
codeAssistRes.response.automaticFunctionCallingHistory,
|
| 297 |
+
);
|
| 298 |
+
});
|
| 299 |
+
});
|
| 300 |
+
|
| 301 |
+
describe('toContents', () => {
|
| 302 |
+
it('should handle Content', () => {
|
| 303 |
+
const content: ContentListUnion = {
|
| 304 |
+
role: 'user',
|
| 305 |
+
parts: [{ text: 'hello' }],
|
| 306 |
+
};
|
| 307 |
+
expect(toContents(content)).toEqual([
|
| 308 |
+
{ role: 'user', parts: [{ text: 'hello' }] },
|
| 309 |
+
]);
|
| 310 |
+
});
|
| 311 |
+
|
| 312 |
+
it('should handle array of Contents', () => {
|
| 313 |
+
const contents: ContentListUnion = [
|
| 314 |
+
{ role: 'user', parts: [{ text: 'hello' }] },
|
| 315 |
+
{ role: 'model', parts: [{ text: 'hi' }] },
|
| 316 |
+
];
|
| 317 |
+
expect(toContents(contents)).toEqual([
|
| 318 |
+
{ role: 'user', parts: [{ text: 'hello' }] },
|
| 319 |
+
{ role: 'model', parts: [{ text: 'hi' }] },
|
| 320 |
+
]);
|
| 321 |
+
});
|
| 322 |
+
|
| 323 |
+
it('should handle Part', () => {
|
| 324 |
+
const part: ContentListUnion = { text: 'a part' };
|
| 325 |
+
expect(toContents(part)).toEqual([
|
| 326 |
+
{ role: 'user', parts: [{ text: 'a part' }] },
|
| 327 |
+
]);
|
| 328 |
+
});
|
| 329 |
+
|
| 330 |
+
it('should handle array of Parts', () => {
|
| 331 |
+
const parts = [{ text: 'part 1' }, 'part 2'];
|
| 332 |
+
expect(toContents(parts)).toEqual([
|
| 333 |
+
{ role: 'user', parts: [{ text: 'part 1' }] },
|
| 334 |
+
{ role: 'user', parts: [{ text: 'part 2' }] },
|
| 335 |
+
]);
|
| 336 |
+
});
|
| 337 |
+
|
| 338 |
+
it('should handle string', () => {
|
| 339 |
+
const str: ContentListUnion = 'a string';
|
| 340 |
+
expect(toContents(str)).toEqual([
|
| 341 |
+
{ role: 'user', parts: [{ text: 'a string' }] },
|
| 342 |
+
]);
|
| 343 |
+
});
|
| 344 |
+
|
| 345 |
+
it('should handle array of strings', () => {
|
| 346 |
+
const strings: ContentListUnion = ['string 1', 'string 2'];
|
| 347 |
+
expect(toContents(strings)).toEqual([
|
| 348 |
+
{ role: 'user', parts: [{ text: 'string 1' }] },
|
| 349 |
+
{ role: 'user', parts: [{ text: 'string 2' }] },
|
| 350 |
+
]);
|
| 351 |
+
});
|
| 352 |
+
});
|
| 353 |
+
});
|
projects/ui/qwen-code/packages/core/src/code_assist/converter.ts
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import {
|
| 8 |
+
Content,
|
| 9 |
+
ContentListUnion,
|
| 10 |
+
ContentUnion,
|
| 11 |
+
GenerateContentConfig,
|
| 12 |
+
GenerateContentParameters,
|
| 13 |
+
CountTokensParameters,
|
| 14 |
+
CountTokensResponse,
|
| 15 |
+
GenerateContentResponse,
|
| 16 |
+
GenerationConfigRoutingConfig,
|
| 17 |
+
MediaResolution,
|
| 18 |
+
Candidate,
|
| 19 |
+
ModelSelectionConfig,
|
| 20 |
+
GenerateContentResponsePromptFeedback,
|
| 21 |
+
GenerateContentResponseUsageMetadata,
|
| 22 |
+
Part,
|
| 23 |
+
SafetySetting,
|
| 24 |
+
PartUnion,
|
| 25 |
+
SpeechConfigUnion,
|
| 26 |
+
ThinkingConfig,
|
| 27 |
+
ToolListUnion,
|
| 28 |
+
ToolConfig,
|
| 29 |
+
} from '@google/genai';
|
| 30 |
+
|
| 31 |
+
export interface CAGenerateContentRequest {
|
| 32 |
+
model: string;
|
| 33 |
+
project?: string;
|
| 34 |
+
user_prompt_id?: string;
|
| 35 |
+
request: VertexGenerateContentRequest;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
interface VertexGenerateContentRequest {
|
| 39 |
+
contents: Content[];
|
| 40 |
+
systemInstruction?: Content;
|
| 41 |
+
cachedContent?: string;
|
| 42 |
+
tools?: ToolListUnion;
|
| 43 |
+
toolConfig?: ToolConfig;
|
| 44 |
+
labels?: Record<string, string>;
|
| 45 |
+
safetySettings?: SafetySetting[];
|
| 46 |
+
generationConfig?: VertexGenerationConfig;
|
| 47 |
+
session_id?: string;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
interface VertexGenerationConfig {
|
| 51 |
+
temperature?: number;
|
| 52 |
+
topP?: number;
|
| 53 |
+
topK?: number;
|
| 54 |
+
candidateCount?: number;
|
| 55 |
+
maxOutputTokens?: number;
|
| 56 |
+
stopSequences?: string[];
|
| 57 |
+
responseLogprobs?: boolean;
|
| 58 |
+
logprobs?: number;
|
| 59 |
+
presencePenalty?: number;
|
| 60 |
+
frequencyPenalty?: number;
|
| 61 |
+
seed?: number;
|
| 62 |
+
responseMimeType?: string;
|
| 63 |
+
responseJsonSchema?: unknown;
|
| 64 |
+
responseSchema?: unknown;
|
| 65 |
+
routingConfig?: GenerationConfigRoutingConfig;
|
| 66 |
+
modelSelectionConfig?: ModelSelectionConfig;
|
| 67 |
+
responseModalities?: string[];
|
| 68 |
+
mediaResolution?: MediaResolution;
|
| 69 |
+
speechConfig?: SpeechConfigUnion;
|
| 70 |
+
audioTimestamp?: boolean;
|
| 71 |
+
thinkingConfig?: ThinkingConfig;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
export interface CaGenerateContentResponse {
|
| 75 |
+
response: VertexGenerateContentResponse;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
interface VertexGenerateContentResponse {
|
| 79 |
+
candidates: Candidate[];
|
| 80 |
+
automaticFunctionCallingHistory?: Content[];
|
| 81 |
+
promptFeedback?: GenerateContentResponsePromptFeedback;
|
| 82 |
+
usageMetadata?: GenerateContentResponseUsageMetadata;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
export interface CaCountTokenRequest {
|
| 86 |
+
request: VertexCountTokenRequest;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
interface VertexCountTokenRequest {
|
| 90 |
+
model: string;
|
| 91 |
+
contents: Content[];
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
export interface CaCountTokenResponse {
|
| 95 |
+
totalTokens: number;
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
export function toCountTokenRequest(
|
| 99 |
+
req: CountTokensParameters,
|
| 100 |
+
): CaCountTokenRequest {
|
| 101 |
+
return {
|
| 102 |
+
request: {
|
| 103 |
+
model: 'models/' + req.model,
|
| 104 |
+
contents: toContents(req.contents),
|
| 105 |
+
},
|
| 106 |
+
};
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
export function fromCountTokenResponse(
|
| 110 |
+
res: CaCountTokenResponse,
|
| 111 |
+
): CountTokensResponse {
|
| 112 |
+
return {
|
| 113 |
+
totalTokens: res.totalTokens,
|
| 114 |
+
};
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
export function toGenerateContentRequest(
|
| 118 |
+
req: GenerateContentParameters,
|
| 119 |
+
userPromptId: string,
|
| 120 |
+
project?: string,
|
| 121 |
+
sessionId?: string,
|
| 122 |
+
): CAGenerateContentRequest {
|
| 123 |
+
return {
|
| 124 |
+
model: req.model,
|
| 125 |
+
project,
|
| 126 |
+
user_prompt_id: userPromptId,
|
| 127 |
+
request: toVertexGenerateContentRequest(req, sessionId),
|
| 128 |
+
};
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
export function fromGenerateContentResponse(
|
| 132 |
+
res: CaGenerateContentResponse,
|
| 133 |
+
): GenerateContentResponse {
|
| 134 |
+
const inres = res.response;
|
| 135 |
+
const out = new GenerateContentResponse();
|
| 136 |
+
out.candidates = inres.candidates;
|
| 137 |
+
out.automaticFunctionCallingHistory = inres.automaticFunctionCallingHistory;
|
| 138 |
+
out.promptFeedback = inres.promptFeedback;
|
| 139 |
+
out.usageMetadata = inres.usageMetadata;
|
| 140 |
+
return out;
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
function toVertexGenerateContentRequest(
|
| 144 |
+
req: GenerateContentParameters,
|
| 145 |
+
sessionId?: string,
|
| 146 |
+
): VertexGenerateContentRequest {
|
| 147 |
+
return {
|
| 148 |
+
contents: toContents(req.contents),
|
| 149 |
+
systemInstruction: maybeToContent(req.config?.systemInstruction),
|
| 150 |
+
cachedContent: req.config?.cachedContent,
|
| 151 |
+
tools: req.config?.tools,
|
| 152 |
+
toolConfig: req.config?.toolConfig,
|
| 153 |
+
labels: req.config?.labels,
|
| 154 |
+
safetySettings: req.config?.safetySettings,
|
| 155 |
+
generationConfig: toVertexGenerationConfig(req.config),
|
| 156 |
+
session_id: sessionId,
|
| 157 |
+
};
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
export function toContents(contents: ContentListUnion): Content[] {
|
| 161 |
+
if (Array.isArray(contents)) {
|
| 162 |
+
// it's a Content[] or a PartsUnion[]
|
| 163 |
+
return contents.map(toContent);
|
| 164 |
+
}
|
| 165 |
+
// it's a Content or a PartsUnion
|
| 166 |
+
return [toContent(contents)];
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
function maybeToContent(content?: ContentUnion): Content | undefined {
|
| 170 |
+
if (!content) {
|
| 171 |
+
return undefined;
|
| 172 |
+
}
|
| 173 |
+
return toContent(content);
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
function toContent(content: ContentUnion): Content {
|
| 177 |
+
if (Array.isArray(content)) {
|
| 178 |
+
// it's a PartsUnion[]
|
| 179 |
+
return {
|
| 180 |
+
role: 'user',
|
| 181 |
+
parts: toParts(content),
|
| 182 |
+
};
|
| 183 |
+
}
|
| 184 |
+
if (typeof content === 'string') {
|
| 185 |
+
// it's a string
|
| 186 |
+
return {
|
| 187 |
+
role: 'user',
|
| 188 |
+
parts: [{ text: content }],
|
| 189 |
+
};
|
| 190 |
+
}
|
| 191 |
+
if ('parts' in content) {
|
| 192 |
+
// it's a Content
|
| 193 |
+
return content;
|
| 194 |
+
}
|
| 195 |
+
// it's a Part
|
| 196 |
+
return {
|
| 197 |
+
role: 'user',
|
| 198 |
+
parts: [content as Part],
|
| 199 |
+
};
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
function toParts(parts: PartUnion[]): Part[] {
|
| 203 |
+
return parts.map(toPart);
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
function toPart(part: PartUnion): Part {
|
| 207 |
+
if (typeof part === 'string') {
|
| 208 |
+
// it's a string
|
| 209 |
+
return { text: part };
|
| 210 |
+
}
|
| 211 |
+
return part;
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
function toVertexGenerationConfig(
|
| 215 |
+
config?: GenerateContentConfig,
|
| 216 |
+
): VertexGenerationConfig | undefined {
|
| 217 |
+
if (!config) {
|
| 218 |
+
return undefined;
|
| 219 |
+
}
|
| 220 |
+
return {
|
| 221 |
+
temperature: config.temperature,
|
| 222 |
+
topP: config.topP,
|
| 223 |
+
topK: config.topK,
|
| 224 |
+
candidateCount: config.candidateCount,
|
| 225 |
+
maxOutputTokens: config.maxOutputTokens,
|
| 226 |
+
stopSequences: config.stopSequences,
|
| 227 |
+
responseLogprobs: config.responseLogprobs,
|
| 228 |
+
logprobs: config.logprobs,
|
| 229 |
+
presencePenalty: config.presencePenalty,
|
| 230 |
+
frequencyPenalty: config.frequencyPenalty,
|
| 231 |
+
seed: config.seed,
|
| 232 |
+
responseMimeType: config.responseMimeType,
|
| 233 |
+
responseSchema: config.responseSchema,
|
| 234 |
+
responseJsonSchema: config.responseJsonSchema,
|
| 235 |
+
routingConfig: config.routingConfig,
|
| 236 |
+
modelSelectionConfig: config.modelSelectionConfig,
|
| 237 |
+
responseModalities: config.responseModalities,
|
| 238 |
+
mediaResolution: config.mediaResolution,
|
| 239 |
+
speechConfig: config.speechConfig,
|
| 240 |
+
audioTimestamp: config.audioTimestamp,
|
| 241 |
+
thinkingConfig: config.thinkingConfig,
|
| 242 |
+
};
|
| 243 |
+
}
|
projects/ui/qwen-code/packages/core/src/code_assist/oauth2.test.ts
ADDED
|
@@ -0,0 +1,590 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest';
|
| 8 |
+
import {
|
| 9 |
+
getOauthClient,
|
| 10 |
+
resetOauthClientForTesting,
|
| 11 |
+
clearCachedCredentialFile,
|
| 12 |
+
clearOauthClientCache,
|
| 13 |
+
} from './oauth2.js';
|
| 14 |
+
import { getCachedGoogleAccount } from '../utils/user_account.js';
|
| 15 |
+
import { OAuth2Client, Compute } from 'google-auth-library';
|
| 16 |
+
import * as fs from 'fs';
|
| 17 |
+
import * as path from 'path';
|
| 18 |
+
import http from 'http';
|
| 19 |
+
import open from 'open';
|
| 20 |
+
import crypto from 'crypto';
|
| 21 |
+
import * as os from 'os';
|
| 22 |
+
import { AuthType } from '../core/contentGenerator.js';
|
| 23 |
+
import { Config } from '../config/config.js';
|
| 24 |
+
import readline from 'node:readline';
|
| 25 |
+
import { QWEN_DIR } from '../utils/paths.js';
|
| 26 |
+
|
| 27 |
+
vi.mock('os', async (importOriginal) => {
|
| 28 |
+
const os = await importOriginal<typeof import('os')>();
|
| 29 |
+
return {
|
| 30 |
+
...os,
|
| 31 |
+
homedir: vi.fn(),
|
| 32 |
+
};
|
| 33 |
+
});
|
| 34 |
+
|
| 35 |
+
vi.mock('google-auth-library');
|
| 36 |
+
vi.mock('http');
|
| 37 |
+
vi.mock('open');
|
| 38 |
+
vi.mock('crypto');
|
| 39 |
+
vi.mock('node:readline');
|
| 40 |
+
vi.mock('../utils/browser.js', () => ({
|
| 41 |
+
shouldAttemptBrowserLaunch: () => true,
|
| 42 |
+
}));
|
| 43 |
+
|
| 44 |
+
const mockConfig = {
|
| 45 |
+
getNoBrowser: () => false,
|
| 46 |
+
getProxy: () => 'http://test.proxy.com:8080',
|
| 47 |
+
isBrowserLaunchSuppressed: () => false,
|
| 48 |
+
} as unknown as Config;
|
| 49 |
+
|
| 50 |
+
// Mock fetch globally
|
| 51 |
+
global.fetch = vi.fn();
|
| 52 |
+
|
| 53 |
+
describe('oauth2', () => {
|
| 54 |
+
let tempHomeDir: string;
|
| 55 |
+
|
| 56 |
+
beforeEach(() => {
|
| 57 |
+
tempHomeDir = fs.mkdtempSync(
|
| 58 |
+
path.join(os.tmpdir(), 'qwen-code-test-home-'),
|
| 59 |
+
);
|
| 60 |
+
(os.homedir as Mock).mockReturnValue(tempHomeDir);
|
| 61 |
+
});
|
| 62 |
+
afterEach(() => {
|
| 63 |
+
fs.rmSync(tempHomeDir, { recursive: true, force: true });
|
| 64 |
+
vi.clearAllMocks();
|
| 65 |
+
resetOauthClientForTesting();
|
| 66 |
+
vi.unstubAllEnvs();
|
| 67 |
+
});
|
| 68 |
+
|
| 69 |
+
it('should perform a web login', async () => {
|
| 70 |
+
const mockAuthUrl = 'https://example.com/auth';
|
| 71 |
+
const mockCode = 'test-code';
|
| 72 |
+
const mockState = 'test-state';
|
| 73 |
+
const mockTokens = {
|
| 74 |
+
access_token: 'test-access-token',
|
| 75 |
+
refresh_token: 'test-refresh-token',
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
const mockGenerateAuthUrl = vi.fn().mockReturnValue(mockAuthUrl);
|
| 79 |
+
const mockGetToken = vi.fn().mockResolvedValue({ tokens: mockTokens });
|
| 80 |
+
const mockSetCredentials = vi.fn();
|
| 81 |
+
const mockGetAccessToken = vi
|
| 82 |
+
.fn()
|
| 83 |
+
.mockResolvedValue({ token: 'mock-access-token' });
|
| 84 |
+
const mockOAuth2Client = {
|
| 85 |
+
generateAuthUrl: mockGenerateAuthUrl,
|
| 86 |
+
getToken: mockGetToken,
|
| 87 |
+
setCredentials: mockSetCredentials,
|
| 88 |
+
getAccessToken: mockGetAccessToken,
|
| 89 |
+
credentials: mockTokens,
|
| 90 |
+
on: vi.fn(),
|
| 91 |
+
} as unknown as OAuth2Client;
|
| 92 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 93 |
+
() => mockOAuth2Client,
|
| 94 |
+
);
|
| 95 |
+
|
| 96 |
+
vi.spyOn(crypto, 'randomBytes').mockReturnValue(mockState as never);
|
| 97 |
+
(open as Mock).mockImplementation(async () => ({ on: vi.fn() }) as never);
|
| 98 |
+
|
| 99 |
+
// Mock the UserInfo API response
|
| 100 |
+
(global.fetch as Mock).mockResolvedValue({
|
| 101 |
+
ok: true,
|
| 102 |
+
json: vi
|
| 103 |
+
.fn()
|
| 104 |
+
.mockResolvedValue({ email: 'test-google-account@gmail.com' }),
|
| 105 |
+
} as unknown as Response);
|
| 106 |
+
|
| 107 |
+
let requestCallback!: http.RequestListener<
|
| 108 |
+
typeof http.IncomingMessage,
|
| 109 |
+
typeof http.ServerResponse
|
| 110 |
+
>;
|
| 111 |
+
|
| 112 |
+
let serverListeningCallback: (value: unknown) => void;
|
| 113 |
+
const serverListeningPromise = new Promise(
|
| 114 |
+
(resolve) => (serverListeningCallback = resolve),
|
| 115 |
+
);
|
| 116 |
+
|
| 117 |
+
let capturedPort = 0;
|
| 118 |
+
const mockHttpServer = {
|
| 119 |
+
listen: vi.fn((port: number, _host: string, callback?: () => void) => {
|
| 120 |
+
capturedPort = port;
|
| 121 |
+
if (callback) {
|
| 122 |
+
callback();
|
| 123 |
+
}
|
| 124 |
+
serverListeningCallback(undefined);
|
| 125 |
+
}),
|
| 126 |
+
close: vi.fn((callback?: () => void) => {
|
| 127 |
+
if (callback) {
|
| 128 |
+
callback();
|
| 129 |
+
}
|
| 130 |
+
}),
|
| 131 |
+
on: vi.fn(),
|
| 132 |
+
address: () => ({ port: capturedPort }),
|
| 133 |
+
};
|
| 134 |
+
(http.createServer as Mock).mockImplementation((cb) => {
|
| 135 |
+
requestCallback = cb as http.RequestListener<
|
| 136 |
+
typeof http.IncomingMessage,
|
| 137 |
+
typeof http.ServerResponse
|
| 138 |
+
>;
|
| 139 |
+
return mockHttpServer as unknown as http.Server;
|
| 140 |
+
});
|
| 141 |
+
|
| 142 |
+
const clientPromise = getOauthClient(
|
| 143 |
+
AuthType.LOGIN_WITH_GOOGLE,
|
| 144 |
+
mockConfig,
|
| 145 |
+
);
|
| 146 |
+
|
| 147 |
+
// wait for server to start listening.
|
| 148 |
+
await serverListeningPromise;
|
| 149 |
+
|
| 150 |
+
const mockReq = {
|
| 151 |
+
url: `/oauth2callback?code=${mockCode}&state=${mockState}`,
|
| 152 |
+
} as http.IncomingMessage;
|
| 153 |
+
const mockRes = {
|
| 154 |
+
writeHead: vi.fn(),
|
| 155 |
+
end: vi.fn(),
|
| 156 |
+
} as unknown as http.ServerResponse;
|
| 157 |
+
|
| 158 |
+
await requestCallback(mockReq, mockRes);
|
| 159 |
+
|
| 160 |
+
const client = await clientPromise;
|
| 161 |
+
expect(client).toBe(mockOAuth2Client);
|
| 162 |
+
|
| 163 |
+
expect(open).toHaveBeenCalledWith(mockAuthUrl);
|
| 164 |
+
expect(mockGetToken).toHaveBeenCalledWith({
|
| 165 |
+
code: mockCode,
|
| 166 |
+
redirect_uri: `http://localhost:${capturedPort}/oauth2callback`,
|
| 167 |
+
});
|
| 168 |
+
expect(mockSetCredentials).toHaveBeenCalledWith(mockTokens);
|
| 169 |
+
|
| 170 |
+
// Verify Google Account was cached
|
| 171 |
+
const googleAccountPath = path.join(
|
| 172 |
+
tempHomeDir,
|
| 173 |
+
QWEN_DIR,
|
| 174 |
+
'google_accounts.json',
|
| 175 |
+
);
|
| 176 |
+
expect(fs.existsSync(googleAccountPath)).toBe(true);
|
| 177 |
+
const cachedGoogleAccount = fs.readFileSync(googleAccountPath, 'utf-8');
|
| 178 |
+
expect(JSON.parse(cachedGoogleAccount)).toEqual({
|
| 179 |
+
active: 'test-google-account@gmail.com',
|
| 180 |
+
old: [],
|
| 181 |
+
});
|
| 182 |
+
|
| 183 |
+
// Verify the getCachedGoogleAccount function works
|
| 184 |
+
expect(getCachedGoogleAccount()).toBe('test-google-account@gmail.com');
|
| 185 |
+
});
|
| 186 |
+
|
| 187 |
+
it('should perform login with user code', async () => {
|
| 188 |
+
const mockConfigWithNoBrowser = {
|
| 189 |
+
getNoBrowser: () => true,
|
| 190 |
+
getProxy: () => 'http://test.proxy.com:8080',
|
| 191 |
+
isBrowserLaunchSuppressed: () => true,
|
| 192 |
+
} as unknown as Config;
|
| 193 |
+
|
| 194 |
+
const mockCodeVerifier = {
|
| 195 |
+
codeChallenge: 'test-challenge',
|
| 196 |
+
codeVerifier: 'test-verifier',
|
| 197 |
+
};
|
| 198 |
+
const mockAuthUrl = 'https://example.com/auth-user-code';
|
| 199 |
+
const mockCode = 'test-user-code';
|
| 200 |
+
const mockTokens = {
|
| 201 |
+
access_token: 'test-access-token-user-code',
|
| 202 |
+
refresh_token: 'test-refresh-token-user-code',
|
| 203 |
+
};
|
| 204 |
+
|
| 205 |
+
const mockGenerateAuthUrl = vi.fn().mockReturnValue(mockAuthUrl);
|
| 206 |
+
const mockGetToken = vi.fn().mockResolvedValue({ tokens: mockTokens });
|
| 207 |
+
const mockSetCredentials = vi.fn();
|
| 208 |
+
const mockGenerateCodeVerifierAsync = vi
|
| 209 |
+
.fn()
|
| 210 |
+
.mockResolvedValue(mockCodeVerifier);
|
| 211 |
+
|
| 212 |
+
const mockOAuth2Client = {
|
| 213 |
+
generateAuthUrl: mockGenerateAuthUrl,
|
| 214 |
+
getToken: mockGetToken,
|
| 215 |
+
setCredentials: mockSetCredentials,
|
| 216 |
+
generateCodeVerifierAsync: mockGenerateCodeVerifierAsync,
|
| 217 |
+
on: vi.fn(),
|
| 218 |
+
} as unknown as OAuth2Client;
|
| 219 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 220 |
+
() => mockOAuth2Client,
|
| 221 |
+
);
|
| 222 |
+
|
| 223 |
+
const mockReadline = {
|
| 224 |
+
question: vi.fn((_query, callback) => callback(mockCode)),
|
| 225 |
+
close: vi.fn(),
|
| 226 |
+
};
|
| 227 |
+
(readline.createInterface as Mock).mockReturnValue(mockReadline);
|
| 228 |
+
|
| 229 |
+
const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
|
| 230 |
+
|
| 231 |
+
const client = await getOauthClient(
|
| 232 |
+
AuthType.LOGIN_WITH_GOOGLE,
|
| 233 |
+
mockConfigWithNoBrowser,
|
| 234 |
+
);
|
| 235 |
+
|
| 236 |
+
expect(client).toBe(mockOAuth2Client);
|
| 237 |
+
|
| 238 |
+
// Verify the auth flow
|
| 239 |
+
expect(mockGenerateCodeVerifierAsync).toHaveBeenCalled();
|
| 240 |
+
expect(mockGenerateAuthUrl).toHaveBeenCalled();
|
| 241 |
+
expect(consoleLogSpy).toHaveBeenCalledWith(
|
| 242 |
+
expect.stringContaining(mockAuthUrl),
|
| 243 |
+
);
|
| 244 |
+
expect(mockReadline.question).toHaveBeenCalledWith(
|
| 245 |
+
'Enter the authorization code: ',
|
| 246 |
+
expect.any(Function),
|
| 247 |
+
);
|
| 248 |
+
expect(mockGetToken).toHaveBeenCalledWith({
|
| 249 |
+
code: mockCode,
|
| 250 |
+
codeVerifier: mockCodeVerifier.codeVerifier,
|
| 251 |
+
redirect_uri: 'https://codeassist.google.com/authcode',
|
| 252 |
+
});
|
| 253 |
+
expect(mockSetCredentials).toHaveBeenCalledWith(mockTokens);
|
| 254 |
+
|
| 255 |
+
consoleLogSpy.mockRestore();
|
| 256 |
+
});
|
| 257 |
+
|
| 258 |
+
describe('in Cloud Shell', () => {
|
| 259 |
+
const mockGetAccessToken = vi.fn();
|
| 260 |
+
let mockComputeClient: Compute;
|
| 261 |
+
|
| 262 |
+
beforeEach(() => {
|
| 263 |
+
mockGetAccessToken.mockResolvedValue({ token: 'test-access-token' });
|
| 264 |
+
mockComputeClient = {
|
| 265 |
+
credentials: { refresh_token: 'test-refresh-token' },
|
| 266 |
+
getAccessToken: mockGetAccessToken,
|
| 267 |
+
} as unknown as Compute;
|
| 268 |
+
|
| 269 |
+
(Compute as unknown as Mock).mockImplementation(() => mockComputeClient);
|
| 270 |
+
});
|
| 271 |
+
|
| 272 |
+
it('should attempt to load cached credentials first', async () => {
|
| 273 |
+
const cachedCreds = { refresh_token: 'cached-token' };
|
| 274 |
+
const credsPath = path.join(tempHomeDir, QWEN_DIR, 'oauth_creds.json');
|
| 275 |
+
await fs.promises.mkdir(path.dirname(credsPath), { recursive: true });
|
| 276 |
+
await fs.promises.writeFile(credsPath, JSON.stringify(cachedCreds));
|
| 277 |
+
|
| 278 |
+
const mockClient = {
|
| 279 |
+
setCredentials: vi.fn(),
|
| 280 |
+
getAccessToken: vi.fn().mockResolvedValue({ token: 'test-token' }),
|
| 281 |
+
getTokenInfo: vi.fn().mockResolvedValue({}),
|
| 282 |
+
on: vi.fn(),
|
| 283 |
+
};
|
| 284 |
+
|
| 285 |
+
// To mock the new OAuth2Client() inside the function
|
| 286 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 287 |
+
() => mockClient as unknown as OAuth2Client,
|
| 288 |
+
);
|
| 289 |
+
|
| 290 |
+
await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig);
|
| 291 |
+
|
| 292 |
+
expect(mockClient.setCredentials).toHaveBeenCalledWith(cachedCreds);
|
| 293 |
+
expect(mockClient.getAccessToken).toHaveBeenCalled();
|
| 294 |
+
expect(mockClient.getTokenInfo).toHaveBeenCalled();
|
| 295 |
+
expect(Compute).not.toHaveBeenCalled(); // Should not fetch new client if cache is valid
|
| 296 |
+
});
|
| 297 |
+
|
| 298 |
+
it('should use Compute to get a client if no cached credentials exist', async () => {
|
| 299 |
+
await getOauthClient(AuthType.CLOUD_SHELL, mockConfig);
|
| 300 |
+
|
| 301 |
+
expect(Compute).toHaveBeenCalledWith({});
|
| 302 |
+
expect(mockGetAccessToken).toHaveBeenCalled();
|
| 303 |
+
});
|
| 304 |
+
|
| 305 |
+
it('should not cache the credentials after fetching them via ADC', async () => {
|
| 306 |
+
const newCredentials = { refresh_token: 'new-adc-token' };
|
| 307 |
+
mockComputeClient.credentials = newCredentials;
|
| 308 |
+
mockGetAccessToken.mockResolvedValue({ token: 'new-adc-token' });
|
| 309 |
+
|
| 310 |
+
await getOauthClient(AuthType.CLOUD_SHELL, mockConfig);
|
| 311 |
+
|
| 312 |
+
const credsPath = path.join(tempHomeDir, QWEN_DIR, 'oauth_creds.json');
|
| 313 |
+
expect(fs.existsSync(credsPath)).toBe(false);
|
| 314 |
+
});
|
| 315 |
+
|
| 316 |
+
it('should return the Compute client on successful ADC authentication', async () => {
|
| 317 |
+
const client = await getOauthClient(AuthType.CLOUD_SHELL, mockConfig);
|
| 318 |
+
expect(client).toBe(mockComputeClient);
|
| 319 |
+
});
|
| 320 |
+
|
| 321 |
+
it('should throw an error if ADC fails', async () => {
|
| 322 |
+
const testError = new Error('ADC Failed');
|
| 323 |
+
mockGetAccessToken.mockRejectedValue(testError);
|
| 324 |
+
|
| 325 |
+
await expect(
|
| 326 |
+
getOauthClient(AuthType.CLOUD_SHELL, mockConfig),
|
| 327 |
+
).rejects.toThrow(
|
| 328 |
+
'Could not authenticate using Cloud Shell credentials. Please select a different authentication method or ensure you are in a properly configured environment. Error: ADC Failed',
|
| 329 |
+
);
|
| 330 |
+
});
|
| 331 |
+
});
|
| 332 |
+
|
| 333 |
+
describe('credential loading order', () => {
|
| 334 |
+
it('should prioritize default cached credentials over GOOGLE_APPLICATION_CREDENTIALS', async () => {
|
| 335 |
+
// Setup default cached credentials
|
| 336 |
+
const defaultCreds = { refresh_token: 'default-cached-token' };
|
| 337 |
+
const defaultCredsPath = path.join(
|
| 338 |
+
tempHomeDir,
|
| 339 |
+
QWEN_DIR,
|
| 340 |
+
'oauth_creds.json',
|
| 341 |
+
);
|
| 342 |
+
await fs.promises.mkdir(path.dirname(defaultCredsPath), {
|
| 343 |
+
recursive: true,
|
| 344 |
+
});
|
| 345 |
+
await fs.promises.writeFile(
|
| 346 |
+
defaultCredsPath,
|
| 347 |
+
JSON.stringify(defaultCreds),
|
| 348 |
+
);
|
| 349 |
+
|
| 350 |
+
// Setup credentials via environment variable
|
| 351 |
+
const envCreds = { refresh_token: 'env-var-token' };
|
| 352 |
+
const envCredsPath = path.join(tempHomeDir, 'env_creds.json');
|
| 353 |
+
await fs.promises.writeFile(envCredsPath, JSON.stringify(envCreds));
|
| 354 |
+
vi.stubEnv('GOOGLE_APPLICATION_CREDENTIALS', envCredsPath);
|
| 355 |
+
|
| 356 |
+
const mockClient = {
|
| 357 |
+
setCredentials: vi.fn(),
|
| 358 |
+
getAccessToken: vi.fn().mockResolvedValue({ token: 'test-token' }),
|
| 359 |
+
getTokenInfo: vi.fn().mockResolvedValue({}),
|
| 360 |
+
on: vi.fn(),
|
| 361 |
+
};
|
| 362 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 363 |
+
() => mockClient as unknown as OAuth2Client,
|
| 364 |
+
);
|
| 365 |
+
|
| 366 |
+
await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig);
|
| 367 |
+
|
| 368 |
+
// Assert the correct credentials were used
|
| 369 |
+
expect(mockClient.setCredentials).toHaveBeenCalledWith(defaultCreds);
|
| 370 |
+
expect(mockClient.setCredentials).not.toHaveBeenCalledWith(envCreds);
|
| 371 |
+
});
|
| 372 |
+
|
| 373 |
+
it('should fall back to GOOGLE_APPLICATION_CREDENTIALS if default cache is missing', async () => {
|
| 374 |
+
// Setup credentials via environment variable
|
| 375 |
+
const envCreds = { refresh_token: 'env-var-token' };
|
| 376 |
+
const envCredsPath = path.join(tempHomeDir, 'env_creds.json');
|
| 377 |
+
await fs.promises.writeFile(envCredsPath, JSON.stringify(envCreds));
|
| 378 |
+
vi.stubEnv('GOOGLE_APPLICATION_CREDENTIALS', envCredsPath);
|
| 379 |
+
|
| 380 |
+
const mockClient = {
|
| 381 |
+
setCredentials: vi.fn(),
|
| 382 |
+
getAccessToken: vi.fn().mockResolvedValue({ token: 'test-token' }),
|
| 383 |
+
getTokenInfo: vi.fn().mockResolvedValue({}),
|
| 384 |
+
on: vi.fn(),
|
| 385 |
+
};
|
| 386 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 387 |
+
() => mockClient as unknown as OAuth2Client,
|
| 388 |
+
);
|
| 389 |
+
|
| 390 |
+
await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig);
|
| 391 |
+
|
| 392 |
+
// Assert the correct credentials were used
|
| 393 |
+
expect(mockClient.setCredentials).toHaveBeenCalledWith(envCreds);
|
| 394 |
+
});
|
| 395 |
+
});
|
| 396 |
+
|
| 397 |
+
describe('with GCP environment variables', () => {
|
| 398 |
+
it('should use GOOGLE_CLOUD_ACCESS_TOKEN when GOOGLE_GENAI_USE_GCA is true', async () => {
|
| 399 |
+
vi.stubEnv('GOOGLE_GENAI_USE_GCA', 'true');
|
| 400 |
+
vi.stubEnv('GOOGLE_CLOUD_ACCESS_TOKEN', 'gcp-access-token');
|
| 401 |
+
|
| 402 |
+
const mockSetCredentials = vi.fn();
|
| 403 |
+
const mockGetAccessToken = vi
|
| 404 |
+
.fn()
|
| 405 |
+
.mockResolvedValue({ token: 'gcp-access-token' });
|
| 406 |
+
const mockOAuth2Client = {
|
| 407 |
+
setCredentials: mockSetCredentials,
|
| 408 |
+
getAccessToken: mockGetAccessToken,
|
| 409 |
+
on: vi.fn(),
|
| 410 |
+
} as unknown as OAuth2Client;
|
| 411 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 412 |
+
() => mockOAuth2Client,
|
| 413 |
+
);
|
| 414 |
+
|
| 415 |
+
// Mock the UserInfo API response for fetchAndCacheUserInfo
|
| 416 |
+
(global.fetch as Mock).mockResolvedValue({
|
| 417 |
+
ok: true,
|
| 418 |
+
json: vi
|
| 419 |
+
.fn()
|
| 420 |
+
.mockResolvedValue({ email: 'test-gcp-account@gmail.com' }),
|
| 421 |
+
} as unknown as Response);
|
| 422 |
+
|
| 423 |
+
const client = await getOauthClient(
|
| 424 |
+
AuthType.LOGIN_WITH_GOOGLE,
|
| 425 |
+
mockConfig,
|
| 426 |
+
);
|
| 427 |
+
|
| 428 |
+
expect(client).toBe(mockOAuth2Client);
|
| 429 |
+
expect(mockSetCredentials).toHaveBeenCalledWith({
|
| 430 |
+
access_token: 'gcp-access-token',
|
| 431 |
+
});
|
| 432 |
+
|
| 433 |
+
// Verify fetchAndCacheUserInfo was effectively called
|
| 434 |
+
expect(mockGetAccessToken).toHaveBeenCalled();
|
| 435 |
+
expect(global.fetch).toHaveBeenCalledWith(
|
| 436 |
+
'https://www.googleapis.com/oauth2/v2/userinfo',
|
| 437 |
+
{
|
| 438 |
+
headers: {
|
| 439 |
+
Authorization: 'Bearer gcp-access-token',
|
| 440 |
+
},
|
| 441 |
+
},
|
| 442 |
+
);
|
| 443 |
+
|
| 444 |
+
// Verify Google Account was cached
|
| 445 |
+
const googleAccountPath = path.join(
|
| 446 |
+
tempHomeDir,
|
| 447 |
+
QWEN_DIR,
|
| 448 |
+
'google_accounts.json',
|
| 449 |
+
);
|
| 450 |
+
const cachedContent = fs.readFileSync(googleAccountPath, 'utf-8');
|
| 451 |
+
expect(JSON.parse(cachedContent)).toEqual({
|
| 452 |
+
active: 'test-gcp-account@gmail.com',
|
| 453 |
+
old: [],
|
| 454 |
+
});
|
| 455 |
+
});
|
| 456 |
+
|
| 457 |
+
it('should not use GCP token if GOOGLE_CLOUD_ACCESS_TOKEN is not set', async () => {
|
| 458 |
+
vi.stubEnv('GOOGLE_GENAI_USE_GCA', 'true');
|
| 459 |
+
|
| 460 |
+
const mockSetCredentials = vi.fn();
|
| 461 |
+
const mockGetAccessToken = vi
|
| 462 |
+
.fn()
|
| 463 |
+
.mockResolvedValue({ token: 'cached-access-token' });
|
| 464 |
+
const mockGetTokenInfo = vi.fn().mockResolvedValue({});
|
| 465 |
+
const mockOAuth2Client = {
|
| 466 |
+
setCredentials: mockSetCredentials,
|
| 467 |
+
getAccessToken: mockGetAccessToken,
|
| 468 |
+
getTokenInfo: mockGetTokenInfo,
|
| 469 |
+
on: vi.fn(),
|
| 470 |
+
} as unknown as OAuth2Client;
|
| 471 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 472 |
+
() => mockOAuth2Client,
|
| 473 |
+
);
|
| 474 |
+
|
| 475 |
+
// Make it fall through to cached credentials path
|
| 476 |
+
const cachedCreds = { refresh_token: 'cached-token' };
|
| 477 |
+
const credsPath = path.join(tempHomeDir, QWEN_DIR, 'oauth_creds.json');
|
| 478 |
+
await fs.promises.mkdir(path.dirname(credsPath), { recursive: true });
|
| 479 |
+
await fs.promises.writeFile(credsPath, JSON.stringify(cachedCreds));
|
| 480 |
+
|
| 481 |
+
await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig);
|
| 482 |
+
|
| 483 |
+
// It should be called with the cached credentials, not the GCP access token.
|
| 484 |
+
expect(mockSetCredentials).toHaveBeenCalledTimes(1);
|
| 485 |
+
expect(mockSetCredentials).toHaveBeenCalledWith(cachedCreds);
|
| 486 |
+
});
|
| 487 |
+
|
| 488 |
+
it('should not use GCP token if GOOGLE_GENAI_USE_GCA is not set', async () => {
|
| 489 |
+
vi.stubEnv('GOOGLE_CLOUD_ACCESS_TOKEN', 'gcp-access-token');
|
| 490 |
+
|
| 491 |
+
const mockSetCredentials = vi.fn();
|
| 492 |
+
const mockGetAccessToken = vi
|
| 493 |
+
.fn()
|
| 494 |
+
.mockResolvedValue({ token: 'cached-access-token' });
|
| 495 |
+
const mockGetTokenInfo = vi.fn().mockResolvedValue({});
|
| 496 |
+
const mockOAuth2Client = {
|
| 497 |
+
setCredentials: mockSetCredentials,
|
| 498 |
+
getAccessToken: mockGetAccessToken,
|
| 499 |
+
getTokenInfo: mockGetTokenInfo,
|
| 500 |
+
on: vi.fn(),
|
| 501 |
+
} as unknown as OAuth2Client;
|
| 502 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 503 |
+
() => mockOAuth2Client,
|
| 504 |
+
);
|
| 505 |
+
|
| 506 |
+
// Make it fall through to cached credentials path
|
| 507 |
+
const cachedCreds = { refresh_token: 'cached-token' };
|
| 508 |
+
const credsPath = path.join(tempHomeDir, QWEN_DIR, 'oauth_creds.json');
|
| 509 |
+
await fs.promises.mkdir(path.dirname(credsPath), { recursive: true });
|
| 510 |
+
await fs.promises.writeFile(credsPath, JSON.stringify(cachedCreds));
|
| 511 |
+
|
| 512 |
+
await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig);
|
| 513 |
+
|
| 514 |
+
// It should be called with the cached credentials, not the GCP access token.
|
| 515 |
+
expect(mockSetCredentials).toHaveBeenCalledTimes(1);
|
| 516 |
+
expect(mockSetCredentials).toHaveBeenCalledWith(cachedCreds);
|
| 517 |
+
});
|
| 518 |
+
});
|
| 519 |
+
describe('clearCachedCredentialFile', () => {
|
| 520 |
+
it('should clear cached credentials and Google account', async () => {
|
| 521 |
+
const cachedCreds = { refresh_token: 'test-token' };
|
| 522 |
+
const credsPath = path.join(tempHomeDir, QWEN_DIR, 'oauth_creds.json');
|
| 523 |
+
await fs.promises.mkdir(path.dirname(credsPath), { recursive: true });
|
| 524 |
+
await fs.promises.writeFile(credsPath, JSON.stringify(cachedCreds));
|
| 525 |
+
|
| 526 |
+
const googleAccountPath = path.join(
|
| 527 |
+
tempHomeDir,
|
| 528 |
+
QWEN_DIR,
|
| 529 |
+
'google_accounts.json',
|
| 530 |
+
);
|
| 531 |
+
const accountData = { active: 'test@example.com', old: [] };
|
| 532 |
+
await fs.promises.writeFile(
|
| 533 |
+
googleAccountPath,
|
| 534 |
+
JSON.stringify(accountData),
|
| 535 |
+
);
|
| 536 |
+
|
| 537 |
+
expect(fs.existsSync(credsPath)).toBe(true);
|
| 538 |
+
expect(fs.existsSync(googleAccountPath)).toBe(true);
|
| 539 |
+
expect(getCachedGoogleAccount()).toBe('test@example.com');
|
| 540 |
+
|
| 541 |
+
await clearCachedCredentialFile();
|
| 542 |
+
expect(fs.existsSync(credsPath)).toBe(false);
|
| 543 |
+
expect(getCachedGoogleAccount()).toBeNull();
|
| 544 |
+
const updatedAccountData = JSON.parse(
|
| 545 |
+
fs.readFileSync(googleAccountPath, 'utf-8'),
|
| 546 |
+
);
|
| 547 |
+
expect(updatedAccountData.active).toBeNull();
|
| 548 |
+
expect(updatedAccountData.old).toContain('test@example.com');
|
| 549 |
+
});
|
| 550 |
+
|
| 551 |
+
it('should clear the in-memory OAuth client cache', async () => {
|
| 552 |
+
const mockSetCredentials = vi.fn();
|
| 553 |
+
const mockGetAccessToken = vi
|
| 554 |
+
.fn()
|
| 555 |
+
.mockResolvedValue({ token: 'test-token' });
|
| 556 |
+
const mockGetTokenInfo = vi.fn().mockResolvedValue({});
|
| 557 |
+
const mockOAuth2Client = {
|
| 558 |
+
setCredentials: mockSetCredentials,
|
| 559 |
+
getAccessToken: mockGetAccessToken,
|
| 560 |
+
getTokenInfo: mockGetTokenInfo,
|
| 561 |
+
on: vi.fn(),
|
| 562 |
+
} as unknown as OAuth2Client;
|
| 563 |
+
(OAuth2Client as unknown as Mock).mockImplementation(
|
| 564 |
+
() => mockOAuth2Client,
|
| 565 |
+
);
|
| 566 |
+
|
| 567 |
+
// Pre-populate credentials to make getOauthClient resolve quickly
|
| 568 |
+
const credsPath = path.join(tempHomeDir, QWEN_DIR, 'oauth_creds.json');
|
| 569 |
+
await fs.promises.mkdir(path.dirname(credsPath), { recursive: true });
|
| 570 |
+
await fs.promises.writeFile(
|
| 571 |
+
credsPath,
|
| 572 |
+
JSON.stringify({ refresh_token: 'token' }),
|
| 573 |
+
);
|
| 574 |
+
|
| 575 |
+
// First call, should create a client
|
| 576 |
+
await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig);
|
| 577 |
+
expect(OAuth2Client).toHaveBeenCalledTimes(1);
|
| 578 |
+
|
| 579 |
+
// Second call, should use cached client
|
| 580 |
+
await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig);
|
| 581 |
+
expect(OAuth2Client).toHaveBeenCalledTimes(1);
|
| 582 |
+
|
| 583 |
+
clearOauthClientCache();
|
| 584 |
+
|
| 585 |
+
// Third call, after clearing cache, should create a new client
|
| 586 |
+
await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig);
|
| 587 |
+
expect(OAuth2Client).toHaveBeenCalledTimes(2);
|
| 588 |
+
});
|
| 589 |
+
});
|
| 590 |
+
});
|
projects/ui/qwen-code/packages/core/src/code_assist/oauth2.ts
ADDED
|
@@ -0,0 +1,447 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import {
|
| 8 |
+
OAuth2Client,
|
| 9 |
+
Credentials,
|
| 10 |
+
Compute,
|
| 11 |
+
CodeChallengeMethod,
|
| 12 |
+
} from 'google-auth-library';
|
| 13 |
+
import * as http from 'http';
|
| 14 |
+
import url from 'url';
|
| 15 |
+
import crypto from 'crypto';
|
| 16 |
+
import * as net from 'net';
|
| 17 |
+
import open from 'open';
|
| 18 |
+
import path from 'node:path';
|
| 19 |
+
import { promises as fs } from 'node:fs';
|
| 20 |
+
import * as os from 'os';
|
| 21 |
+
import { Config } from '../config/config.js';
|
| 22 |
+
import { getErrorMessage } from '../utils/errors.js';
|
| 23 |
+
import {
|
| 24 |
+
cacheGoogleAccount,
|
| 25 |
+
getCachedGoogleAccount,
|
| 26 |
+
clearCachedGoogleAccount,
|
| 27 |
+
} from '../utils/user_account.js';
|
| 28 |
+
import { AuthType } from '../core/contentGenerator.js';
|
| 29 |
+
import readline from 'node:readline';
|
| 30 |
+
import { QWEN_DIR } from '../utils/paths.js';
|
| 31 |
+
|
| 32 |
+
// OAuth Client ID used to initiate OAuth2Client class.
|
| 33 |
+
const OAUTH_CLIENT_ID =
|
| 34 |
+
'681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com';
|
| 35 |
+
|
| 36 |
+
// OAuth Secret value used to initiate OAuth2Client class.
|
| 37 |
+
// Note: It's ok to save this in git because this is an installed application
|
| 38 |
+
// as described here: https://developers.google.com/identity/protocols/oauth2#installed
|
| 39 |
+
// "The process results in a client ID and, in some cases, a client secret,
|
| 40 |
+
// which you embed in the source code of your application. (In this context,
|
| 41 |
+
// the client secret is obviously not treated as a secret.)"
|
| 42 |
+
const OAUTH_CLIENT_SECRET = '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';
|
| 43 |
+
|
| 44 |
+
// OAuth Scopes for Cloud Code authorization.
|
| 45 |
+
const OAUTH_SCOPE = [
|
| 46 |
+
'https://www.googleapis.com/auth/cloud-platform',
|
| 47 |
+
'https://www.googleapis.com/auth/userinfo.email',
|
| 48 |
+
'https://www.googleapis.com/auth/userinfo.profile',
|
| 49 |
+
];
|
| 50 |
+
|
| 51 |
+
const HTTP_REDIRECT = 301;
|
| 52 |
+
const SIGN_IN_SUCCESS_URL =
|
| 53 |
+
'https://developers.google.com/gemini-code-assist/auth_success_gemini';
|
| 54 |
+
const SIGN_IN_FAILURE_URL =
|
| 55 |
+
'https://developers.google.com/gemini-code-assist/auth_failure_gemini';
|
| 56 |
+
|
| 57 |
+
const CREDENTIAL_FILENAME = 'oauth_creds.json';
|
| 58 |
+
|
| 59 |
+
/**
|
| 60 |
+
* An Authentication URL for updating the credentials of a Oauth2Client
|
| 61 |
+
* as well as a promise that will resolve when the credentials have
|
| 62 |
+
* been refreshed (or which throws error when refreshing credentials failed).
|
| 63 |
+
*/
|
| 64 |
+
export interface OauthWebLogin {
|
| 65 |
+
authUrl: string;
|
| 66 |
+
loginCompletePromise: Promise<void>;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
const oauthClientPromises = new Map<AuthType, Promise<OAuth2Client>>();
|
| 70 |
+
|
| 71 |
+
async function initOauthClient(
|
| 72 |
+
authType: AuthType,
|
| 73 |
+
config: Config,
|
| 74 |
+
): Promise<OAuth2Client> {
|
| 75 |
+
const client = new OAuth2Client({
|
| 76 |
+
clientId: OAUTH_CLIENT_ID,
|
| 77 |
+
clientSecret: OAUTH_CLIENT_SECRET,
|
| 78 |
+
transporterOptions: {
|
| 79 |
+
proxy: config.getProxy(),
|
| 80 |
+
},
|
| 81 |
+
});
|
| 82 |
+
|
| 83 |
+
if (
|
| 84 |
+
process.env['GOOGLE_GENAI_USE_GCA'] &&
|
| 85 |
+
process.env['GOOGLE_CLOUD_ACCESS_TOKEN']
|
| 86 |
+
) {
|
| 87 |
+
client.setCredentials({
|
| 88 |
+
access_token: process.env['GOOGLE_CLOUD_ACCESS_TOKEN'],
|
| 89 |
+
});
|
| 90 |
+
await fetchAndCacheUserInfo(client);
|
| 91 |
+
return client;
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
client.on('tokens', async (tokens: Credentials) => {
|
| 95 |
+
await cacheCredentials(tokens);
|
| 96 |
+
});
|
| 97 |
+
|
| 98 |
+
// If there are cached creds on disk, they always take precedence
|
| 99 |
+
if (await loadCachedCredentials(client)) {
|
| 100 |
+
// Found valid cached credentials.
|
| 101 |
+
// Check if we need to retrieve Google Account ID or Email
|
| 102 |
+
if (!getCachedGoogleAccount()) {
|
| 103 |
+
try {
|
| 104 |
+
await fetchAndCacheUserInfo(client);
|
| 105 |
+
} catch {
|
| 106 |
+
// Non-fatal, continue with existing auth.
|
| 107 |
+
}
|
| 108 |
+
}
|
| 109 |
+
console.log('Loaded cached credentials.');
|
| 110 |
+
return client;
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
// In Google Cloud Shell, we can use Application Default Credentials (ADC)
|
| 114 |
+
// provided via its metadata server to authenticate non-interactively using
|
| 115 |
+
// the identity of the user logged into Cloud Shell.
|
| 116 |
+
if (authType === AuthType.CLOUD_SHELL) {
|
| 117 |
+
try {
|
| 118 |
+
console.log("Attempting to authenticate via Cloud Shell VM's ADC.");
|
| 119 |
+
const computeClient = new Compute({
|
| 120 |
+
// We can leave this empty, since the metadata server will provide
|
| 121 |
+
// the service account email.
|
| 122 |
+
});
|
| 123 |
+
await computeClient.getAccessToken();
|
| 124 |
+
console.log('Authentication successful.');
|
| 125 |
+
|
| 126 |
+
// Do not cache creds in this case; note that Compute client will handle its own refresh
|
| 127 |
+
return computeClient;
|
| 128 |
+
} catch (e) {
|
| 129 |
+
throw new Error(
|
| 130 |
+
`Could not authenticate using Cloud Shell credentials. Please select a different authentication method or ensure you are in a properly configured environment. Error: ${getErrorMessage(
|
| 131 |
+
e,
|
| 132 |
+
)}`,
|
| 133 |
+
);
|
| 134 |
+
}
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
if (config.isBrowserLaunchSuppressed()) {
|
| 138 |
+
let success = false;
|
| 139 |
+
const maxRetries = 2;
|
| 140 |
+
for (let i = 0; !success && i < maxRetries; i++) {
|
| 141 |
+
success = await authWithUserCode(client);
|
| 142 |
+
if (!success) {
|
| 143 |
+
console.error(
|
| 144 |
+
'\nFailed to authenticate with user code.',
|
| 145 |
+
i === maxRetries - 1 ? '' : 'Retrying...\n',
|
| 146 |
+
);
|
| 147 |
+
}
|
| 148 |
+
}
|
| 149 |
+
if (!success) {
|
| 150 |
+
process.exit(1);
|
| 151 |
+
}
|
| 152 |
+
} else {
|
| 153 |
+
const webLogin = await authWithWeb(client);
|
| 154 |
+
|
| 155 |
+
console.log(
|
| 156 |
+
`\n\nCode Assist login required.\n` +
|
| 157 |
+
`Attempting to open authentication page in your browser.\n` +
|
| 158 |
+
`Otherwise navigate to:\n\n${webLogin.authUrl}\n\n`,
|
| 159 |
+
);
|
| 160 |
+
try {
|
| 161 |
+
// Attempt to open the authentication URL in the default browser.
|
| 162 |
+
// We do not use the `wait` option here because the main script's execution
|
| 163 |
+
// is already paused by `loginCompletePromise`, which awaits the server callback.
|
| 164 |
+
const childProcess = await open(webLogin.authUrl);
|
| 165 |
+
|
| 166 |
+
// IMPORTANT: Attach an error handler to the returned child process.
|
| 167 |
+
// Without this, if `open` fails to spawn a process (e.g., `xdg-open` is not found
|
| 168 |
+
// in a minimal Docker container), it will emit an unhandled 'error' event,
|
| 169 |
+
// causing the entire Node.js process to crash.
|
| 170 |
+
childProcess.on('error', (_) => {
|
| 171 |
+
console.error(
|
| 172 |
+
'Failed to open browser automatically. Please try running again with NO_BROWSER=true set.',
|
| 173 |
+
);
|
| 174 |
+
process.exit(1);
|
| 175 |
+
});
|
| 176 |
+
} catch (err) {
|
| 177 |
+
console.error(
|
| 178 |
+
'An unexpected error occurred while trying to open the browser:',
|
| 179 |
+
err,
|
| 180 |
+
'\nPlease try running again with NO_BROWSER=true set.',
|
| 181 |
+
);
|
| 182 |
+
process.exit(1);
|
| 183 |
+
}
|
| 184 |
+
console.log('Waiting for authentication...');
|
| 185 |
+
|
| 186 |
+
await webLogin.loginCompletePromise;
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
return client;
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
export async function getOauthClient(
|
| 193 |
+
authType: AuthType,
|
| 194 |
+
config: Config,
|
| 195 |
+
): Promise<OAuth2Client> {
|
| 196 |
+
if (!oauthClientPromises.has(authType)) {
|
| 197 |
+
oauthClientPromises.set(authType, initOauthClient(authType, config));
|
| 198 |
+
}
|
| 199 |
+
return oauthClientPromises.get(authType)!;
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
async function authWithUserCode(client: OAuth2Client): Promise<boolean> {
|
| 203 |
+
const redirectUri = 'https://codeassist.google.com/authcode';
|
| 204 |
+
const codeVerifier = await client.generateCodeVerifierAsync();
|
| 205 |
+
const state = crypto.randomBytes(32).toString('hex');
|
| 206 |
+
const authUrl: string = client.generateAuthUrl({
|
| 207 |
+
redirect_uri: redirectUri,
|
| 208 |
+
access_type: 'offline',
|
| 209 |
+
scope: OAUTH_SCOPE,
|
| 210 |
+
code_challenge_method: CodeChallengeMethod.S256,
|
| 211 |
+
code_challenge: codeVerifier.codeChallenge,
|
| 212 |
+
state,
|
| 213 |
+
});
|
| 214 |
+
console.log('Please visit the following URL to authorize the application:');
|
| 215 |
+
console.log('');
|
| 216 |
+
console.log(authUrl);
|
| 217 |
+
console.log('');
|
| 218 |
+
|
| 219 |
+
const code = await new Promise<string>((resolve) => {
|
| 220 |
+
const rl = readline.createInterface({
|
| 221 |
+
input: process.stdin,
|
| 222 |
+
output: process.stdout,
|
| 223 |
+
});
|
| 224 |
+
rl.question('Enter the authorization code: ', (code) => {
|
| 225 |
+
rl.close();
|
| 226 |
+
resolve(code.trim());
|
| 227 |
+
});
|
| 228 |
+
});
|
| 229 |
+
|
| 230 |
+
if (!code) {
|
| 231 |
+
console.error('Authorization code is required.');
|
| 232 |
+
return false;
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
try {
|
| 236 |
+
const { tokens } = await client.getToken({
|
| 237 |
+
code,
|
| 238 |
+
codeVerifier: codeVerifier.codeVerifier,
|
| 239 |
+
redirect_uri: redirectUri,
|
| 240 |
+
});
|
| 241 |
+
client.setCredentials(tokens);
|
| 242 |
+
} catch (_error) {
|
| 243 |
+
return false;
|
| 244 |
+
}
|
| 245 |
+
return true;
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
async function authWithWeb(client: OAuth2Client): Promise<OauthWebLogin> {
|
| 249 |
+
const port = await getAvailablePort();
|
| 250 |
+
// The hostname used for the HTTP server binding (e.g., '0.0.0.0' in Docker).
|
| 251 |
+
const host = process.env['OAUTH_CALLBACK_HOST'] || 'localhost';
|
| 252 |
+
// The `redirectUri` sent to Google's authorization server MUST use a loopback IP literal
|
| 253 |
+
// (i.e., 'localhost' or '127.0.0.1'). This is a strict security policy for credentials of
|
| 254 |
+
// type 'Desktop app' or 'Web application' (when using loopback flow) to mitigate
|
| 255 |
+
// authorization code interception attacks.
|
| 256 |
+
const redirectUri = `http://localhost:${port}/oauth2callback`;
|
| 257 |
+
const state = crypto.randomBytes(32).toString('hex');
|
| 258 |
+
const authUrl = client.generateAuthUrl({
|
| 259 |
+
redirect_uri: redirectUri,
|
| 260 |
+
access_type: 'offline',
|
| 261 |
+
scope: OAUTH_SCOPE,
|
| 262 |
+
state,
|
| 263 |
+
});
|
| 264 |
+
|
| 265 |
+
const loginCompletePromise = new Promise<void>((resolve, reject) => {
|
| 266 |
+
const server = http.createServer(async (req, res) => {
|
| 267 |
+
try {
|
| 268 |
+
if (req.url!.indexOf('/oauth2callback') === -1) {
|
| 269 |
+
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
|
| 270 |
+
res.end();
|
| 271 |
+
reject(new Error('Unexpected request: ' + req.url));
|
| 272 |
+
}
|
| 273 |
+
// acquire the code from the querystring, and close the web server.
|
| 274 |
+
const qs = new url.URL(req.url!, 'http://localhost:3000').searchParams;
|
| 275 |
+
if (qs.get('error')) {
|
| 276 |
+
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
|
| 277 |
+
res.end();
|
| 278 |
+
|
| 279 |
+
reject(new Error(`Error during authentication: ${qs.get('error')}`));
|
| 280 |
+
} else if (qs.get('state') !== state) {
|
| 281 |
+
res.end('State mismatch. Possible CSRF attack');
|
| 282 |
+
|
| 283 |
+
reject(new Error('State mismatch. Possible CSRF attack'));
|
| 284 |
+
} else if (qs.get('code')) {
|
| 285 |
+
const { tokens } = await client.getToken({
|
| 286 |
+
code: qs.get('code')!,
|
| 287 |
+
redirect_uri: redirectUri,
|
| 288 |
+
});
|
| 289 |
+
client.setCredentials(tokens);
|
| 290 |
+
// Retrieve and cache Google Account ID during authentication
|
| 291 |
+
try {
|
| 292 |
+
await fetchAndCacheUserInfo(client);
|
| 293 |
+
} catch (error) {
|
| 294 |
+
console.error(
|
| 295 |
+
'Failed to retrieve Google Account ID during authentication:',
|
| 296 |
+
error,
|
| 297 |
+
);
|
| 298 |
+
// Don't fail the auth flow if Google Account ID retrieval fails
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_SUCCESS_URL });
|
| 302 |
+
res.end();
|
| 303 |
+
resolve();
|
| 304 |
+
} else {
|
| 305 |
+
reject(new Error('No code found in request'));
|
| 306 |
+
}
|
| 307 |
+
} catch (e) {
|
| 308 |
+
reject(e);
|
| 309 |
+
} finally {
|
| 310 |
+
server.close();
|
| 311 |
+
}
|
| 312 |
+
});
|
| 313 |
+
server.listen(port, host);
|
| 314 |
+
});
|
| 315 |
+
|
| 316 |
+
return {
|
| 317 |
+
authUrl,
|
| 318 |
+
loginCompletePromise,
|
| 319 |
+
};
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
export function getAvailablePort(): Promise<number> {
|
| 323 |
+
return new Promise((resolve, reject) => {
|
| 324 |
+
let port = 0;
|
| 325 |
+
try {
|
| 326 |
+
const portStr = process.env['OAUTH_CALLBACK_PORT'];
|
| 327 |
+
if (portStr) {
|
| 328 |
+
port = parseInt(portStr, 10);
|
| 329 |
+
if (isNaN(port) || port <= 0 || port > 65535) {
|
| 330 |
+
return reject(
|
| 331 |
+
new Error(`Invalid value for OAUTH_CALLBACK_PORT: "${portStr}"`),
|
| 332 |
+
);
|
| 333 |
+
}
|
| 334 |
+
return resolve(port);
|
| 335 |
+
}
|
| 336 |
+
const server = net.createServer();
|
| 337 |
+
server.listen(0, () => {
|
| 338 |
+
const address = server.address()! as net.AddressInfo;
|
| 339 |
+
port = address.port;
|
| 340 |
+
});
|
| 341 |
+
server.on('listening', () => {
|
| 342 |
+
server.close();
|
| 343 |
+
server.unref();
|
| 344 |
+
});
|
| 345 |
+
server.on('error', (e) => reject(e));
|
| 346 |
+
server.on('close', () => resolve(port));
|
| 347 |
+
} catch (e) {
|
| 348 |
+
reject(e);
|
| 349 |
+
}
|
| 350 |
+
});
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
async function loadCachedCredentials(client: OAuth2Client): Promise<boolean> {
|
| 354 |
+
const pathsToTry = [
|
| 355 |
+
getCachedCredentialPath(),
|
| 356 |
+
process.env['GOOGLE_APPLICATION_CREDENTIALS'],
|
| 357 |
+
].filter((p): p is string => !!p);
|
| 358 |
+
|
| 359 |
+
for (const keyFile of pathsToTry) {
|
| 360 |
+
try {
|
| 361 |
+
const creds = await fs.readFile(keyFile, 'utf-8');
|
| 362 |
+
client.setCredentials(JSON.parse(creds));
|
| 363 |
+
|
| 364 |
+
// This will verify locally that the credentials look good.
|
| 365 |
+
const { token } = await client.getAccessToken();
|
| 366 |
+
if (!token) {
|
| 367 |
+
continue;
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
// This will check with the server to see if it hasn't been revoked.
|
| 371 |
+
await client.getTokenInfo(token);
|
| 372 |
+
|
| 373 |
+
return true;
|
| 374 |
+
} catch (_) {
|
| 375 |
+
// Ignore and try next path.
|
| 376 |
+
}
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
return false;
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
async function cacheCredentials(credentials: Credentials) {
|
| 383 |
+
const filePath = getCachedCredentialPath();
|
| 384 |
+
await fs.mkdir(path.dirname(filePath), { recursive: true });
|
| 385 |
+
|
| 386 |
+
const credString = JSON.stringify(credentials, null, 2);
|
| 387 |
+
await fs.writeFile(filePath, credString, { mode: 0o600 });
|
| 388 |
+
}
|
| 389 |
+
|
| 390 |
+
function getCachedCredentialPath(): string {
|
| 391 |
+
return path.join(os.homedir(), QWEN_DIR, CREDENTIAL_FILENAME);
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
export function clearOauthClientCache() {
|
| 395 |
+
oauthClientPromises.clear();
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
export async function clearCachedCredentialFile() {
|
| 399 |
+
try {
|
| 400 |
+
await fs.rm(getCachedCredentialPath(), { force: true });
|
| 401 |
+
// Clear the Google Account ID cache when credentials are cleared
|
| 402 |
+
await clearCachedGoogleAccount();
|
| 403 |
+
// Clear the in-memory OAuth client cache to force re-authentication
|
| 404 |
+
clearOauthClientCache();
|
| 405 |
+
} catch (e) {
|
| 406 |
+
console.error('Failed to clear cached credentials:', e);
|
| 407 |
+
}
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
async function fetchAndCacheUserInfo(client: OAuth2Client): Promise<void> {
|
| 411 |
+
try {
|
| 412 |
+
const { token } = await client.getAccessToken();
|
| 413 |
+
if (!token) {
|
| 414 |
+
return;
|
| 415 |
+
}
|
| 416 |
+
|
| 417 |
+
const response = await fetch(
|
| 418 |
+
'https://www.googleapis.com/oauth2/v2/userinfo',
|
| 419 |
+
{
|
| 420 |
+
headers: {
|
| 421 |
+
Authorization: `Bearer ${token}`,
|
| 422 |
+
},
|
| 423 |
+
},
|
| 424 |
+
);
|
| 425 |
+
|
| 426 |
+
if (!response.ok) {
|
| 427 |
+
console.error(
|
| 428 |
+
'Failed to fetch user info:',
|
| 429 |
+
response.status,
|
| 430 |
+
response.statusText,
|
| 431 |
+
);
|
| 432 |
+
return;
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
const userInfo = await response.json();
|
| 436 |
+
if (userInfo.email) {
|
| 437 |
+
await cacheGoogleAccount(userInfo.email);
|
| 438 |
+
}
|
| 439 |
+
} catch (error) {
|
| 440 |
+
console.error('Error retrieving user info:', error);
|
| 441 |
+
}
|
| 442 |
+
}
|
| 443 |
+
|
| 444 |
+
// Helper to ensure test isolation
|
| 445 |
+
export function resetOauthClientForTesting() {
|
| 446 |
+
oauthClientPromises.clear();
|
| 447 |
+
}
|
projects/ui/qwen-code/packages/core/src/code_assist/server.test.ts
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { beforeEach, describe, it, expect, vi } from 'vitest';
|
| 8 |
+
import { CodeAssistServer } from './server.js';
|
| 9 |
+
import { OAuth2Client } from 'google-auth-library';
|
| 10 |
+
import { UserTierId } from './types.js';
|
| 11 |
+
|
| 12 |
+
vi.mock('google-auth-library');
|
| 13 |
+
|
| 14 |
+
describe('CodeAssistServer', () => {
|
| 15 |
+
beforeEach(() => {
|
| 16 |
+
vi.resetAllMocks();
|
| 17 |
+
});
|
| 18 |
+
|
| 19 |
+
it('should be able to be constructed', () => {
|
| 20 |
+
const auth = new OAuth2Client();
|
| 21 |
+
const server = new CodeAssistServer(
|
| 22 |
+
auth,
|
| 23 |
+
'test-project',
|
| 24 |
+
{},
|
| 25 |
+
'test-session',
|
| 26 |
+
UserTierId.FREE,
|
| 27 |
+
);
|
| 28 |
+
expect(server).toBeInstanceOf(CodeAssistServer);
|
| 29 |
+
});
|
| 30 |
+
|
| 31 |
+
it('should call the generateContent endpoint', async () => {
|
| 32 |
+
const client = new OAuth2Client();
|
| 33 |
+
const server = new CodeAssistServer(
|
| 34 |
+
client,
|
| 35 |
+
'test-project',
|
| 36 |
+
{},
|
| 37 |
+
'test-session',
|
| 38 |
+
UserTierId.FREE,
|
| 39 |
+
);
|
| 40 |
+
const mockResponse = {
|
| 41 |
+
response: {
|
| 42 |
+
candidates: [
|
| 43 |
+
{
|
| 44 |
+
index: 0,
|
| 45 |
+
content: {
|
| 46 |
+
role: 'model',
|
| 47 |
+
parts: [{ text: 'response' }],
|
| 48 |
+
},
|
| 49 |
+
finishReason: 'STOP',
|
| 50 |
+
safetyRatings: [],
|
| 51 |
+
},
|
| 52 |
+
],
|
| 53 |
+
},
|
| 54 |
+
};
|
| 55 |
+
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
|
| 56 |
+
|
| 57 |
+
const response = await server.generateContent(
|
| 58 |
+
{
|
| 59 |
+
model: 'test-model',
|
| 60 |
+
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
|
| 61 |
+
},
|
| 62 |
+
'user-prompt-id',
|
| 63 |
+
);
|
| 64 |
+
|
| 65 |
+
expect(server.requestPost).toHaveBeenCalledWith(
|
| 66 |
+
'generateContent',
|
| 67 |
+
expect.any(Object),
|
| 68 |
+
undefined,
|
| 69 |
+
);
|
| 70 |
+
expect(response.candidates?.[0]?.content?.parts?.[0]?.text).toBe(
|
| 71 |
+
'response',
|
| 72 |
+
);
|
| 73 |
+
});
|
| 74 |
+
|
| 75 |
+
it('should call the generateContentStream endpoint', async () => {
|
| 76 |
+
const client = new OAuth2Client();
|
| 77 |
+
const server = new CodeAssistServer(
|
| 78 |
+
client,
|
| 79 |
+
'test-project',
|
| 80 |
+
{},
|
| 81 |
+
'test-session',
|
| 82 |
+
UserTierId.FREE,
|
| 83 |
+
);
|
| 84 |
+
const mockResponse = (async function* () {
|
| 85 |
+
yield {
|
| 86 |
+
response: {
|
| 87 |
+
candidates: [
|
| 88 |
+
{
|
| 89 |
+
index: 0,
|
| 90 |
+
content: {
|
| 91 |
+
role: 'model',
|
| 92 |
+
parts: [{ text: 'response' }],
|
| 93 |
+
},
|
| 94 |
+
finishReason: 'STOP',
|
| 95 |
+
safetyRatings: [],
|
| 96 |
+
},
|
| 97 |
+
],
|
| 98 |
+
},
|
| 99 |
+
};
|
| 100 |
+
})();
|
| 101 |
+
vi.spyOn(server, 'requestStreamingPost').mockResolvedValue(mockResponse);
|
| 102 |
+
|
| 103 |
+
const stream = await server.generateContentStream(
|
| 104 |
+
{
|
| 105 |
+
model: 'test-model',
|
| 106 |
+
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
|
| 107 |
+
},
|
| 108 |
+
'user-prompt-id',
|
| 109 |
+
);
|
| 110 |
+
|
| 111 |
+
for await (const res of stream) {
|
| 112 |
+
expect(server.requestStreamingPost).toHaveBeenCalledWith(
|
| 113 |
+
'streamGenerateContent',
|
| 114 |
+
expect.any(Object),
|
| 115 |
+
undefined,
|
| 116 |
+
);
|
| 117 |
+
expect(res.candidates?.[0]?.content?.parts?.[0]?.text).toBe('response');
|
| 118 |
+
}
|
| 119 |
+
});
|
| 120 |
+
|
| 121 |
+
it('should call the onboardUser endpoint', async () => {
|
| 122 |
+
const client = new OAuth2Client();
|
| 123 |
+
const server = new CodeAssistServer(
|
| 124 |
+
client,
|
| 125 |
+
'test-project',
|
| 126 |
+
{},
|
| 127 |
+
'test-session',
|
| 128 |
+
UserTierId.FREE,
|
| 129 |
+
);
|
| 130 |
+
const mockResponse = {
|
| 131 |
+
name: 'operations/123',
|
| 132 |
+
done: true,
|
| 133 |
+
};
|
| 134 |
+
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
|
| 135 |
+
|
| 136 |
+
const response = await server.onboardUser({
|
| 137 |
+
tierId: 'test-tier',
|
| 138 |
+
cloudaicompanionProject: 'test-project',
|
| 139 |
+
metadata: {},
|
| 140 |
+
});
|
| 141 |
+
|
| 142 |
+
expect(server.requestPost).toHaveBeenCalledWith(
|
| 143 |
+
'onboardUser',
|
| 144 |
+
expect.any(Object),
|
| 145 |
+
);
|
| 146 |
+
expect(response.name).toBe('operations/123');
|
| 147 |
+
});
|
| 148 |
+
|
| 149 |
+
it('should call the loadCodeAssist endpoint', async () => {
|
| 150 |
+
const client = new OAuth2Client();
|
| 151 |
+
const server = new CodeAssistServer(
|
| 152 |
+
client,
|
| 153 |
+
'test-project',
|
| 154 |
+
{},
|
| 155 |
+
'test-session',
|
| 156 |
+
UserTierId.FREE,
|
| 157 |
+
);
|
| 158 |
+
const mockResponse = {
|
| 159 |
+
currentTier: {
|
| 160 |
+
id: UserTierId.FREE,
|
| 161 |
+
name: 'Free',
|
| 162 |
+
description: 'free tier',
|
| 163 |
+
},
|
| 164 |
+
allowedTiers: [],
|
| 165 |
+
ineligibleTiers: [],
|
| 166 |
+
cloudaicompanionProject: 'projects/test',
|
| 167 |
+
};
|
| 168 |
+
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
|
| 169 |
+
|
| 170 |
+
const response = await server.loadCodeAssist({
|
| 171 |
+
metadata: {},
|
| 172 |
+
});
|
| 173 |
+
|
| 174 |
+
expect(server.requestPost).toHaveBeenCalledWith(
|
| 175 |
+
'loadCodeAssist',
|
| 176 |
+
expect.any(Object),
|
| 177 |
+
);
|
| 178 |
+
expect(response).toEqual(mockResponse);
|
| 179 |
+
});
|
| 180 |
+
|
| 181 |
+
it('should return 0 for countTokens', async () => {
|
| 182 |
+
const client = new OAuth2Client();
|
| 183 |
+
const server = new CodeAssistServer(
|
| 184 |
+
client,
|
| 185 |
+
'test-project',
|
| 186 |
+
{},
|
| 187 |
+
'test-session',
|
| 188 |
+
UserTierId.FREE,
|
| 189 |
+
);
|
| 190 |
+
const mockResponse = {
|
| 191 |
+
totalTokens: 100,
|
| 192 |
+
};
|
| 193 |
+
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
|
| 194 |
+
|
| 195 |
+
const response = await server.countTokens({
|
| 196 |
+
model: 'test-model',
|
| 197 |
+
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
|
| 198 |
+
});
|
| 199 |
+
expect(response.totalTokens).toBe(100);
|
| 200 |
+
});
|
| 201 |
+
|
| 202 |
+
it('should throw an error for embedContent', async () => {
|
| 203 |
+
const client = new OAuth2Client();
|
| 204 |
+
const server = new CodeAssistServer(
|
| 205 |
+
client,
|
| 206 |
+
'test-project',
|
| 207 |
+
{},
|
| 208 |
+
'test-session',
|
| 209 |
+
UserTierId.FREE,
|
| 210 |
+
);
|
| 211 |
+
await expect(
|
| 212 |
+
server.embedContent({
|
| 213 |
+
model: 'test-model',
|
| 214 |
+
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
|
| 215 |
+
}),
|
| 216 |
+
).rejects.toThrow();
|
| 217 |
+
});
|
| 218 |
+
});
|
projects/ui/qwen-code/packages/core/src/code_assist/server.ts
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { OAuth2Client } from 'google-auth-library';
|
| 8 |
+
import {
|
| 9 |
+
CodeAssistGlobalUserSettingResponse,
|
| 10 |
+
LoadCodeAssistRequest,
|
| 11 |
+
LoadCodeAssistResponse,
|
| 12 |
+
LongRunningOperationResponse,
|
| 13 |
+
OnboardUserRequest,
|
| 14 |
+
SetCodeAssistGlobalUserSettingRequest,
|
| 15 |
+
} from './types.js';
|
| 16 |
+
import {
|
| 17 |
+
CountTokensParameters,
|
| 18 |
+
CountTokensResponse,
|
| 19 |
+
EmbedContentParameters,
|
| 20 |
+
EmbedContentResponse,
|
| 21 |
+
GenerateContentParameters,
|
| 22 |
+
GenerateContentResponse,
|
| 23 |
+
} from '@google/genai';
|
| 24 |
+
import * as readline from 'readline';
|
| 25 |
+
import { ContentGenerator } from '../core/contentGenerator.js';
|
| 26 |
+
import { UserTierId } from './types.js';
|
| 27 |
+
import {
|
| 28 |
+
CaCountTokenResponse,
|
| 29 |
+
CaGenerateContentResponse,
|
| 30 |
+
fromCountTokenResponse,
|
| 31 |
+
fromGenerateContentResponse,
|
| 32 |
+
toCountTokenRequest,
|
| 33 |
+
toGenerateContentRequest,
|
| 34 |
+
} from './converter.js';
|
| 35 |
+
|
| 36 |
+
/** HTTP options to be used in each of the requests. */
|
| 37 |
+
export interface HttpOptions {
|
| 38 |
+
/** Additional HTTP headers to be sent with the request. */
|
| 39 |
+
headers?: Record<string, string>;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
export const CODE_ASSIST_ENDPOINT = 'https://localhost:0'; // Disable Google Code Assist API Request
|
| 43 |
+
export const CODE_ASSIST_API_VERSION = 'v1internal';
|
| 44 |
+
|
| 45 |
+
export class CodeAssistServer implements ContentGenerator {
|
| 46 |
+
constructor(
|
| 47 |
+
readonly client: OAuth2Client,
|
| 48 |
+
readonly projectId?: string,
|
| 49 |
+
readonly httpOptions: HttpOptions = {},
|
| 50 |
+
readonly sessionId?: string,
|
| 51 |
+
readonly userTier?: UserTierId,
|
| 52 |
+
) {}
|
| 53 |
+
|
| 54 |
+
async generateContentStream(
|
| 55 |
+
req: GenerateContentParameters,
|
| 56 |
+
userPromptId: string,
|
| 57 |
+
): Promise<AsyncGenerator<GenerateContentResponse>> {
|
| 58 |
+
const resps = await this.requestStreamingPost<CaGenerateContentResponse>(
|
| 59 |
+
'streamGenerateContent',
|
| 60 |
+
toGenerateContentRequest(
|
| 61 |
+
req,
|
| 62 |
+
userPromptId,
|
| 63 |
+
this.projectId,
|
| 64 |
+
this.sessionId,
|
| 65 |
+
),
|
| 66 |
+
req.config?.abortSignal,
|
| 67 |
+
);
|
| 68 |
+
return (async function* (): AsyncGenerator<GenerateContentResponse> {
|
| 69 |
+
for await (const resp of resps) {
|
| 70 |
+
yield fromGenerateContentResponse(resp);
|
| 71 |
+
}
|
| 72 |
+
})();
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
async generateContent(
|
| 76 |
+
req: GenerateContentParameters,
|
| 77 |
+
userPromptId: string,
|
| 78 |
+
): Promise<GenerateContentResponse> {
|
| 79 |
+
const resp = await this.requestPost<CaGenerateContentResponse>(
|
| 80 |
+
'generateContent',
|
| 81 |
+
toGenerateContentRequest(
|
| 82 |
+
req,
|
| 83 |
+
userPromptId,
|
| 84 |
+
this.projectId,
|
| 85 |
+
this.sessionId,
|
| 86 |
+
),
|
| 87 |
+
req.config?.abortSignal,
|
| 88 |
+
);
|
| 89 |
+
return fromGenerateContentResponse(resp);
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
async onboardUser(
|
| 93 |
+
req: OnboardUserRequest,
|
| 94 |
+
): Promise<LongRunningOperationResponse> {
|
| 95 |
+
return await this.requestPost<LongRunningOperationResponse>(
|
| 96 |
+
'onboardUser',
|
| 97 |
+
req,
|
| 98 |
+
);
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
async loadCodeAssist(
|
| 102 |
+
req: LoadCodeAssistRequest,
|
| 103 |
+
): Promise<LoadCodeAssistResponse> {
|
| 104 |
+
return await this.requestPost<LoadCodeAssistResponse>(
|
| 105 |
+
'loadCodeAssist',
|
| 106 |
+
req,
|
| 107 |
+
);
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
async getCodeAssistGlobalUserSetting(): Promise<CodeAssistGlobalUserSettingResponse> {
|
| 111 |
+
return await this.requestGet<CodeAssistGlobalUserSettingResponse>(
|
| 112 |
+
'getCodeAssistGlobalUserSetting',
|
| 113 |
+
);
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
async setCodeAssistGlobalUserSetting(
|
| 117 |
+
req: SetCodeAssistGlobalUserSettingRequest,
|
| 118 |
+
): Promise<CodeAssistGlobalUserSettingResponse> {
|
| 119 |
+
return await this.requestPost<CodeAssistGlobalUserSettingResponse>(
|
| 120 |
+
'setCodeAssistGlobalUserSetting',
|
| 121 |
+
req,
|
| 122 |
+
);
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
|
| 126 |
+
const resp = await this.requestPost<CaCountTokenResponse>(
|
| 127 |
+
'countTokens',
|
| 128 |
+
toCountTokenRequest(req),
|
| 129 |
+
);
|
| 130 |
+
return fromCountTokenResponse(resp);
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
async embedContent(
|
| 134 |
+
_req: EmbedContentParameters,
|
| 135 |
+
): Promise<EmbedContentResponse> {
|
| 136 |
+
throw Error();
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
async requestPost<T>(
|
| 140 |
+
method: string,
|
| 141 |
+
req: object,
|
| 142 |
+
signal?: AbortSignal,
|
| 143 |
+
): Promise<T> {
|
| 144 |
+
const res = await this.client.request({
|
| 145 |
+
url: this.getMethodUrl(method),
|
| 146 |
+
method: 'POST',
|
| 147 |
+
headers: {
|
| 148 |
+
'Content-Type': 'application/json',
|
| 149 |
+
...this.httpOptions.headers,
|
| 150 |
+
},
|
| 151 |
+
responseType: 'json',
|
| 152 |
+
body: JSON.stringify(req),
|
| 153 |
+
signal,
|
| 154 |
+
});
|
| 155 |
+
return res.data as T;
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
async requestGet<T>(method: string, signal?: AbortSignal): Promise<T> {
|
| 159 |
+
const res = await this.client.request({
|
| 160 |
+
url: this.getMethodUrl(method),
|
| 161 |
+
method: 'GET',
|
| 162 |
+
headers: {
|
| 163 |
+
'Content-Type': 'application/json',
|
| 164 |
+
...this.httpOptions.headers,
|
| 165 |
+
},
|
| 166 |
+
responseType: 'json',
|
| 167 |
+
signal,
|
| 168 |
+
});
|
| 169 |
+
return res.data as T;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
async requestStreamingPost<T>(
|
| 173 |
+
method: string,
|
| 174 |
+
req: object,
|
| 175 |
+
signal?: AbortSignal,
|
| 176 |
+
): Promise<AsyncGenerator<T>> {
|
| 177 |
+
const res = await this.client.request({
|
| 178 |
+
url: this.getMethodUrl(method),
|
| 179 |
+
method: 'POST',
|
| 180 |
+
params: {
|
| 181 |
+
alt: 'sse',
|
| 182 |
+
},
|
| 183 |
+
headers: {
|
| 184 |
+
'Content-Type': 'application/json',
|
| 185 |
+
...this.httpOptions.headers,
|
| 186 |
+
},
|
| 187 |
+
responseType: 'stream',
|
| 188 |
+
body: JSON.stringify(req),
|
| 189 |
+
signal,
|
| 190 |
+
});
|
| 191 |
+
|
| 192 |
+
return (async function* (): AsyncGenerator<T> {
|
| 193 |
+
const rl = readline.createInterface({
|
| 194 |
+
input: res.data as NodeJS.ReadableStream,
|
| 195 |
+
crlfDelay: Infinity, // Recognizes '\r\n' and '\n' as line breaks
|
| 196 |
+
});
|
| 197 |
+
|
| 198 |
+
let bufferedLines: string[] = [];
|
| 199 |
+
for await (const line of rl) {
|
| 200 |
+
// blank lines are used to separate JSON objects in the stream
|
| 201 |
+
if (line === '') {
|
| 202 |
+
if (bufferedLines.length === 0) {
|
| 203 |
+
continue; // no data to yield
|
| 204 |
+
}
|
| 205 |
+
yield JSON.parse(bufferedLines.join('\n')) as T;
|
| 206 |
+
bufferedLines = []; // Reset the buffer after yielding
|
| 207 |
+
} else if (line.startsWith('data: ')) {
|
| 208 |
+
bufferedLines.push(line.slice(6).trim());
|
| 209 |
+
} else {
|
| 210 |
+
throw new Error(`Unexpected line format in response: ${line}`);
|
| 211 |
+
}
|
| 212 |
+
}
|
| 213 |
+
})();
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
getMethodUrl(method: string): string {
|
| 217 |
+
const endpoint =
|
| 218 |
+
process.env['CODE_ASSIST_ENDPOINT'] ?? CODE_ASSIST_ENDPOINT;
|
| 219 |
+
return `${endpoint}/${CODE_ASSIST_API_VERSION}:${method}`;
|
| 220 |
+
}
|
| 221 |
+
}
|
projects/ui/qwen-code/packages/core/src/code_assist/setup.test.ts
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
| 8 |
+
import { setupUser, ProjectIdRequiredError } from './setup.js';
|
| 9 |
+
import { CodeAssistServer } from '../code_assist/server.js';
|
| 10 |
+
import { OAuth2Client } from 'google-auth-library';
|
| 11 |
+
import { GeminiUserTier, UserTierId } from './types.js';
|
| 12 |
+
|
| 13 |
+
vi.mock('../code_assist/server.js');
|
| 14 |
+
|
| 15 |
+
const mockPaidTier: GeminiUserTier = {
|
| 16 |
+
id: UserTierId.STANDARD,
|
| 17 |
+
name: 'paid',
|
| 18 |
+
description: 'Paid tier',
|
| 19 |
+
isDefault: true,
|
| 20 |
+
};
|
| 21 |
+
|
| 22 |
+
const mockFreeTier: GeminiUserTier = {
|
| 23 |
+
id: UserTierId.FREE,
|
| 24 |
+
name: 'free',
|
| 25 |
+
description: 'Free tier',
|
| 26 |
+
isDefault: true,
|
| 27 |
+
};
|
| 28 |
+
|
| 29 |
+
// Covers setupUser for users that already have a tier (loadCodeAssist returns
// `currentTier`), i.e. the path that never calls onboardUser.
describe('setupUser for existing user', () => {
  let mockLoad: ReturnType<typeof vi.fn>;
  let mockOnboardUser: ReturnType<typeof vi.fn>;

  beforeEach(() => {
    vi.resetAllMocks();
    mockLoad = vi.fn();
    // Configured for completeness; the tests in this suite never reach
    // onboarding because a currentTier is always present.
    mockOnboardUser = vi.fn().mockResolvedValue({
      done: true,
      response: {
        cloudaicompanionProject: {
          id: 'server-project',
        },
      },
    });
    // Replace the real server with a stub exposing only the two methods
    // setupUser calls.
    vi.mocked(CodeAssistServer).mockImplementation(
      () =>
        ({
          loadCodeAssist: mockLoad,
          onboardUser: mockOnboardUser,
        }) as unknown as CodeAssistServer,
    );
  });

  afterEach(() => {
    // Undo vi.stubEnv('GOOGLE_CLOUD_PROJECT', ...) between tests.
    vi.unstubAllEnvs();
  });

  it('should use GOOGLE_CLOUD_PROJECT when set and project from server is undefined', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
    mockLoad.mockResolvedValue({
      currentTier: mockPaidTier,
    });
    await setupUser({} as OAuth2Client);
    // The env project is passed to the server constructor.
    expect(CodeAssistServer).toHaveBeenCalledWith(
      {},
      'test-project',
      {},
      '',
      undefined,
    );
  });

  it('should ignore GOOGLE_CLOUD_PROJECT when project from server is set', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
    mockLoad.mockResolvedValue({
      cloudaicompanionProject: 'server-project',
      currentTier: mockPaidTier,
    });
    // NOTE(review): despite the name, this local holds the full UserData
    // object returned by setupUser, not just a project id.
    const projectId = await setupUser({} as OAuth2Client);
    expect(CodeAssistServer).toHaveBeenCalledWith(
      {},
      'test-project',
      {},
      '',
      undefined,
    );
    // The server-provided project wins over the env var.
    expect(projectId).toEqual({
      projectId: 'server-project',
      userTier: 'standard-tier',
    });
  });

  it('should throw ProjectIdRequiredError when no project ID is available', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    // And the server itself requires a project ID internally
    vi.mocked(CodeAssistServer).mockImplementation(() => {
      throw new ProjectIdRequiredError();
    });

    await expect(setupUser({} as OAuth2Client)).rejects.toThrow(
      ProjectIdRequiredError,
    );
  });
});
|
| 104 |
+
|
| 105 |
+
// Covers setupUser for users with no currentTier: loadCodeAssist returns only
// `allowedTiers`, so setupUser must onboard the user via onboardUser.
describe('setupUser for new user', () => {
  let mockLoad: ReturnType<typeof vi.fn>;
  let mockOnboardUser: ReturnType<typeof vi.fn>;

  beforeEach(() => {
    vi.resetAllMocks();
    mockLoad = vi.fn();
    // Default onboarding outcome: operation already done, server assigns
    // 'server-project'. Individual tests override this where needed.
    mockOnboardUser = vi.fn().mockResolvedValue({
      done: true,
      response: {
        cloudaicompanionProject: {
          id: 'server-project',
        },
      },
    });
    vi.mocked(CodeAssistServer).mockImplementation(
      () =>
        ({
          loadCodeAssist: mockLoad,
          onboardUser: mockOnboardUser,
        }) as unknown as CodeAssistServer,
    );
  });

  afterEach(() => {
    vi.unstubAllEnvs();
  });

  it('should use GOOGLE_CLOUD_PROJECT when set and onboard a new paid user', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
    mockLoad.mockResolvedValue({
      allowedTiers: [mockPaidTier],
    });
    const userData = await setupUser({} as OAuth2Client);
    expect(CodeAssistServer).toHaveBeenCalledWith(
      {},
      'test-project',
      {},
      '',
      undefined,
    );
    expect(mockLoad).toHaveBeenCalled();
    // Paid-tier onboarding forwards the env project in both the request body
    // and the metadata's duetProject field.
    expect(mockOnboardUser).toHaveBeenCalledWith({
      tierId: 'standard-tier',
      cloudaicompanionProject: 'test-project',
      metadata: {
        ideType: 'IDE_UNSPECIFIED',
        platform: 'PLATFORM_UNSPECIFIED',
        pluginType: 'GEMINI',
        duetProject: 'test-project',
      },
    });
    expect(userData).toEqual({
      projectId: 'server-project',
      userTier: 'standard-tier',
    });
  });

  it('should onboard a new free user when GOOGLE_CLOUD_PROJECT is not set', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    mockLoad.mockResolvedValue({
      allowedTiers: [mockFreeTier],
    });
    const userData = await setupUser({} as OAuth2Client);
    expect(CodeAssistServer).toHaveBeenCalledWith(
      {},
      undefined,
      {},
      '',
      undefined,
    );
    expect(mockLoad).toHaveBeenCalled();
    // Free tier must NOT send a project (server manages it) and must not set
    // duetProject in the metadata.
    expect(mockOnboardUser).toHaveBeenCalledWith({
      tierId: 'free-tier',
      cloudaicompanionProject: undefined,
      metadata: {
        ideType: 'IDE_UNSPECIFIED',
        platform: 'PLATFORM_UNSPECIFIED',
        pluginType: 'GEMINI',
      },
    });
    expect(userData).toEqual({
      projectId: 'server-project',
      userTier: 'free-tier',
    });
  });

  it('should use GOOGLE_CLOUD_PROJECT when onboard response has no project ID', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
    mockLoad.mockResolvedValue({
      allowedTiers: [mockPaidTier],
    });
    // Onboarding completes but the server assigns no project; setupUser must
    // fall back to the env var.
    mockOnboardUser.mockResolvedValue({
      done: true,
      response: {
        cloudaicompanionProject: undefined,
      },
    });
    const userData = await setupUser({} as OAuth2Client);
    expect(userData).toEqual({
      projectId: 'test-project',
      userTier: 'standard-tier',
    });
  });

  it('should throw ProjectIdRequiredError when no project ID is available', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    mockLoad.mockResolvedValue({
      allowedTiers: [mockPaidTier],
    });
    mockOnboardUser.mockResolvedValue({
      done: true,
      response: {},
    });
    await expect(setupUser({} as OAuth2Client)).rejects.toThrow(
      ProjectIdRequiredError,
    );
  });
});
|
projects/ui/qwen-code/packages/core/src/code_assist/setup.ts
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import {
|
| 8 |
+
ClientMetadata,
|
| 9 |
+
GeminiUserTier,
|
| 10 |
+
LoadCodeAssistResponse,
|
| 11 |
+
OnboardUserRequest,
|
| 12 |
+
UserTierId,
|
| 13 |
+
} from './types.js';
|
| 14 |
+
import { CodeAssistServer } from './server.js';
|
| 15 |
+
import { OAuth2Client } from 'google-auth-library';
|
| 16 |
+
|
| 17 |
+
/**
 * Thrown when neither the Code Assist server nor the GOOGLE_CLOUD_PROJECT
 * environment variable can supply a Google Cloud project id for the user.
 * The message points users at the auth documentation for Workspace accounts.
 */
export class ProjectIdRequiredError extends Error {
  constructor() {
    super(
      'This account requires setting the GOOGLE_CLOUD_PROJECT env var. See https://goo.gle/gemini-cli-auth-docs#workspace-gca',
    );
  }
}
|
| 24 |
+
|
| 25 |
+
/**
 * Result of {@link setupUser}: the Google Cloud project to use for Code
 * Assist requests and the tier the user ends up on.
 */
export interface UserData {
  projectId: string;
  userTier: UserTierId;
}
|
| 29 |
+
|
| 30 |
+
/**
|
| 31 |
+
*
|
| 32 |
+
* @param projectId the user's project id, if any
|
| 33 |
+
* @returns the user's actual project id
|
| 34 |
+
*/
|
| 35 |
+
export async function setupUser(client: OAuth2Client): Promise<UserData> {
|
| 36 |
+
const projectId = process.env['GOOGLE_CLOUD_PROJECT'] || undefined;
|
| 37 |
+
const caServer = new CodeAssistServer(client, projectId, {}, '', undefined);
|
| 38 |
+
const coreClientMetadata: ClientMetadata = {
|
| 39 |
+
ideType: 'IDE_UNSPECIFIED',
|
| 40 |
+
platform: 'PLATFORM_UNSPECIFIED',
|
| 41 |
+
pluginType: 'GEMINI',
|
| 42 |
+
};
|
| 43 |
+
|
| 44 |
+
const loadRes = await caServer.loadCodeAssist({
|
| 45 |
+
cloudaicompanionProject: projectId,
|
| 46 |
+
metadata: {
|
| 47 |
+
...coreClientMetadata,
|
| 48 |
+
duetProject: projectId,
|
| 49 |
+
},
|
| 50 |
+
});
|
| 51 |
+
|
| 52 |
+
if (loadRes.currentTier) {
|
| 53 |
+
if (!loadRes.cloudaicompanionProject) {
|
| 54 |
+
if (projectId) {
|
| 55 |
+
return {
|
| 56 |
+
projectId,
|
| 57 |
+
userTier: loadRes.currentTier.id,
|
| 58 |
+
};
|
| 59 |
+
}
|
| 60 |
+
throw new ProjectIdRequiredError();
|
| 61 |
+
}
|
| 62 |
+
return {
|
| 63 |
+
projectId: loadRes.cloudaicompanionProject,
|
| 64 |
+
userTier: loadRes.currentTier.id,
|
| 65 |
+
};
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
const tier = getOnboardTier(loadRes);
|
| 69 |
+
|
| 70 |
+
let onboardReq: OnboardUserRequest;
|
| 71 |
+
if (tier.id === UserTierId.FREE) {
|
| 72 |
+
// The free tier uses a managed google cloud project. Setting a project in the `onboardUser` request causes a `Precondition Failed` error.
|
| 73 |
+
onboardReq = {
|
| 74 |
+
tierId: tier.id,
|
| 75 |
+
cloudaicompanionProject: undefined,
|
| 76 |
+
metadata: coreClientMetadata,
|
| 77 |
+
};
|
| 78 |
+
} else {
|
| 79 |
+
onboardReq = {
|
| 80 |
+
tierId: tier.id,
|
| 81 |
+
cloudaicompanionProject: projectId,
|
| 82 |
+
metadata: {
|
| 83 |
+
...coreClientMetadata,
|
| 84 |
+
duetProject: projectId,
|
| 85 |
+
},
|
| 86 |
+
};
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
// Poll onboardUser until long running operation is complete.
|
| 90 |
+
let lroRes = await caServer.onboardUser(onboardReq);
|
| 91 |
+
while (!lroRes.done) {
|
| 92 |
+
await new Promise((f) => setTimeout(f, 5000));
|
| 93 |
+
lroRes = await caServer.onboardUser(onboardReq);
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
if (!lroRes.response?.cloudaicompanionProject?.id) {
|
| 97 |
+
if (projectId) {
|
| 98 |
+
return {
|
| 99 |
+
projectId,
|
| 100 |
+
userTier: tier.id,
|
| 101 |
+
};
|
| 102 |
+
}
|
| 103 |
+
throw new ProjectIdRequiredError();
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
return {
|
| 107 |
+
projectId: lroRes.response.cloudaicompanionProject.id,
|
| 108 |
+
userTier: tier.id,
|
| 109 |
+
};
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
function getOnboardTier(res: LoadCodeAssistResponse): GeminiUserTier {
|
| 113 |
+
for (const tier of res.allowedTiers || []) {
|
| 114 |
+
if (tier.isDefault) {
|
| 115 |
+
return tier;
|
| 116 |
+
}
|
| 117 |
+
}
|
| 118 |
+
return {
|
| 119 |
+
name: '',
|
| 120 |
+
description: '',
|
| 121 |
+
id: UserTierId.LEGACY,
|
| 122 |
+
userDefinedCloudaicompanionProject: true,
|
| 123 |
+
};
|
| 124 |
+
}
|
projects/ui/qwen-code/packages/core/src/code_assist/types.ts
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
/**
 * Client identity and environment information attached to Code Assist
 * requests (editor, platform, plugin, and optionally an associated project).
 */
export interface ClientMetadata {
  ideType?: ClientMetadataIdeType;
  ideVersion?: string;
  pluginVersion?: string;
  platform?: ClientMetadataPlatform;
  updateChannel?: string;
  // NOTE(review): presumably the project id under the legacy "Duet" product
  // naming — confirm against the backend proto.
  duetProject?: string;
  pluginType?: ClientMetadataPluginType;
  ideName?: string;
}

/** Editor/environment the client runs in. */
export type ClientMetadataIdeType =
  | 'IDE_UNSPECIFIED'
  | 'VSCODE'
  | 'INTELLIJ'
  | 'VSCODE_CLOUD_WORKSTATION'
  | 'INTELLIJ_CLOUD_WORKSTATION'
  | 'CLOUD_SHELL';
/** OS/architecture pair of the client host. */
export type ClientMetadataPlatform =
  | 'PLATFORM_UNSPECIFIED'
  | 'DARWIN_AMD64'
  | 'DARWIN_ARM64'
  | 'LINUX_AMD64'
  | 'LINUX_ARM64'
  | 'WINDOWS_AMD64';
/** Which plugin/product is issuing the request. */
export type ClientMetadataPluginType =
  | 'PLUGIN_UNSPECIFIED'
  | 'CLOUD_CODE'
  | 'GEMINI'
  | 'AIPLUGIN_INTELLIJ'
  | 'AIPLUGIN_STUDIO';
|
| 38 |
+
|
| 39 |
+
/** Payload for CodeAssistServer.loadCodeAssist. */
export interface LoadCodeAssistRequest {
  cloudaicompanionProject?: string;
  metadata: ClientMetadata;
}

/**
 * Represents LoadCodeAssistResponse proto json field
 * http://google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=224
 */
export interface LoadCodeAssistResponse {
  // Set for already-onboarded users; absent/null means onboarding is needed.
  currentTier?: GeminiUserTier | null;
  // Tiers the user may be onboarded onto; at most one is marked isDefault.
  allowedTiers?: GeminiUserTier[] | null;
  ineligibleTiers?: IneligibleTier[] | null;
  cloudaicompanionProject?: string | null;
}

/**
 * GeminiUserTier reflects the structure received from the CodeAssist when calling LoadCodeAssist.
 */
export interface GeminiUserTier {
  id: UserTierId;
  name: string;
  description: string;
  // This value is used to declare whether a given tier requires the user to configure the project setting on the IDE settings or not.
  userDefinedCloudaicompanionProject?: boolean | null;
  isDefault?: boolean;
  privacyNotice?: PrivacyNotice;
  hasAcceptedTos?: boolean;
  hasOnboardedPreviously?: boolean;
}
|
| 69 |
+
|
| 70 |
+
/**
 * Includes information specifying the reasons for a user's ineligibility for a specific tier.
 * @param reasonCode mnemonic code representing the reason for in-eligibility.
 * @param reasonMessage message to display to the user.
 * @param tierId id of the tier.
 * @param tierName name of the tier.
 */
export interface IneligibleTier {
  reasonCode: IneligibleTierReasonCode;
  reasonMessage: string;
  tierId: UserTierId;
  tierName: string;
}

/**
 * List of predefined reason codes when a tier is blocked from a specific tier.
 * https://source.corp.google.com/piper///depot/google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=378
 */
export enum IneligibleTierReasonCode {
  // go/keep-sorted start
  DASHER_USER = 'DASHER_USER',
  INELIGIBLE_ACCOUNT = 'INELIGIBLE_ACCOUNT',
  NON_USER_ACCOUNT = 'NON_USER_ACCOUNT',
  RESTRICTED_AGE = 'RESTRICTED_AGE',
  RESTRICTED_NETWORK = 'RESTRICTED_NETWORK',
  UNKNOWN = 'UNKNOWN',
  UNKNOWN_LOCATION = 'UNKNOWN_LOCATION',
  UNSUPPORTED_LOCATION = 'UNSUPPORTED_LOCATION',
  // go/keep-sorted end
}
/**
 * UserTierId represents IDs returned from the Cloud Code Private API representing a user's tier
 *
 * //depot/google3/cloud/developer_experience/cloudcode/pa/service/usertier.go;l=16
 */
export enum UserTierId {
  FREE = 'free-tier',
  LEGACY = 'legacy-tier',
  STANDARD = 'standard-tier',
}

/**
 * PrivacyNotice reflects the structure received from the CodeAssist in regards to a tier
 * privacy notice.
 */
export interface PrivacyNotice {
  showNotice: boolean;
  noticeText?: string;
}

/**
 * Proto signature of OnboardUserRequest as payload to OnboardUser call
 */
export interface OnboardUserRequest {
  tierId: string | undefined;
  // Must be omitted for the free tier (the backend manages the project).
  cloudaicompanionProject: string | undefined;
  metadata: ClientMetadata | undefined;
}

/**
 * Represents LongRunningOperation proto
 * http://google3/google/longrunning/operations.proto;rcl=698857719;l=107
 */
export interface LongRunningOperationResponse {
  name: string;
  // True once the operation has finished; callers poll until then.
  done?: boolean;
  response?: OnboardUserResponse;
}

/**
 * Represents OnboardUserResponse proto
 * http://google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=215
 */
export interface OnboardUserResponse {
  // tslint:disable-next-line:enforce-name-casing This is the name of the field in the proto.
  cloudaicompanionProject?: {
    id: string;
    name: string;
  };
}
|
| 150 |
+
|
| 151 |
+
/**
 * Status code of user license status
 * it does not strictly correspond to the proto
 * Error value is an additional value assigned to error responses from OnboardUser
 */
export enum OnboardUserStatusCode {
  Default = 'DEFAULT',
  Notice = 'NOTICE',
  Warning = 'WARNING',
  Error = 'ERROR',
}

/**
 * Status of user onboarded to gemini
 */
export interface OnboardUserStatus {
  statusCode: OnboardUserStatusCode;
  displayMessage: string;
  helpLink: HelpLinkUrl | undefined;
}

/** A link shown alongside a status message. */
export interface HelpLinkUrl {
  description: string;
  url: string;
}

/** Payload for updating the user's global Code Assist settings. */
export interface SetCodeAssistGlobalUserSettingRequest {
  cloudaicompanionProject?: string;
  freeTierDataCollectionOptin: boolean;
}

/** Server echo of the user's global Code Assist settings. */
export interface CodeAssistGlobalUserSettingResponse {
  cloudaicompanionProject?: string;
  freeTierDataCollectionOptin: boolean;
}
|
projects/ui/qwen-code/packages/core/src/config/config.test.ts
ADDED
|
@@ -0,0 +1,592 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
| 8 |
+
import { Mock } from 'vitest';
|
| 9 |
+
import { Config, ConfigParameters, SandboxConfig } from './config.js';
|
| 10 |
+
import * as path from 'path';
|
| 11 |
+
import { setGeminiMdFilename as mockSetGeminiMdFilename } from '../tools/memoryTool.js';
|
| 12 |
+
import {
|
| 13 |
+
DEFAULT_TELEMETRY_TARGET,
|
| 14 |
+
DEFAULT_OTLP_ENDPOINT,
|
| 15 |
+
} from '../telemetry/index.js';
|
| 16 |
+
import {
|
| 17 |
+
AuthType,
|
| 18 |
+
ContentGeneratorConfig,
|
| 19 |
+
createContentGeneratorConfig,
|
| 20 |
+
} from '../core/contentGenerator.js';
|
| 21 |
+
import { GeminiClient } from '../core/client.js';
|
| 22 |
+
import { GitService } from '../services/gitService.js';
|
| 23 |
+
|
| 24 |
+
vi.mock('fs', async (importOriginal) => {
|
| 25 |
+
const actual = await importOriginal<typeof import('fs')>();
|
| 26 |
+
return {
|
| 27 |
+
...actual,
|
| 28 |
+
existsSync: vi.fn().mockReturnValue(true),
|
| 29 |
+
statSync: vi.fn().mockReturnValue({
|
| 30 |
+
isDirectory: vi.fn().mockReturnValue(true),
|
| 31 |
+
}),
|
| 32 |
+
realpathSync: vi.fn((path) => path),
|
| 33 |
+
};
|
| 34 |
+
});
|
| 35 |
+
|
| 36 |
+
vi.mock('fs', async (importOriginal) => {
|
| 37 |
+
const actual = await importOriginal<typeof import('fs')>();
|
| 38 |
+
return {
|
| 39 |
+
...actual,
|
| 40 |
+
existsSync: vi.fn().mockReturnValue(true),
|
| 41 |
+
statSync: vi.fn().mockReturnValue({
|
| 42 |
+
isDirectory: vi.fn().mockReturnValue(true),
|
| 43 |
+
}),
|
| 44 |
+
realpathSync: vi.fn((path) => path),
|
| 45 |
+
};
|
| 46 |
+
});
|
| 47 |
+
|
| 48 |
+
// Mock dependencies that might be called during Config construction or createServerConfig
// The registry stub returns empty tool lists so Config never discovers tools.
vi.mock('../tools/tool-registry', () => {
  const ToolRegistryMock = vi.fn();
  ToolRegistryMock.prototype.registerTool = vi.fn();
  ToolRegistryMock.prototype.discoverAllTools = vi.fn();
  ToolRegistryMock.prototype.getAllTools = vi.fn(() => []); // Mock methods if needed
  ToolRegistryMock.prototype.getTool = vi.fn();
  ToolRegistryMock.prototype.getFunctionDeclarations = vi.fn(() => []);
  return { ToolRegistry: ToolRegistryMock };
});
|
| 58 |
+
|
| 59 |
+
// Stub memory discovery so no hierarchical QWEN.md lookup happens on disk.
vi.mock('../utils/memoryDiscovery.js', () => ({
  loadServerHierarchicalMemory: vi.fn(),
}));

// Mock individual tools if their constructors are complex or have side effects
vi.mock('../tools/ls');
vi.mock('../tools/read-file');
vi.mock('../tools/grep');
vi.mock('../tools/glob');
vi.mock('../tools/edit');
vi.mock('../tools/shell');
vi.mock('../tools/write-file');
vi.mock('../tools/web-fetch');
vi.mock('../tools/read-many-files');
// memoryTool is mocked with explicit exports because the tests import
// setGeminiMdFilename and rely on the QWEN.md default.
vi.mock('../tools/memoryTool', () => ({
  MemoryTool: vi.fn(),
  setGeminiMdFilename: vi.fn(),
  getCurrentGeminiMdFilename: vi.fn(() => 'QWEN.md'), // Mock the original filename
  DEFAULT_CONTEXT_FILENAME: 'QWEN.md',
  GEMINI_CONFIG_DIR: '.gemini',
}));
|
| 80 |
+
|
| 81 |
+
// Keep the real module but stub the config factory so refreshAuth tests can
// control its return value.
vi.mock('../core/contentGenerator.js', async (importOriginal) => {
  const actual =
    await importOriginal<typeof import('../core/contentGenerator.js')>();
  return {
    ...actual,
    createContentGeneratorConfig: vi.fn(),
  };
});

// Minimal GeminiClient stub: construction succeeds and initialize resolves.
vi.mock('../core/client.js', () => ({
  GeminiClient: vi.fn().mockImplementation(() => ({
    initialize: vi.fn().mockResolvedValue(undefined),
  })),
}));

// Prevent real telemetry initialization during Config construction.
vi.mock('../telemetry/index.js', async (importOriginal) => {
  const actual = await importOriginal<typeof import('../telemetry/index.js')>();
  return {
    ...actual,
    initializeTelemetry: vi.fn(),
  };
});

// GitService stub whose initialize() the checkpointing tests re-mock per test.
vi.mock('../services/gitService.js', () => {
  const GitServiceMock = vi.fn();
  GitServiceMock.prototype.initialize = vi.fn();
  return { GitService: GitServiceMock };
});
|
| 109 |
+
|
| 110 |
+
describe('Server Config (config.ts)', () => {
|
| 111 |
+
const MODEL = 'gemini-pro';
|
| 112 |
+
const SANDBOX: SandboxConfig = {
|
| 113 |
+
command: 'docker',
|
| 114 |
+
image: 'qwen-code-sandbox',
|
| 115 |
+
};
|
| 116 |
+
const TARGET_DIR = '/path/to/target';
|
| 117 |
+
const DEBUG_MODE = false;
|
| 118 |
+
const QUESTION = 'test question';
|
| 119 |
+
const FULL_CONTEXT = false;
|
| 120 |
+
const USER_MEMORY = 'Test User Memory';
|
| 121 |
+
const TELEMETRY_SETTINGS = { enabled: false };
|
| 122 |
+
const EMBEDDING_MODEL = 'gemini-embedding';
|
| 123 |
+
const SESSION_ID = 'test-session-id';
|
| 124 |
+
const baseParams: ConfigParameters = {
|
| 125 |
+
cwd: '/tmp',
|
| 126 |
+
embeddingModel: EMBEDDING_MODEL,
|
| 127 |
+
sandbox: SANDBOX,
|
| 128 |
+
targetDir: TARGET_DIR,
|
| 129 |
+
debugMode: DEBUG_MODE,
|
| 130 |
+
question: QUESTION,
|
| 131 |
+
fullContext: FULL_CONTEXT,
|
| 132 |
+
userMemory: USER_MEMORY,
|
| 133 |
+
telemetry: TELEMETRY_SETTINGS,
|
| 134 |
+
sessionId: SESSION_ID,
|
| 135 |
+
model: MODEL,
|
| 136 |
+
usageStatisticsEnabled: false,
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
beforeEach(() => {
|
| 140 |
+
// Reset mocks if necessary
|
| 141 |
+
vi.clearAllMocks();
|
| 142 |
+
});
|
| 143 |
+
|
| 144 |
+
describe('initialize', () => {
|
| 145 |
+
it('should throw an error if checkpointing is enabled and GitService fails', async () => {
|
| 146 |
+
const gitError = new Error('Git is not installed');
|
| 147 |
+
(GitService.prototype.initialize as Mock).mockRejectedValue(gitError);
|
| 148 |
+
|
| 149 |
+
const config = new Config({
|
| 150 |
+
...baseParams,
|
| 151 |
+
checkpointing: true,
|
| 152 |
+
});
|
| 153 |
+
|
| 154 |
+
await expect(config.initialize()).rejects.toThrow(gitError);
|
| 155 |
+
});
|
| 156 |
+
|
| 157 |
+
it('should not throw an error if checkpointing is disabled and GitService fails', async () => {
|
| 158 |
+
const gitError = new Error('Git is not installed');
|
| 159 |
+
(GitService.prototype.initialize as Mock).mockRejectedValue(gitError);
|
| 160 |
+
|
| 161 |
+
const config = new Config({
|
| 162 |
+
...baseParams,
|
| 163 |
+
checkpointing: false,
|
| 164 |
+
});
|
| 165 |
+
|
| 166 |
+
await expect(config.initialize()).resolves.toBeUndefined();
|
| 167 |
+
});
|
| 168 |
+
|
| 169 |
+
it('should throw an error if initialized more than once', async () => {
|
| 170 |
+
const config = new Config({
|
| 171 |
+
...baseParams,
|
| 172 |
+
checkpointing: false,
|
| 173 |
+
});
|
| 174 |
+
|
| 175 |
+
await expect(config.initialize()).resolves.toBeUndefined();
|
| 176 |
+
await expect(config.initialize()).rejects.toThrow(
|
| 177 |
+
'Config was already initialized',
|
| 178 |
+
);
|
| 179 |
+
});
|
| 180 |
+
});
|
| 181 |
+
|
| 182 |
+
describe('refreshAuth', () => {
|
| 183 |
+
it('should refresh auth and update config', async () => {
|
| 184 |
+
const config = new Config(baseParams);
|
| 185 |
+
const authType = AuthType.USE_GEMINI;
|
| 186 |
+
const newModel = 'gemini-flash';
|
| 187 |
+
const mockContentConfig = {
|
| 188 |
+
model: newModel,
|
| 189 |
+
apiKey: 'test-key',
|
| 190 |
+
};
|
| 191 |
+
|
| 192 |
+
(createContentGeneratorConfig as Mock).mockReturnValue(mockContentConfig);
|
| 193 |
+
|
| 194 |
+
// Set fallback mode to true to ensure it gets reset
|
| 195 |
+
config.setFallbackMode(true);
|
| 196 |
+
expect(config.isInFallbackMode()).toBe(true);
|
| 197 |
+
|
| 198 |
+
await config.refreshAuth(authType);
|
| 199 |
+
|
| 200 |
+
expect(createContentGeneratorConfig).toHaveBeenCalledWith(
|
| 201 |
+
config,
|
| 202 |
+
authType,
|
| 203 |
+
);
|
| 204 |
+
// Verify that contentGeneratorConfig is updated with the new model
|
| 205 |
+
expect(config.getContentGeneratorConfig()).toEqual(mockContentConfig);
|
| 206 |
+
expect(config.getContentGeneratorConfig().model).toBe(newModel);
|
| 207 |
+
expect(config.getModel()).toBe(newModel); // getModel() should return the updated model
|
| 208 |
+
expect(GeminiClient).toHaveBeenCalledWith(config);
|
| 209 |
+
// Verify that fallback mode is reset
|
| 210 |
+
expect(config.isInFallbackMode()).toBe(false);
|
| 211 |
+
});
|
| 212 |
+
|
| 213 |
+
it('should preserve conversation history when refreshing auth', async () => {
|
| 214 |
+
const config = new Config(baseParams);
|
| 215 |
+
const authType = AuthType.USE_GEMINI;
|
| 216 |
+
const mockContentConfig = {
|
| 217 |
+
model: 'gemini-pro',
|
| 218 |
+
apiKey: 'test-key',
|
| 219 |
+
};
|
| 220 |
+
|
| 221 |
+
(createContentGeneratorConfig as Mock).mockReturnValue(mockContentConfig);
|
| 222 |
+
|
| 223 |
+
// Mock the existing client with some history
|
| 224 |
+
const mockExistingHistory = [
|
| 225 |
+
{ role: 'user', parts: [{ text: 'Hello' }] },
|
| 226 |
+
{ role: 'model', parts: [{ text: 'Hi there!' }] },
|
| 227 |
+
{ role: 'user', parts: [{ text: 'How are you?' }] },
|
| 228 |
+
];
|
| 229 |
+
|
| 230 |
+
const mockExistingClient = {
|
| 231 |
+
isInitialized: vi.fn().mockReturnValue(true),
|
| 232 |
+
getHistory: vi.fn().mockReturnValue(mockExistingHistory),
|
| 233 |
+
};
|
| 234 |
+
|
| 235 |
+
const mockNewClient = {
|
| 236 |
+
isInitialized: vi.fn().mockReturnValue(true),
|
| 237 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 238 |
+
setHistory: vi.fn(),
|
| 239 |
+
initialize: vi.fn().mockResolvedValue(undefined),
|
| 240 |
+
};
|
| 241 |
+
|
| 242 |
+
// Set the existing client
|
| 243 |
+
(
|
| 244 |
+
config as unknown as { geminiClient: typeof mockExistingClient }
|
| 245 |
+
).geminiClient = mockExistingClient;
|
| 246 |
+
(GeminiClient as Mock).mockImplementation(() => mockNewClient);
|
| 247 |
+
|
| 248 |
+
await config.refreshAuth(authType);
|
| 249 |
+
|
| 250 |
+
// Verify that existing history was retrieved
|
| 251 |
+
expect(mockExistingClient.getHistory).toHaveBeenCalled();
|
| 252 |
+
|
| 253 |
+
// Verify that new client was created and initialized
|
| 254 |
+
expect(GeminiClient).toHaveBeenCalledWith(config);
|
| 255 |
+
expect(mockNewClient.initialize).toHaveBeenCalledWith(mockContentConfig);
|
| 256 |
+
|
| 257 |
+
// Verify that history was restored to the new client
|
| 258 |
+
expect(mockNewClient.setHistory).toHaveBeenCalledWith(
|
| 259 |
+
mockExistingHistory,
|
| 260 |
+
{ stripThoughts: false },
|
| 261 |
+
);
|
| 262 |
+
});
|
| 263 |
+
|
| 264 |
+
it('should handle case when no existing client is initialized', async () => {
|
| 265 |
+
const config = new Config(baseParams);
|
| 266 |
+
const authType = AuthType.USE_GEMINI;
|
| 267 |
+
const mockContentConfig = {
|
| 268 |
+
model: 'gemini-pro',
|
| 269 |
+
apiKey: 'test-key',
|
| 270 |
+
};
|
| 271 |
+
|
| 272 |
+
(createContentGeneratorConfig as Mock).mockReturnValue(mockContentConfig);
|
| 273 |
+
|
| 274 |
+
const mockNewClient = {
|
| 275 |
+
isInitialized: vi.fn().mockReturnValue(true),
|
| 276 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 277 |
+
setHistory: vi.fn(),
|
| 278 |
+
initialize: vi.fn().mockResolvedValue(undefined),
|
| 279 |
+
};
|
| 280 |
+
|
| 281 |
+
// No existing client
|
| 282 |
+
(config as unknown as { geminiClient: null }).geminiClient = null;
|
| 283 |
+
(GeminiClient as Mock).mockImplementation(() => mockNewClient);
|
| 284 |
+
|
| 285 |
+
await config.refreshAuth(authType);
|
| 286 |
+
|
| 287 |
+
// Verify that new client was created and initialized
|
| 288 |
+
expect(GeminiClient).toHaveBeenCalledWith(config);
|
| 289 |
+
expect(mockNewClient.initialize).toHaveBeenCalledWith(mockContentConfig);
|
| 290 |
+
|
| 291 |
+
// Verify that setHistory was not called since there was no existing history
|
| 292 |
+
expect(mockNewClient.setHistory).not.toHaveBeenCalled();
|
| 293 |
+
});
|
| 294 |
+
|
| 295 |
+
it('should strip thoughts when switching from GenAI to Vertex', async () => {
|
| 296 |
+
const config = new Config(baseParams);
|
| 297 |
+
const mockContentConfig = {
|
| 298 |
+
model: 'gemini-pro',
|
| 299 |
+
apiKey: 'test-key',
|
| 300 |
+
authType: AuthType.USE_GEMINI,
|
| 301 |
+
};
|
| 302 |
+
(
|
| 303 |
+
config as unknown as { contentGeneratorConfig: ContentGeneratorConfig }
|
| 304 |
+
).contentGeneratorConfig = mockContentConfig;
|
| 305 |
+
|
| 306 |
+
(createContentGeneratorConfig as Mock).mockReturnValue({
|
| 307 |
+
...mockContentConfig,
|
| 308 |
+
authType: AuthType.LOGIN_WITH_GOOGLE,
|
| 309 |
+
});
|
| 310 |
+
|
| 311 |
+
const mockExistingHistory = [
|
| 312 |
+
{ role: 'user', parts: [{ text: 'Hello' }] },
|
| 313 |
+
];
|
| 314 |
+
const mockExistingClient = {
|
| 315 |
+
isInitialized: vi.fn().mockReturnValue(true),
|
| 316 |
+
getHistory: vi.fn().mockReturnValue(mockExistingHistory),
|
| 317 |
+
};
|
| 318 |
+
const mockNewClient = {
|
| 319 |
+
isInitialized: vi.fn().mockReturnValue(true),
|
| 320 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 321 |
+
setHistory: vi.fn(),
|
| 322 |
+
initialize: vi.fn().mockResolvedValue(undefined),
|
| 323 |
+
};
|
| 324 |
+
|
| 325 |
+
(
|
| 326 |
+
config as unknown as { geminiClient: typeof mockExistingClient }
|
| 327 |
+
).geminiClient = mockExistingClient;
|
| 328 |
+
(GeminiClient as Mock).mockImplementation(() => mockNewClient);
|
| 329 |
+
|
| 330 |
+
await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE);
|
| 331 |
+
|
| 332 |
+
expect(mockNewClient.setHistory).toHaveBeenCalledWith(
|
| 333 |
+
mockExistingHistory,
|
| 334 |
+
{ stripThoughts: true },
|
| 335 |
+
);
|
| 336 |
+
});
|
| 337 |
+
|
| 338 |
+
it('should not strip thoughts when switching from Vertex to GenAI', async () => {
|
| 339 |
+
const config = new Config(baseParams);
|
| 340 |
+
const mockContentConfig = {
|
| 341 |
+
model: 'gemini-pro',
|
| 342 |
+
apiKey: 'test-key',
|
| 343 |
+
authType: AuthType.LOGIN_WITH_GOOGLE,
|
| 344 |
+
};
|
| 345 |
+
(
|
| 346 |
+
config as unknown as { contentGeneratorConfig: ContentGeneratorConfig }
|
| 347 |
+
).contentGeneratorConfig = mockContentConfig;
|
| 348 |
+
|
| 349 |
+
(createContentGeneratorConfig as Mock).mockReturnValue({
|
| 350 |
+
...mockContentConfig,
|
| 351 |
+
authType: AuthType.USE_GEMINI,
|
| 352 |
+
});
|
| 353 |
+
|
| 354 |
+
const mockExistingHistory = [
|
| 355 |
+
{ role: 'user', parts: [{ text: 'Hello' }] },
|
| 356 |
+
];
|
| 357 |
+
const mockExistingClient = {
|
| 358 |
+
isInitialized: vi.fn().mockReturnValue(true),
|
| 359 |
+
getHistory: vi.fn().mockReturnValue(mockExistingHistory),
|
| 360 |
+
};
|
| 361 |
+
const mockNewClient = {
|
| 362 |
+
isInitialized: vi.fn().mockReturnValue(true),
|
| 363 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 364 |
+
setHistory: vi.fn(),
|
| 365 |
+
initialize: vi.fn().mockResolvedValue(undefined),
|
| 366 |
+
};
|
| 367 |
+
|
| 368 |
+
(
|
| 369 |
+
config as unknown as { geminiClient: typeof mockExistingClient }
|
| 370 |
+
).geminiClient = mockExistingClient;
|
| 371 |
+
(GeminiClient as Mock).mockImplementation(() => mockNewClient);
|
| 372 |
+
|
| 373 |
+
await config.refreshAuth(AuthType.USE_GEMINI);
|
| 374 |
+
|
| 375 |
+
expect(mockNewClient.setHistory).toHaveBeenCalledWith(
|
| 376 |
+
mockExistingHistory,
|
| 377 |
+
{ stripThoughts: false },
|
| 378 |
+
);
|
| 379 |
+
});
|
| 380 |
+
});
|
| 381 |
+
|
| 382 |
+
it('Config constructor should store userMemory correctly', () => {
|
| 383 |
+
const config = new Config(baseParams);
|
| 384 |
+
|
| 385 |
+
expect(config.getUserMemory()).toBe(USER_MEMORY);
|
| 386 |
+
// Verify other getters if needed
|
| 387 |
+
expect(config.getTargetDir()).toBe(path.resolve(TARGET_DIR)); // Check resolved path
|
| 388 |
+
});
|
| 389 |
+
|
| 390 |
+
it('Config constructor should default userMemory to empty string if not provided', () => {
|
| 391 |
+
const paramsWithoutMemory: ConfigParameters = { ...baseParams };
|
| 392 |
+
delete paramsWithoutMemory.userMemory;
|
| 393 |
+
const config = new Config(paramsWithoutMemory);
|
| 394 |
+
|
| 395 |
+
expect(config.getUserMemory()).toBe('');
|
| 396 |
+
});
|
| 397 |
+
|
| 398 |
+
it('Config constructor should call setGeminiMdFilename with contextFileName if provided', () => {
|
| 399 |
+
const contextFileName = 'CUSTOM_AGENTS.md';
|
| 400 |
+
const paramsWithContextFile: ConfigParameters = {
|
| 401 |
+
...baseParams,
|
| 402 |
+
contextFileName,
|
| 403 |
+
};
|
| 404 |
+
new Config(paramsWithContextFile);
|
| 405 |
+
expect(mockSetGeminiMdFilename).toHaveBeenCalledWith(contextFileName);
|
| 406 |
+
});
|
| 407 |
+
|
| 408 |
+
it('Config constructor should not call setGeminiMdFilename if contextFileName is not provided', () => {
|
| 409 |
+
new Config(baseParams); // baseParams does not have contextFileName
|
| 410 |
+
expect(mockSetGeminiMdFilename).not.toHaveBeenCalled();
|
| 411 |
+
});
|
| 412 |
+
|
| 413 |
+
it('should set default file filtering settings when not provided', () => {
|
| 414 |
+
const config = new Config(baseParams);
|
| 415 |
+
expect(config.getFileFilteringRespectGitIgnore()).toBe(true);
|
| 416 |
+
});
|
| 417 |
+
|
| 418 |
+
it('should set custom file filtering settings when provided', () => {
|
| 419 |
+
const paramsWithFileFiltering: ConfigParameters = {
|
| 420 |
+
...baseParams,
|
| 421 |
+
fileFiltering: {
|
| 422 |
+
respectGitIgnore: false,
|
| 423 |
+
},
|
| 424 |
+
};
|
| 425 |
+
const config = new Config(paramsWithFileFiltering);
|
| 426 |
+
expect(config.getFileFilteringRespectGitIgnore()).toBe(false);
|
| 427 |
+
});
|
| 428 |
+
|
| 429 |
+
it('should initialize WorkspaceContext with includeDirectories', () => {
|
| 430 |
+
const includeDirectories = ['/path/to/dir1', '/path/to/dir2'];
|
| 431 |
+
const paramsWithIncludeDirs: ConfigParameters = {
|
| 432 |
+
...baseParams,
|
| 433 |
+
includeDirectories,
|
| 434 |
+
};
|
| 435 |
+
const config = new Config(paramsWithIncludeDirs);
|
| 436 |
+
const workspaceContext = config.getWorkspaceContext();
|
| 437 |
+
const directories = workspaceContext.getDirectories();
|
| 438 |
+
|
| 439 |
+
// Should include the target directory plus the included directories
|
| 440 |
+
expect(directories).toHaveLength(3);
|
| 441 |
+
expect(directories).toContain(path.resolve(baseParams.targetDir));
|
| 442 |
+
expect(directories).toContain('/path/to/dir1');
|
| 443 |
+
expect(directories).toContain('/path/to/dir2');
|
| 444 |
+
});
|
| 445 |
+
|
| 446 |
+
it('Config constructor should set telemetry to true when provided as true', () => {
|
| 447 |
+
const paramsWithTelemetry: ConfigParameters = {
|
| 448 |
+
...baseParams,
|
| 449 |
+
telemetry: { enabled: true },
|
| 450 |
+
};
|
| 451 |
+
const config = new Config(paramsWithTelemetry);
|
| 452 |
+
expect(config.getTelemetryEnabled()).toBe(true);
|
| 453 |
+
});
|
| 454 |
+
|
| 455 |
+
it('Config constructor should set telemetry to false when provided as false', () => {
|
| 456 |
+
const paramsWithTelemetry: ConfigParameters = {
|
| 457 |
+
...baseParams,
|
| 458 |
+
telemetry: { enabled: false },
|
| 459 |
+
};
|
| 460 |
+
const config = new Config(paramsWithTelemetry);
|
| 461 |
+
expect(config.getTelemetryEnabled()).toBe(false);
|
| 462 |
+
});
|
| 463 |
+
|
| 464 |
+
it('Config constructor should default telemetry to default value if not provided', () => {
|
| 465 |
+
const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
|
| 466 |
+
delete paramsWithoutTelemetry.telemetry;
|
| 467 |
+
const config = new Config(paramsWithoutTelemetry);
|
| 468 |
+
expect(config.getTelemetryEnabled()).toBe(TELEMETRY_SETTINGS.enabled);
|
| 469 |
+
});
|
| 470 |
+
|
| 471 |
+
it('should have a getFileService method that returns FileDiscoveryService', () => {
|
| 472 |
+
const config = new Config(baseParams);
|
| 473 |
+
const fileService = config.getFileService();
|
| 474 |
+
expect(fileService).toBeDefined();
|
| 475 |
+
});
|
| 476 |
+
|
| 477 |
+
describe('Usage Statistics', () => {
|
| 478 |
+
it('defaults usage statistics to enabled if not specified', () => {
|
| 479 |
+
const config = new Config({
|
| 480 |
+
...baseParams,
|
| 481 |
+
usageStatisticsEnabled: undefined,
|
| 482 |
+
});
|
| 483 |
+
|
| 484 |
+
expect(config.getUsageStatisticsEnabled()).toBe(true);
|
| 485 |
+
});
|
| 486 |
+
|
| 487 |
+
it.each([{ enabled: true }, { enabled: false }])(
|
| 488 |
+
'sets usage statistics based on the provided value (enabled: $enabled)',
|
| 489 |
+
({ enabled }) => {
|
| 490 |
+
const config = new Config({
|
| 491 |
+
...baseParams,
|
| 492 |
+
usageStatisticsEnabled: enabled,
|
| 493 |
+
});
|
| 494 |
+
expect(config.getUsageStatisticsEnabled()).toBe(enabled);
|
| 495 |
+
},
|
| 496 |
+
);
|
| 497 |
+
});
|
| 498 |
+
|
| 499 |
+
describe('Telemetry Settings', () => {
|
| 500 |
+
it('should return default telemetry target if not provided', () => {
|
| 501 |
+
const params: ConfigParameters = {
|
| 502 |
+
...baseParams,
|
| 503 |
+
telemetry: { enabled: true },
|
| 504 |
+
};
|
| 505 |
+
const config = new Config(params);
|
| 506 |
+
expect(config.getTelemetryTarget()).toBe(DEFAULT_TELEMETRY_TARGET);
|
| 507 |
+
});
|
| 508 |
+
|
| 509 |
+
it('should return provided OTLP endpoint', () => {
|
| 510 |
+
const endpoint = 'http://custom.otel.collector:4317';
|
| 511 |
+
const params: ConfigParameters = {
|
| 512 |
+
...baseParams,
|
| 513 |
+
telemetry: { enabled: true, otlpEndpoint: endpoint },
|
| 514 |
+
};
|
| 515 |
+
const config = new Config(params);
|
| 516 |
+
expect(config.getTelemetryOtlpEndpoint()).toBe(endpoint);
|
| 517 |
+
});
|
| 518 |
+
|
| 519 |
+
it('should return default OTLP endpoint if not provided', () => {
|
| 520 |
+
const params: ConfigParameters = {
|
| 521 |
+
...baseParams,
|
| 522 |
+
telemetry: { enabled: true },
|
| 523 |
+
};
|
| 524 |
+
const config = new Config(params);
|
| 525 |
+
expect(config.getTelemetryOtlpEndpoint()).toBe(DEFAULT_OTLP_ENDPOINT);
|
| 526 |
+
});
|
| 527 |
+
|
| 528 |
+
it('should return provided logPrompts setting', () => {
|
| 529 |
+
const params: ConfigParameters = {
|
| 530 |
+
...baseParams,
|
| 531 |
+
telemetry: { enabled: true, logPrompts: false },
|
| 532 |
+
};
|
| 533 |
+
const config = new Config(params);
|
| 534 |
+
expect(config.getTelemetryLogPromptsEnabled()).toBe(false);
|
| 535 |
+
});
|
| 536 |
+
|
| 537 |
+
it('should return default logPrompts setting (true) if not provided', () => {
|
| 538 |
+
const params: ConfigParameters = {
|
| 539 |
+
...baseParams,
|
| 540 |
+
telemetry: { enabled: true },
|
| 541 |
+
};
|
| 542 |
+
const config = new Config(params);
|
| 543 |
+
expect(config.getTelemetryLogPromptsEnabled()).toBe(true);
|
| 544 |
+
});
|
| 545 |
+
|
| 546 |
+
it('should return default logPrompts setting (true) if telemetry object is not provided', () => {
|
| 547 |
+
const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
|
| 548 |
+
delete paramsWithoutTelemetry.telemetry;
|
| 549 |
+
const config = new Config(paramsWithoutTelemetry);
|
| 550 |
+
expect(config.getTelemetryLogPromptsEnabled()).toBe(true);
|
| 551 |
+
});
|
| 552 |
+
|
| 553 |
+
it('should return default telemetry target if telemetry object is not provided', () => {
|
| 554 |
+
const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
|
| 555 |
+
delete paramsWithoutTelemetry.telemetry;
|
| 556 |
+
const config = new Config(paramsWithoutTelemetry);
|
| 557 |
+
expect(config.getTelemetryTarget()).toBe(DEFAULT_TELEMETRY_TARGET);
|
| 558 |
+
});
|
| 559 |
+
|
| 560 |
+
it('should return default OTLP endpoint if telemetry object is not provided', () => {
|
| 561 |
+
const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
|
| 562 |
+
delete paramsWithoutTelemetry.telemetry;
|
| 563 |
+
const config = new Config(paramsWithoutTelemetry);
|
| 564 |
+
expect(config.getTelemetryOtlpEndpoint()).toBe(DEFAULT_OTLP_ENDPOINT);
|
| 565 |
+
});
|
| 566 |
+
|
| 567 |
+
it('should return provided OTLP protocol', () => {
|
| 568 |
+
const params: ConfigParameters = {
|
| 569 |
+
...baseParams,
|
| 570 |
+
telemetry: { enabled: true, otlpProtocol: 'http' },
|
| 571 |
+
};
|
| 572 |
+
const config = new Config(params);
|
| 573 |
+
expect(config.getTelemetryOtlpProtocol()).toBe('http');
|
| 574 |
+
});
|
| 575 |
+
|
| 576 |
+
it('should return default OTLP protocol if not provided', () => {
|
| 577 |
+
const params: ConfigParameters = {
|
| 578 |
+
...baseParams,
|
| 579 |
+
telemetry: { enabled: true },
|
| 580 |
+
};
|
| 581 |
+
const config = new Config(params);
|
| 582 |
+
expect(config.getTelemetryOtlpProtocol()).toBe('grpc');
|
| 583 |
+
});
|
| 584 |
+
|
| 585 |
+
it('should return default OTLP protocol if telemetry object is not provided', () => {
|
| 586 |
+
const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
|
| 587 |
+
delete paramsWithoutTelemetry.telemetry;
|
| 588 |
+
const config = new Config(paramsWithoutTelemetry);
|
| 589 |
+
expect(config.getTelemetryOtlpProtocol()).toBe('grpc');
|
| 590 |
+
});
|
| 591 |
+
});
|
| 592 |
+
});
|
projects/ui/qwen-code/packages/core/src/config/config.ts
ADDED
|
@@ -0,0 +1,919 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import * as path from 'node:path';
|
| 8 |
+
import process from 'node:process';
|
| 9 |
+
import {
|
| 10 |
+
AuthType,
|
| 11 |
+
ContentGeneratorConfig,
|
| 12 |
+
createContentGeneratorConfig,
|
| 13 |
+
} from '../core/contentGenerator.js';
|
| 14 |
+
import { PromptRegistry } from '../prompts/prompt-registry.js';
|
| 15 |
+
import { ToolRegistry } from '../tools/tool-registry.js';
|
| 16 |
+
import { LSTool } from '../tools/ls.js';
|
| 17 |
+
import { ReadFileTool } from '../tools/read-file.js';
|
| 18 |
+
import { GrepTool } from '../tools/grep.js';
|
| 19 |
+
import { GlobTool } from '../tools/glob.js';
|
| 20 |
+
import { EditTool } from '../tools/edit.js';
|
| 21 |
+
import { ShellTool } from '../tools/shell.js';
|
| 22 |
+
import { WriteFileTool } from '../tools/write-file.js';
|
| 23 |
+
import { WebFetchTool } from '../tools/web-fetch.js';
|
| 24 |
+
import { ReadManyFilesTool } from '../tools/read-many-files.js';
|
| 25 |
+
import {
|
| 26 |
+
MemoryTool,
|
| 27 |
+
setGeminiMdFilename,
|
| 28 |
+
GEMINI_CONFIG_DIR as GEMINI_DIR,
|
| 29 |
+
} from '../tools/memoryTool.js';
|
| 30 |
+
import { TodoWriteTool } from '../tools/todoWrite.js';
|
| 31 |
+
import { WebSearchTool } from '../tools/web-search.js';
|
| 32 |
+
import { GeminiClient } from '../core/client.js';
|
| 33 |
+
import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
|
| 34 |
+
import { GitService } from '../services/gitService.js';
|
| 35 |
+
import { getProjectTempDir } from '../utils/paths.js';
|
| 36 |
+
import {
|
| 37 |
+
initializeTelemetry,
|
| 38 |
+
DEFAULT_TELEMETRY_TARGET,
|
| 39 |
+
DEFAULT_OTLP_ENDPOINT,
|
| 40 |
+
TelemetryTarget,
|
| 41 |
+
StartSessionEvent,
|
| 42 |
+
} from '../telemetry/index.js';
|
| 43 |
+
import {
|
| 44 |
+
DEFAULT_GEMINI_EMBEDDING_MODEL,
|
| 45 |
+
DEFAULT_GEMINI_FLASH_MODEL,
|
| 46 |
+
} from './models.js';
|
| 47 |
+
import { shouldAttemptBrowserLaunch } from '../utils/browser.js';
|
| 48 |
+
import { MCPOAuthConfig } from '../mcp/oauth-provider.js';
|
| 49 |
+
import { IdeClient } from '../ide/ide-client.js';
|
| 50 |
+
import type { Content } from '@google/genai';
|
| 51 |
+
import {
|
| 52 |
+
FileSystemService,
|
| 53 |
+
StandardFileSystemService,
|
| 54 |
+
} from '../services/fileSystemService.js';
|
| 55 |
+
import { logCliConfiguration, logIdeConnection } from '../telemetry/loggers.js';
|
| 56 |
+
import { IdeConnectionEvent, IdeConnectionType } from '../telemetry/types.js';
|
| 57 |
+
|
| 58 |
+
// Re-export OAuth config type
|
| 59 |
+
export type { MCPOAuthConfig };
|
| 60 |
+
import { WorkspaceContext } from '../utils/workspaceContext.js';
|
| 61 |
+
|
| 62 |
+
export enum ApprovalMode {
|
| 63 |
+
DEFAULT = 'default',
|
| 64 |
+
AUTO_EDIT = 'autoEdit',
|
| 65 |
+
YOLO = 'yolo',
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
export interface AccessibilitySettings {
|
| 69 |
+
disableLoadingPhrases?: boolean;
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
export interface BugCommandSettings {
|
| 73 |
+
urlTemplate: string;
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
export interface ChatCompressionSettings {
|
| 77 |
+
contextPercentageThreshold?: number;
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
export interface SummarizeToolOutputSettings {
|
| 81 |
+
tokenBudget?: number;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
export interface TelemetrySettings {
|
| 85 |
+
enabled?: boolean;
|
| 86 |
+
target?: TelemetryTarget;
|
| 87 |
+
otlpEndpoint?: string;
|
| 88 |
+
otlpProtocol?: 'grpc' | 'http';
|
| 89 |
+
logPrompts?: boolean;
|
| 90 |
+
outfile?: string;
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
export interface GitCoAuthorSettings {
|
| 94 |
+
enabled?: boolean;
|
| 95 |
+
name?: string;
|
| 96 |
+
email?: string;
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
export interface GeminiCLIExtension {
|
| 100 |
+
name: string;
|
| 101 |
+
version: string;
|
| 102 |
+
isActive: boolean;
|
| 103 |
+
path: string;
|
| 104 |
+
}
|
| 105 |
+
export interface FileFilteringOptions {
|
| 106 |
+
respectGitIgnore: boolean;
|
| 107 |
+
respectGeminiIgnore: boolean;
|
| 108 |
+
}
|
| 109 |
+
// For memory files
|
| 110 |
+
export const DEFAULT_MEMORY_FILE_FILTERING_OPTIONS: FileFilteringOptions = {
|
| 111 |
+
respectGitIgnore: false,
|
| 112 |
+
respectGeminiIgnore: true,
|
| 113 |
+
};
|
| 114 |
+
// For all other files
|
| 115 |
+
export const DEFAULT_FILE_FILTERING_OPTIONS: FileFilteringOptions = {
|
| 116 |
+
respectGitIgnore: true,
|
| 117 |
+
respectGeminiIgnore: true,
|
| 118 |
+
};
|
| 119 |
+
export class MCPServerConfig {
|
| 120 |
+
constructor(
|
| 121 |
+
// For stdio transport
|
| 122 |
+
readonly command?: string,
|
| 123 |
+
readonly args?: string[],
|
| 124 |
+
readonly env?: Record<string, string>,
|
| 125 |
+
readonly cwd?: string,
|
| 126 |
+
// For sse transport
|
| 127 |
+
readonly url?: string,
|
| 128 |
+
// For streamable http transport
|
| 129 |
+
readonly httpUrl?: string,
|
| 130 |
+
readonly headers?: Record<string, string>,
|
| 131 |
+
// For websocket transport
|
| 132 |
+
readonly tcp?: string,
|
| 133 |
+
// Common
|
| 134 |
+
readonly timeout?: number,
|
| 135 |
+
readonly trust?: boolean,
|
| 136 |
+
// Metadata
|
| 137 |
+
readonly description?: string,
|
| 138 |
+
readonly includeTools?: string[],
|
| 139 |
+
readonly excludeTools?: string[],
|
| 140 |
+
readonly extensionName?: string,
|
| 141 |
+
// OAuth configuration
|
| 142 |
+
readonly oauth?: MCPOAuthConfig,
|
| 143 |
+
readonly authProviderType?: AuthProviderType,
|
| 144 |
+
) {}
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
export enum AuthProviderType {
|
| 148 |
+
DYNAMIC_DISCOVERY = 'dynamic_discovery',
|
| 149 |
+
GOOGLE_CREDENTIALS = 'google_credentials',
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
export interface SandboxConfig {
|
| 153 |
+
command: 'docker' | 'podman' | 'sandbox-exec';
|
| 154 |
+
image: string;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
export type FlashFallbackHandler = (
|
| 158 |
+
currentModel: string,
|
| 159 |
+
fallbackModel: string,
|
| 160 |
+
error?: unknown,
|
| 161 |
+
) => Promise<boolean | string | null>;
|
| 162 |
+
|
| 163 |
+
export interface ConfigParameters {
|
| 164 |
+
sessionId: string;
|
| 165 |
+
embeddingModel?: string;
|
| 166 |
+
sandbox?: SandboxConfig;
|
| 167 |
+
targetDir: string;
|
| 168 |
+
debugMode: boolean;
|
| 169 |
+
question?: string;
|
| 170 |
+
fullContext?: boolean;
|
| 171 |
+
coreTools?: string[];
|
| 172 |
+
excludeTools?: string[];
|
| 173 |
+
toolDiscoveryCommand?: string;
|
| 174 |
+
toolCallCommand?: string;
|
| 175 |
+
mcpServerCommand?: string;
|
| 176 |
+
mcpServers?: Record<string, MCPServerConfig>;
|
| 177 |
+
userMemory?: string;
|
| 178 |
+
geminiMdFileCount?: number;
|
| 179 |
+
approvalMode?: ApprovalMode;
|
| 180 |
+
showMemoryUsage?: boolean;
|
| 181 |
+
contextFileName?: string | string[];
|
| 182 |
+
accessibility?: AccessibilitySettings;
|
| 183 |
+
telemetry?: TelemetrySettings;
|
| 184 |
+
gitCoAuthor?: GitCoAuthorSettings;
|
| 185 |
+
usageStatisticsEnabled?: boolean;
|
| 186 |
+
fileFiltering?: {
|
| 187 |
+
respectGitIgnore?: boolean;
|
| 188 |
+
respectGeminiIgnore?: boolean;
|
| 189 |
+
enableRecursiveFileSearch?: boolean;
|
| 190 |
+
};
|
| 191 |
+
checkpointing?: boolean;
|
| 192 |
+
proxy?: string;
|
| 193 |
+
cwd: string;
|
| 194 |
+
fileDiscoveryService?: FileDiscoveryService;
|
| 195 |
+
includeDirectories?: string[];
|
| 196 |
+
bugCommand?: BugCommandSettings;
|
| 197 |
+
model: string;
|
| 198 |
+
extensionContextFilePaths?: string[];
|
| 199 |
+
maxSessionTurns?: number;
|
| 200 |
+
sessionTokenLimit?: number;
|
| 201 |
+
experimentalZedIntegration?: boolean;
|
| 202 |
+
listExtensions?: boolean;
|
| 203 |
+
extensions?: GeminiCLIExtension[];
|
| 204 |
+
blockedMcpServers?: Array<{ name: string; extensionName: string }>;
|
| 205 |
+
noBrowser?: boolean;
|
| 206 |
+
summarizeToolOutput?: Record<string, SummarizeToolOutputSettings>;
|
| 207 |
+
folderTrustFeature?: boolean;
|
| 208 |
+
folderTrust?: boolean;
|
| 209 |
+
ideMode?: boolean;
|
| 210 |
+
enableOpenAILogging?: boolean;
|
| 211 |
+
systemPromptMappings?: Array<{
|
| 212 |
+
baseUrls: string[];
|
| 213 |
+
modelNames: string[];
|
| 214 |
+
template: string;
|
| 215 |
+
}>;
|
| 216 |
+
authType?: AuthType;
|
| 217 |
+
contentGenerator?: {
|
| 218 |
+
timeout?: number;
|
| 219 |
+
maxRetries?: number;
|
| 220 |
+
samplingParams?: {
|
| 221 |
+
[key: string]: unknown;
|
| 222 |
+
};
|
| 223 |
+
};
|
| 224 |
+
cliVersion?: string;
|
| 225 |
+
loadMemoryFromIncludeDirectories?: boolean;
|
| 226 |
+
// Web search providers
|
| 227 |
+
tavilyApiKey?: string;
|
| 228 |
+
chatCompression?: ChatCompressionSettings;
|
| 229 |
+
interactive?: boolean;
|
| 230 |
+
trustedFolder?: boolean;
|
| 231 |
+
shouldUseNodePtyShell?: boolean;
|
| 232 |
+
skipNextSpeakerCheck?: boolean;
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
export class Config {
|
| 236 |
+
private toolRegistry!: ToolRegistry;
|
| 237 |
+
private promptRegistry!: PromptRegistry;
|
| 238 |
+
private sessionId: string;
|
| 239 |
+
private fileSystemService: FileSystemService;
|
| 240 |
+
private contentGeneratorConfig!: ContentGeneratorConfig;
|
| 241 |
+
private readonly embeddingModel: string;
|
| 242 |
+
private readonly sandbox: SandboxConfig | undefined;
|
| 243 |
+
private readonly targetDir: string;
|
| 244 |
+
private workspaceContext: WorkspaceContext;
|
| 245 |
+
private readonly debugMode: boolean;
|
| 246 |
+
private readonly question: string | undefined;
|
| 247 |
+
private readonly fullContext: boolean;
|
| 248 |
+
private readonly coreTools: string[] | undefined;
|
| 249 |
+
private readonly excludeTools: string[] | undefined;
|
| 250 |
+
// --- Tool discovery / MCP configuration (fixed at construction) ---
private readonly toolDiscoveryCommand: string | undefined;
private readonly toolCallCommand: string | undefined;
private readonly mcpServerCommand: string | undefined;
private readonly mcpServers: Record<string, MCPServerConfig> | undefined;
// Mutable: refreshed via setUserMemory() / setGeminiMdFileCount() as
// context files are (re)loaded.
private userMemory: string;
private geminiMdFileCount: number;
// Mutable: toggled at runtime via setApprovalMode().
private approvalMode: ApprovalMode;
private readonly showMemoryUsage: boolean;
private readonly accessibility: AccessibilitySettings;
private readonly telemetrySettings: TelemetrySettings;
private readonly gitCoAuthor: GitCoAuthorSettings;
private readonly usageStatisticsEnabled: boolean;
// Definite-assignment assertion: assigned in refreshAuth() before use.
private geminiClient!: GeminiClient;
private readonly fileFiltering: {
  respectGitIgnore: boolean;
  respectGeminiIgnore: boolean;
  enableRecursiveFileSearch: boolean;
};
// Lazily created in getFileService() when not injected via params.
private fileDiscoveryService: FileDiscoveryService | null = null;
// Lazily created and initialized in getGitService().
private gitService: GitService | undefined = undefined;
private readonly checkpointing: boolean;
private readonly proxy: string | undefined;
private readonly cwd: string;
private readonly bugCommand: BugCommandSettings | undefined;
// Initial model; the effective model may differ — see getModel().
private readonly model: string;
private readonly extensionContextFilePaths: string[];
private readonly noBrowser: boolean;
private readonly folderTrustFeature: boolean;
private readonly folderTrust: boolean;
private ideMode: boolean;
private ideClient: IdeClient;
// Set via setFallbackMode(); cleared by refreshAuth().
private inFallbackMode = false;
private readonly systemPromptMappings?: Array<{
  baseUrls?: string[];
  modelNames?: string[];
  template?: string;
}>;
// Both default to -1 in the constructor (presumably "no limit" —
// confirm against the consumers of these getters).
private readonly maxSessionTurns: number;
private readonly sessionTokenLimit: number;
private readonly listExtensions: boolean;
private readonly _extensions: GeminiCLIExtension[];
private readonly _blockedMcpServers: Array<{
  name: string;
  extensionName: string;
}>;
flashFallbackHandler?: FlashFallbackHandler;
private quotaErrorOccurred: boolean = false;
private readonly summarizeToolOutput:
  | Record<string, SummarizeToolOutputSettings>
  | undefined;
// Mutable: updated by refreshAuth() after a successful auth switch.
private authType?: AuthType;
private readonly enableOpenAILogging: boolean;
private readonly contentGenerator?: {
  timeout?: number;
  maxRetries?: number;
  samplingParams?: Record<string, unknown>;
};
private readonly cliVersion?: string;
private readonly experimentalZedIntegration: boolean = false;
private readonly loadMemoryFromIncludeDirectories: boolean = false;
private readonly tavilyApiKey?: string;
private readonly chatCompression: ChatCompressionSettings | undefined;
private readonly interactive: boolean;
private readonly trustedFolder: boolean | undefined;
private readonly shouldUseNodePtyShell: boolean;
private readonly skipNextSpeakerCheck: boolean;
// Guards initialize() against being run twice.
private initialized: boolean = false;
| 318 |
+
/**
 * Builds the Config from CLI/settings parameters, applying defaults with
 * nullish coalescing so explicit falsy values (0, '', false) are kept.
 *
 * Side effects: may override the global context filename, initializes
 * telemetry when enabled, and logs a session-start event at the end.
 */
constructor(params: ConfigParameters) {
  this.sessionId = params.sessionId;
  this.embeddingModel =
    params.embeddingModel ?? DEFAULT_GEMINI_EMBEDDING_MODEL;
  this.fileSystemService = new StandardFileSystemService();
  this.sandbox = params.sandbox;
  // Normalize to an absolute path so later joins/comparisons are stable.
  this.targetDir = path.resolve(params.targetDir);
  this.workspaceContext = new WorkspaceContext(
    this.targetDir,
    params.includeDirectories ?? [],
  );
  this.debugMode = params.debugMode;
  this.question = params.question;
  this.fullContext = params.fullContext ?? false;
  this.coreTools = params.coreTools;
  this.excludeTools = params.excludeTools;
  this.toolDiscoveryCommand = params.toolDiscoveryCommand;
  this.toolCallCommand = params.toolCallCommand;
  this.mcpServerCommand = params.mcpServerCommand;
  this.mcpServers = params.mcpServers;
  this.userMemory = params.userMemory ?? '';
  this.geminiMdFileCount = params.geminiMdFileCount ?? 0;
  this.approvalMode = params.approvalMode ?? ApprovalMode.DEFAULT;
  this.showMemoryUsage = params.showMemoryUsage ?? false;
  this.accessibility = params.accessibility ?? {};
  this.telemetrySettings = {
    enabled: params.telemetry?.enabled ?? false,
    target: params.telemetry?.target ?? DEFAULT_TELEMETRY_TARGET,
    otlpEndpoint: params.telemetry?.otlpEndpoint ?? DEFAULT_OTLP_ENDPOINT,
    otlpProtocol: params.telemetry?.otlpProtocol,
    logPrompts: params.telemetry?.logPrompts ?? true,
    outfile: params.telemetry?.outfile,
  };
  this.gitCoAuthor = {
    enabled: params.gitCoAuthor?.enabled ?? true,
    name: params.gitCoAuthor?.name ?? 'Qwen-Coder',
    email: params.gitCoAuthor?.email ?? 'qwen-coder@alibabacloud.com',
  };
  this.usageStatisticsEnabled = params.usageStatisticsEnabled ?? true;

  this.fileFiltering = {
    respectGitIgnore: params.fileFiltering?.respectGitIgnore ?? true,
    respectGeminiIgnore: params.fileFiltering?.respectGeminiIgnore ?? true,
    enableRecursiveFileSearch:
      params.fileFiltering?.enableRecursiveFileSearch ?? true,
  };
  this.checkpointing = params.checkpointing ?? false;
  this.proxy = params.proxy;
  this.cwd = params.cwd ?? process.cwd();
  this.fileDiscoveryService = params.fileDiscoveryService ?? null;
  this.bugCommand = params.bugCommand;
  this.model = params.model;
  this.extensionContextFilePaths = params.extensionContextFilePaths ?? [];
  this.maxSessionTurns = params.maxSessionTurns ?? -1;
  this.sessionTokenLimit = params.sessionTokenLimit ?? -1;
  this.experimentalZedIntegration =
    params.experimentalZedIntegration ?? false;
  this.listExtensions = params.listExtensions ?? false;
  this._extensions = params.extensions ?? [];
  this._blockedMcpServers = params.blockedMcpServers ?? [];
  this.noBrowser = params.noBrowser ?? false;
  this.summarizeToolOutput = params.summarizeToolOutput;
  this.folderTrustFeature = params.folderTrustFeature ?? false;
  this.folderTrust = params.folderTrust ?? false;
  this.ideMode = params.ideMode ?? false;
  this.ideClient = IdeClient.getInstance();
  this.systemPromptMappings = params.systemPromptMappings;
  this.authType = params.authType;
  this.enableOpenAILogging = params.enableOpenAILogging ?? false;
  this.contentGenerator = params.contentGenerator;
  this.cliVersion = params.cliVersion;

  this.loadMemoryFromIncludeDirectories =
    params.loadMemoryFromIncludeDirectories ?? false;
  this.chatCompression = params.chatCompression;
  this.interactive = params.interactive ?? false;
  this.trustedFolder = params.trustedFolder;
  this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
  this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? false;

  // Web search
  this.tavilyApiKey = params.tavilyApiKey;

  // Process-wide override of the default context (GEMINI.md) filename.
  if (params.contextFileName) {
    setGeminiMdFilename(params.contextFileName);
  }

  if (this.telemetrySettings.enabled) {
    initializeTelemetry(this);
  }

  // Emitted after telemetry setup above so the event can be recorded.
  logCliConfiguration(this, new StartSessionEvent(this));
}
|
| 411 |
+
|
| 412 |
+
/**
|
| 413 |
+
* Must only be called once, throws if called again.
|
| 414 |
+
*/
|
| 415 |
+
async initialize(): Promise<void> {
|
| 416 |
+
if (this.initialized) {
|
| 417 |
+
throw Error('Config was already initialized');
|
| 418 |
+
}
|
| 419 |
+
this.initialized = true;
|
| 420 |
+
// Initialize centralized FileDiscoveryService
|
| 421 |
+
this.getFileService();
|
| 422 |
+
if (this.getCheckpointingEnabled()) {
|
| 423 |
+
await this.getGitService();
|
| 424 |
+
}
|
| 425 |
+
this.promptRegistry = new PromptRegistry();
|
| 426 |
+
this.toolRegistry = await this.createToolRegistry();
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
/**
 * Switches the active auth method: rebuilds the content-generator config
 * and the GeminiClient while carrying over the current conversation
 * history. Ordering matters here — instance fields are only reassigned
 * after the new client initializes successfully.
 */
async refreshAuth(authMethod: AuthType) {
  // Save the current conversation history before creating a new client
  let existingHistory: Content[] = [];
  if (this.geminiClient && this.geminiClient.isInitialized()) {
    existingHistory = this.geminiClient.getHistory();
  }

  // Create new content generator config
  const newContentGeneratorConfig = createContentGeneratorConfig(
    this,
    authMethod,
  );

  // Create and initialize new client in local variable first
  const newGeminiClient = new GeminiClient(this);
  await newGeminiClient.initialize(newContentGeneratorConfig);

  // Vertex and Genai have incompatible encryption and sending history with
  // thoughtSignature from Genai to Vertex will fail, we need to strip them
  const fromGenaiToVertex =
    this.contentGeneratorConfig?.authType === AuthType.USE_GEMINI &&
    authMethod === AuthType.LOGIN_WITH_GOOGLE;

  // Only assign to instance properties after successful initialization
  // (keeps the previous client intact if initialize() above throws).
  this.contentGeneratorConfig = newContentGeneratorConfig;
  this.geminiClient = newGeminiClient;

  // Restore the conversation history to the new client
  if (existingHistory.length > 0) {
    this.geminiClient.setHistory(existingHistory, {
      stripThoughts: fromGenaiToVertex,
    });
  }

  // Reset the session flag since we're explicitly changing auth and using default model
  this.inFallbackMode = false;

  this.authType = authMethod;
}
|
| 468 |
+
|
| 469 |
+
/** Identifier for this CLI session. */
getSessionId(): string {
  return this.sessionId;
}

setSessionId(sessionId: string): void {
  this.sessionId = sessionId;
}

shouldLoadMemoryFromIncludeDirectories(): boolean {
  return this.loadMemoryFromIncludeDirectories;
}

getContentGeneratorConfig(): ContentGeneratorConfig {
  return this.contentGeneratorConfig;
}

/**
 * Effective model: the content-generator's model once auth has been
 * refreshed, otherwise the model supplied at construction.
 * Note: uses `||`, so an empty-string model also falls back.
 */
getModel(): string {
  return this.contentGeneratorConfig?.model || this.model;
}

/** Silently a no-op until refreshAuth() creates contentGeneratorConfig. */
setModel(newModel: string): void {
  if (this.contentGeneratorConfig) {
    this.contentGeneratorConfig.model = newModel;
  }
}

/** True only after an explicit setFallbackMode(true). */
isInFallbackMode(): boolean {
  return this.inFallbackMode;
}

setFallbackMode(active: boolean): void {
  this.inFallbackMode = active;
}

setFlashFallbackHandler(handler: FlashFallbackHandler): void {
  this.flashFallbackHandler = handler;
}

/** Constructor default is -1 (presumably "unlimited" — verify callers). */
getMaxSessionTurns(): number {
  return this.maxSessionTurns;
}

/** Constructor default is -1 (presumably "no limit" — verify callers). */
getSessionTokenLimit(): number {
  return this.sessionTokenLimit;
}

setQuotaErrorOccurred(value: boolean): void {
  this.quotaErrorOccurred = value;
}

getQuotaErrorOccurred(): boolean {
  return this.quotaErrorOccurred;
}

getEmbeddingModel(): string {
  return this.embeddingModel;
}

getSandbox(): SandboxConfig | undefined {
  return this.sandbox;
}
|
| 530 |
+
|
| 531 |
+
/**
 * True when a sandbox is configured to run via `sandbox-exec` and the
 * SEATBELT_PROFILE environment variable names a "restrictive-" profile.
 */
isRestrictiveSandbox(): boolean {
  const sandbox = this.getSandbox();
  // Guard clauses: no sandbox, or not the sandbox-exec flavor.
  if (!sandbox || sandbox.command !== 'sandbox-exec') {
    return false;
  }
  const profile = process.env['SEATBELT_PROFILE'];
  return !!profile && profile.startsWith('restrictive-');
}
|
| 541 |
+
|
| 542 |
+
getTargetDir(): string {
  return this.targetDir;
}

/** Alias: the project root is the (resolved) target directory. */
getProjectRoot(): string {
  return this.targetDir;
}

getWorkspaceContext(): WorkspaceContext {
  return this.workspaceContext;
}

/** Populated by initialize(); undefined behavior before that. */
getToolRegistry(): ToolRegistry {
  return this.toolRegistry;
}

/** Populated by initialize(); undefined behavior before that. */
getPromptRegistry(): PromptRegistry {
  return this.promptRegistry;
}

getDebugMode(): boolean {
  return this.debugMode;
}

getQuestion(): string | undefined {
  return this.question;
}

getFullContext(): boolean {
  return this.fullContext;
}

getCoreTools(): string[] | undefined {
  return this.coreTools;
}

getExcludeTools(): string[] | undefined {
  return this.excludeTools;
}

getToolDiscoveryCommand(): string | undefined {
  return this.toolDiscoveryCommand;
}

getToolCallCommand(): string | undefined {
  return this.toolCallCommand;
}

getMcpServerCommand(): string | undefined {
  return this.mcpServerCommand;
}

getMcpServers(): Record<string, MCPServerConfig> | undefined {
  return this.mcpServers;
}

getUserMemory(): string {
  return this.userMemory;
}

setUserMemory(newUserMemory: string): void {
  this.userMemory = newUserMemory;
}

getGeminiMdFileCount(): number {
  return this.geminiMdFileCount;
}

setGeminiMdFileCount(count: number): void {
  this.geminiMdFileCount = count;
}

getApprovalMode(): ApprovalMode {
  return this.approvalMode;
}

setApprovalMode(mode: ApprovalMode): void {
  this.approvalMode = mode;
}

getShowMemoryUsage(): boolean {
  return this.showMemoryUsage;
}

getAccessibility(): AccessibilitySettings {
  return this.accessibility;
}

// Telemetry getters re-apply the same defaults used in the constructor,
// so they are safe even if the settings object is partially populated.
getTelemetryEnabled(): boolean {
  return this.telemetrySettings.enabled ?? false;
}

getTelemetryLogPromptsEnabled(): boolean {
  return this.telemetrySettings.logPrompts ?? true;
}

getTelemetryOtlpEndpoint(): string {
  return this.telemetrySettings.otlpEndpoint ?? DEFAULT_OTLP_ENDPOINT;
}

getTelemetryOtlpProtocol(): 'grpc' | 'http' {
  return this.telemetrySettings.otlpProtocol ?? 'grpc';
}

getTelemetryTarget(): TelemetryTarget {
  return this.telemetrySettings.target ?? DEFAULT_TELEMETRY_TARGET;
}

getTelemetryOutfile(): string | undefined {
  return this.telemetrySettings.outfile;
}

getGitCoAuthor(): GitCoAuthorSettings {
  return this.gitCoAuthor;
}

/** Assigned by refreshAuth(); unset before the first auth refresh. */
getGeminiClient(): GeminiClient {
  return this.geminiClient;
}

getGeminiDir(): string {
  return path.join(this.targetDir, GEMINI_DIR);
}

getProjectTempDir(): string {
  return getProjectTempDir(this.getProjectRoot());
}

getEnableRecursiveFileSearch(): boolean {
  return this.fileFiltering.enableRecursiveFileSearch;
}

getFileFilteringRespectGitIgnore(): boolean {
  return this.fileFiltering.respectGitIgnore;
}
getFileFilteringRespectGeminiIgnore(): boolean {
  return this.fileFiltering.respectGeminiIgnore;
}

/** Snapshot of the two ignore-file options as a plain object. */
getFileFilteringOptions(): FileFilteringOptions {
  return {
    respectGitIgnore: this.fileFiltering.respectGitIgnore,
    respectGeminiIgnore: this.fileFiltering.respectGeminiIgnore,
  };
}

getCheckpointingEnabled(): boolean {
  return this.checkpointing;
}

getProxy(): string | undefined {
  return this.proxy;
}

getWorkingDir(): string {
  return this.cwd;
}

getBugCommand(): BugCommandSettings | undefined {
  return this.bugCommand;
}
|
| 703 |
+
|
| 704 |
+
/**
 * Returns the shared FileDiscoveryService, lazily constructing it for the
 * target directory on first access when none was injected via params.
 */
getFileService(): FileDiscoveryService {
  this.fileDiscoveryService ??= new FileDiscoveryService(this.targetDir);
  return this.fileDiscoveryService;
}
|
| 710 |
+
|
| 711 |
+
getUsageStatisticsEnabled(): boolean {
  return this.usageStatisticsEnabled;
}

getExtensionContextFilePaths(): string[] {
  return this.extensionContextFilePaths;
}

getExperimentalZedIntegration(): boolean {
  return this.experimentalZedIntegration;
}

getListExtensions(): boolean {
  return this.listExtensions;
}

getExtensions(): GeminiCLIExtension[] {
  return this._extensions;
}

getBlockedMcpServers(): Array<{ name: string; extensionName: string }> {
  return this._blockedMcpServers;
}

getNoBrowser(): boolean {
  return this.noBrowser;
}

/** True when either the user (--no-browser) or the environment forbids it. */
isBrowserLaunchSuppressed(): boolean {
  return this.getNoBrowser() || !shouldAttemptBrowserLaunch();
}

getSummarizeToolOutputConfig():
  | Record<string, SummarizeToolOutputSettings>
  | undefined {
  return this.summarizeToolOutput;
}

// Web search provider configuration
getTavilyApiKey(): string | undefined {
  return this.tavilyApiKey;
}

getIdeClient(): IdeClient {
  return this.ideClient;
}

getIdeMode(): boolean {
  return this.ideMode;
}

getFolderTrustFeature(): boolean {
  return this.folderTrustFeature;
}

getFolderTrust(): boolean {
  return this.folderTrust;
}

/** undefined means "trust not yet determined". */
isTrustedFolder(): boolean | undefined {
  return this.trustedFolder;
}

/** Flag-only setter; use setIdeModeAndSyncConnection() to (dis)connect. */
setIdeMode(value: boolean): void {
  this.ideMode = value;
}

/** Sets the flag and connects/disconnects the IDE client accordingly. */
async setIdeModeAndSyncConnection(value: boolean): Promise<void> {
  this.ideMode = value;
  if (value) {
    await this.ideClient.connect();
    logIdeConnection(this, new IdeConnectionEvent(IdeConnectionType.SESSION));
  } else {
    await this.ideClient.disconnect();
  }
}

getAuthType(): AuthType | undefined {
  return this.authType;
}

getEnableOpenAILogging(): boolean {
  return this.enableOpenAILogging;
}

getContentGeneratorTimeout(): number | undefined {
  return this.contentGenerator?.timeout;
}

getContentGeneratorMaxRetries(): number | undefined {
  return this.contentGenerator?.maxRetries;
}

// Cast narrows the loosely-typed Record<string, unknown> params to the
// shape ContentGeneratorConfig declares.
getContentGeneratorSamplingParams(): ContentGeneratorConfig['samplingParams'] {
  return this.contentGenerator?.samplingParams as
    | ContentGeneratorConfig['samplingParams']
    | undefined;
}

getCliVersion(): string | undefined {
  return this.cliVersion;
}

getSystemPromptMappings():
  | Array<{
      baseUrls?: string[];
      modelNames?: string[];
      template?: string;
    }>
  | undefined {
  return this.systemPromptMappings;
}

/**
 * Get the current FileSystemService
 */
getFileSystemService(): FileSystemService {
  return this.fileSystemService;
}

/**
 * Set a custom FileSystemService
 */
setFileSystemService(fileSystemService: FileSystemService): void {
  this.fileSystemService = fileSystemService;
}

getChatCompression(): ChatCompressionSettings | undefined {
  return this.chatCompression;
}

isInteractive(): boolean {
  return this.interactive;
}

getShouldUseNodePtyShell(): boolean {
  return this.shouldUseNodePtyShell;
}

getSkipNextSpeakerCheck(): boolean {
  return this.skipNextSpeakerCheck;
}

/** Lazily creates and initializes the git service on first call. */
async getGitService(): Promise<GitService> {
  if (!this.gitService) {
    this.gitService = new GitService(this.targetDir);
    await this.gitService.initialize();
  }
  return this.gitService;
}
|
| 861 |
+
|
| 862 |
+
/**
 * Builds the ToolRegistry: registers each built-in tool that survives the
 * coreTools allow-list and excludeTools deny-list, then runs discovery for
 * dynamically-provided tools.
 */
async createToolRegistry(): Promise<ToolRegistry> {
  const registry = new ToolRegistry(this);

  // helper to create & register core tools that are enabled.
  // A tool may be referenced by its class name or its static `Name`,
  // optionally with a "(args)" suffix in the allow-list. excludeTools
  // takes precedence over coreTools.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const registerCoreTool = (ToolClass: any, ...args: unknown[]) => {
    const className = ToolClass.name;
    const toolName = ToolClass.Name || className;
    const coreTools = this.getCoreTools();
    const excludeTools = this.getExcludeTools();

    // No allow-list means every core tool is enabled by default.
    let isEnabled = false;
    if (coreTools === undefined) {
      isEnabled = true;
    } else {
      isEnabled = coreTools.some(
        (tool) =>
          tool === className ||
          tool === toolName ||
          tool.startsWith(`${className}(`) ||
          tool.startsWith(`${toolName}(`),
      );
    }

    if (
      excludeTools?.includes(className) ||
      excludeTools?.includes(toolName)
    ) {
      isEnabled = false;
    }

    if (isEnabled) {
      registry.registerTool(new ToolClass(...args));
    }
  };

  registerCoreTool(LSTool, this);
  registerCoreTool(ReadFileTool, this);
  registerCoreTool(GrepTool, this);
  registerCoreTool(GlobTool, this);
  registerCoreTool(EditTool, this);
  registerCoreTool(WriteFileTool, this);
  registerCoreTool(WebFetchTool, this);
  registerCoreTool(ReadManyFilesTool, this);
  registerCoreTool(ShellTool, this);
  // MemoryTool takes no constructor arguments.
  registerCoreTool(MemoryTool);
  registerCoreTool(TodoWriteTool, this);
  // Conditionally register web search tool only if Tavily API key is set
  if (this.getTavilyApiKey()) {
    registerCoreTool(WebSearchTool, this);
  }

  await registry.discoverAllTools();
  return registry;
}
|
| 917 |
+
}
|
| 918 |
+
// Export model constants for use in CLI
|
| 919 |
+
export { DEFAULT_GEMINI_FLASH_MODEL };
|
projects/ui/qwen-code/packages/core/src/config/flashFallback.test.ts
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, beforeEach, vi } from 'vitest';
import { Config } from './config.js';
import { DEFAULT_GEMINI_MODEL, DEFAULT_GEMINI_FLASH_MODEL } from './models.js';
import fs from 'node:fs';

// Mocked so Config construction never touches the real filesystem.
vi.mock('node:fs');

describe('Flash Model Fallback Configuration', () => {
  let config: Config;

  beforeEach(() => {
    // Pretend the target directory exists and is a directory.
    vi.mocked(fs.existsSync).mockReturnValue(true);
    vi.mocked(fs.statSync).mockReturnValue({
      isDirectory: () => true,
    } as fs.Stats);
    config = new Config({
      sessionId: 'test-session',
      targetDir: '/test',
      debugMode: false,
      cwd: '/test',
      model: DEFAULT_GEMINI_MODEL,
    });

    // Initialize contentGeneratorConfig for testing.
    // It is a private field, so reach in via an unknown-cast.
    (
      config as unknown as { contentGeneratorConfig: unknown }
    ).contentGeneratorConfig = {
      model: DEFAULT_GEMINI_MODEL,
      authType: 'oauth-personal',
    };
  });

  // These tests do not actually test fallback. isInFallbackMode() only returns true
  // when setFallbackMode is marked as true. This is to decouple setting a model
  // from the fallback mechanism. This will be necessary when we introduce more
  // intelligent model routing.
  describe('setModel', () => {
    it('should only mark as switched if contentGeneratorConfig exists', () => {
      // Create config without initializing contentGeneratorConfig
      const newConfig = new Config({
        sessionId: 'test-session-2',
        targetDir: '/test',
        debugMode: false,
        cwd: '/test',
        model: DEFAULT_GEMINI_MODEL,
      });

      // Should not crash when contentGeneratorConfig is undefined
      newConfig.setModel(DEFAULT_GEMINI_FLASH_MODEL);
      expect(newConfig.isInFallbackMode()).toBe(false);
    });
  });

  describe('getModel', () => {
    it('should return contentGeneratorConfig model if available', () => {
      // Simulate initialized content generator config
      config.setModel(DEFAULT_GEMINI_FLASH_MODEL);
      expect(config.getModel()).toBe(DEFAULT_GEMINI_FLASH_MODEL);
    });

    it('should fall back to initial model if contentGeneratorConfig is not available', () => {
      // Test with fresh config where contentGeneratorConfig might not be set
      const newConfig = new Config({
        sessionId: 'test-session-2',
        targetDir: '/test',
        debugMode: false,
        cwd: '/test',
        model: 'custom-model',
      });

      expect(newConfig.getModel()).toBe('custom-model');
    });
  });

  describe('isInFallbackMode', () => {
    it('should start as false for new session', () => {
      expect(config.isInFallbackMode()).toBe(false);
    });

    it('should remain false if no model switch occurs', () => {
      // Perform other operations that don't involve model switching
      expect(config.isInFallbackMode()).toBe(false);
    });

    it('should persist switched state throughout session', () => {
      config.setModel(DEFAULT_GEMINI_FLASH_MODEL);
      // Setting state for fallback mode as is expected of clients
      config.setFallbackMode(true);
      expect(config.isInFallbackMode()).toBe(true);

      // Should remain true even after getting model
      config.getModel();
      expect(config.isInFallbackMode()).toBe(true);
    });
  });
});
|
projects/ui/qwen-code/packages/core/src/config/models.ts
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

export const DEFAULT_QWEN_MODEL = 'qwen3-coder-plus';
// We do not have a fallback model for now, but note it here anyway.
export const DEFAULT_QWEN_FLASH_MODEL = 'qwen3-coder-flash';

// NOTE(review): DEFAULT_GEMINI_MODEL deliberately aliases the Qwen model
// (this is a qwen-code fork) while the flash/embedding constants still name
// Gemini models — confirm this asymmetry is intended upstream.
export const DEFAULT_GEMINI_MODEL = 'qwen3-coder-plus';
export const DEFAULT_GEMINI_FLASH_MODEL = 'gemini-2.5-flash';
export const DEFAULT_GEMINI_FLASH_LITE_MODEL = 'gemini-2.5-flash-lite';

export const DEFAULT_GEMINI_EMBEDDING_MODEL = 'gemini-embedding-001';
|
projects/ui/qwen-code/packages/core/src/core/client.test.ts
ADDED
|
@@ -0,0 +1,2176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
| 8 |
+
|
| 9 |
+
import {
|
| 10 |
+
Chat,
|
| 11 |
+
Content,
|
| 12 |
+
EmbedContentResponse,
|
| 13 |
+
GenerateContentResponse,
|
| 14 |
+
GoogleGenAI,
|
| 15 |
+
} from '@google/genai';
|
| 16 |
+
import { findIndexAfterFraction, GeminiClient } from './client.js';
|
| 17 |
+
import { AuthType, ContentGenerator } from './contentGenerator.js';
|
| 18 |
+
import { GeminiChat } from './geminiChat.js';
|
| 19 |
+
import { Config } from '../config/config.js';
|
| 20 |
+
import { GeminiEventType, Turn } from './turn.js';
|
| 21 |
+
import { getCoreSystemPrompt } from './prompts.js';
|
| 22 |
+
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
|
| 23 |
+
import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
|
| 24 |
+
import { setSimulate429 } from '../utils/testUtils.js';
|
| 25 |
+
import { tokenLimit } from './tokenLimits.js';
|
| 26 |
+
import { ideContext } from '../ide/ideContext.js';
|
| 27 |
+
import { QwenLogger } from '../telemetry/qwen-logger/qwen-logger.js';
|
| 28 |
+
|
| 29 |
+
// --- Mocks ---
|
| 30 |
+
const mockChatCreateFn = vi.fn();
|
| 31 |
+
const mockGenerateContentFn = vi.fn();
|
| 32 |
+
const mockEmbedContentFn = vi.fn();
|
| 33 |
+
const mockTurnRunFn = vi.fn();
|
| 34 |
+
|
| 35 |
+
vi.mock('@google/genai');
|
| 36 |
+
vi.mock('./turn', () => {
|
| 37 |
+
// Define a mock class that has the same shape as the real Turn
|
| 38 |
+
class MockTurn {
|
| 39 |
+
pendingToolCalls = [];
|
| 40 |
+
// The run method is a property that holds our mock function
|
| 41 |
+
run = mockTurnRunFn;
|
| 42 |
+
|
| 43 |
+
constructor() {
|
| 44 |
+
// The constructor can be empty or do some mock setup
|
| 45 |
+
}
|
| 46 |
+
}
|
| 47 |
+
// Export the mock class as 'Turn'
|
| 48 |
+
return {
|
| 49 |
+
Turn: MockTurn,
|
| 50 |
+
GeminiEventType: {
|
| 51 |
+
MaxSessionTurns: 'MaxSessionTurns',
|
| 52 |
+
ChatCompressed: 'ChatCompressed',
|
| 53 |
+
Error: 'error',
|
| 54 |
+
Content: 'content',
|
| 55 |
+
},
|
| 56 |
+
};
|
| 57 |
+
});
|
| 58 |
+
|
| 59 |
+
vi.mock('../config/config.js');
|
| 60 |
+
vi.mock('./prompts');
|
| 61 |
+
vi.mock('../utils/getFolderStructure', () => ({
|
| 62 |
+
getFolderStructure: vi.fn().mockResolvedValue('Mock Folder Structure'),
|
| 63 |
+
}));
|
| 64 |
+
vi.mock('../utils/errorReporting', () => ({ reportError: vi.fn() }));
|
| 65 |
+
vi.mock('../utils/nextSpeakerChecker', () => ({
|
| 66 |
+
checkNextSpeaker: vi.fn().mockResolvedValue(null),
|
| 67 |
+
}));
|
| 68 |
+
vi.mock('../utils/generateContentResponseUtilities', () => ({
|
| 69 |
+
getResponseText: (result: GenerateContentResponse) =>
|
| 70 |
+
result.candidates?.[0]?.content?.parts?.map((part) => part.text).join('') ||
|
| 71 |
+
undefined,
|
| 72 |
+
getFunctionCalls: (result: GenerateContentResponse) => {
|
| 73 |
+
// Extract function calls from the response
|
| 74 |
+
const parts = result.candidates?.[0]?.content?.parts;
|
| 75 |
+
if (!parts) {
|
| 76 |
+
return undefined;
|
| 77 |
+
}
|
| 78 |
+
const functionCallParts = parts
|
| 79 |
+
.filter((part) => !!part.functionCall)
|
| 80 |
+
.map((part) => part.functionCall);
|
| 81 |
+
return functionCallParts.length > 0 ? functionCallParts : undefined;
|
| 82 |
+
},
|
| 83 |
+
}));
|
| 84 |
+
vi.mock('../telemetry/index.js', () => ({
|
| 85 |
+
logApiRequest: vi.fn(),
|
| 86 |
+
logApiResponse: vi.fn(),
|
| 87 |
+
logApiError: vi.fn(),
|
| 88 |
+
}));
|
| 89 |
+
vi.mock('../ide/ideContext.js');
|
| 90 |
+
|
| 91 |
+
describe('findIndexAfterFraction', () => {
|
| 92 |
+
const history: Content[] = [
|
| 93 |
+
{ role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66
|
| 94 |
+
{ role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68
|
| 95 |
+
{ role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66
|
| 96 |
+
{ role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68
|
| 97 |
+
{ role: 'user', parts: [{ text: 'This is the fifth message.' }] }, // JSON length: 65
|
| 98 |
+
];
|
| 99 |
+
// Total length: 333
|
| 100 |
+
|
| 101 |
+
it('should throw an error for non-positive numbers', () => {
|
| 102 |
+
expect(() => findIndexAfterFraction(history, 0)).toThrow(
|
| 103 |
+
'Fraction must be between 0 and 1',
|
| 104 |
+
);
|
| 105 |
+
});
|
| 106 |
+
|
| 107 |
+
it('should throw an error for a fraction greater than or equal to 1', () => {
|
| 108 |
+
expect(() => findIndexAfterFraction(history, 1)).toThrow(
|
| 109 |
+
'Fraction must be between 0 and 1',
|
| 110 |
+
);
|
| 111 |
+
});
|
| 112 |
+
|
| 113 |
+
it('should handle a fraction in the middle', () => {
|
| 114 |
+
// 333 * 0.5 = 166.5
|
| 115 |
+
// 0: 66
|
| 116 |
+
// 1: 66 + 68 = 134
|
| 117 |
+
// 2: 134 + 66 = 200
|
| 118 |
+
// 200 >= 166.5, so index is 2
|
| 119 |
+
expect(findIndexAfterFraction(history, 0.5)).toBe(2);
|
| 120 |
+
});
|
| 121 |
+
|
| 122 |
+
it('should handle a fraction that results in the last index', () => {
|
| 123 |
+
// 333 * 0.9 = 299.7
|
| 124 |
+
// ...
|
| 125 |
+
// 3: 200 + 68 = 268
|
| 126 |
+
// 4: 268 + 65 = 333
|
| 127 |
+
// 333 >= 299.7, so index is 4
|
| 128 |
+
expect(findIndexAfterFraction(history, 0.9)).toBe(4);
|
| 129 |
+
});
|
| 130 |
+
|
| 131 |
+
it('should handle an empty history', () => {
|
| 132 |
+
expect(findIndexAfterFraction([], 0.5)).toBe(0);
|
| 133 |
+
});
|
| 134 |
+
|
| 135 |
+
it('should handle a history with only one item', () => {
|
| 136 |
+
expect(findIndexAfterFraction(history.slice(0, 1), 0.5)).toBe(0);
|
| 137 |
+
});
|
| 138 |
+
|
| 139 |
+
it('should handle history with weird parts', () => {
|
| 140 |
+
const historyWithEmptyParts: Content[] = [
|
| 141 |
+
{ role: 'user', parts: [{ text: 'Message 1' }] },
|
| 142 |
+
{ role: 'model', parts: [{ fileData: { fileUri: 'derp' } }] },
|
| 143 |
+
{ role: 'user', parts: [{ text: 'Message 2' }] },
|
| 144 |
+
];
|
| 145 |
+
expect(findIndexAfterFraction(historyWithEmptyParts, 0.5)).toBe(1);
|
| 146 |
+
});
|
| 147 |
+
});
|
| 148 |
+
|
| 149 |
+
describe('Gemini Client (client.ts)', () => {
|
| 150 |
+
let client: GeminiClient;
|
| 151 |
+
beforeEach(async () => {
|
| 152 |
+
vi.resetAllMocks();
|
| 153 |
+
|
| 154 |
+
// Disable 429 simulation for tests
|
| 155 |
+
setSimulate429(false);
|
| 156 |
+
|
| 157 |
+
// Set up the mock for GoogleGenAI constructor and its methods
|
| 158 |
+
const MockedGoogleGenAI = vi.mocked(GoogleGenAI);
|
| 159 |
+
MockedGoogleGenAI.mockImplementation(() => {
|
| 160 |
+
const mock = {
|
| 161 |
+
chats: { create: mockChatCreateFn },
|
| 162 |
+
models: {
|
| 163 |
+
generateContent: mockGenerateContentFn,
|
| 164 |
+
embedContent: mockEmbedContentFn,
|
| 165 |
+
},
|
| 166 |
+
};
|
| 167 |
+
return mock as unknown as GoogleGenAI;
|
| 168 |
+
});
|
| 169 |
+
|
| 170 |
+
mockChatCreateFn.mockResolvedValue({} as Chat);
|
| 171 |
+
mockGenerateContentFn.mockResolvedValue({
|
| 172 |
+
candidates: [
|
| 173 |
+
{
|
| 174 |
+
content: {
|
| 175 |
+
parts: [
|
| 176 |
+
{
|
| 177 |
+
functionCall: {
|
| 178 |
+
name: 'respond_in_schema',
|
| 179 |
+
args: { key: 'value' },
|
| 180 |
+
},
|
| 181 |
+
},
|
| 182 |
+
],
|
| 183 |
+
},
|
| 184 |
+
},
|
| 185 |
+
],
|
| 186 |
+
} as unknown as GenerateContentResponse);
|
| 187 |
+
|
| 188 |
+
// Because the GeminiClient constructor kicks off an async process (startChat)
|
| 189 |
+
// that depends on a fully-formed Config object, we need to mock the
|
| 190 |
+
// entire implementation of Config for these tests.
|
| 191 |
+
const mockToolRegistry = {
|
| 192 |
+
getFunctionDeclarations: vi.fn().mockReturnValue([]),
|
| 193 |
+
getTool: vi.fn().mockReturnValue(null),
|
| 194 |
+
};
|
| 195 |
+
const fileService = new FileDiscoveryService('/test/dir');
|
| 196 |
+
const contentGeneratorConfig = {
|
| 197 |
+
model: 'test-model',
|
| 198 |
+
apiKey: 'test-key',
|
| 199 |
+
vertexai: false,
|
| 200 |
+
authType: AuthType.USE_GEMINI,
|
| 201 |
+
};
|
| 202 |
+
const mockConfigObject = {
|
| 203 |
+
getContentGeneratorConfig: vi
|
| 204 |
+
.fn()
|
| 205 |
+
.mockReturnValue(contentGeneratorConfig),
|
| 206 |
+
getToolRegistry: vi.fn().mockReturnValue(mockToolRegistry),
|
| 207 |
+
getModel: vi.fn().mockReturnValue('test-model'),
|
| 208 |
+
getEmbeddingModel: vi.fn().mockReturnValue('test-embedding-model'),
|
| 209 |
+
getApiKey: vi.fn().mockReturnValue('test-key'),
|
| 210 |
+
getVertexAI: vi.fn().mockReturnValue(false),
|
| 211 |
+
getUserAgent: vi.fn().mockReturnValue('test-agent'),
|
| 212 |
+
getUserMemory: vi.fn().mockReturnValue(''),
|
| 213 |
+
getFullContext: vi.fn().mockReturnValue(false),
|
| 214 |
+
getSessionId: vi.fn().mockReturnValue('test-session-id'),
|
| 215 |
+
getProxy: vi.fn().mockReturnValue(undefined),
|
| 216 |
+
getWorkingDir: vi.fn().mockReturnValue('/test/dir'),
|
| 217 |
+
getFileService: vi.fn().mockReturnValue(fileService),
|
| 218 |
+
getMaxSessionTurns: vi.fn().mockReturnValue(0),
|
| 219 |
+
getSessionTokenLimit: vi.fn().mockReturnValue(32000),
|
| 220 |
+
getQuotaErrorOccurred: vi.fn().mockReturnValue(false),
|
| 221 |
+
setQuotaErrorOccurred: vi.fn(),
|
| 222 |
+
getNoBrowser: vi.fn().mockReturnValue(false),
|
| 223 |
+
getSystemPromptMappings: vi.fn().mockReturnValue(undefined),
|
| 224 |
+
getUsageStatisticsEnabled: vi.fn().mockReturnValue(true),
|
| 225 |
+
getIdeModeFeature: vi.fn().mockReturnValue(false),
|
| 226 |
+
getIdeMode: vi.fn().mockReturnValue(true),
|
| 227 |
+
getDebugMode: vi.fn().mockReturnValue(false),
|
| 228 |
+
getWorkspaceContext: vi.fn().mockReturnValue({
|
| 229 |
+
getDirectories: vi.fn().mockReturnValue(['/test/dir']),
|
| 230 |
+
}),
|
| 231 |
+
getGeminiClient: vi.fn(),
|
| 232 |
+
setFallbackMode: vi.fn(),
|
| 233 |
+
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
|
| 234 |
+
getChatCompression: vi.fn().mockReturnValue(undefined),
|
| 235 |
+
getSkipNextSpeakerCheck: vi.fn().mockReturnValue(false),
|
| 236 |
+
};
|
| 237 |
+
const MockedConfig = vi.mocked(Config, true);
|
| 238 |
+
MockedConfig.mockImplementation(
|
| 239 |
+
() => mockConfigObject as unknown as Config,
|
| 240 |
+
);
|
| 241 |
+
|
| 242 |
+
// We can instantiate the client here since Config is mocked
|
| 243 |
+
// and the constructor will use the mocked GoogleGenAI
|
| 244 |
+
client = new GeminiClient(
|
| 245 |
+
new Config({ sessionId: 'test-session-id' } as never),
|
| 246 |
+
);
|
| 247 |
+
mockConfigObject.getGeminiClient.mockReturnValue(client);
|
| 248 |
+
|
| 249 |
+
await client.initialize(contentGeneratorConfig);
|
| 250 |
+
});
|
| 251 |
+
|
| 252 |
+
afterEach(() => {
|
| 253 |
+
vi.restoreAllMocks();
|
| 254 |
+
});
|
| 255 |
+
|
| 256 |
+
// NOTE: The following tests for startChat were removed due to persistent issues with
|
| 257 |
+
// the @google/genai mock. Specifically, the mockChatCreateFn (representing instance.chats.create)
|
| 258 |
+
// was not being detected as called by the GeminiClient instance.
|
| 259 |
+
// This likely points to a subtle issue in how the GoogleGenerativeAI class constructor
|
| 260 |
+
// and its instance methods are mocked and then used by the class under test.
|
| 261 |
+
// For future debugging, ensure that the `this.client` in `GeminiClient` (which is an
|
| 262 |
+
// instance of the mocked GoogleGenerativeAI) correctly has its `chats.create` method
|
| 263 |
+
// pointing to `mockChatCreateFn`.
|
| 264 |
+
// it('startChat should call getCoreSystemPrompt with userMemory and pass to chats.create', async () => { ... });
|
| 265 |
+
// it('startChat should call getCoreSystemPrompt with empty string if userMemory is empty', async () => { ... });
|
| 266 |
+
|
| 267 |
+
// NOTE: The following tests for generateJson were removed due to persistent issues with
|
| 268 |
+
// the @google/genai mock, similar to the startChat tests. The mockGenerateContentFn
|
| 269 |
+
// (representing instance.models.generateContent) was not being detected as called, or the mock
|
| 270 |
+
// was not preventing an actual API call (leading to API key errors).
|
| 271 |
+
// For future debugging, ensure `this.client.models.generateContent` in `GeminiClient` correctly
|
| 272 |
+
// uses the `mockGenerateContentFn`.
|
| 273 |
+
// it('generateJson should call getCoreSystemPrompt with userMemory and pass to generateContent', async () => { ... });
|
| 274 |
+
// it('generateJson should call getCoreSystemPrompt with empty string if userMemory is empty', async () => { ... });
|
| 275 |
+
|
| 276 |
+
describe('generateEmbedding', () => {
|
| 277 |
+
const texts = ['hello world', 'goodbye world'];
|
| 278 |
+
const testEmbeddingModel = 'test-embedding-model';
|
| 279 |
+
|
| 280 |
+
it('should call embedContent with correct parameters and return embeddings', async () => {
|
| 281 |
+
const mockEmbeddings = [
|
| 282 |
+
[0.1, 0.2, 0.3],
|
| 283 |
+
[0.4, 0.5, 0.6],
|
| 284 |
+
];
|
| 285 |
+
const mockResponse: EmbedContentResponse = {
|
| 286 |
+
embeddings: [
|
| 287 |
+
{ values: mockEmbeddings[0] },
|
| 288 |
+
{ values: mockEmbeddings[1] },
|
| 289 |
+
],
|
| 290 |
+
};
|
| 291 |
+
mockEmbedContentFn.mockResolvedValue(mockResponse);
|
| 292 |
+
|
| 293 |
+
const result = await client.generateEmbedding(texts);
|
| 294 |
+
|
| 295 |
+
expect(mockEmbedContentFn).toHaveBeenCalledTimes(1);
|
| 296 |
+
expect(mockEmbedContentFn).toHaveBeenCalledWith({
|
| 297 |
+
model: testEmbeddingModel,
|
| 298 |
+
contents: texts,
|
| 299 |
+
});
|
| 300 |
+
expect(result).toEqual(mockEmbeddings);
|
| 301 |
+
});
|
| 302 |
+
|
| 303 |
+
it('should return an empty array if an empty array is passed', async () => {
|
| 304 |
+
const result = await client.generateEmbedding([]);
|
| 305 |
+
expect(result).toEqual([]);
|
| 306 |
+
expect(mockEmbedContentFn).not.toHaveBeenCalled();
|
| 307 |
+
});
|
| 308 |
+
|
| 309 |
+
it('should throw an error if API response has no embeddings array', async () => {
|
| 310 |
+
mockEmbedContentFn.mockResolvedValue({} as EmbedContentResponse); // No `embeddings` key
|
| 311 |
+
|
| 312 |
+
await expect(client.generateEmbedding(texts)).rejects.toThrow(
|
| 313 |
+
'No embeddings found in API response.',
|
| 314 |
+
);
|
| 315 |
+
});
|
| 316 |
+
|
| 317 |
+
it('should throw an error if API response has an empty embeddings array', async () => {
|
| 318 |
+
const mockResponse: EmbedContentResponse = {
|
| 319 |
+
embeddings: [],
|
| 320 |
+
};
|
| 321 |
+
mockEmbedContentFn.mockResolvedValue(mockResponse);
|
| 322 |
+
await expect(client.generateEmbedding(texts)).rejects.toThrow(
|
| 323 |
+
'No embeddings found in API response.',
|
| 324 |
+
);
|
| 325 |
+
});
|
| 326 |
+
|
| 327 |
+
it('should throw an error if API returns a mismatched number of embeddings', async () => {
|
| 328 |
+
const mockResponse: EmbedContentResponse = {
|
| 329 |
+
embeddings: [{ values: [1, 2, 3] }], // Only one for two texts
|
| 330 |
+
};
|
| 331 |
+
mockEmbedContentFn.mockResolvedValue(mockResponse);
|
| 332 |
+
|
| 333 |
+
await expect(client.generateEmbedding(texts)).rejects.toThrow(
|
| 334 |
+
'API returned a mismatched number of embeddings. Expected 2, got 1.',
|
| 335 |
+
);
|
| 336 |
+
});
|
| 337 |
+
|
| 338 |
+
it('should throw an error if any embedding has nullish values', async () => {
|
| 339 |
+
const mockResponse: EmbedContentResponse = {
|
| 340 |
+
embeddings: [{ values: [1, 2, 3] }, { values: undefined }], // Second one is bad
|
| 341 |
+
};
|
| 342 |
+
mockEmbedContentFn.mockResolvedValue(mockResponse);
|
| 343 |
+
|
| 344 |
+
await expect(client.generateEmbedding(texts)).rejects.toThrow(
|
| 345 |
+
'API returned an empty embedding for input text at index 1: "goodbye world"',
|
| 346 |
+
);
|
| 347 |
+
});
|
| 348 |
+
|
| 349 |
+
it('should throw an error if any embedding has an empty values array', async () => {
|
| 350 |
+
const mockResponse: EmbedContentResponse = {
|
| 351 |
+
embeddings: [{ values: [] }, { values: [1, 2, 3] }], // First one is bad
|
| 352 |
+
};
|
| 353 |
+
mockEmbedContentFn.mockResolvedValue(mockResponse);
|
| 354 |
+
|
| 355 |
+
await expect(client.generateEmbedding(texts)).rejects.toThrow(
|
| 356 |
+
'API returned an empty embedding for input text at index 0: "hello world"',
|
| 357 |
+
);
|
| 358 |
+
});
|
| 359 |
+
|
| 360 |
+
it('should propagate errors from the API call', async () => {
|
| 361 |
+
const apiError = new Error('API Failure');
|
| 362 |
+
mockEmbedContentFn.mockRejectedValue(apiError);
|
| 363 |
+
|
| 364 |
+
await expect(client.generateEmbedding(texts)).rejects.toThrow(
|
| 365 |
+
'API Failure',
|
| 366 |
+
);
|
| 367 |
+
});
|
| 368 |
+
});
|
| 369 |
+
|
| 370 |
+
describe('generateContent', () => {
|
| 371 |
+
it('should call generateContent with the correct parameters', async () => {
|
| 372 |
+
const contents = [{ role: 'user', parts: [{ text: 'hello' }] }];
|
| 373 |
+
const generationConfig = { temperature: 0.5 };
|
| 374 |
+
const abortSignal = new AbortController().signal;
|
| 375 |
+
|
| 376 |
+
// Mock countTokens
|
| 377 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 378 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 1 }),
|
| 379 |
+
generateContent: mockGenerateContentFn,
|
| 380 |
+
};
|
| 381 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 382 |
+
|
| 383 |
+
await client.generateContent(contents, generationConfig, abortSignal);
|
| 384 |
+
|
| 385 |
+
expect(mockGenerateContentFn).toHaveBeenCalledWith(
|
| 386 |
+
{
|
| 387 |
+
model: 'test-model',
|
| 388 |
+
config: {
|
| 389 |
+
abortSignal,
|
| 390 |
+
systemInstruction: getCoreSystemPrompt(''),
|
| 391 |
+
temperature: 0.5,
|
| 392 |
+
topP: 1,
|
| 393 |
+
},
|
| 394 |
+
contents,
|
| 395 |
+
},
|
| 396 |
+
'test-session-id',
|
| 397 |
+
);
|
| 398 |
+
});
|
| 399 |
+
});
|
| 400 |
+
|
| 401 |
+
describe('generateJson', () => {
|
| 402 |
+
it('should call generateContent with the correct parameters', async () => {
|
| 403 |
+
const contents = [{ role: 'user', parts: [{ text: 'hello' }] }];
|
| 404 |
+
const schema = { type: 'string' };
|
| 405 |
+
const abortSignal = new AbortController().signal;
|
| 406 |
+
|
| 407 |
+
// Mock countTokens
|
| 408 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 409 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 1 }),
|
| 410 |
+
generateContent: mockGenerateContentFn,
|
| 411 |
+
};
|
| 412 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 413 |
+
|
| 414 |
+
const result = await client.generateJson(contents, schema, abortSignal);
|
| 415 |
+
expect(result).toEqual({ key: 'value' });
|
| 416 |
+
|
| 417 |
+
expect(mockGenerateContentFn).toHaveBeenCalledWith(
|
| 418 |
+
{
|
| 419 |
+
model: 'test-model', // Should use current model from config
|
| 420 |
+
config: {
|
| 421 |
+
abortSignal,
|
| 422 |
+
systemInstruction: getCoreSystemPrompt(''),
|
| 423 |
+
temperature: 0,
|
| 424 |
+
topP: 1,
|
| 425 |
+
tools: [
|
| 426 |
+
{
|
| 427 |
+
functionDeclarations: [
|
| 428 |
+
{
|
| 429 |
+
name: 'respond_in_schema',
|
| 430 |
+
description: 'Provide the response in provided schema',
|
| 431 |
+
parameters: schema,
|
| 432 |
+
},
|
| 433 |
+
],
|
| 434 |
+
},
|
| 435 |
+
],
|
| 436 |
+
},
|
| 437 |
+
contents,
|
| 438 |
+
},
|
| 439 |
+
'test-session-id',
|
| 440 |
+
);
|
| 441 |
+
});
|
| 442 |
+
|
| 443 |
+
it('should allow overriding model and config', async () => {
|
| 444 |
+
const contents = [{ role: 'user', parts: [{ text: 'hello' }] }];
|
| 445 |
+
const schema = { type: 'string' };
|
| 446 |
+
const abortSignal = new AbortController().signal;
|
| 447 |
+
const customModel = 'custom-json-model';
|
| 448 |
+
const customConfig = { temperature: 0.9, topK: 20 };
|
| 449 |
+
|
| 450 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 451 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 1 }),
|
| 452 |
+
generateContent: mockGenerateContentFn,
|
| 453 |
+
};
|
| 454 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 455 |
+
|
| 456 |
+
const result = await client.generateJson(
|
| 457 |
+
contents,
|
| 458 |
+
schema,
|
| 459 |
+
abortSignal,
|
| 460 |
+
customModel,
|
| 461 |
+
customConfig,
|
| 462 |
+
);
|
| 463 |
+
expect(result).toEqual({ key: 'value' });
|
| 464 |
+
|
| 465 |
+
expect(mockGenerateContentFn).toHaveBeenCalledWith(
|
| 466 |
+
{
|
| 467 |
+
model: customModel,
|
| 468 |
+
config: {
|
| 469 |
+
abortSignal,
|
| 470 |
+
systemInstruction: getCoreSystemPrompt(''),
|
| 471 |
+
temperature: 0.9,
|
| 472 |
+
topP: 1, // from default
|
| 473 |
+
topK: 20,
|
| 474 |
+
tools: [
|
| 475 |
+
{
|
| 476 |
+
functionDeclarations: [
|
| 477 |
+
{
|
| 478 |
+
name: 'respond_in_schema',
|
| 479 |
+
description: 'Provide the response in provided schema',
|
| 480 |
+
parameters: schema,
|
| 481 |
+
},
|
| 482 |
+
],
|
| 483 |
+
},
|
| 484 |
+
],
|
| 485 |
+
},
|
| 486 |
+
contents,
|
| 487 |
+
},
|
| 488 |
+
'test-session-id',
|
| 489 |
+
);
|
| 490 |
+
});
|
| 491 |
+
});
|
| 492 |
+
|
| 493 |
+
describe('addHistory', () => {
|
| 494 |
+
it('should call chat.addHistory with the provided content', async () => {
|
| 495 |
+
const mockChat = {
|
| 496 |
+
addHistory: vi.fn(),
|
| 497 |
+
};
|
| 498 |
+
client['chat'] = mockChat as unknown as GeminiChat;
|
| 499 |
+
|
| 500 |
+
const newContent = {
|
| 501 |
+
role: 'user',
|
| 502 |
+
parts: [{ text: 'New history item' }],
|
| 503 |
+
};
|
| 504 |
+
await client.addHistory(newContent);
|
| 505 |
+
|
| 506 |
+
expect(mockChat.addHistory).toHaveBeenCalledWith(newContent);
|
| 507 |
+
});
|
| 508 |
+
});
|
| 509 |
+
|
| 510 |
+
describe('resetChat', () => {
|
| 511 |
+
it('should create a new chat session, clearing the old history', async () => {
|
| 512 |
+
// 1. Get the initial chat instance and add some history.
|
| 513 |
+
const initialChat = client.getChat();
|
| 514 |
+
const initialHistory = await client.getHistory();
|
| 515 |
+
await client.addHistory({
|
| 516 |
+
role: 'user',
|
| 517 |
+
parts: [{ text: 'some old message' }],
|
| 518 |
+
});
|
| 519 |
+
const historyWithOldMessage = await client.getHistory();
|
| 520 |
+
expect(historyWithOldMessage.length).toBeGreaterThan(
|
| 521 |
+
initialHistory.length,
|
| 522 |
+
);
|
| 523 |
+
|
| 524 |
+
// 2. Call resetChat.
|
| 525 |
+
await client.resetChat();
|
| 526 |
+
|
| 527 |
+
// 3. Get the new chat instance and its history.
|
| 528 |
+
const newChat = client.getChat();
|
| 529 |
+
const newHistory = await client.getHistory();
|
| 530 |
+
|
| 531 |
+
// 4. Assert that the chat instance is new and the history is reset.
|
| 532 |
+
expect(newChat).not.toBe(initialChat);
|
| 533 |
+
expect(newHistory.length).toBe(initialHistory.length);
|
| 534 |
+
expect(JSON.stringify(newHistory)).not.toContain('some old message');
|
| 535 |
+
});
|
| 536 |
+
});
|
| 537 |
+
|
| 538 |
+
describe('tryCompressChat', () => {
|
| 539 |
+
const mockCountTokens = vi.fn();
|
| 540 |
+
const mockSendMessage = vi.fn();
|
| 541 |
+
const mockGetHistory = vi.fn();
|
| 542 |
+
|
| 543 |
+
beforeEach(() => {
|
| 544 |
+
vi.mock('./tokenLimits', () => ({
|
| 545 |
+
tokenLimit: vi.fn(),
|
| 546 |
+
}));
|
| 547 |
+
|
| 548 |
+
client['contentGenerator'] = {
|
| 549 |
+
countTokens: mockCountTokens,
|
| 550 |
+
} as unknown as ContentGenerator;
|
| 551 |
+
|
| 552 |
+
client['chat'] = {
|
| 553 |
+
getHistory: mockGetHistory,
|
| 554 |
+
addHistory: vi.fn(),
|
| 555 |
+
setHistory: vi.fn(),
|
| 556 |
+
sendMessage: mockSendMessage,
|
| 557 |
+
} as unknown as GeminiChat;
|
| 558 |
+
});
|
| 559 |
+
|
| 560 |
+
it('should not trigger summarization if token count is below threshold', async () => {
|
| 561 |
+
const MOCKED_TOKEN_LIMIT = 1000;
|
| 562 |
+
vi.mocked(tokenLimit).mockReturnValue(MOCKED_TOKEN_LIMIT);
|
| 563 |
+
mockGetHistory.mockReturnValue([
|
| 564 |
+
{ role: 'user', parts: [{ text: '...history...' }] },
|
| 565 |
+
]);
|
| 566 |
+
|
| 567 |
+
mockCountTokens.mockResolvedValue({
|
| 568 |
+
totalTokens: MOCKED_TOKEN_LIMIT * 0.699, // TOKEN_THRESHOLD_FOR_SUMMARIZATION = 0.7
|
| 569 |
+
});
|
| 570 |
+
|
| 571 |
+
const initialChat = client.getChat();
|
| 572 |
+
const result = await client.tryCompressChat('prompt-id-2');
|
| 573 |
+
const newChat = client.getChat();
|
| 574 |
+
|
| 575 |
+
expect(tokenLimit).toHaveBeenCalled();
|
| 576 |
+
expect(result).toBeNull();
|
| 577 |
+
expect(newChat).toBe(initialChat);
|
| 578 |
+
});
|
| 579 |
+
|
| 580 |
+
it('logs a telemetry event when compressing', async () => {
|
| 581 |
+
vi.spyOn(QwenLogger.prototype, 'logChatCompressionEvent');
|
| 582 |
+
|
| 583 |
+
const MOCKED_TOKEN_LIMIT = 1000;
|
| 584 |
+
const MOCKED_CONTEXT_PERCENTAGE_THRESHOLD = 0.5;
|
| 585 |
+
vi.mocked(tokenLimit).mockReturnValue(MOCKED_TOKEN_LIMIT);
|
| 586 |
+
vi.spyOn(client['config'], 'getChatCompression').mockReturnValue({
|
| 587 |
+
contextPercentageThreshold: MOCKED_CONTEXT_PERCENTAGE_THRESHOLD,
|
| 588 |
+
});
|
| 589 |
+
mockGetHistory.mockReturnValue([
|
| 590 |
+
{ role: 'user', parts: [{ text: '...history...' }] },
|
| 591 |
+
]);
|
| 592 |
+
|
| 593 |
+
const originalTokenCount =
|
| 594 |
+
MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD;
|
| 595 |
+
const newTokenCount = 100;
|
| 596 |
+
|
| 597 |
+
mockCountTokens
|
| 598 |
+
.mockResolvedValueOnce({ totalTokens: originalTokenCount }) // First call for the check
|
| 599 |
+
.mockResolvedValueOnce({ totalTokens: newTokenCount }); // Second call for the new history
|
| 600 |
+
|
| 601 |
+
// Mock the summary response from the chat
|
| 602 |
+
mockSendMessage.mockResolvedValue({
|
| 603 |
+
role: 'model',
|
| 604 |
+
parts: [{ text: 'This is a summary.' }],
|
| 605 |
+
});
|
| 606 |
+
|
| 607 |
+
await client.tryCompressChat('prompt-id-3');
|
| 608 |
+
|
| 609 |
+
expect(QwenLogger.prototype.logChatCompressionEvent).toHaveBeenCalledWith(
|
| 610 |
+
expect.objectContaining({
|
| 611 |
+
tokens_before: originalTokenCount,
|
| 612 |
+
tokens_after: newTokenCount,
|
| 613 |
+
}),
|
| 614 |
+
);
|
| 615 |
+
});
|
| 616 |
+
|
| 617 |
+
it('should trigger summarization if token count is at threshold with contextPercentageThreshold setting', async () => {
|
| 618 |
+
const MOCKED_TOKEN_LIMIT = 1000;
|
| 619 |
+
const MOCKED_CONTEXT_PERCENTAGE_THRESHOLD = 0.5;
|
| 620 |
+
vi.mocked(tokenLimit).mockReturnValue(MOCKED_TOKEN_LIMIT);
|
| 621 |
+
vi.spyOn(client['config'], 'getChatCompression').mockReturnValue({
|
| 622 |
+
contextPercentageThreshold: MOCKED_CONTEXT_PERCENTAGE_THRESHOLD,
|
| 623 |
+
});
|
| 624 |
+
mockGetHistory.mockReturnValue([
|
| 625 |
+
{ role: 'user', parts: [{ text: '...history...' }] },
|
| 626 |
+
]);
|
| 627 |
+
|
| 628 |
+
const originalTokenCount =
|
| 629 |
+
MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD;
|
| 630 |
+
const newTokenCount = 100;
|
| 631 |
+
|
| 632 |
+
mockCountTokens
|
| 633 |
+
.mockResolvedValueOnce({ totalTokens: originalTokenCount }) // First call for the check
|
| 634 |
+
.mockResolvedValueOnce({ totalTokens: newTokenCount }); // Second call for the new history
|
| 635 |
+
|
| 636 |
+
// Mock the summary response from the chat
|
| 637 |
+
mockSendMessage.mockResolvedValue({
|
| 638 |
+
role: 'model',
|
| 639 |
+
parts: [{ text: 'This is a summary.' }],
|
| 640 |
+
});
|
| 641 |
+
|
| 642 |
+
const initialChat = client.getChat();
|
| 643 |
+
const result = await client.tryCompressChat('prompt-id-3');
|
| 644 |
+
const newChat = client.getChat();
|
| 645 |
+
|
| 646 |
+
expect(tokenLimit).toHaveBeenCalled();
|
| 647 |
+
expect(mockSendMessage).toHaveBeenCalled();
|
| 648 |
+
|
| 649 |
+
// Assert that summarization happened and returned the correct stats
|
| 650 |
+
expect(result).toEqual({
|
| 651 |
+
originalTokenCount,
|
| 652 |
+
newTokenCount,
|
| 653 |
+
});
|
| 654 |
+
|
| 655 |
+
// Assert that the chat was reset
|
| 656 |
+
expect(newChat).not.toBe(initialChat);
|
| 657 |
+
});
|
| 658 |
+
|
| 659 |
+
it('should not compress across a function call response', async () => {
|
| 660 |
+
const MOCKED_TOKEN_LIMIT = 1000;
|
| 661 |
+
vi.mocked(tokenLimit).mockReturnValue(MOCKED_TOKEN_LIMIT);
|
| 662 |
+
mockGetHistory.mockReturnValue([
|
| 663 |
+
{ role: 'user', parts: [{ text: '...history 1...' }] },
|
| 664 |
+
{ role: 'model', parts: [{ text: '...history 2...' }] },
|
| 665 |
+
{ role: 'user', parts: [{ text: '...history 3...' }] },
|
| 666 |
+
{ role: 'model', parts: [{ text: '...history 4...' }] },
|
| 667 |
+
{ role: 'user', parts: [{ text: '...history 5...' }] },
|
| 668 |
+
{ role: 'model', parts: [{ text: '...history 6...' }] },
|
| 669 |
+
{ role: 'user', parts: [{ text: '...history 7...' }] },
|
| 670 |
+
{ role: 'model', parts: [{ text: '...history 8...' }] },
|
| 671 |
+
// Normally we would break here, but we have a function response.
|
| 672 |
+
{
|
| 673 |
+
role: 'user',
|
| 674 |
+
parts: [{ functionResponse: { name: '...history 8...' } }],
|
| 675 |
+
},
|
| 676 |
+
{ role: 'model', parts: [{ text: '...history 10...' }] },
|
| 677 |
+
// Instead we will break here.
|
| 678 |
+
{ role: 'user', parts: [{ text: '...history 10...' }] },
|
| 679 |
+
]);
|
| 680 |
+
|
| 681 |
+
const originalTokenCount = 1000 * 0.7;
|
| 682 |
+
const newTokenCount = 100;
|
| 683 |
+
|
| 684 |
+
mockCountTokens
|
| 685 |
+
.mockResolvedValueOnce({ totalTokens: originalTokenCount }) // First call for the check
|
| 686 |
+
.mockResolvedValueOnce({ totalTokens: newTokenCount }); // Second call for the new history
|
| 687 |
+
|
| 688 |
+
// Mock the summary response from the chat
|
| 689 |
+
mockSendMessage.mockResolvedValue({
|
| 690 |
+
role: 'model',
|
| 691 |
+
parts: [{ text: 'This is a summary.' }],
|
| 692 |
+
});
|
| 693 |
+
|
| 694 |
+
const initialChat = client.getChat();
|
| 695 |
+
const result = await client.tryCompressChat('prompt-id-3');
|
| 696 |
+
const newChat = client.getChat();
|
| 697 |
+
|
| 698 |
+
expect(tokenLimit).toHaveBeenCalled();
|
| 699 |
+
expect(mockSendMessage).toHaveBeenCalled();
|
| 700 |
+
|
| 701 |
+
// Assert that summarization happened and returned the correct stats
|
| 702 |
+
expect(result).toEqual({
|
| 703 |
+
originalTokenCount,
|
| 704 |
+
newTokenCount,
|
| 705 |
+
});
|
| 706 |
+
// Assert that the chat was reset
|
| 707 |
+
expect(newChat).not.toBe(initialChat);
|
| 708 |
+
|
| 709 |
+
// 1. standard start context message
|
| 710 |
+
// 2. standard canned user start message
|
| 711 |
+
// 3. compressed summary message
|
| 712 |
+
// 4. standard canned user summary message
|
| 713 |
+
// 5. The last user message (not the last 3 because that would start with a function response)
|
| 714 |
+
expect(newChat.getHistory().length).toEqual(5);
|
| 715 |
+
});
|
| 716 |
+
|
| 717 |
+
it('should always trigger summarization when force is true, regardless of token count', async () => {
|
| 718 |
+
mockGetHistory.mockReturnValue([
|
| 719 |
+
{ role: 'user', parts: [{ text: '...history...' }] },
|
| 720 |
+
]);
|
| 721 |
+
|
| 722 |
+
const originalTokenCount = 10; // Well below threshold
|
| 723 |
+
const newTokenCount = 5;
|
| 724 |
+
|
| 725 |
+
mockCountTokens
|
| 726 |
+
.mockResolvedValueOnce({ totalTokens: originalTokenCount })
|
| 727 |
+
.mockResolvedValueOnce({ totalTokens: newTokenCount });
|
| 728 |
+
|
| 729 |
+
// Mock the summary response from the chat
|
| 730 |
+
mockSendMessage.mockResolvedValue({
|
| 731 |
+
role: 'model',
|
| 732 |
+
parts: [{ text: 'This is a summary.' }],
|
| 733 |
+
});
|
| 734 |
+
|
| 735 |
+
const initialChat = client.getChat();
|
| 736 |
+
const result = await client.tryCompressChat('prompt-id-1', true); // force = true
|
| 737 |
+
const newChat = client.getChat();
|
| 738 |
+
|
| 739 |
+
expect(mockSendMessage).toHaveBeenCalled();
|
| 740 |
+
|
| 741 |
+
expect(result).toEqual({
|
| 742 |
+
originalTokenCount,
|
| 743 |
+
newTokenCount,
|
| 744 |
+
});
|
| 745 |
+
|
| 746 |
+
// Assert that the chat was reset
|
| 747 |
+
expect(newChat).not.toBe(initialChat);
|
| 748 |
+
});
|
| 749 |
+
});
|
| 750 |
+
|
| 751 |
+
describe('sendMessageStream', () => {
|
| 752 |
+
it('should include editor context when ideMode is enabled', async () => {
|
| 753 |
+
// Arrange
|
| 754 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue({
|
| 755 |
+
workspaceState: {
|
| 756 |
+
openFiles: [
|
| 757 |
+
{
|
| 758 |
+
path: '/path/to/active/file.ts',
|
| 759 |
+
timestamp: Date.now(),
|
| 760 |
+
isActive: true,
|
| 761 |
+
selectedText: 'hello',
|
| 762 |
+
cursor: { line: 5, character: 10 },
|
| 763 |
+
},
|
| 764 |
+
{
|
| 765 |
+
path: '/path/to/recent/file1.ts',
|
| 766 |
+
timestamp: Date.now(),
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
path: '/path/to/recent/file2.ts',
|
| 770 |
+
timestamp: Date.now(),
|
| 771 |
+
},
|
| 772 |
+
],
|
| 773 |
+
},
|
| 774 |
+
});
|
| 775 |
+
|
| 776 |
+
vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true);
|
| 777 |
+
|
| 778 |
+
const mockStream = (async function* () {
|
| 779 |
+
yield { type: 'content', value: 'Hello' };
|
| 780 |
+
})();
|
| 781 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 782 |
+
|
| 783 |
+
const mockChat: Partial<GeminiChat> = {
|
| 784 |
+
addHistory: vi.fn(),
|
| 785 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 786 |
+
};
|
| 787 |
+
client['chat'] = mockChat as GeminiChat;
|
| 788 |
+
|
| 789 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 790 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 791 |
+
generateContent: mockGenerateContentFn,
|
| 792 |
+
};
|
| 793 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 794 |
+
|
| 795 |
+
const initialRequest = [{ text: 'Hi' }];
|
| 796 |
+
|
| 797 |
+
// Act
|
| 798 |
+
const stream = client.sendMessageStream(
|
| 799 |
+
initialRequest,
|
| 800 |
+
new AbortController().signal,
|
| 801 |
+
'prompt-id-ide',
|
| 802 |
+
);
|
| 803 |
+
for await (const _ of stream) {
|
| 804 |
+
// consume stream
|
| 805 |
+
}
|
| 806 |
+
|
| 807 |
+
// Assert
|
| 808 |
+
expect(ideContext.getIdeContext).toHaveBeenCalled();
|
| 809 |
+
const expectedContext = `
|
| 810 |
+
Here is the user's editor context as a JSON object. This is for your information only.
|
| 811 |
+
\`\`\`json
|
| 812 |
+
${JSON.stringify(
|
| 813 |
+
{
|
| 814 |
+
activeFile: {
|
| 815 |
+
path: '/path/to/active/file.ts',
|
| 816 |
+
cursor: {
|
| 817 |
+
line: 5,
|
| 818 |
+
character: 10,
|
| 819 |
+
},
|
| 820 |
+
selectedText: 'hello',
|
| 821 |
+
},
|
| 822 |
+
otherOpenFiles: ['/path/to/recent/file1.ts', '/path/to/recent/file2.ts'],
|
| 823 |
+
},
|
| 824 |
+
null,
|
| 825 |
+
2,
|
| 826 |
+
)}
|
| 827 |
+
\`\`\`
|
| 828 |
+
`.trim();
|
| 829 |
+
const expectedRequest = [{ text: expectedContext }];
|
| 830 |
+
expect(mockChat.addHistory).toHaveBeenCalledWith({
|
| 831 |
+
role: 'user',
|
| 832 |
+
parts: expectedRequest,
|
| 833 |
+
});
|
| 834 |
+
});
|
| 835 |
+
|
| 836 |
+
it('should not add context if ideMode is enabled but no open files', async () => {
|
| 837 |
+
// Arrange
|
| 838 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue({
|
| 839 |
+
workspaceState: {
|
| 840 |
+
openFiles: [],
|
| 841 |
+
},
|
| 842 |
+
});
|
| 843 |
+
|
| 844 |
+
vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true);
|
| 845 |
+
|
| 846 |
+
const mockStream = (async function* () {
|
| 847 |
+
yield { type: 'content', value: 'Hello' };
|
| 848 |
+
})();
|
| 849 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 850 |
+
|
| 851 |
+
const mockChat: Partial<GeminiChat> = {
|
| 852 |
+
addHistory: vi.fn(),
|
| 853 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 854 |
+
};
|
| 855 |
+
client['chat'] = mockChat as GeminiChat;
|
| 856 |
+
|
| 857 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 858 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 859 |
+
generateContent: mockGenerateContentFn,
|
| 860 |
+
};
|
| 861 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 862 |
+
|
| 863 |
+
const initialRequest = [{ text: 'Hi' }];
|
| 864 |
+
|
| 865 |
+
// Act
|
| 866 |
+
const stream = client.sendMessageStream(
|
| 867 |
+
initialRequest,
|
| 868 |
+
new AbortController().signal,
|
| 869 |
+
'prompt-id-ide',
|
| 870 |
+
);
|
| 871 |
+
for await (const _ of stream) {
|
| 872 |
+
// consume stream
|
| 873 |
+
}
|
| 874 |
+
|
| 875 |
+
// Assert
|
| 876 |
+
expect(ideContext.getIdeContext).toHaveBeenCalled();
|
| 877 |
+
expect(mockTurnRunFn).toHaveBeenCalledWith(
|
| 878 |
+
initialRequest,
|
| 879 |
+
expect.any(Object),
|
| 880 |
+
);
|
| 881 |
+
});
|
| 882 |
+
|
| 883 |
+
it('should add context if ideMode is enabled and there is one active file', async () => {
|
| 884 |
+
// Arrange
|
| 885 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue({
|
| 886 |
+
workspaceState: {
|
| 887 |
+
openFiles: [
|
| 888 |
+
{
|
| 889 |
+
path: '/path/to/active/file.ts',
|
| 890 |
+
timestamp: Date.now(),
|
| 891 |
+
isActive: true,
|
| 892 |
+
selectedText: 'hello',
|
| 893 |
+
cursor: { line: 5, character: 10 },
|
| 894 |
+
},
|
| 895 |
+
],
|
| 896 |
+
},
|
| 897 |
+
});
|
| 898 |
+
|
| 899 |
+
vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true);
|
| 900 |
+
|
| 901 |
+
const mockStream = (async function* () {
|
| 902 |
+
yield { type: 'content', value: 'Hello' };
|
| 903 |
+
})();
|
| 904 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 905 |
+
|
| 906 |
+
const mockChat: Partial<GeminiChat> = {
|
| 907 |
+
addHistory: vi.fn(),
|
| 908 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 909 |
+
};
|
| 910 |
+
client['chat'] = mockChat as GeminiChat;
|
| 911 |
+
|
| 912 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 913 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 914 |
+
generateContent: mockGenerateContentFn,
|
| 915 |
+
};
|
| 916 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 917 |
+
|
| 918 |
+
const initialRequest = [{ text: 'Hi' }];
|
| 919 |
+
|
| 920 |
+
// Act
|
| 921 |
+
const stream = client.sendMessageStream(
|
| 922 |
+
initialRequest,
|
| 923 |
+
new AbortController().signal,
|
| 924 |
+
'prompt-id-ide',
|
| 925 |
+
);
|
| 926 |
+
for await (const _ of stream) {
|
| 927 |
+
// consume stream
|
| 928 |
+
}
|
| 929 |
+
|
| 930 |
+
// Assert
|
| 931 |
+
expect(ideContext.getIdeContext).toHaveBeenCalled();
|
| 932 |
+
const expectedContext = `
|
| 933 |
+
Here is the user's editor context as a JSON object. This is for your information only.
|
| 934 |
+
\`\`\`json
|
| 935 |
+
${JSON.stringify(
|
| 936 |
+
{
|
| 937 |
+
activeFile: {
|
| 938 |
+
path: '/path/to/active/file.ts',
|
| 939 |
+
cursor: {
|
| 940 |
+
line: 5,
|
| 941 |
+
character: 10,
|
| 942 |
+
},
|
| 943 |
+
selectedText: 'hello',
|
| 944 |
+
},
|
| 945 |
+
},
|
| 946 |
+
null,
|
| 947 |
+
2,
|
| 948 |
+
)}
|
| 949 |
+
\`\`\`
|
| 950 |
+
`.trim();
|
| 951 |
+
const expectedRequest = [{ text: expectedContext }];
|
| 952 |
+
expect(mockChat.addHistory).toHaveBeenCalledWith({
|
| 953 |
+
role: 'user',
|
| 954 |
+
parts: expectedRequest,
|
| 955 |
+
});
|
| 956 |
+
});
|
| 957 |
+
|
| 958 |
+
it('should add context if ideMode is enabled and there are open files but no active file', async () => {
|
| 959 |
+
// Arrange
|
| 960 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue({
|
| 961 |
+
workspaceState: {
|
| 962 |
+
openFiles: [
|
| 963 |
+
{
|
| 964 |
+
path: '/path/to/recent/file1.ts',
|
| 965 |
+
timestamp: Date.now(),
|
| 966 |
+
},
|
| 967 |
+
{
|
| 968 |
+
path: '/path/to/recent/file2.ts',
|
| 969 |
+
timestamp: Date.now(),
|
| 970 |
+
},
|
| 971 |
+
],
|
| 972 |
+
},
|
| 973 |
+
});
|
| 974 |
+
|
| 975 |
+
vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true);
|
| 976 |
+
|
| 977 |
+
const mockStream = (async function* () {
|
| 978 |
+
yield { type: 'content', value: 'Hello' };
|
| 979 |
+
})();
|
| 980 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 981 |
+
|
| 982 |
+
const mockChat: Partial<GeminiChat> = {
|
| 983 |
+
addHistory: vi.fn(),
|
| 984 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 985 |
+
};
|
| 986 |
+
client['chat'] = mockChat as GeminiChat;
|
| 987 |
+
|
| 988 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 989 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 990 |
+
generateContent: mockGenerateContentFn,
|
| 991 |
+
};
|
| 992 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 993 |
+
|
| 994 |
+
const initialRequest = [{ text: 'Hi' }];
|
| 995 |
+
|
| 996 |
+
// Act
|
| 997 |
+
const stream = client.sendMessageStream(
|
| 998 |
+
initialRequest,
|
| 999 |
+
new AbortController().signal,
|
| 1000 |
+
'prompt-id-ide',
|
| 1001 |
+
);
|
| 1002 |
+
for await (const _ of stream) {
|
| 1003 |
+
// consume stream
|
| 1004 |
+
}
|
| 1005 |
+
|
| 1006 |
+
// Assert
|
| 1007 |
+
expect(ideContext.getIdeContext).toHaveBeenCalled();
|
| 1008 |
+
const expectedContext = `
|
| 1009 |
+
Here is the user's editor context as a JSON object. This is for your information only.
|
| 1010 |
+
\`\`\`json
|
| 1011 |
+
${JSON.stringify(
|
| 1012 |
+
{
|
| 1013 |
+
otherOpenFiles: ['/path/to/recent/file1.ts', '/path/to/recent/file2.ts'],
|
| 1014 |
+
},
|
| 1015 |
+
null,
|
| 1016 |
+
2,
|
| 1017 |
+
)}
|
| 1018 |
+
\`\`\`
|
| 1019 |
+
`.trim();
|
| 1020 |
+
const expectedRequest = [{ text: expectedContext }];
|
| 1021 |
+
expect(mockChat.addHistory).toHaveBeenCalledWith({
|
| 1022 |
+
role: 'user',
|
| 1023 |
+
parts: expectedRequest,
|
| 1024 |
+
});
|
| 1025 |
+
});
|
| 1026 |
+
|
| 1027 |
+
it('should return the turn instance after the stream is complete', async () => {
|
| 1028 |
+
// Arrange
|
| 1029 |
+
const mockStream = (async function* () {
|
| 1030 |
+
yield { type: 'content', value: 'Hello' };
|
| 1031 |
+
})();
|
| 1032 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 1033 |
+
|
| 1034 |
+
const mockChat: Partial<GeminiChat> = {
|
| 1035 |
+
addHistory: vi.fn(),
|
| 1036 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 1037 |
+
};
|
| 1038 |
+
client['chat'] = mockChat as GeminiChat;
|
| 1039 |
+
|
| 1040 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 1041 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 1042 |
+
generateContent: mockGenerateContentFn,
|
| 1043 |
+
};
|
| 1044 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 1045 |
+
|
| 1046 |
+
// Act
|
| 1047 |
+
const stream = client.sendMessageStream(
|
| 1048 |
+
[{ text: 'Hi' }],
|
| 1049 |
+
new AbortController().signal,
|
| 1050 |
+
'prompt-id-1',
|
| 1051 |
+
);
|
| 1052 |
+
|
| 1053 |
+
// Consume the stream manually to get the final return value.
|
| 1054 |
+
let finalResult: Turn | undefined;
|
| 1055 |
+
while (true) {
|
| 1056 |
+
const result = await stream.next();
|
| 1057 |
+
if (result.done) {
|
| 1058 |
+
finalResult = result.value;
|
| 1059 |
+
break;
|
| 1060 |
+
}
|
| 1061 |
+
}
|
| 1062 |
+
|
| 1063 |
+
// Assert
|
| 1064 |
+
expect(finalResult).toBeInstanceOf(Turn);
|
| 1065 |
+
});
|
| 1066 |
+
|
| 1067 |
+
it('should stop infinite loop after MAX_TURNS when nextSpeaker always returns model', async () => {
|
| 1068 |
+
// Get the mocked checkNextSpeaker function and configure it to trigger infinite loop
|
| 1069 |
+
const { checkNextSpeaker } = await import(
|
| 1070 |
+
'../utils/nextSpeakerChecker.js'
|
| 1071 |
+
);
|
| 1072 |
+
const mockCheckNextSpeaker = vi.mocked(checkNextSpeaker);
|
| 1073 |
+
mockCheckNextSpeaker.mockResolvedValue({
|
| 1074 |
+
next_speaker: 'model',
|
| 1075 |
+
reasoning: 'Test case - always continue',
|
| 1076 |
+
});
|
| 1077 |
+
|
| 1078 |
+
// Mock Turn to have no pending tool calls (which would allow nextSpeaker check)
|
| 1079 |
+
const mockStream = (async function* () {
|
| 1080 |
+
yield { type: 'content', value: 'Continue...' };
|
| 1081 |
+
})();
|
| 1082 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 1083 |
+
|
| 1084 |
+
const mockChat: Partial<GeminiChat> = {
|
| 1085 |
+
addHistory: vi.fn(),
|
| 1086 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 1087 |
+
};
|
| 1088 |
+
client['chat'] = mockChat as GeminiChat;
|
| 1089 |
+
|
| 1090 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 1091 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 1092 |
+
generateContent: mockGenerateContentFn,
|
| 1093 |
+
};
|
| 1094 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 1095 |
+
|
| 1096 |
+
// Use a signal that never gets aborted
|
| 1097 |
+
const abortController = new AbortController();
|
| 1098 |
+
const signal = abortController.signal;
|
| 1099 |
+
|
| 1100 |
+
// Act - Start the stream that should loop
|
| 1101 |
+
const stream = client.sendMessageStream(
|
| 1102 |
+
[{ text: 'Start conversation' }],
|
| 1103 |
+
signal,
|
| 1104 |
+
'prompt-id-2',
|
| 1105 |
+
);
|
| 1106 |
+
|
| 1107 |
+
// Count how many stream events we get
|
| 1108 |
+
let eventCount = 0;
|
| 1109 |
+
let finalResult: Turn | undefined;
|
| 1110 |
+
|
| 1111 |
+
// Consume the stream and count iterations
|
| 1112 |
+
while (true) {
|
| 1113 |
+
const result = await stream.next();
|
| 1114 |
+
if (result.done) {
|
| 1115 |
+
finalResult = result.value;
|
| 1116 |
+
break;
|
| 1117 |
+
}
|
| 1118 |
+
eventCount++;
|
| 1119 |
+
|
| 1120 |
+
// Safety check to prevent actual infinite loop in test
|
| 1121 |
+
if (eventCount > 200) {
|
| 1122 |
+
abortController.abort();
|
| 1123 |
+
throw new Error(
|
| 1124 |
+
'Test exceeded expected event limit - possible actual infinite loop',
|
| 1125 |
+
);
|
| 1126 |
+
}
|
| 1127 |
+
}
|
| 1128 |
+
|
| 1129 |
+
// Assert
|
| 1130 |
+
expect(finalResult).toBeInstanceOf(Turn);
|
| 1131 |
+
|
| 1132 |
+
// Debug: Check how many times checkNextSpeaker was called
|
| 1133 |
+
const callCount = mockCheckNextSpeaker.mock.calls.length;
|
| 1134 |
+
|
| 1135 |
+
// If infinite loop protection is working, checkNextSpeaker should be called many times
|
| 1136 |
+
// but stop at MAX_TURNS (100). Since each recursive call should trigger checkNextSpeaker,
|
| 1137 |
+
// we expect it to be called multiple times before hitting the limit
|
| 1138 |
+
expect(mockCheckNextSpeaker).toHaveBeenCalled();
|
| 1139 |
+
|
| 1140 |
+
// The test should demonstrate that the infinite loop protection works:
|
| 1141 |
+
// - If checkNextSpeaker is called many times (close to MAX_TURNS), it shows the loop was happening
|
| 1142 |
+
// - If it's only called once, the recursive behavior might not be triggered
|
| 1143 |
+
if (callCount === 0) {
|
| 1144 |
+
throw new Error(
|
| 1145 |
+
'checkNextSpeaker was never called - the recursive condition was not met',
|
| 1146 |
+
);
|
| 1147 |
+
} else if (callCount === 1) {
|
| 1148 |
+
// This might be expected behavior if the turn has pending tool calls or other conditions prevent recursion
|
| 1149 |
+
console.log(
|
| 1150 |
+
'checkNextSpeaker called only once - no infinite loop occurred',
|
| 1151 |
+
);
|
| 1152 |
+
} else {
|
| 1153 |
+
console.log(
|
| 1154 |
+
`checkNextSpeaker called ${callCount} times - infinite loop protection worked`,
|
| 1155 |
+
);
|
| 1156 |
+
// If called multiple times, we expect it to be stopped before MAX_TURNS
|
| 1157 |
+
expect(callCount).toBeLessThanOrEqual(100); // Should not exceed MAX_TURNS
|
| 1158 |
+
}
|
| 1159 |
+
|
| 1160 |
+
// The stream should produce events and eventually terminate
|
| 1161 |
+
expect(eventCount).toBeGreaterThanOrEqual(1);
|
| 1162 |
+
expect(eventCount).toBeLessThan(200); // Should not exceed our safety limit
|
| 1163 |
+
});
|
| 1164 |
+
|
| 1165 |
+
it('should yield MaxSessionTurns and stop when session turn limit is reached', async () => {
|
| 1166 |
+
// Arrange
|
| 1167 |
+
const MAX_SESSION_TURNS = 5;
|
| 1168 |
+
vi.spyOn(client['config'], 'getMaxSessionTurns').mockReturnValue(
|
| 1169 |
+
MAX_SESSION_TURNS,
|
| 1170 |
+
);
|
| 1171 |
+
|
| 1172 |
+
const mockStream = (async function* () {
|
| 1173 |
+
yield { type: 'content', value: 'Hello' };
|
| 1174 |
+
})();
|
| 1175 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 1176 |
+
|
| 1177 |
+
const mockChat: Partial<GeminiChat> = {
|
| 1178 |
+
addHistory: vi.fn(),
|
| 1179 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 1180 |
+
};
|
| 1181 |
+
client['chat'] = mockChat as GeminiChat;
|
| 1182 |
+
|
| 1183 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 1184 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 1185 |
+
generateContent: mockGenerateContentFn,
|
| 1186 |
+
};
|
| 1187 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 1188 |
+
|
| 1189 |
+
// Act & Assert
|
| 1190 |
+
// Run up to the limit
|
| 1191 |
+
for (let i = 0; i < MAX_SESSION_TURNS; i++) {
|
| 1192 |
+
const stream = client.sendMessageStream(
|
| 1193 |
+
[{ text: 'Hi' }],
|
| 1194 |
+
new AbortController().signal,
|
| 1195 |
+
'prompt-id-4',
|
| 1196 |
+
);
|
| 1197 |
+
// consume stream
|
| 1198 |
+
for await (const _event of stream) {
|
| 1199 |
+
// do nothing
|
| 1200 |
+
}
|
| 1201 |
+
}
|
| 1202 |
+
|
| 1203 |
+
// This call should exceed the limit
|
| 1204 |
+
const stream = client.sendMessageStream(
|
| 1205 |
+
[{ text: 'Hi' }],
|
| 1206 |
+
new AbortController().signal,
|
| 1207 |
+
'prompt-id-5',
|
| 1208 |
+
);
|
| 1209 |
+
|
| 1210 |
+
const events = [];
|
| 1211 |
+
for await (const event of stream) {
|
| 1212 |
+
events.push(event);
|
| 1213 |
+
}
|
| 1214 |
+
|
| 1215 |
+
expect(events).toEqual([{ type: GeminiEventType.MaxSessionTurns }]);
|
| 1216 |
+
expect(mockTurnRunFn).toHaveBeenCalledTimes(MAX_SESSION_TURNS);
|
| 1217 |
+
});
|
| 1218 |
+
|
| 1219 |
+
it('should respect MAX_TURNS limit even when turns parameter is set to a large value', async () => {
|
| 1220 |
+
// This test verifies that the infinite loop protection works even when
|
| 1221 |
+
// someone tries to bypass it by calling with a very large turns value
|
| 1222 |
+
|
| 1223 |
+
// Get the mocked checkNextSpeaker function and configure it to trigger infinite loop
|
| 1224 |
+
const { checkNextSpeaker } = await import(
|
| 1225 |
+
'../utils/nextSpeakerChecker.js'
|
| 1226 |
+
);
|
| 1227 |
+
const mockCheckNextSpeaker = vi.mocked(checkNextSpeaker);
|
| 1228 |
+
mockCheckNextSpeaker.mockResolvedValue({
|
| 1229 |
+
next_speaker: 'model',
|
| 1230 |
+
reasoning: 'Test case - always continue',
|
| 1231 |
+
});
|
| 1232 |
+
|
| 1233 |
+
// Mock Turn to have no pending tool calls (which would allow nextSpeaker check)
|
| 1234 |
+
const mockStream = (async function* () {
|
| 1235 |
+
yield { type: 'content', value: 'Continue...' };
|
| 1236 |
+
})();
|
| 1237 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 1238 |
+
|
| 1239 |
+
const mockChat: Partial<GeminiChat> = {
|
| 1240 |
+
addHistory: vi.fn(),
|
| 1241 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 1242 |
+
};
|
| 1243 |
+
client['chat'] = mockChat as GeminiChat;
|
| 1244 |
+
|
| 1245 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 1246 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 1247 |
+
generateContent: mockGenerateContentFn,
|
| 1248 |
+
};
|
| 1249 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 1250 |
+
|
| 1251 |
+
// Use a signal that never gets aborted
|
| 1252 |
+
const abortController = new AbortController();
|
| 1253 |
+
const signal = abortController.signal;
|
| 1254 |
+
|
| 1255 |
+
// Act - Start the stream with an extremely high turns value
|
| 1256 |
+
// This simulates a case where the turns protection is bypassed
|
| 1257 |
+
const stream = client.sendMessageStream(
|
| 1258 |
+
[{ text: 'Start conversation' }],
|
| 1259 |
+
signal,
|
| 1260 |
+
'prompt-id-3',
|
| 1261 |
+
Number.MAX_SAFE_INTEGER, // Bypass the MAX_TURNS protection
|
| 1262 |
+
);
|
| 1263 |
+
|
| 1264 |
+
// Count how many stream events we get
|
| 1265 |
+
let eventCount = 0;
|
| 1266 |
+
const maxTestIterations = 1000; // Higher limit to show the loop continues
|
| 1267 |
+
|
| 1268 |
+
// Consume the stream and count iterations
|
| 1269 |
+
try {
|
| 1270 |
+
while (true) {
|
| 1271 |
+
const result = await stream.next();
|
| 1272 |
+
if (result.done) {
|
| 1273 |
+
break;
|
| 1274 |
+
}
|
| 1275 |
+
eventCount++;
|
| 1276 |
+
|
| 1277 |
+
// This test should hit this limit, demonstrating the infinite loop
|
| 1278 |
+
if (eventCount > maxTestIterations) {
|
| 1279 |
+
abortController.abort();
|
| 1280 |
+
// This is the expected behavior - we hit the infinite loop
|
| 1281 |
+
break;
|
| 1282 |
+
}
|
| 1283 |
+
}
|
| 1284 |
+
} catch (error) {
|
| 1285 |
+
// If the test framework times out, that also demonstrates the infinite loop
|
| 1286 |
+
console.error('Test timed out or errored:', error);
|
| 1287 |
+
}
|
| 1288 |
+
|
| 1289 |
+
// Assert that the fix works - the loop should stop at MAX_TURNS
|
| 1290 |
+
const callCount = mockCheckNextSpeaker.mock.calls.length;
|
| 1291 |
+
|
| 1292 |
+
// With the fix: even when turns is set to a very high value,
|
| 1293 |
+
// the loop should stop at MAX_TURNS (100)
|
| 1294 |
+
expect(callCount).toBeLessThanOrEqual(100); // Should not exceed MAX_TURNS
|
| 1295 |
+
expect(eventCount).toBeLessThanOrEqual(200); // Should have reasonable number of events
|
| 1296 |
+
|
| 1297 |
+
console.log(
|
| 1298 |
+
`Infinite loop protection working: checkNextSpeaker called ${callCount} times, ` +
|
| 1299 |
+
`${eventCount} events generated (properly bounded by MAX_TURNS)`,
|
| 1300 |
+
);
|
| 1301 |
+
});
|
| 1302 |
+
|
| 1303 |
+
describe('Editor context delta', () => {
|
| 1304 |
+
const mockStream = (async function* () {
|
| 1305 |
+
yield { type: 'content', value: 'Hello' };
|
| 1306 |
+
})();
|
| 1307 |
+
|
| 1308 |
+
beforeEach(() => {
|
| 1309 |
+
client['forceFullIdeContext'] = false; // Reset before each delta test
|
| 1310 |
+
vi.spyOn(client, 'tryCompressChat').mockResolvedValue(null);
|
| 1311 |
+
vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true);
|
| 1312 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 1313 |
+
|
| 1314 |
+
const mockChat: Partial<GeminiChat> = {
|
| 1315 |
+
addHistory: vi.fn(),
|
| 1316 |
+
setHistory: vi.fn(),
|
| 1317 |
+
sendMessage: vi.fn().mockResolvedValue({ text: 'summary' }),
|
| 1318 |
+
// Assume history is not empty for delta checks
|
| 1319 |
+
getHistory: vi
|
| 1320 |
+
.fn()
|
| 1321 |
+
.mockReturnValue([
|
| 1322 |
+
{ role: 'user', parts: [{ text: 'previous message' }] },
|
| 1323 |
+
]),
|
| 1324 |
+
};
|
| 1325 |
+
client['chat'] = mockChat as GeminiChat;
|
| 1326 |
+
|
| 1327 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 1328 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 1329 |
+
generateContent: mockGenerateContentFn,
|
| 1330 |
+
};
|
| 1331 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 1332 |
+
});
|
| 1333 |
+
|
| 1334 |
+
const testCases = [
|
| 1335 |
+
{
|
| 1336 |
+
description: 'sends delta when active file changes',
|
| 1337 |
+
previousActiveFile: {
|
| 1338 |
+
path: '/path/to/old/file.ts',
|
| 1339 |
+
cursor: { line: 5, character: 10 },
|
| 1340 |
+
selectedText: 'hello',
|
| 1341 |
+
},
|
| 1342 |
+
currentActiveFile: {
|
| 1343 |
+
path: '/path/to/active/file.ts',
|
| 1344 |
+
cursor: { line: 5, character: 10 },
|
| 1345 |
+
selectedText: 'hello',
|
| 1346 |
+
},
|
| 1347 |
+
shouldSendContext: true,
|
| 1348 |
+
},
|
| 1349 |
+
{
|
| 1350 |
+
description: 'sends delta when cursor line changes',
|
| 1351 |
+
previousActiveFile: {
|
| 1352 |
+
path: '/path/to/active/file.ts',
|
| 1353 |
+
cursor: { line: 1, character: 10 },
|
| 1354 |
+
selectedText: 'hello',
|
| 1355 |
+
},
|
| 1356 |
+
currentActiveFile: {
|
| 1357 |
+
path: '/path/to/active/file.ts',
|
| 1358 |
+
cursor: { line: 5, character: 10 },
|
| 1359 |
+
selectedText: 'hello',
|
| 1360 |
+
},
|
| 1361 |
+
shouldSendContext: true,
|
| 1362 |
+
},
|
| 1363 |
+
{
|
| 1364 |
+
description: 'sends delta when cursor character changes',
|
| 1365 |
+
previousActiveFile: {
|
| 1366 |
+
path: '/path/to/active/file.ts',
|
| 1367 |
+
cursor: { line: 5, character: 1 },
|
| 1368 |
+
selectedText: 'hello',
|
| 1369 |
+
},
|
| 1370 |
+
currentActiveFile: {
|
| 1371 |
+
path: '/path/to/active/file.ts',
|
| 1372 |
+
cursor: { line: 5, character: 10 },
|
| 1373 |
+
selectedText: 'hello',
|
| 1374 |
+
},
|
| 1375 |
+
shouldSendContext: true,
|
| 1376 |
+
},
|
| 1377 |
+
{
|
| 1378 |
+
description: 'sends delta when selected text changes',
|
| 1379 |
+
previousActiveFile: {
|
| 1380 |
+
path: '/path/to/active/file.ts',
|
| 1381 |
+
cursor: { line: 5, character: 10 },
|
| 1382 |
+
selectedText: 'world',
|
| 1383 |
+
},
|
| 1384 |
+
currentActiveFile: {
|
| 1385 |
+
path: '/path/to/active/file.ts',
|
| 1386 |
+
cursor: { line: 5, character: 10 },
|
| 1387 |
+
selectedText: 'hello',
|
| 1388 |
+
},
|
| 1389 |
+
shouldSendContext: true,
|
| 1390 |
+
},
|
| 1391 |
+
{
|
| 1392 |
+
description: 'sends delta when selected text is added',
|
| 1393 |
+
previousActiveFile: {
|
| 1394 |
+
path: '/path/to/active/file.ts',
|
| 1395 |
+
cursor: { line: 5, character: 10 },
|
| 1396 |
+
},
|
| 1397 |
+
currentActiveFile: {
|
| 1398 |
+
path: '/path/to/active/file.ts',
|
| 1399 |
+
cursor: { line: 5, character: 10 },
|
| 1400 |
+
selectedText: 'hello',
|
| 1401 |
+
},
|
| 1402 |
+
shouldSendContext: true,
|
| 1403 |
+
},
|
| 1404 |
+
{
|
| 1405 |
+
description: 'sends delta when selected text is removed',
|
| 1406 |
+
previousActiveFile: {
|
| 1407 |
+
path: '/path/to/active/file.ts',
|
| 1408 |
+
cursor: { line: 5, character: 10 },
|
| 1409 |
+
selectedText: 'hello',
|
| 1410 |
+
},
|
| 1411 |
+
currentActiveFile: {
|
| 1412 |
+
path: '/path/to/active/file.ts',
|
| 1413 |
+
cursor: { line: 5, character: 10 },
|
| 1414 |
+
},
|
| 1415 |
+
shouldSendContext: true,
|
| 1416 |
+
},
|
| 1417 |
+
{
|
| 1418 |
+
description: 'does not send context when nothing changes',
|
| 1419 |
+
previousActiveFile: {
|
| 1420 |
+
path: '/path/to/active/file.ts',
|
| 1421 |
+
cursor: { line: 5, character: 10 },
|
| 1422 |
+
selectedText: 'hello',
|
| 1423 |
+
},
|
| 1424 |
+
currentActiveFile: {
|
| 1425 |
+
path: '/path/to/active/file.ts',
|
| 1426 |
+
cursor: { line: 5, character: 10 },
|
| 1427 |
+
selectedText: 'hello',
|
| 1428 |
+
},
|
| 1429 |
+
shouldSendContext: false,
|
| 1430 |
+
},
|
| 1431 |
+
];
|
| 1432 |
+
|
| 1433 |
+
it.each(testCases)(
|
| 1434 |
+
'$description',
|
| 1435 |
+
async ({
|
| 1436 |
+
previousActiveFile,
|
| 1437 |
+
currentActiveFile,
|
| 1438 |
+
shouldSendContext,
|
| 1439 |
+
}) => {
|
| 1440 |
+
// Setup previous context
|
| 1441 |
+
client['lastSentIdeContext'] = {
|
| 1442 |
+
workspaceState: {
|
| 1443 |
+
openFiles: [
|
| 1444 |
+
{
|
| 1445 |
+
path: previousActiveFile.path,
|
| 1446 |
+
cursor: previousActiveFile.cursor,
|
| 1447 |
+
selectedText: previousActiveFile.selectedText,
|
| 1448 |
+
isActive: true,
|
| 1449 |
+
timestamp: Date.now() - 1000,
|
| 1450 |
+
},
|
| 1451 |
+
],
|
| 1452 |
+
},
|
| 1453 |
+
};
|
| 1454 |
+
|
| 1455 |
+
// Setup current context
|
| 1456 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue({
|
| 1457 |
+
workspaceState: {
|
| 1458 |
+
openFiles: [
|
| 1459 |
+
{ ...currentActiveFile, isActive: true, timestamp: Date.now() },
|
| 1460 |
+
],
|
| 1461 |
+
},
|
| 1462 |
+
});
|
| 1463 |
+
|
| 1464 |
+
const stream = client.sendMessageStream(
|
| 1465 |
+
[{ text: 'Hi' }],
|
| 1466 |
+
new AbortController().signal,
|
| 1467 |
+
'prompt-id-delta',
|
| 1468 |
+
);
|
| 1469 |
+
for await (const _ of stream) {
|
| 1470 |
+
// consume stream
|
| 1471 |
+
}
|
| 1472 |
+
|
| 1473 |
+
const mockChat = client['chat'] as unknown as {
|
| 1474 |
+
addHistory: (typeof vi)['fn'];
|
| 1475 |
+
};
|
| 1476 |
+
|
| 1477 |
+
if (shouldSendContext) {
|
| 1478 |
+
expect(mockChat.addHistory).toHaveBeenCalledWith(
|
| 1479 |
+
expect.objectContaining({
|
| 1480 |
+
parts: expect.arrayContaining([
|
| 1481 |
+
expect.objectContaining({
|
| 1482 |
+
text: expect.stringContaining(
|
| 1483 |
+
"Here is a summary of changes in the user's editor context",
|
| 1484 |
+
),
|
| 1485 |
+
}),
|
| 1486 |
+
]),
|
| 1487 |
+
}),
|
| 1488 |
+
);
|
| 1489 |
+
} else {
|
| 1490 |
+
expect(mockChat.addHistory).not.toHaveBeenCalled();
|
| 1491 |
+
}
|
| 1492 |
+
},
|
| 1493 |
+
);
|
| 1494 |
+
|
| 1495 |
+
it('sends full context when history is cleared, even if editor state is unchanged', async () => {
|
| 1496 |
+
const activeFile = {
|
| 1497 |
+
path: '/path/to/active/file.ts',
|
| 1498 |
+
cursor: { line: 5, character: 10 },
|
| 1499 |
+
selectedText: 'hello',
|
| 1500 |
+
};
|
| 1501 |
+
|
| 1502 |
+
// Setup previous context
|
| 1503 |
+
client['lastSentIdeContext'] = {
|
| 1504 |
+
workspaceState: {
|
| 1505 |
+
openFiles: [
|
| 1506 |
+
{
|
| 1507 |
+
path: activeFile.path,
|
| 1508 |
+
cursor: activeFile.cursor,
|
| 1509 |
+
selectedText: activeFile.selectedText,
|
| 1510 |
+
isActive: true,
|
| 1511 |
+
timestamp: Date.now() - 1000,
|
| 1512 |
+
},
|
| 1513 |
+
],
|
| 1514 |
+
},
|
| 1515 |
+
};
|
| 1516 |
+
|
| 1517 |
+
// Setup current context (same as previous)
|
| 1518 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue({
|
| 1519 |
+
workspaceState: {
|
| 1520 |
+
openFiles: [
|
| 1521 |
+
{ ...activeFile, isActive: true, timestamp: Date.now() },
|
| 1522 |
+
],
|
| 1523 |
+
},
|
| 1524 |
+
});
|
| 1525 |
+
|
| 1526 |
+
// Make history empty
|
| 1527 |
+
const mockChat = client['chat'] as unknown as {
|
| 1528 |
+
getHistory: ReturnType<(typeof vi)['fn']>;
|
| 1529 |
+
addHistory: ReturnType<(typeof vi)['fn']>;
|
| 1530 |
+
};
|
| 1531 |
+
mockChat.getHistory.mockReturnValue([]);
|
| 1532 |
+
|
| 1533 |
+
const stream = client.sendMessageStream(
|
| 1534 |
+
[{ text: 'Hi' }],
|
| 1535 |
+
new AbortController().signal,
|
| 1536 |
+
'prompt-id-history-cleared',
|
| 1537 |
+
);
|
| 1538 |
+
for await (const _ of stream) {
|
| 1539 |
+
// consume stream
|
| 1540 |
+
}
|
| 1541 |
+
|
| 1542 |
+
expect(mockChat.addHistory).toHaveBeenCalledWith(
|
| 1543 |
+
expect.objectContaining({
|
| 1544 |
+
parts: expect.arrayContaining([
|
| 1545 |
+
expect.objectContaining({
|
| 1546 |
+
text: expect.stringContaining(
|
| 1547 |
+
"Here is the user's editor context",
|
| 1548 |
+
),
|
| 1549 |
+
}),
|
| 1550 |
+
]),
|
| 1551 |
+
}),
|
| 1552 |
+
);
|
| 1553 |
+
|
| 1554 |
+
// Also verify it's the full context, not a delta.
|
| 1555 |
+
const call = mockChat.addHistory.mock.calls[0][0];
|
| 1556 |
+
const contextText = call.parts[0].text;
|
| 1557 |
+
const contextJson = JSON.parse(
|
| 1558 |
+
contextText.match(/```json\n(.*)\n```/s)![1],
|
| 1559 |
+
);
|
| 1560 |
+
expect(contextJson).toHaveProperty('activeFile');
|
| 1561 |
+
expect(contextJson.activeFile.path).toBe('/path/to/active/file.ts');
|
| 1562 |
+
});
|
| 1563 |
+
});
|
| 1564 |
+
|
| 1565 |
+
describe('IDE context with pending tool calls', () => {
|
| 1566 |
+
let mockChat: Partial<GeminiChat>;
|
| 1567 |
+
|
| 1568 |
+
beforeEach(() => {
|
| 1569 |
+
vi.spyOn(client, 'tryCompressChat').mockResolvedValue(null);
|
| 1570 |
+
|
| 1571 |
+
const mockStream = (async function* () {
|
| 1572 |
+
yield { type: 'content', value: 'response' };
|
| 1573 |
+
})();
|
| 1574 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 1575 |
+
|
| 1576 |
+
mockChat = {
|
| 1577 |
+
addHistory: vi.fn(),
|
| 1578 |
+
getHistory: vi.fn().mockReturnValue([]), // Default empty history
|
| 1579 |
+
setHistory: vi.fn(),
|
| 1580 |
+
sendMessage: vi.fn().mockResolvedValue({ text: 'summary' }),
|
| 1581 |
+
};
|
| 1582 |
+
client['chat'] = mockChat as GeminiChat;
|
| 1583 |
+
|
| 1584 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 1585 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 1586 |
+
};
|
| 1587 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 1588 |
+
|
| 1589 |
+
vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true);
|
| 1590 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue({
|
| 1591 |
+
workspaceState: {
|
| 1592 |
+
openFiles: [{ path: '/path/to/file.ts', timestamp: Date.now() }],
|
| 1593 |
+
},
|
| 1594 |
+
});
|
| 1595 |
+
});
|
| 1596 |
+
|
| 1597 |
+
it('should NOT add IDE context when a tool call is pending', async () => {
|
| 1598 |
+
// Arrange: History ends with a functionCall from the model
|
| 1599 |
+
const historyWithPendingCall: Content[] = [
|
| 1600 |
+
{ role: 'user', parts: [{ text: 'Please use a tool.' }] },
|
| 1601 |
+
{
|
| 1602 |
+
role: 'model',
|
| 1603 |
+
parts: [{ functionCall: { name: 'some_tool', args: {} } }],
|
| 1604 |
+
},
|
| 1605 |
+
];
|
| 1606 |
+
vi.mocked(mockChat.getHistory!).mockReturnValue(historyWithPendingCall);
|
| 1607 |
+
|
| 1608 |
+
// Act: Simulate sending the tool's response back
|
| 1609 |
+
const stream = client.sendMessageStream(
|
| 1610 |
+
[
|
| 1611 |
+
{
|
| 1612 |
+
functionResponse: {
|
| 1613 |
+
name: 'some_tool',
|
| 1614 |
+
response: { success: true },
|
| 1615 |
+
},
|
| 1616 |
+
},
|
| 1617 |
+
],
|
| 1618 |
+
new AbortController().signal,
|
| 1619 |
+
'prompt-id-tool-response',
|
| 1620 |
+
);
|
| 1621 |
+
for await (const _ of stream) {
|
| 1622 |
+
// consume stream to complete the call
|
| 1623 |
+
}
|
| 1624 |
+
|
| 1625 |
+
// Assert: The IDE context message should NOT have been added to the history.
|
| 1626 |
+
expect(mockChat.addHistory).not.toHaveBeenCalledWith(
|
| 1627 |
+
expect.objectContaining({
|
| 1628 |
+
parts: expect.arrayContaining([
|
| 1629 |
+
expect.objectContaining({
|
| 1630 |
+
text: expect.stringContaining("user's editor context"),
|
| 1631 |
+
}),
|
| 1632 |
+
]),
|
| 1633 |
+
}),
|
| 1634 |
+
);
|
| 1635 |
+
});
|
| 1636 |
+
|
| 1637 |
+
it('should add IDE context when no tool call is pending', async () => {
|
| 1638 |
+
// Arrange: History is normal, no pending calls
|
| 1639 |
+
const normalHistory: Content[] = [
|
| 1640 |
+
{ role: 'user', parts: [{ text: 'A normal message.' }] },
|
| 1641 |
+
{ role: 'model', parts: [{ text: 'A normal response.' }] },
|
| 1642 |
+
];
|
| 1643 |
+
vi.mocked(mockChat.getHistory!).mockReturnValue(normalHistory);
|
| 1644 |
+
|
| 1645 |
+
// Act
|
| 1646 |
+
const stream = client.sendMessageStream(
|
| 1647 |
+
[{ text: 'Another normal message' }],
|
| 1648 |
+
new AbortController().signal,
|
| 1649 |
+
'prompt-id-normal',
|
| 1650 |
+
);
|
| 1651 |
+
for await (const _ of stream) {
|
| 1652 |
+
// consume stream
|
| 1653 |
+
}
|
| 1654 |
+
|
| 1655 |
+
// Assert: The IDE context message SHOULD have been added.
|
| 1656 |
+
expect(mockChat.addHistory).toHaveBeenCalledWith(
|
| 1657 |
+
expect.objectContaining({
|
| 1658 |
+
role: 'user',
|
| 1659 |
+
parts: expect.arrayContaining([
|
| 1660 |
+
expect.objectContaining({
|
| 1661 |
+
text: expect.stringContaining("user's editor context"),
|
| 1662 |
+
}),
|
| 1663 |
+
]),
|
| 1664 |
+
}),
|
| 1665 |
+
);
|
| 1666 |
+
});
|
| 1667 |
+
|
| 1668 |
+
it('should send the latest IDE context on the next message after a skipped context', async () => {
|
| 1669 |
+
// --- Step 1: A tool call is pending, context should be skipped ---
|
| 1670 |
+
|
| 1671 |
+
// Arrange: History ends with a functionCall
|
| 1672 |
+
const historyWithPendingCall: Content[] = [
|
| 1673 |
+
{ role: 'user', parts: [{ text: 'Please use a tool.' }] },
|
| 1674 |
+
{
|
| 1675 |
+
role: 'model',
|
| 1676 |
+
parts: [{ functionCall: { name: 'some_tool', args: {} } }],
|
| 1677 |
+
},
|
| 1678 |
+
];
|
| 1679 |
+
vi.mocked(mockChat.getHistory!).mockReturnValue(historyWithPendingCall);
|
| 1680 |
+
|
| 1681 |
+
// Arrange: Set the initial IDE context
|
| 1682 |
+
const initialIdeContext = {
|
| 1683 |
+
workspaceState: {
|
| 1684 |
+
openFiles: [{ path: '/path/to/fileA.ts', timestamp: Date.now() }],
|
| 1685 |
+
},
|
| 1686 |
+
};
|
| 1687 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue(initialIdeContext);
|
| 1688 |
+
|
| 1689 |
+
// Act: Send the tool response
|
| 1690 |
+
let stream = client.sendMessageStream(
|
| 1691 |
+
[
|
| 1692 |
+
{
|
| 1693 |
+
functionResponse: {
|
| 1694 |
+
name: 'some_tool',
|
| 1695 |
+
response: { success: true },
|
| 1696 |
+
},
|
| 1697 |
+
},
|
| 1698 |
+
],
|
| 1699 |
+
new AbortController().signal,
|
| 1700 |
+
'prompt-id-tool-response',
|
| 1701 |
+
);
|
| 1702 |
+
for await (const _ of stream) {
|
| 1703 |
+
/* consume */
|
| 1704 |
+
}
|
| 1705 |
+
|
| 1706 |
+
// Assert: The initial context was NOT sent
|
| 1707 |
+
expect(mockChat.addHistory).not.toHaveBeenCalledWith(
|
| 1708 |
+
expect.objectContaining({
|
| 1709 |
+
parts: expect.arrayContaining([
|
| 1710 |
+
expect.objectContaining({
|
| 1711 |
+
text: expect.stringContaining("user's editor context"),
|
| 1712 |
+
}),
|
| 1713 |
+
]),
|
| 1714 |
+
}),
|
| 1715 |
+
);
|
| 1716 |
+
|
| 1717 |
+
// --- Step 2: A new message is sent, latest context should be included ---
|
| 1718 |
+
|
| 1719 |
+
// Arrange: The model has responded to the tool, and the user is sending a new message.
|
| 1720 |
+
const historyAfterToolResponse: Content[] = [
|
| 1721 |
+
...historyWithPendingCall,
|
| 1722 |
+
{
|
| 1723 |
+
role: 'user',
|
| 1724 |
+
parts: [
|
| 1725 |
+
{
|
| 1726 |
+
functionResponse: {
|
| 1727 |
+
name: 'some_tool',
|
| 1728 |
+
response: { success: true },
|
| 1729 |
+
},
|
| 1730 |
+
},
|
| 1731 |
+
],
|
| 1732 |
+
},
|
| 1733 |
+
{ role: 'model', parts: [{ text: 'The tool ran successfully.' }] },
|
| 1734 |
+
];
|
| 1735 |
+
vi.mocked(mockChat.getHistory!).mockReturnValue(
|
| 1736 |
+
historyAfterToolResponse,
|
| 1737 |
+
);
|
| 1738 |
+
vi.mocked(mockChat.addHistory!).mockClear(); // Clear previous calls for the next assertion
|
| 1739 |
+
|
| 1740 |
+
// Arrange: The IDE context has now changed
|
| 1741 |
+
const newIdeContext = {
|
| 1742 |
+
workspaceState: {
|
| 1743 |
+
openFiles: [{ path: '/path/to/fileB.ts', timestamp: Date.now() }],
|
| 1744 |
+
},
|
| 1745 |
+
};
|
| 1746 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue(newIdeContext);
|
| 1747 |
+
|
| 1748 |
+
// Act: Send a new, regular user message
|
| 1749 |
+
stream = client.sendMessageStream(
|
| 1750 |
+
[{ text: 'Thanks!' }],
|
| 1751 |
+
new AbortController().signal,
|
| 1752 |
+
'prompt-id-final',
|
| 1753 |
+
);
|
| 1754 |
+
for await (const _ of stream) {
|
| 1755 |
+
/* consume */
|
| 1756 |
+
}
|
| 1757 |
+
|
| 1758 |
+
// Assert: The NEW context was sent as a FULL context because there was no previously sent context.
|
| 1759 |
+
const addHistoryCalls = vi.mocked(mockChat.addHistory!).mock.calls;
|
| 1760 |
+
const contextCall = addHistoryCalls.find((call) =>
|
| 1761 |
+
JSON.stringify(call[0]).includes("user's editor context"),
|
| 1762 |
+
);
|
| 1763 |
+
expect(contextCall).toBeDefined();
|
| 1764 |
+
expect(JSON.stringify(contextCall![0])).toContain(
|
| 1765 |
+
"Here is the user's editor context as a JSON object",
|
| 1766 |
+
);
|
| 1767 |
+
// Check that the sent context is the new one (fileB.ts)
|
| 1768 |
+
expect(JSON.stringify(contextCall![0])).toContain('fileB.ts');
|
| 1769 |
+
// Check that the sent context is NOT the old one (fileA.ts)
|
| 1770 |
+
expect(JSON.stringify(contextCall![0])).not.toContain('fileA.ts');
|
| 1771 |
+
});
|
| 1772 |
+
|
| 1773 |
+
it('should send a context DELTA on the next message after a skipped context', async () => {
|
| 1774 |
+
// --- Step 0: Establish an initial context ---
|
| 1775 |
+
vi.mocked(mockChat.getHistory!).mockReturnValue([]); // Start with empty history
|
| 1776 |
+
const contextA = {
|
| 1777 |
+
workspaceState: {
|
| 1778 |
+
openFiles: [
|
| 1779 |
+
{
|
| 1780 |
+
path: '/path/to/fileA.ts',
|
| 1781 |
+
isActive: true,
|
| 1782 |
+
timestamp: Date.now(),
|
| 1783 |
+
},
|
| 1784 |
+
],
|
| 1785 |
+
},
|
| 1786 |
+
};
|
| 1787 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue(contextA);
|
| 1788 |
+
|
| 1789 |
+
// Act: Send a regular message to establish the initial context
|
| 1790 |
+
let stream = client.sendMessageStream(
|
| 1791 |
+
[{ text: 'Initial message' }],
|
| 1792 |
+
new AbortController().signal,
|
| 1793 |
+
'prompt-id-initial',
|
| 1794 |
+
);
|
| 1795 |
+
for await (const _ of stream) {
|
| 1796 |
+
/* consume */
|
| 1797 |
+
}
|
| 1798 |
+
|
| 1799 |
+
// Assert: Full context for fileA.ts was sent and stored.
|
| 1800 |
+
const initialCall = vi.mocked(mockChat.addHistory!).mock.calls[0][0];
|
| 1801 |
+
expect(JSON.stringify(initialCall)).toContain(
|
| 1802 |
+
"user's editor context as a JSON object",
|
| 1803 |
+
);
|
| 1804 |
+
expect(JSON.stringify(initialCall)).toContain('fileA.ts');
|
| 1805 |
+
// This implicitly tests that `lastSentIdeContext` is now set internally by the client.
|
| 1806 |
+
vi.mocked(mockChat.addHistory!).mockClear();
|
| 1807 |
+
|
| 1808 |
+
// --- Step 1: A tool call is pending, context should be skipped ---
|
| 1809 |
+
const historyWithPendingCall: Content[] = [
|
| 1810 |
+
{ role: 'user', parts: [{ text: 'Please use a tool.' }] },
|
| 1811 |
+
{
|
| 1812 |
+
role: 'model',
|
| 1813 |
+
parts: [{ functionCall: { name: 'some_tool', args: {} } }],
|
| 1814 |
+
},
|
| 1815 |
+
];
|
| 1816 |
+
vi.mocked(mockChat.getHistory!).mockReturnValue(historyWithPendingCall);
|
| 1817 |
+
|
| 1818 |
+
// Arrange: IDE context changes, but this should be skipped
|
| 1819 |
+
const contextB = {
|
| 1820 |
+
workspaceState: {
|
| 1821 |
+
openFiles: [
|
| 1822 |
+
{
|
| 1823 |
+
path: '/path/to/fileB.ts',
|
| 1824 |
+
isActive: true,
|
| 1825 |
+
timestamp: Date.now(),
|
| 1826 |
+
},
|
| 1827 |
+
],
|
| 1828 |
+
},
|
| 1829 |
+
};
|
| 1830 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue(contextB);
|
| 1831 |
+
|
| 1832 |
+
// Act: Send the tool response
|
| 1833 |
+
stream = client.sendMessageStream(
|
| 1834 |
+
[
|
| 1835 |
+
{
|
| 1836 |
+
functionResponse: {
|
| 1837 |
+
name: 'some_tool',
|
| 1838 |
+
response: { success: true },
|
| 1839 |
+
},
|
| 1840 |
+
},
|
| 1841 |
+
],
|
| 1842 |
+
new AbortController().signal,
|
| 1843 |
+
'prompt-id-tool-response',
|
| 1844 |
+
);
|
| 1845 |
+
for await (const _ of stream) {
|
| 1846 |
+
/* consume */
|
| 1847 |
+
}
|
| 1848 |
+
|
| 1849 |
+
// Assert: No context was sent
|
| 1850 |
+
expect(mockChat.addHistory).not.toHaveBeenCalled();
|
| 1851 |
+
|
| 1852 |
+
// --- Step 2: A new message is sent, latest context DELTA should be included ---
|
| 1853 |
+
const historyAfterToolResponse: Content[] = [
|
| 1854 |
+
...historyWithPendingCall,
|
| 1855 |
+
{
|
| 1856 |
+
role: 'user',
|
| 1857 |
+
parts: [
|
| 1858 |
+
{
|
| 1859 |
+
functionResponse: {
|
| 1860 |
+
name: 'some_tool',
|
| 1861 |
+
response: { success: true },
|
| 1862 |
+
},
|
| 1863 |
+
},
|
| 1864 |
+
],
|
| 1865 |
+
},
|
| 1866 |
+
{ role: 'model', parts: [{ text: 'The tool ran successfully.' }] },
|
| 1867 |
+
];
|
| 1868 |
+
vi.mocked(mockChat.getHistory!).mockReturnValue(
|
| 1869 |
+
historyAfterToolResponse,
|
| 1870 |
+
);
|
| 1871 |
+
|
| 1872 |
+
// Arrange: The IDE context has changed again
|
| 1873 |
+
const contextC = {
|
| 1874 |
+
workspaceState: {
|
| 1875 |
+
openFiles: [
|
| 1876 |
+
// fileA is now closed, fileC is open
|
| 1877 |
+
{
|
| 1878 |
+
path: '/path/to/fileC.ts',
|
| 1879 |
+
isActive: true,
|
| 1880 |
+
timestamp: Date.now(),
|
| 1881 |
+
},
|
| 1882 |
+
],
|
| 1883 |
+
},
|
| 1884 |
+
};
|
| 1885 |
+
vi.mocked(ideContext.getIdeContext).mockReturnValue(contextC);
|
| 1886 |
+
|
| 1887 |
+
// Act: Send a new, regular user message
|
| 1888 |
+
stream = client.sendMessageStream(
|
| 1889 |
+
[{ text: 'Thanks!' }],
|
| 1890 |
+
new AbortController().signal,
|
| 1891 |
+
'prompt-id-final',
|
| 1892 |
+
);
|
| 1893 |
+
for await (const _ of stream) {
|
| 1894 |
+
/* consume */
|
| 1895 |
+
}
|
| 1896 |
+
|
| 1897 |
+
// Assert: The DELTA context was sent
|
| 1898 |
+
const finalCall = vi.mocked(mockChat.addHistory!).mock.calls[0][0];
|
| 1899 |
+
expect(JSON.stringify(finalCall)).toContain('summary of changes');
|
| 1900 |
+
// The delta should reflect fileA being closed and fileC being opened.
|
| 1901 |
+
expect(JSON.stringify(finalCall)).toContain('filesClosed');
|
| 1902 |
+
expect(JSON.stringify(finalCall)).toContain('fileA.ts');
|
| 1903 |
+
expect(JSON.stringify(finalCall)).toContain('activeFileChanged');
|
| 1904 |
+
expect(JSON.stringify(finalCall)).toContain('fileC.ts');
|
| 1905 |
+
});
|
| 1906 |
+
});
|
| 1907 |
+
|
| 1908 |
+
it('should not call checkNextSpeaker when turn.run() yields an error', async () => {
|
| 1909 |
+
// Arrange
|
| 1910 |
+
const { checkNextSpeaker } = await import(
|
| 1911 |
+
'../utils/nextSpeakerChecker.js'
|
| 1912 |
+
);
|
| 1913 |
+
const mockCheckNextSpeaker = vi.mocked(checkNextSpeaker);
|
| 1914 |
+
|
| 1915 |
+
const mockStream = (async function* () {
|
| 1916 |
+
yield {
|
| 1917 |
+
type: GeminiEventType.Error,
|
| 1918 |
+
value: { error: { message: 'test error' } },
|
| 1919 |
+
};
|
| 1920 |
+
})();
|
| 1921 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 1922 |
+
|
| 1923 |
+
const mockChat: Partial<GeminiChat> = {
|
| 1924 |
+
addHistory: vi.fn(),
|
| 1925 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 1926 |
+
};
|
| 1927 |
+
client['chat'] = mockChat as GeminiChat;
|
| 1928 |
+
|
| 1929 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 1930 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 1931 |
+
generateContent: mockGenerateContentFn,
|
| 1932 |
+
};
|
| 1933 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 1934 |
+
|
| 1935 |
+
// Act
|
| 1936 |
+
const stream = client.sendMessageStream(
|
| 1937 |
+
[{ text: 'Hi' }],
|
| 1938 |
+
new AbortController().signal,
|
| 1939 |
+
'prompt-id-error',
|
| 1940 |
+
);
|
| 1941 |
+
for await (const _ of stream) {
|
| 1942 |
+
// consume stream
|
| 1943 |
+
}
|
| 1944 |
+
|
| 1945 |
+
// Assert
|
| 1946 |
+
expect(mockCheckNextSpeaker).not.toHaveBeenCalled();
|
| 1947 |
+
});
|
| 1948 |
+
|
| 1949 |
+
it('should not call checkNextSpeaker when turn.run() yields a value then an error', async () => {
|
| 1950 |
+
// Arrange
|
| 1951 |
+
const { checkNextSpeaker } = await import(
|
| 1952 |
+
'../utils/nextSpeakerChecker.js'
|
| 1953 |
+
);
|
| 1954 |
+
const mockCheckNextSpeaker = vi.mocked(checkNextSpeaker);
|
| 1955 |
+
|
| 1956 |
+
const mockStream = (async function* () {
|
| 1957 |
+
yield { type: GeminiEventType.Content, value: 'some content' };
|
| 1958 |
+
yield {
|
| 1959 |
+
type: GeminiEventType.Error,
|
| 1960 |
+
value: { error: { message: 'test error' } },
|
| 1961 |
+
};
|
| 1962 |
+
})();
|
| 1963 |
+
mockTurnRunFn.mockReturnValue(mockStream);
|
| 1964 |
+
|
| 1965 |
+
const mockChat: Partial<GeminiChat> = {
|
| 1966 |
+
addHistory: vi.fn(),
|
| 1967 |
+
getHistory: vi.fn().mockReturnValue([]),
|
| 1968 |
+
};
|
| 1969 |
+
client['chat'] = mockChat as GeminiChat;
|
| 1970 |
+
|
| 1971 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 1972 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }),
|
| 1973 |
+
generateContent: mockGenerateContentFn,
|
| 1974 |
+
};
|
| 1975 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 1976 |
+
|
| 1977 |
+
// Act
|
| 1978 |
+
const stream = client.sendMessageStream(
|
| 1979 |
+
[{ text: 'Hi' }],
|
| 1980 |
+
new AbortController().signal,
|
| 1981 |
+
'prompt-id-error',
|
| 1982 |
+
);
|
| 1983 |
+
for await (const _ of stream) {
|
| 1984 |
+
// consume stream
|
| 1985 |
+
}
|
| 1986 |
+
|
| 1987 |
+
// Assert
|
| 1988 |
+
expect(mockCheckNextSpeaker).not.toHaveBeenCalled();
|
| 1989 |
+
});
|
| 1990 |
+
});
|
| 1991 |
+
|
| 1992 |
+
describe('generateContent', () => {
|
| 1993 |
+
it('should use current model from config for content generation', async () => {
|
| 1994 |
+
const initialModel = client['config'].getModel();
|
| 1995 |
+
const contents = [{ role: 'user', parts: [{ text: 'test' }] }];
|
| 1996 |
+
const currentModel = initialModel + '-changed';
|
| 1997 |
+
|
| 1998 |
+
vi.spyOn(client['config'], 'getModel').mockReturnValueOnce(currentModel);
|
| 1999 |
+
|
| 2000 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 2001 |
+
countTokens: vi.fn().mockResolvedValue({ totalTokens: 1 }),
|
| 2002 |
+
generateContent: mockGenerateContentFn,
|
| 2003 |
+
};
|
| 2004 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 2005 |
+
|
| 2006 |
+
await client.generateContent(contents, {}, new AbortController().signal);
|
| 2007 |
+
|
| 2008 |
+
expect(mockGenerateContentFn).not.toHaveBeenCalledWith({
|
| 2009 |
+
model: initialModel,
|
| 2010 |
+
config: expect.any(Object),
|
| 2011 |
+
contents,
|
| 2012 |
+
});
|
| 2013 |
+
expect(mockGenerateContentFn).toHaveBeenCalledWith(
|
| 2014 |
+
{
|
| 2015 |
+
model: currentModel,
|
| 2016 |
+
config: expect.any(Object),
|
| 2017 |
+
contents,
|
| 2018 |
+
},
|
| 2019 |
+
'test-session-id',
|
| 2020 |
+
);
|
| 2021 |
+
});
|
| 2022 |
+
});
|
| 2023 |
+
|
| 2024 |
+
describe('tryCompressChat', () => {
|
| 2025 |
+
it('should use current model from config for token counting after sendMessage', async () => {
|
| 2026 |
+
const initialModel = client['config'].getModel();
|
| 2027 |
+
|
| 2028 |
+
const mockCountTokens = vi
|
| 2029 |
+
.fn()
|
| 2030 |
+
.mockResolvedValueOnce({ totalTokens: 100000 })
|
| 2031 |
+
.mockResolvedValueOnce({ totalTokens: 5000 });
|
| 2032 |
+
|
| 2033 |
+
const mockSendMessage = vi.fn().mockResolvedValue({ text: 'Summary' });
|
| 2034 |
+
|
| 2035 |
+
const mockChatHistory = [
|
| 2036 |
+
{ role: 'user', parts: [{ text: 'Long conversation' }] },
|
| 2037 |
+
{ role: 'model', parts: [{ text: 'Long response' }] },
|
| 2038 |
+
];
|
| 2039 |
+
|
| 2040 |
+
const mockChat: Partial<GeminiChat> = {
|
| 2041 |
+
getHistory: vi.fn().mockReturnValue(mockChatHistory),
|
| 2042 |
+
setHistory: vi.fn(),
|
| 2043 |
+
sendMessage: mockSendMessage,
|
| 2044 |
+
};
|
| 2045 |
+
|
| 2046 |
+
const mockGenerator: Partial<ContentGenerator> = {
|
| 2047 |
+
countTokens: mockCountTokens,
|
| 2048 |
+
};
|
| 2049 |
+
|
| 2050 |
+
// mock the model has been changed between calls of `countTokens`
|
| 2051 |
+
const firstCurrentModel = initialModel + '-changed-1';
|
| 2052 |
+
const secondCurrentModel = initialModel + '-changed-2';
|
| 2053 |
+
vi.spyOn(client['config'], 'getModel')
|
| 2054 |
+
.mockReturnValueOnce(firstCurrentModel)
|
| 2055 |
+
.mockReturnValueOnce(secondCurrentModel);
|
| 2056 |
+
|
| 2057 |
+
client['chat'] = mockChat as GeminiChat;
|
| 2058 |
+
client['contentGenerator'] = mockGenerator as ContentGenerator;
|
| 2059 |
+
client['startChat'] = vi.fn().mockResolvedValue(mockChat);
|
| 2060 |
+
|
| 2061 |
+
const result = await client.tryCompressChat('prompt-id-4', true);
|
| 2062 |
+
|
| 2063 |
+
expect(mockCountTokens).toHaveBeenCalledTimes(2);
|
| 2064 |
+
expect(mockCountTokens).toHaveBeenNthCalledWith(1, {
|
| 2065 |
+
model: firstCurrentModel,
|
| 2066 |
+
contents: mockChatHistory,
|
| 2067 |
+
});
|
| 2068 |
+
expect(mockCountTokens).toHaveBeenNthCalledWith(2, {
|
| 2069 |
+
model: secondCurrentModel,
|
| 2070 |
+
contents: expect.any(Array),
|
| 2071 |
+
});
|
| 2072 |
+
|
| 2073 |
+
expect(result).toEqual({
|
| 2074 |
+
originalTokenCount: 100000,
|
| 2075 |
+
newTokenCount: 5000,
|
| 2076 |
+
});
|
| 2077 |
+
});
|
| 2078 |
+
});
|
| 2079 |
+
|
| 2080 |
+
describe('handleFlashFallback', () => {
|
| 2081 |
+
it('should use current model from config when checking for fallback', async () => {
|
| 2082 |
+
const initialModel = client['config'].getModel();
|
| 2083 |
+
const fallbackModel = DEFAULT_GEMINI_FLASH_MODEL;
|
| 2084 |
+
|
| 2085 |
+
// mock config been changed
|
| 2086 |
+
const currentModel = initialModel + '-changed';
|
| 2087 |
+
const getModelSpy = vi.spyOn(client['config'], 'getModel');
|
| 2088 |
+
getModelSpy.mockReturnValue(currentModel);
|
| 2089 |
+
|
| 2090 |
+
const mockFallbackHandler = vi.fn().mockResolvedValue(true);
|
| 2091 |
+
client['config'].flashFallbackHandler = mockFallbackHandler;
|
| 2092 |
+
client['config'].setModel = vi.fn();
|
| 2093 |
+
|
| 2094 |
+
const result = await client['handleFlashFallback'](
|
| 2095 |
+
AuthType.LOGIN_WITH_GOOGLE,
|
| 2096 |
+
);
|
| 2097 |
+
|
| 2098 |
+
expect(result).toBe(fallbackModel);
|
| 2099 |
+
|
| 2100 |
+
expect(mockFallbackHandler).toHaveBeenCalledWith(
|
| 2101 |
+
currentModel,
|
| 2102 |
+
fallbackModel,
|
| 2103 |
+
undefined,
|
| 2104 |
+
);
|
| 2105 |
+
});
|
| 2106 |
+
});
|
| 2107 |
+
|
| 2108 |
+
  // Covers GeminiClient.setHistory and its optional thought-stripping pass.
  // `client` is the GeminiClient fixture created in the enclosing suite's setup.
  describe('setHistory', () => {
    it('should strip thought signatures when stripThoughts is true', () => {
      // Stub the underlying chat so we can inspect what history is forwarded.
      const mockChat = {
        setHistory: vi.fn(),
      };
      client['chat'] = mockChat as unknown as GeminiChat;

      // Model parts carry thoughtSignature on both a text part and a
      // functionCall part; both must be removed.
      const historyWithThoughts: Content[] = [
        {
          role: 'user',
          parts: [{ text: 'hello' }],
        },
        {
          role: 'model',
          parts: [
            { text: 'thinking...', thoughtSignature: 'thought-123' },
            {
              functionCall: { name: 'test', args: {} },
              thoughtSignature: 'thought-456',
            },
          ],
        },
      ];

      client.setHistory(historyWithThoughts, { stripThoughts: true });

      // Same shape as the input, but with every thoughtSignature key dropped.
      const expectedHistory: Content[] = [
        {
          role: 'user',
          parts: [{ text: 'hello' }],
        },
        {
          role: 'model',
          parts: [
            { text: 'thinking...' },
            { functionCall: { name: 'test', args: {} } },
          ],
        },
      ];

      expect(mockChat.setHistory).toHaveBeenCalledWith(expectedHistory);
    });

    it('should not strip thought signatures when stripThoughts is false', () => {
      const mockChat = {
        setHistory: vi.fn(),
      };
      client['chat'] = mockChat as unknown as GeminiChat;

      const historyWithThoughts: Content[] = [
        {
          role: 'user',
          parts: [{ text: 'hello' }],
        },
        {
          role: 'model',
          parts: [
            { text: 'thinking...', thoughtSignature: 'thought-123' },
            { text: 'ok', thoughtSignature: 'thought-456' },
          ],
        },
      ];

      client.setHistory(historyWithThoughts, { stripThoughts: false });

      // History must be forwarded untouched (same object, signatures intact).
      expect(mockChat.setHistory).toHaveBeenCalledWith(historyWithThoughts);
    });
  });
|
| 2176 |
+
});
|
projects/ui/qwen-code/packages/core/src/core/client.ts
ADDED
|
@@ -0,0 +1,1001 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import {
|
| 8 |
+
EmbedContentParameters,
|
| 9 |
+
GenerateContentConfig,
|
| 10 |
+
PartListUnion,
|
| 11 |
+
Content,
|
| 12 |
+
Tool,
|
| 13 |
+
GenerateContentResponse,
|
| 14 |
+
FunctionDeclaration,
|
| 15 |
+
Schema,
|
| 16 |
+
} from '@google/genai';
|
| 17 |
+
import {
|
| 18 |
+
getDirectoryContextString,
|
| 19 |
+
getEnvironmentContext,
|
| 20 |
+
} from '../utils/environmentContext.js';
|
| 21 |
+
import {
|
| 22 |
+
Turn,
|
| 23 |
+
ServerGeminiStreamEvent,
|
| 24 |
+
GeminiEventType,
|
| 25 |
+
ChatCompressionInfo,
|
| 26 |
+
} from './turn.js';
|
| 27 |
+
import { Config } from '../config/config.js';
|
| 28 |
+
import { UserTierId } from '../code_assist/types.js';
|
| 29 |
+
import { getCoreSystemPrompt, getCompressionPrompt } from './prompts.js';
|
| 30 |
+
import { checkNextSpeaker } from '../utils/nextSpeakerChecker.js';
|
| 31 |
+
import { reportError } from '../utils/errorReporting.js';
|
| 32 |
+
import { GeminiChat } from './geminiChat.js';
|
| 33 |
+
import { retryWithBackoff } from '../utils/retry.js';
|
| 34 |
+
import { getErrorMessage } from '../utils/errors.js';
|
| 35 |
+
import { isFunctionResponse } from '../utils/messageInspectors.js';
|
| 36 |
+
import { tokenLimit } from './tokenLimits.js';
|
| 37 |
+
import {
|
| 38 |
+
AuthType,
|
| 39 |
+
ContentGenerator,
|
| 40 |
+
ContentGeneratorConfig,
|
| 41 |
+
createContentGenerator,
|
| 42 |
+
} from './contentGenerator.js';
|
| 43 |
+
import { getFunctionCalls } from '../utils/generateContentResponseUtilities.js';
|
| 44 |
+
import { ProxyAgent, setGlobalDispatcher } from 'undici';
|
| 45 |
+
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
|
| 46 |
+
import { LoopDetectionService } from '../services/loopDetectionService.js';
|
| 47 |
+
import { ideContext } from '../ide/ideContext.js';
|
| 48 |
+
import {
|
| 49 |
+
logChatCompression,
|
| 50 |
+
logNextSpeakerCheck,
|
| 51 |
+
} from '../telemetry/loggers.js';
|
| 52 |
+
import {
|
| 53 |
+
makeChatCompressionEvent,
|
| 54 |
+
NextSpeakerCheckEvent,
|
| 55 |
+
} from '../telemetry/types.js';
|
| 56 |
+
import { IdeContext, File } from '../ide/ideContext.js';
|
| 57 |
+
|
| 58 |
+
function isThinkingSupported(model: string) {
|
| 59 |
+
if (model.startsWith('gemini-2.5')) return true;
|
| 60 |
+
return false;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
/**
|
| 64 |
+
* Returns the index of the content after the fraction of the total characters in the history.
|
| 65 |
+
*
|
| 66 |
+
* Exported for testing purposes.
|
| 67 |
+
*/
|
| 68 |
+
export function findIndexAfterFraction(
|
| 69 |
+
history: Content[],
|
| 70 |
+
fraction: number,
|
| 71 |
+
): number {
|
| 72 |
+
if (fraction <= 0 || fraction >= 1) {
|
| 73 |
+
throw new Error('Fraction must be between 0 and 1');
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
const contentLengths = history.map(
|
| 77 |
+
(content) => JSON.stringify(content).length,
|
| 78 |
+
);
|
| 79 |
+
|
| 80 |
+
const totalCharacters = contentLengths.reduce(
|
| 81 |
+
(sum, length) => sum + length,
|
| 82 |
+
0,
|
| 83 |
+
);
|
| 84 |
+
const targetCharacters = totalCharacters * fraction;
|
| 85 |
+
|
| 86 |
+
let charactersSoFar = 0;
|
| 87 |
+
for (let i = 0; i < contentLengths.length; i++) {
|
| 88 |
+
charactersSoFar += contentLengths[i];
|
| 89 |
+
if (charactersSoFar >= targetCharacters) {
|
| 90 |
+
return i;
|
| 91 |
+
}
|
| 92 |
+
}
|
| 93 |
+
return contentLengths.length;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
// Hard upper bound on agentic turns within a single sendMessageStream call
// chain; the recursion depth is clamped to this to prevent runaway loops.
const MAX_TURNS = 100;

/**
 * Threshold for compression token count as a fraction of the model's token limit.
 * If the chat history exceeds this threshold, it will be compressed.
 */
const COMPRESSION_TOKEN_THRESHOLD = 0.7;

/**
 * The fraction of the latest chat history to keep. A value of 0.3
 * means that only the last 30% of the chat history will be kept after compression.
 */
const COMPRESSION_PRESERVE_THRESHOLD = 0.3;
|
| 109 |
+
|
| 110 |
+
export class GeminiClient {
|
| 111 |
+
private chat?: GeminiChat;
|
| 112 |
+
private contentGenerator?: ContentGenerator;
|
| 113 |
+
private embeddingModel: string;
|
| 114 |
+
private generateContentConfig: GenerateContentConfig = {
|
| 115 |
+
temperature: 0,
|
| 116 |
+
topP: 1,
|
| 117 |
+
};
|
| 118 |
+
private sessionTurnCount = 0;
|
| 119 |
+
|
| 120 |
+
private readonly loopDetector: LoopDetectionService;
|
| 121 |
+
private lastPromptId: string;
|
| 122 |
+
private lastSentIdeContext: IdeContext | undefined;
|
| 123 |
+
private forceFullIdeContext = true;
|
| 124 |
+
|
| 125 |
+
constructor(private config: Config) {
|
| 126 |
+
if (config.getProxy()) {
|
| 127 |
+
setGlobalDispatcher(new ProxyAgent(config.getProxy() as string));
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
this.embeddingModel = config.getEmbeddingModel();
|
| 131 |
+
this.loopDetector = new LoopDetectionService(config);
|
| 132 |
+
this.lastPromptId = this.config.getSessionId();
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
  /**
   * Creates the content generator from the given config and starts a fresh
   * chat session. Must complete before any send/generate call.
   */
  async initialize(contentGeneratorConfig: ContentGeneratorConfig) {
    this.contentGenerator = await createContentGenerator(
      contentGeneratorConfig,
      this.config,
      this.config.getSessionId(),
    );
    this.chat = await this.startChat();
  }
|
| 143 |
+
|
| 144 |
+
getContentGenerator(): ContentGenerator {
|
| 145 |
+
if (!this.contentGenerator) {
|
| 146 |
+
throw new Error('Content generator not initialized');
|
| 147 |
+
}
|
| 148 |
+
return this.contentGenerator;
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
  // User tier reported by the content generator, or undefined before
  // initialization / when the backend does not provide one.
  getUserTier(): UserTierId | undefined {
    return this.contentGenerator?.userTier;
  }
|
| 154 |
+
|
| 155 |
+
  // Appends one content entry to the active chat's history.
  // Throws (via getChat) if no chat session has been started.
  async addHistory(content: Content) {
    this.getChat().addHistory(content);
  }
|
| 158 |
+
|
| 159 |
+
getChat(): GeminiChat {
|
| 160 |
+
if (!this.chat) {
|
| 161 |
+
throw new Error('Chat not initialized');
|
| 162 |
+
}
|
| 163 |
+
return this.chat;
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
isInitialized(): boolean {
|
| 167 |
+
return this.chat !== undefined && this.contentGenerator !== undefined;
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
  // Returns the active chat's history; throws (via getChat) if uninitialized.
  getHistory(): Content[] {
    return this.getChat().getHistory();
  }
|
| 173 |
+
|
| 174 |
+
setHistory(
|
| 175 |
+
history: Content[],
|
| 176 |
+
{ stripThoughts = false }: { stripThoughts?: boolean } = {},
|
| 177 |
+
) {
|
| 178 |
+
const historyToSet = stripThoughts
|
| 179 |
+
? history.map((content) => {
|
| 180 |
+
const newContent = { ...content };
|
| 181 |
+
if (newContent.parts) {
|
| 182 |
+
newContent.parts = newContent.parts.map((part) => {
|
| 183 |
+
if (
|
| 184 |
+
part &&
|
| 185 |
+
typeof part === 'object' &&
|
| 186 |
+
'thoughtSignature' in part
|
| 187 |
+
) {
|
| 188 |
+
const newPart = { ...part };
|
| 189 |
+
delete (newPart as { thoughtSignature?: string })
|
| 190 |
+
.thoughtSignature;
|
| 191 |
+
return newPart;
|
| 192 |
+
}
|
| 193 |
+
return part;
|
| 194 |
+
});
|
| 195 |
+
}
|
| 196 |
+
return newContent;
|
| 197 |
+
})
|
| 198 |
+
: history;
|
| 199 |
+
this.getChat().setHistory(historyToSet);
|
| 200 |
+
this.forceFullIdeContext = true;
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
  /**
   * Re-publishes the tool registry's current function declarations to the
   * active chat so the model sees the up-to-date tool set.
   */
  async setTools(): Promise<void> {
    const toolRegistry = this.config.getToolRegistry();
    const toolDeclarations = toolRegistry.getFunctionDeclarations();
    const tools: Tool[] = [{ functionDeclarations: toolDeclarations }];
    this.getChat().setTools(tools);
  }
|
| 209 |
+
|
| 210 |
+
  // Discards the current conversation and starts a brand-new chat session.
  async resetChat(): Promise<void> {
    this.chat = await this.startChat();
  }
|
| 213 |
+
|
| 214 |
+
  /**
   * Appends the current workspace directory context to the chat history as a
   * user message. No-op when no chat session exists yet.
   */
  async addDirectoryContext(): Promise<void> {
    if (!this.chat) {
      return;
    }

    this.getChat().addHistory({
      role: 'user',
      parts: [{ text: await getDirectoryContextString(this.config) }],
    });
  }
|
| 224 |
+
|
| 225 |
+
  /**
   * Builds a new GeminiChat seeded with environment context, the tool
   * declarations, and any extra history supplied by the caller.
   *
   * @param extraHistory Additional history entries appended after the seeded
   *   environment exchange.
   * @returns A fresh chat session.
   * @throws Error (after reporting) if chat construction fails.
   */
  async startChat(extraHistory?: Content[]): Promise<GeminiChat> {
    // A new session must resend full IDE context on the first message.
    this.forceFullIdeContext = true;
    const envParts = await getEnvironmentContext(this.config);
    const toolRegistry = this.config.getToolRegistry();
    const toolDeclarations = toolRegistry.getFunctionDeclarations();
    const tools: Tool[] = [{ functionDeclarations: toolDeclarations }];
    // Seed history: environment context from the "user", acknowledged by the
    // "model", then whatever the caller wants to carry over.
    const history: Content[] = [
      {
        role: 'user',
        parts: envParts,
      },
      {
        role: 'model',
        parts: [{ text: 'Got it. Thanks for the context!' }],
      },
      ...(extraHistory ?? []),
    ];
    try {
      const userMemory = this.config.getUserMemory();
      const systemInstruction = getCoreSystemPrompt(userMemory);
      // Enable thought output only for models that support it.
      const generateContentConfigWithThinking = isThinkingSupported(
        this.config.getModel(),
      )
        ? {
            ...this.generateContentConfig,
            thinkingConfig: {
              includeThoughts: true,
            },
          }
        : this.generateContentConfig;
      return new GeminiChat(
        this.config,
        this.getContentGenerator(),
        {
          systemInstruction,
          ...generateContentConfigWithThinking,
          tools,
        },
        history,
      );
    } catch (error) {
      await reportError(
        error,
        'Error initializing Gemini chat session.',
        history,
        'startChat',
      );
      throw new Error(`Failed to initialize chat: ${getErrorMessage(error)}`);
    }
  }
|
| 275 |
+
|
| 276 |
+
  /**
   * Builds the IDE-context message to attach to the next user turn.
   *
   * On the first send (or when `forceFullContext` is set) the full editor
   * state is serialized as JSON; on later sends only a delta against
   * `lastSentIdeContext` (files opened/closed, active file, cursor,
   * selection) is produced. Returns no parts when there is nothing to say.
   *
   * @param forceFullContext When true, always emit the full context.
   * @returns The message lines to send (possibly empty) and the context
   *   snapshot the caller should store as `lastSentIdeContext`.
   */
  private getIdeContextParts(forceFullContext: boolean): {
    contextParts: string[];
    newIdeContext: IdeContext | undefined;
  } {
    const currentIdeContext = ideContext.getIdeContext();
    if (!currentIdeContext) {
      return { contextParts: [], newIdeContext: undefined };
    }

    if (forceFullContext || !this.lastSentIdeContext) {
      // Send full context as JSON
      const openFiles = currentIdeContext.workspaceState?.openFiles || [];
      const activeFile = openFiles.find((f) => f.isActive);
      const otherOpenFiles = openFiles
        .filter((f) => !f.isActive)
        .map((f) => f.path);

      const contextData: Record<string, unknown> = {};

      if (activeFile) {
        contextData['activeFile'] = {
          path: activeFile.path,
          cursor: activeFile.cursor
            ? {
                line: activeFile.cursor.line,
                character: activeFile.cursor.character,
              }
            : undefined,
          // Empty selection is normalized to undefined (omitted from JSON).
          selectedText: activeFile.selectedText || undefined,
        };
      }

      if (otherOpenFiles.length > 0) {
        contextData['otherOpenFiles'] = otherOpenFiles;
      }

      // Nothing open at all: send nothing, but still record the snapshot.
      if (Object.keys(contextData).length === 0) {
        return { contextParts: [], newIdeContext: currentIdeContext };
      }

      const jsonString = JSON.stringify(contextData, null, 2);
      const contextParts = [
        "Here is the user's editor context as a JSON object. This is for your information only.",
        '```json',
        jsonString,
        '```',
      ];

      if (this.config.getDebugMode()) {
        console.log(contextParts.join('\n'));
      }
      return {
        contextParts,
        newIdeContext: currentIdeContext,
      };
    } else {
      // Calculate and send delta as JSON
      const delta: Record<string, unknown> = {};
      const changes: Record<string, unknown> = {};

      // Index previous and current open files by path for set-difference.
      const lastFiles = new Map(
        (this.lastSentIdeContext.workspaceState?.openFiles || []).map(
          (f: File) => [f.path, f],
        ),
      );
      const currentFiles = new Map(
        (currentIdeContext.workspaceState?.openFiles || []).map((f: File) => [
          f.path,
          f,
        ]),
      );

      const openedFiles: string[] = [];
      for (const [path] of currentFiles.entries()) {
        if (!lastFiles.has(path)) {
          openedFiles.push(path);
        }
      }
      if (openedFiles.length > 0) {
        changes['filesOpened'] = openedFiles;
      }

      const closedFiles: string[] = [];
      for (const [path] of lastFiles.entries()) {
        if (!currentFiles.has(path)) {
          closedFiles.push(path);
        }
      }
      if (closedFiles.length > 0) {
        changes['filesClosed'] = closedFiles;
      }

      const lastActiveFile = (
        this.lastSentIdeContext.workspaceState?.openFiles || []
      ).find((f: File) => f.isActive);
      const currentActiveFile = (
        currentIdeContext.workspaceState?.openFiles || []
      ).find((f: File) => f.isActive);

      if (currentActiveFile) {
        if (!lastActiveFile || lastActiveFile.path !== currentActiveFile.path) {
          // Active file changed: report its full position/selection state.
          changes['activeFileChanged'] = {
            path: currentActiveFile.path,
            cursor: currentActiveFile.cursor
              ? {
                  line: currentActiveFile.cursor.line,
                  character: currentActiveFile.cursor.character,
                }
              : undefined,
            selectedText: currentActiveFile.selectedText || undefined,
          };
        } else {
          // Same active file: report cursor and selection changes separately.
          const lastCursor = lastActiveFile.cursor;
          const currentCursor = currentActiveFile.cursor;
          if (
            currentCursor &&
            (!lastCursor ||
              lastCursor.line !== currentCursor.line ||
              lastCursor.character !== currentCursor.character)
          ) {
            changes['cursorMoved'] = {
              path: currentActiveFile.path,
              cursor: {
                line: currentCursor.line,
                character: currentCursor.character,
              },
            };
          }

          const lastSelectedText = lastActiveFile.selectedText || '';
          const currentSelectedText = currentActiveFile.selectedText || '';
          if (lastSelectedText !== currentSelectedText) {
            changes['selectionChanged'] = {
              path: currentActiveFile.path,
              selectedText: currentSelectedText,
            };
          }
        }
      } else if (lastActiveFile) {
        // No active file any more; report which one lost focus.
        changes['activeFileChanged'] = {
          path: null,
          previousPath: lastActiveFile.path,
        };
      }

      // No observable change since last send: emit nothing.
      if (Object.keys(changes).length === 0) {
        return { contextParts: [], newIdeContext: currentIdeContext };
      }

      delta['changes'] = changes;
      const jsonString = JSON.stringify(delta, null, 2);
      const contextParts = [
        "Here is a summary of changes in the user's editor context, in JSON format. This is for your information only.",
        '```json',
        jsonString,
        '```',
      ];

      if (this.config.getDebugMode()) {
        console.log(contextParts.join('\n'));
      }
      return {
        contextParts,
        newIdeContext: currentIdeContext,
      };
    }
  }
|
| 443 |
+
|
| 444 |
+
  /**
   * Sends a user request to the model, streaming events back to the caller.
   * May recurse (with `turns - 1`) when the next-speaker check decides the
   * model should continue. Returns the top-level Turn when the stream ends.
   *
   * @param request Parts to send as the user message.
   * @param signal Abort signal; stops continuation when aborted.
   * @param prompt_id Identifier for this prompt (loop-detection scope).
   * @param turns Remaining agentic turns; clamped to MAX_TURNS.
   * @param originalModel Model active at the first call, used to detect a
   *   mid-stream model switch (e.g. quota fallback) and stop recursing.
   */
  async *sendMessageStream(
    request: PartListUnion,
    signal: AbortSignal,
    prompt_id: string,
    turns: number = MAX_TURNS,
    originalModel?: string,
  ): AsyncGenerator<ServerGeminiStreamEvent, Turn> {
    // New prompt id: reset the loop detector's per-prompt state.
    if (this.lastPromptId !== prompt_id) {
      this.loopDetector.reset(prompt_id);
      this.lastPromptId = prompt_id;
    }
    this.sessionTurnCount++;
    if (
      this.config.getMaxSessionTurns() > 0 &&
      this.sessionTurnCount > this.config.getMaxSessionTurns()
    ) {
      yield { type: GeminiEventType.MaxSessionTurns };
      return new Turn(this.getChat(), prompt_id);
    }
    // Ensure turns never exceeds MAX_TURNS to prevent infinite loops.
    // NOTE(review): this guard only stops at exactly 0; a negative `turns`
    // from a caller would slip through — confirm callers never pass one.
    const boundedTurns = Math.min(turns, MAX_TURNS);
    if (!boundedTurns) {
      return new Turn(this.getChat(), prompt_id);
    }

    // Track the original model from the first call to detect model switching
    const initialModel = originalModel || this.config.getModel();

    const compressed = await this.tryCompressChat(prompt_id);

    if (compressed) {
      yield { type: GeminiEventType.ChatCompressed, value: compressed };
    }

    // Check session token limit after compression using accurate token counting
    const sessionTokenLimit = this.config.getSessionTokenLimit();
    if (sessionTokenLimit > 0) {
      // Get all the content that would be sent in an API call
      const currentHistory = this.getChat().getHistory(true);
      const userMemory = this.config.getUserMemory();
      const systemPrompt = getCoreSystemPrompt(userMemory);
      const environment = await getEnvironmentContext(this.config);

      // Create a mock request content to count total tokens
      const mockRequestContent = [
        {
          role: 'system' as const,
          parts: [{ text: systemPrompt }, ...environment],
        },
        ...currentHistory,
      ];

      // Use the improved countTokens method for accurate counting
      const { totalTokens: totalRequestTokens } =
        await this.getContentGenerator().countTokens({
          model: this.config.getModel(),
          contents: mockRequestContent,
        });

      if (
        totalRequestTokens !== undefined &&
        totalRequestTokens > sessionTokenLimit
      ) {
        yield {
          type: GeminiEventType.SessionTokenLimitExceeded,
          value: {
            currentTokens: totalRequestTokens,
            limit: sessionTokenLimit,
            message:
              `Session token limit exceeded: ${totalRequestTokens} tokens > ${sessionTokenLimit} limit. ` +
              'Please start a new session or increase the sessionTokenLimit in your settings.json.',
          },
        };
        return new Turn(this.getChat(), prompt_id);
      }
    }

    // Prevent context updates from being sent while a tool call is
    // waiting for a response. The Qwen API requires that a functionResponse
    // part from the user immediately follows a functionCall part from the
    // model in the conversation history. The IDE context is not discarded;
    // it will be included in the next regular message sent to the model.
    const history = this.getHistory();
    const lastMessage =
      history.length > 0 ? history[history.length - 1] : undefined;
    const hasPendingToolCall =
      !!lastMessage &&
      lastMessage.role === 'model' &&
      (lastMessage.parts?.some((p) => 'functionCall' in p) || false);

    if (this.config.getIdeMode() && !hasPendingToolCall) {
      const { contextParts, newIdeContext } = this.getIdeContextParts(
        this.forceFullIdeContext || history.length === 0,
      );
      if (contextParts.length > 0) {
        this.getChat().addHistory({
          role: 'user',
          parts: [{ text: contextParts.join('\n') }],
        });
      }
      this.lastSentIdeContext = newIdeContext;
      this.forceFullIdeContext = false;
    }

    const turn = new Turn(this.getChat(), prompt_id);

    const loopDetected = await this.loopDetector.turnStarted(signal);
    if (loopDetected) {
      yield { type: GeminiEventType.LoopDetected };
      return turn;
    }

    // Relay the turn's events, aborting on loop detection or error.
    const resultStream = turn.run(request, signal);
    for await (const event of resultStream) {
      if (this.loopDetector.addAndCheck(event)) {
        yield { type: GeminiEventType.LoopDetected };
        return turn;
      }
      yield event;
      if (event.type === GeminiEventType.Error) {
        return turn;
      }
    }
    if (!turn.pendingToolCalls.length && signal && !signal.aborted) {
      // Check if model was switched during the call (likely due to quota error)
      const currentModel = this.config.getModel();
      if (currentModel !== initialModel) {
        // Model was switched (likely due to quota error fallback)
        // Don't continue with recursive call to prevent unwanted Flash execution
        return turn;
      }

      if (this.config.getSkipNextSpeakerCheck()) {
        return turn;
      }

      // Ask whether the model should keep talking; continue if so.
      const nextSpeakerCheck = await checkNextSpeaker(
        this.getChat(),
        this,
        signal,
      );
      logNextSpeakerCheck(
        this.config,
        new NextSpeakerCheckEvent(
          prompt_id,
          turn.finishReason?.toString() || '',
          nextSpeakerCheck?.next_speaker || '',
        ),
      );
      if (nextSpeakerCheck?.next_speaker === 'model') {
        const nextRequest = [{ text: 'Please continue.' }];
        // This recursive call's events will be yielded out, but the final
        // turn object will be from the top-level call.
        yield* this.sendMessageStream(
          nextRequest,
          signal,
          prompt_id,
          boundedTurns - 1,
          initialModel,
        );
      }
    }
    return turn;
  }
|
| 608 |
+
|
| 609 |
+
  /**
   * Requests a structured response matching `schema` by exposing a single
   * forced-choice tool ("respond_in_schema") and returning its arguments.
   *
   * @param contents Conversation contents to send.
   * @param schema JSON schema the response must satisfy.
   * @param abortSignal Cancels the request; abort errors are rethrown as-is.
   * @param model Optional model override; falls back to the configured model,
   *   then the default Flash model.
   * @param config Extra generation config merged over the client defaults.
   * @returns The tool-call arguments, or `{}` when the model produced no
   *   matching function call.
   * @throws Error (after reporting) on API failure.
   */
  async generateJson(
    contents: Content[],
    schema: Record<string, unknown>,
    abortSignal: AbortSignal,
    model?: string,
    config: GenerateContentConfig = {},
  ): Promise<Record<string, unknown>> {
    // Use current model from config instead of hardcoded Flash model
    const modelToUse =
      model || this.config.getModel() || DEFAULT_GEMINI_FLASH_MODEL;
    try {
      const userMemory = this.config.getUserMemory();
      const systemInstruction = getCoreSystemPrompt(userMemory);
      const requestConfig = {
        abortSignal,
        ...this.generateContentConfig,
        ...config,
      };

      // Convert schema to function declaration
      const functionDeclaration: FunctionDeclaration = {
        name: 'respond_in_schema',
        description: 'Provide the response in provided schema',
        parameters: schema as Schema,
      };

      const tools: Tool[] = [
        {
          functionDeclarations: [functionDeclaration],
        },
      ];

      const apiCall = () =>
        this.getContentGenerator().generateContent(
          {
            model: modelToUse,
            config: {
              ...requestConfig,
              systemInstruction,
              tools,
            },
            contents,
          },
          this.lastPromptId,
        );

      // Retry transient failures; on persistent 429s, try the Flash fallback.
      const result = await retryWithBackoff(apiCall, {
        onPersistent429: async (authType?: string, error?: unknown) =>
          await this.handleFlashFallback(authType, error),
        authType: this.config.getContentGeneratorConfig()?.authType,
      });
      const functionCalls = getFunctionCalls(result);
      if (functionCalls && functionCalls.length > 0) {
        const functionCall = functionCalls.find(
          (call) => call.name === 'respond_in_schema',
        );
        if (functionCall && functionCall.args) {
          return functionCall.args as Record<string, unknown>;
        }
      }
      // No schema-conforming call: return an empty object rather than throw.
      return {};
    } catch (error) {
      if (abortSignal.aborted) {
        throw error;
      }

      // Avoid double reporting for the empty response case handled above
      if (
        error instanceof Error &&
        error.message === 'API returned an empty response for generateJson.'
      ) {
        throw error;
      }

      await reportError(
        error,
        'Error generating JSON content via API.',
        contents,
        'generateJson-api',
      );
      throw new Error(
        `Failed to generate JSON content: ${getErrorMessage(error)}`,
      );
    }
  }
|
| 694 |
+
|
| 695 |
+
async generateContent(
|
| 696 |
+
contents: Content[],
|
| 697 |
+
generationConfig: GenerateContentConfig,
|
| 698 |
+
abortSignal: AbortSignal,
|
| 699 |
+
model?: string,
|
| 700 |
+
): Promise<GenerateContentResponse> {
|
| 701 |
+
const modelToUse = model ?? this.config.getModel();
|
| 702 |
+
const configToUse: GenerateContentConfig = {
|
| 703 |
+
...this.generateContentConfig,
|
| 704 |
+
...generationConfig,
|
| 705 |
+
};
|
| 706 |
+
|
| 707 |
+
try {
|
| 708 |
+
const userMemory = this.config.getUserMemory();
|
| 709 |
+
const systemInstruction = getCoreSystemPrompt(userMemory);
|
| 710 |
+
|
| 711 |
+
const requestConfig: GenerateContentConfig = {
|
| 712 |
+
abortSignal,
|
| 713 |
+
...configToUse,
|
| 714 |
+
systemInstruction,
|
| 715 |
+
};
|
| 716 |
+
|
| 717 |
+
const apiCall = () =>
|
| 718 |
+
this.getContentGenerator().generateContent(
|
| 719 |
+
{
|
| 720 |
+
model: modelToUse,
|
| 721 |
+
config: requestConfig,
|
| 722 |
+
contents,
|
| 723 |
+
},
|
| 724 |
+
this.lastPromptId,
|
| 725 |
+
);
|
| 726 |
+
|
| 727 |
+
const result = await retryWithBackoff(apiCall, {
|
| 728 |
+
onPersistent429: async (authType?: string, error?: unknown) =>
|
| 729 |
+
await this.handleFlashFallback(authType, error),
|
| 730 |
+
authType: this.config.getContentGeneratorConfig()?.authType,
|
| 731 |
+
});
|
| 732 |
+
return result;
|
| 733 |
+
} catch (error: unknown) {
|
| 734 |
+
if (abortSignal.aborted) {
|
| 735 |
+
throw error;
|
| 736 |
+
}
|
| 737 |
+
|
| 738 |
+
await reportError(
|
| 739 |
+
error,
|
| 740 |
+
`Error generating content via API with model ${modelToUse}.`,
|
| 741 |
+
{
|
| 742 |
+
requestContents: contents,
|
| 743 |
+
requestConfig: configToUse,
|
| 744 |
+
},
|
| 745 |
+
'generateContent-api',
|
| 746 |
+
);
|
| 747 |
+
throw new Error(
|
| 748 |
+
`Failed to generate content with model ${modelToUse}: ${getErrorMessage(error)}`,
|
| 749 |
+
);
|
| 750 |
+
}
|
| 751 |
+
}
|
| 752 |
+
|
| 753 |
+
async generateEmbedding(texts: string[]): Promise<number[][]> {
|
| 754 |
+
if (!texts || texts.length === 0) {
|
| 755 |
+
return [];
|
| 756 |
+
}
|
| 757 |
+
const embedModelParams: EmbedContentParameters = {
|
| 758 |
+
model: this.embeddingModel,
|
| 759 |
+
contents: texts,
|
| 760 |
+
};
|
| 761 |
+
|
| 762 |
+
const embedContentResponse =
|
| 763 |
+
await this.getContentGenerator().embedContent(embedModelParams);
|
| 764 |
+
if (
|
| 765 |
+
!embedContentResponse.embeddings ||
|
| 766 |
+
embedContentResponse.embeddings.length === 0
|
| 767 |
+
) {
|
| 768 |
+
throw new Error('No embeddings found in API response.');
|
| 769 |
+
}
|
| 770 |
+
|
| 771 |
+
if (embedContentResponse.embeddings.length !== texts.length) {
|
| 772 |
+
throw new Error(
|
| 773 |
+
`API returned a mismatched number of embeddings. Expected ${texts.length}, got ${embedContentResponse.embeddings.length}.`,
|
| 774 |
+
);
|
| 775 |
+
}
|
| 776 |
+
|
| 777 |
+
return embedContentResponse.embeddings.map((embedding, index) => {
|
| 778 |
+
const values = embedding.values;
|
| 779 |
+
if (!values || values.length === 0) {
|
| 780 |
+
throw new Error(
|
| 781 |
+
`API returned an empty embedding for input text at index ${index}: "${texts[index]}"`,
|
| 782 |
+
);
|
| 783 |
+
}
|
| 784 |
+
return values;
|
| 785 |
+
});
|
| 786 |
+
}
|
| 787 |
+
|
| 788 |
+
/**
 * Attempts to compress the chat history by summarizing its older portion.
 *
 * Unless `force` is true, compression only happens when the current token
 * count exceeds the configured fraction of the model's token limit. The
 * older part of the history is replaced by a model-generated summary; the
 * most recent turns are preserved verbatim.
 *
 * @param prompt_id Identifier forwarded to the summarization request.
 * @param force When true, skip the token-threshold check (history must
 *   still be non-empty).
 * @returns Token counts before and after compression, or null when no
 *   compression was performed (empty history, under threshold, or token
 *   counting failed).
 */
async tryCompressChat(
  prompt_id: string,
  force: boolean = false,
): Promise<ChatCompressionInfo | null> {
  const curatedHistory = this.getChat().getHistory(true);

  // Regardless of `force`, don't do anything if the history is empty.
  if (curatedHistory.length === 0) {
    return null;
  }

  const model = this.config.getModel();

  const { totalTokens: originalTokenCount } =
    await this.getContentGenerator().countTokens({
      model,
      contents: curatedHistory,
    });
  if (originalTokenCount === undefined) {
    console.warn(`Could not determine token count for model ${model}.`);
    return null;
  }

  const contextPercentageThreshold =
    this.config.getChatCompression()?.contextPercentageThreshold;

  // Don't compress if not forced and we are under the limit.
  if (!force) {
    const threshold =
      contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD;
    if (originalTokenCount < threshold * tokenLimit(model)) {
      return null;
    }
  }

  // Split point: everything before it is summarized, the rest kept as-is.
  let compressBeforeIndex = findIndexAfterFraction(
    curatedHistory,
    1 - COMPRESSION_PRESERVE_THRESHOLD,
  );
  // Find the first user message after the index. This is the start of the next turn.
  while (
    compressBeforeIndex < curatedHistory.length &&
    (curatedHistory[compressBeforeIndex]?.role === 'model' ||
      isFunctionResponse(curatedHistory[compressBeforeIndex]))
  ) {
    compressBeforeIndex++;
  }

  const historyToCompress = curatedHistory.slice(0, compressBeforeIndex);
  const historyToKeep = curatedHistory.slice(compressBeforeIndex);

  // Temporarily restrict the chat to the portion being summarized so the
  // summarization request only sees that slice.
  this.getChat().setHistory(historyToCompress);

  const { text: summary } = await this.getChat().sendMessage(
    {
      message: {
        text: 'First, reason in your scratchpad. Then, generate the <state_snapshot>.',
      },
      config: {
        systemInstruction: { text: getCompressionPrompt() },
      },
    },
    prompt_id,
  );
  // Rebuild the chat: summary as the opening user turn, an acknowledgement
  // from the model, then the preserved recent history.
  this.chat = await this.startChat([
    {
      role: 'user',
      parts: [{ text: summary }],
    },
    {
      role: 'model',
      parts: [{ text: 'Got it. Thanks for the additional context!' }],
    },
    ...historyToKeep,
  ]);
  this.forceFullIdeContext = true;

  const { totalTokens: newTokenCount } =
    await this.getContentGenerator().countTokens({
      // model might change after calling `sendMessage`, so we get the newest value from config
      model: this.config.getModel(),
      contents: this.getChat().getHistory(),
    });
  if (newTokenCount === undefined) {
    console.warn('Could not determine compressed history token count.');
    return null;
  }

  // Record the before/after token counts for telemetry.
  logChatCompression(
    this.config,
    makeChatCompressionEvent({
      tokens_before: originalTokenCount,
      tokens_after: newTokenCount,
    }),
  );

  return {
    originalTokenCount,
    newTokenCount,
  };
}
|
| 889 |
+
|
| 890 |
+
/**
|
| 891 |
+
* Handles falling back to Flash model when persistent 429 errors occur for OAuth users.
|
| 892 |
+
* Uses a fallback handler if provided by the config; otherwise, returns null.
|
| 893 |
+
*/
|
| 894 |
+
private async handleFlashFallback(
|
| 895 |
+
authType?: string,
|
| 896 |
+
error?: unknown,
|
| 897 |
+
): Promise<string | null> {
|
| 898 |
+
// Handle different auth types
|
| 899 |
+
if (authType === AuthType.QWEN_OAUTH) {
|
| 900 |
+
return this.handleQwenOAuthError(error);
|
| 901 |
+
}
|
| 902 |
+
|
| 903 |
+
// Only handle fallback for OAuth users
|
| 904 |
+
if (authType !== AuthType.LOGIN_WITH_GOOGLE) {
|
| 905 |
+
return null;
|
| 906 |
+
}
|
| 907 |
+
|
| 908 |
+
const currentModel = this.config.getModel();
|
| 909 |
+
const fallbackModel = DEFAULT_GEMINI_FLASH_MODEL;
|
| 910 |
+
|
| 911 |
+
// Don't fallback if already using Flash model
|
| 912 |
+
if (currentModel === fallbackModel) {
|
| 913 |
+
return null;
|
| 914 |
+
}
|
| 915 |
+
|
| 916 |
+
// Check if config has a fallback handler (set by CLI package)
|
| 917 |
+
const fallbackHandler = this.config.flashFallbackHandler;
|
| 918 |
+
if (typeof fallbackHandler === 'function') {
|
| 919 |
+
try {
|
| 920 |
+
const accepted = await fallbackHandler(
|
| 921 |
+
currentModel,
|
| 922 |
+
fallbackModel,
|
| 923 |
+
error,
|
| 924 |
+
);
|
| 925 |
+
if (accepted !== false && accepted !== null) {
|
| 926 |
+
this.config.setModel(fallbackModel);
|
| 927 |
+
this.config.setFallbackMode(true);
|
| 928 |
+
return fallbackModel;
|
| 929 |
+
}
|
| 930 |
+
// Check if the model was switched manually in the handler
|
| 931 |
+
if (this.config.getModel() === fallbackModel) {
|
| 932 |
+
return null; // Model was switched but don't continue with current prompt
|
| 933 |
+
}
|
| 934 |
+
} catch (error) {
|
| 935 |
+
console.warn('Flash fallback handler failed:', error);
|
| 936 |
+
}
|
| 937 |
+
}
|
| 938 |
+
|
| 939 |
+
return null;
|
| 940 |
+
}
|
| 941 |
+
|
| 942 |
+
/**
|
| 943 |
+
* Handles Qwen OAuth authentication errors and rate limiting
|
| 944 |
+
*/
|
| 945 |
+
private async handleQwenOAuthError(error?: unknown): Promise<string | null> {
|
| 946 |
+
if (!error) {
|
| 947 |
+
return null;
|
| 948 |
+
}
|
| 949 |
+
|
| 950 |
+
const errorMessage =
|
| 951 |
+
error instanceof Error
|
| 952 |
+
? error.message.toLowerCase()
|
| 953 |
+
: String(error).toLowerCase();
|
| 954 |
+
const errorCode =
|
| 955 |
+
(error as { status?: number; code?: number })?.status ||
|
| 956 |
+
(error as { status?: number; code?: number })?.code;
|
| 957 |
+
|
| 958 |
+
// Check if this is an authentication/authorization error
|
| 959 |
+
const isAuthError =
|
| 960 |
+
errorCode === 401 ||
|
| 961 |
+
errorCode === 403 ||
|
| 962 |
+
errorMessage.includes('unauthorized') ||
|
| 963 |
+
errorMessage.includes('forbidden') ||
|
| 964 |
+
errorMessage.includes('invalid api key') ||
|
| 965 |
+
errorMessage.includes('authentication') ||
|
| 966 |
+
errorMessage.includes('access denied') ||
|
| 967 |
+
(errorMessage.includes('token') && errorMessage.includes('expired'));
|
| 968 |
+
|
| 969 |
+
// Check if this is a rate limiting error
|
| 970 |
+
const isRateLimitError =
|
| 971 |
+
errorCode === 429 ||
|
| 972 |
+
errorMessage.includes('429') ||
|
| 973 |
+
errorMessage.includes('rate limit') ||
|
| 974 |
+
errorMessage.includes('too many requests');
|
| 975 |
+
|
| 976 |
+
if (isAuthError) {
|
| 977 |
+
console.warn('Qwen OAuth authentication error detected:', errorMessage);
|
| 978 |
+
// The QwenContentGenerator should automatically handle token refresh
|
| 979 |
+
// If it still fails, it likely means the refresh token is also expired
|
| 980 |
+
console.log(
|
| 981 |
+
'Note: If this persists, you may need to re-authenticate with Qwen OAuth',
|
| 982 |
+
);
|
| 983 |
+
return null;
|
| 984 |
+
}
|
| 985 |
+
|
| 986 |
+
if (isRateLimitError) {
|
| 987 |
+
console.warn('Qwen API rate limit encountered:', errorMessage);
|
| 988 |
+
// For rate limiting, we don't need to do anything special
|
| 989 |
+
// The retry mechanism will handle the backoff
|
| 990 |
+
return null;
|
| 991 |
+
}
|
| 992 |
+
|
| 993 |
+
// For other errors, don't handle them specially
|
| 994 |
+
return null;
|
| 995 |
+
}
|
| 996 |
+
}
|
| 997 |
+
|
| 998 |
+
// Internal compression thresholds exposed solely for unit tests; not part
// of the public client API.
export const TEST_ONLY = {
  COMPRESSION_PRESERVE_THRESHOLD,
  COMPRESSION_TOKEN_THRESHOLD,
};
|
projects/ui/qwen-code/packages/core/src/core/contentGenerator.test.ts
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
| 8 |
+
import {
|
| 9 |
+
createContentGenerator,
|
| 10 |
+
AuthType,
|
| 11 |
+
createContentGeneratorConfig,
|
| 12 |
+
ContentGenerator,
|
| 13 |
+
} from './contentGenerator.js';
|
| 14 |
+
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
|
| 15 |
+
import { GoogleGenAI } from '@google/genai';
|
| 16 |
+
import { Config } from '../config/config.js';
|
| 17 |
+
import { LoggingContentGenerator } from './loggingContentGenerator.js';
|
| 18 |
+
|
| 19 |
+
// Replace project-local modules with vitest auto-mocks before any test runs.
vi.mock('../code_assist/codeAssist.js');
vi.mock('@google/genai');

// Minimal Config stub; only the members these tests exercise are mocked.
// NOTE(review): the double assertion (`as unknown as Config`) bypasses type
// checking — acceptable in a test, but keep the stub in sync with Config.
const mockConfig = {
  getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
|
| 25 |
+
|
| 26 |
+
// Tests that createContentGenerator wires each auth type to the right
// generator implementation (Code Assist vs. GoogleGenAI) and wraps it in
// a LoggingContentGenerator.
describe('createContentGenerator', () => {
  it('should create a CodeAssistContentGenerator', async () => {
    const mockGenerator = {} as unknown as ContentGenerator;
    vi.mocked(createCodeAssistContentGenerator).mockResolvedValue(
      mockGenerator as never,
    );
    const generator = await createContentGenerator(
      {
        model: 'test-model',
        authType: AuthType.LOGIN_WITH_GOOGLE,
      },
      mockConfig,
    );
    expect(createCodeAssistContentGenerator).toHaveBeenCalled();
    expect(generator).toEqual(
      new LoggingContentGenerator(mockGenerator, mockConfig),
    );
  });

  it('should create a GoogleGenAI content generator', async () => {
    // Local stub shadows the module-level mockConfig: usage statistics on,
    // so the privileged-user-id header is expected.
    const mockConfig = {
      getUsageStatisticsEnabled: () => true,
    } as unknown as Config;

    const mockGenerator = {
      models: {},
    } as unknown as GoogleGenAI;
    vi.mocked(GoogleGenAI).mockImplementation(() => mockGenerator as never);
    const generator = await createContentGenerator(
      {
        model: 'test-model',
        apiKey: 'test-api-key',
        authType: AuthType.USE_GEMINI,
      },
      mockConfig,
    );
    expect(GoogleGenAI).toHaveBeenCalledWith({
      apiKey: 'test-api-key',
      vertexai: undefined,
      httpOptions: {
        headers: {
          'User-Agent': expect.any(String),
          'x-gemini-api-privileged-user-id': expect.any(String),
        },
      },
    });
    expect(generator).toEqual(
      new LoggingContentGenerator(
        (mockGenerator as GoogleGenAI).models,
        mockConfig,
      ),
    );
  });

  it('should create a GoogleGenAI content generator with client install id logging disabled', async () => {
    // Usage statistics off: the privileged-user-id header must be absent.
    const mockConfig = {
      getUsageStatisticsEnabled: () => false,
    } as unknown as Config;
    const mockGenerator = {
      models: {},
    } as unknown as GoogleGenAI;
    vi.mocked(GoogleGenAI).mockImplementation(() => mockGenerator as never);
    const generator = await createContentGenerator(
      {
        model: 'test-model',
        apiKey: 'test-api-key',
        authType: AuthType.USE_GEMINI,
      },
      mockConfig,
    );
    expect(GoogleGenAI).toHaveBeenCalledWith({
      apiKey: 'test-api-key',
      vertexai: undefined,
      httpOptions: {
        headers: {
          'User-Agent': expect.any(String),
        },
      },
    });
    expect(generator).toEqual(
      new LoggingContentGenerator(
        (mockGenerator as GoogleGenAI).models,
        mockConfig,
      ),
    );
  });
});
|
| 113 |
+
|
| 114 |
+
// Tests that createContentGeneratorConfig resolves credentials from the
// environment per auth type, and leaves the config untouched when the
// required variables are missing or empty.
describe('createContentGeneratorConfig', () => {
  // Config stub covering every getter the function under test calls.
  const mockConfig = {
    getModel: vi.fn().mockReturnValue('gemini-pro'),
    setModel: vi.fn(),
    flashFallbackHandler: vi.fn(),
    getProxy: vi.fn(),
    getEnableOpenAILogging: vi.fn().mockReturnValue(false),
    getSamplingParams: vi.fn().mockReturnValue(undefined),
    getContentGeneratorTimeout: vi.fn().mockReturnValue(undefined),
    getContentGeneratorMaxRetries: vi.fn().mockReturnValue(undefined),
    getContentGeneratorSamplingParams: vi.fn().mockReturnValue(undefined),
    getCliVersion: vi.fn().mockReturnValue('1.0.0'),
  } as unknown as Config;

  beforeEach(() => {
    // Reset modules to re-evaluate imports and environment variables
    vi.resetModules();
    vi.clearAllMocks();
  });

  afterEach(() => {
    // Undo vi.stubEnv calls so env vars don't leak between tests.
    vi.unstubAllEnvs();
  });

  it('should configure for Gemini using GEMINI_API_KEY when set', async () => {
    vi.stubEnv('GEMINI_API_KEY', 'env-gemini-key');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_GEMINI,
    );
    expect(config.apiKey).toBe('env-gemini-key');
    expect(config.vertexai).toBe(false);
  });

  it('should not configure for Gemini if GEMINI_API_KEY is empty', async () => {
    vi.stubEnv('GEMINI_API_KEY', '');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_GEMINI,
    );
    expect(config.apiKey).toBeUndefined();
    expect(config.vertexai).toBeUndefined();
  });

  it('should configure for Vertex AI using GOOGLE_API_KEY when set', async () => {
    vi.stubEnv('GOOGLE_API_KEY', 'env-google-key');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.apiKey).toBe('env-google-key');
    expect(config.vertexai).toBe(true);
  });

  it('should configure for Vertex AI using GCP project and location when set', async () => {
    // Project + location is the express-mode alternative to an API key.
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'env-gcp-project');
    vi.stubEnv('GOOGLE_CLOUD_LOCATION', 'env-gcp-location');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.vertexai).toBe(true);
    expect(config.apiKey).toBeUndefined();
  });

  it('should not configure for Vertex AI if required env vars are empty', async () => {
    vi.stubEnv('GOOGLE_API_KEY', '');
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    vi.stubEnv('GOOGLE_CLOUD_LOCATION', '');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.apiKey).toBeUndefined();
    expect(config.vertexai).toBeUndefined();
  });
});
|
projects/ui/qwen-code/packages/core/src/core/contentGenerator.ts
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import {
|
| 8 |
+
CountTokensResponse,
|
| 9 |
+
GenerateContentResponse,
|
| 10 |
+
GenerateContentParameters,
|
| 11 |
+
CountTokensParameters,
|
| 12 |
+
EmbedContentResponse,
|
| 13 |
+
EmbedContentParameters,
|
| 14 |
+
GoogleGenAI,
|
| 15 |
+
} from '@google/genai';
|
| 16 |
+
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
|
| 17 |
+
import { DEFAULT_GEMINI_MODEL, DEFAULT_QWEN_MODEL } from '../config/models.js';
|
| 18 |
+
import { Config } from '../config/config.js';
|
| 19 |
+
|
| 20 |
+
import { UserTierId } from '../code_assist/types.js';
|
| 21 |
+
import { LoggingContentGenerator } from './loggingContentGenerator.js';
|
| 22 |
+
import { getInstallationId } from '../utils/user_id.js';
|
| 23 |
+
|
| 24 |
+
/**
 * Interface abstracting the core functionalities for generating content and counting tokens.
 */
export interface ContentGenerator {
  /** Generates a complete (non-streaming) response for the given request. */
  generateContent(
    request: GenerateContentParameters,
    userPromptId: string,
  ): Promise<GenerateContentResponse>;

  /** Generates a response as an async stream of partial response chunks. */
  generateContentStream(
    request: GenerateContentParameters,
    userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>>;

  /** Counts the tokens a request would consume, without generating. */
  countTokens(request: CountTokensParameters): Promise<CountTokensResponse>;

  /** Computes embedding vectors for the request contents. */
  embedContent(request: EmbedContentParameters): Promise<EmbedContentResponse>;

  // Presumably set by Code Assist (OAuth) implementations and left
  // undefined elsewhere — TODO(review): confirm against the implementors.
  userTier?: UserTierId;
}
|
| 44 |
+
|
| 45 |
+
/**
 * Authentication mechanisms supported when constructing a content
 * generator. The string values are persisted identifiers — do not change
 * them without a migration.
 */
export enum AuthType {
  /** Google account OAuth ("personal") flow via Code Assist. */
  LOGIN_WITH_GOOGLE = 'oauth-personal',
  /** Gemini API key from GEMINI_API_KEY. */
  USE_GEMINI = 'gemini-api-key',
  /** Vertex AI via GOOGLE_API_KEY or GCP project + location. */
  USE_VERTEX_AI = 'vertex-ai',
  /** Cloud Shell ambient credentials (Code Assist path). */
  CLOUD_SHELL = 'cloud-shell',
  /** OpenAI-compatible endpoint via OPENAI_API_KEY / OPENAI_BASE_URL. */
  USE_OPENAI = 'openai',
  /** Qwen OAuth with dynamically managed tokens. */
  QWEN_OAUTH = 'qwen-oauth',
}
|
| 53 |
+
|
| 54 |
+
/**
 * Resolved configuration handed to createContentGenerator. Built from the
 * runtime Config plus environment variables by createContentGeneratorConfig.
 */
export type ContentGeneratorConfig = {
  /** Model identifier to request. */
  model: string;
  /** API key, when the auth type requires one; for QWEN_OAUTH this holds
   * the 'QWEN_OAUTH_DYNAMIC_TOKEN' marker instead of a real key. */
  apiKey?: string;
  /** Override endpoint base URL (OpenAI-compatible providers). */
  baseUrl?: string;
  /** True when targeting Vertex AI rather than the Gemini API. */
  vertexai?: boolean;
  authType?: AuthType | undefined;
  /** Enables request/response logging in the OpenAI generator. */
  enableOpenAILogging?: boolean;
  // Timeout configuration in milliseconds
  timeout?: number;
  // Maximum retries for failed requests
  maxRetries?: number;
  /** Optional sampling overrides forwarded to the provider. */
  samplingParams?: {
    top_p?: number;
    top_k?: number;
    repetition_penalty?: number;
    presence_penalty?: number;
    frequency_penalty?: number;
    temperature?: number;
    max_tokens?: number;
  };
  /** Proxy URL taken from Config, if configured. */
  proxy?: string | undefined;
  /** User-Agent header override — presumably consumed by the generator
   * implementations; not set by createContentGeneratorConfig itself. */
  userAgent?: string;
};
|
| 77 |
+
|
| 78 |
+
export function createContentGeneratorConfig(
|
| 79 |
+
config: Config,
|
| 80 |
+
authType: AuthType | undefined,
|
| 81 |
+
): ContentGeneratorConfig {
|
| 82 |
+
const geminiApiKey = process.env['GEMINI_API_KEY'] || undefined;
|
| 83 |
+
const googleApiKey = process.env['GOOGLE_API_KEY'] || undefined;
|
| 84 |
+
const googleCloudProject = process.env['GOOGLE_CLOUD_PROJECT'] || undefined;
|
| 85 |
+
const googleCloudLocation = process.env['GOOGLE_CLOUD_LOCATION'] || undefined;
|
| 86 |
+
|
| 87 |
+
// openai auth
|
| 88 |
+
const openaiApiKey = process.env['OPENAI_API_KEY'] || undefined;
|
| 89 |
+
const openaiBaseUrl = process.env['OPENAI_BASE_URL'] || undefined;
|
| 90 |
+
const openaiModel = process.env['OPENAI_MODEL'] || undefined;
|
| 91 |
+
|
| 92 |
+
// Use runtime model from config if available; otherwise, fall back to parameter or default
|
| 93 |
+
const effectiveModel = config.getModel() || DEFAULT_GEMINI_MODEL;
|
| 94 |
+
|
| 95 |
+
const contentGeneratorConfig: ContentGeneratorConfig = {
|
| 96 |
+
model: effectiveModel,
|
| 97 |
+
authType,
|
| 98 |
+
proxy: config?.getProxy(),
|
| 99 |
+
enableOpenAILogging: config.getEnableOpenAILogging(),
|
| 100 |
+
timeout: config.getContentGeneratorTimeout(),
|
| 101 |
+
maxRetries: config.getContentGeneratorMaxRetries(),
|
| 102 |
+
samplingParams: config.getContentGeneratorSamplingParams(),
|
| 103 |
+
};
|
| 104 |
+
|
| 105 |
+
// If we are using Google auth or we are in Cloud Shell, there is nothing else to validate for now
|
| 106 |
+
if (
|
| 107 |
+
authType === AuthType.LOGIN_WITH_GOOGLE ||
|
| 108 |
+
authType === AuthType.CLOUD_SHELL
|
| 109 |
+
) {
|
| 110 |
+
return contentGeneratorConfig;
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
if (authType === AuthType.USE_GEMINI && geminiApiKey) {
|
| 114 |
+
contentGeneratorConfig.apiKey = geminiApiKey;
|
| 115 |
+
contentGeneratorConfig.vertexai = false;
|
| 116 |
+
|
| 117 |
+
return contentGeneratorConfig;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
if (
|
| 121 |
+
authType === AuthType.USE_VERTEX_AI &&
|
| 122 |
+
(googleApiKey || (googleCloudProject && googleCloudLocation))
|
| 123 |
+
) {
|
| 124 |
+
contentGeneratorConfig.apiKey = googleApiKey;
|
| 125 |
+
contentGeneratorConfig.vertexai = true;
|
| 126 |
+
|
| 127 |
+
return contentGeneratorConfig;
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
if (authType === AuthType.USE_OPENAI && openaiApiKey) {
|
| 131 |
+
contentGeneratorConfig.apiKey = openaiApiKey;
|
| 132 |
+
contentGeneratorConfig.baseUrl = openaiBaseUrl;
|
| 133 |
+
contentGeneratorConfig.model = openaiModel || DEFAULT_QWEN_MODEL;
|
| 134 |
+
|
| 135 |
+
return contentGeneratorConfig;
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
if (authType === AuthType.QWEN_OAUTH) {
|
| 139 |
+
// For Qwen OAuth, we'll handle the API key dynamically in createContentGenerator
|
| 140 |
+
// Set a special marker to indicate this is Qwen OAuth
|
| 141 |
+
contentGeneratorConfig.apiKey = 'QWEN_OAUTH_DYNAMIC_TOKEN';
|
| 142 |
+
|
| 143 |
+
// Prefer to use qwen3-coder-plus as the default Qwen model if QWEN_MODEL is not set.
|
| 144 |
+
contentGeneratorConfig.model =
|
| 145 |
+
process.env['QWEN_MODEL'] || DEFAULT_QWEN_MODEL;
|
| 146 |
+
|
| 147 |
+
return contentGeneratorConfig;
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
return contentGeneratorConfig;
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
/**
 * Instantiates the ContentGenerator implementation matching the config's
 * auth type.
 *
 * Google OAuth / Cloud Shell go through Code Assist; Gemini / Vertex use
 * the GoogleGenAI SDK; OpenAI and Qwen OAuth load their generators via
 * dynamic import (to avoid circular dependencies). Google-backed
 * generators are wrapped in a LoggingContentGenerator.
 *
 * @param config Resolved generator configuration (see
 *   createContentGeneratorConfig).
 * @param gcConfig Global runtime Config, passed through to the generators.
 * @param sessionId Optional session identifier for the Code Assist flow.
 * @returns A ready-to-use ContentGenerator.
 * @throws Error for missing OpenAI keys, invalid Qwen OAuth config, Qwen
 *   initialization failures, or an unsupported auth type.
 */
export async function createContentGenerator(
  config: ContentGeneratorConfig,
  gcConfig: Config,
  sessionId?: string,
): Promise<ContentGenerator> {
  // User-Agent identifies the CLI version and platform on every request.
  const version = process.env['CLI_VERSION'] || process.version;
  const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
  const baseHeaders: Record<string, string> = {
    'User-Agent': userAgent,
  };

  if (
    config.authType === AuthType.LOGIN_WITH_GOOGLE ||
    config.authType === AuthType.CLOUD_SHELL
  ) {
    const httpOptions = { headers: baseHeaders };
    return new LoggingContentGenerator(
      await createCodeAssistContentGenerator(
        httpOptions,
        config.authType,
        gcConfig,
        sessionId,
      ),
      gcConfig,
    );
  }

  if (
    config.authType === AuthType.USE_GEMINI ||
    config.authType === AuthType.USE_VERTEX_AI
  ) {
    let headers: Record<string, string> = { ...baseHeaders };
    // The installation id header is only attached when the user has opted
    // into usage statistics.
    if (gcConfig?.getUsageStatisticsEnabled()) {
      const installationId = getInstallationId();
      headers = {
        ...headers,
        'x-gemini-api-privileged-user-id': `${installationId}`,
      };
    }
    const httpOptions = { headers };

    const googleGenAI = new GoogleGenAI({
      // An empty-string key is treated as absent so the SDK can fall back
      // to other credential sources.
      apiKey: config.apiKey === '' ? undefined : config.apiKey,
      vertexai: config.vertexai,
      httpOptions,
    });
    return new LoggingContentGenerator(googleGenAI.models, gcConfig);
  }

  if (config.authType === AuthType.USE_OPENAI) {
    if (!config.apiKey) {
      throw new Error('OpenAI API key is required');
    }

    // Import OpenAIContentGenerator dynamically to avoid circular dependencies
    const { OpenAIContentGenerator } = await import(
      './openaiContentGenerator.js'
    );

    // Always use OpenAIContentGenerator, logging is controlled by enableOpenAILogging flag
    return new OpenAIContentGenerator(config, gcConfig);
  }

  if (config.authType === AuthType.QWEN_OAUTH) {
    // The marker is set by createContentGeneratorConfig; anything else
    // means the config was built incorrectly.
    if (config.apiKey !== 'QWEN_OAUTH_DYNAMIC_TOKEN') {
      throw new Error('Invalid Qwen OAuth configuration');
    }

    // Import required classes dynamically
    const { getQwenOAuthClient: getQwenOauthClient } = await import(
      '../qwen/qwenOAuth2.js'
    );
    const { QwenContentGenerator } = await import(
      '../qwen/qwenContentGenerator.js'
    );

    try {
      // Get the Qwen OAuth client (now includes integrated token management)
      const qwenClient = await getQwenOauthClient(gcConfig);

      // Create the content generator with dynamic token management
      return new QwenContentGenerator(qwenClient, config, gcConfig);
    } catch (error) {
      throw new Error(
        `Failed to initialize Qwen: ${error instanceof Error ? error.message : String(error)}`,
      );
    }
  }

  throw new Error(
    `Error creating contentGenerator: Unsupported authType: ${config.authType}`,
  );
}
|
projects/ui/qwen-code/packages/core/src/core/coreToolScheduler.test.ts
ADDED
|
@@ -0,0 +1,961 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { describe, it, expect, vi } from 'vitest';
|
| 8 |
+
import {
|
| 9 |
+
CoreToolScheduler,
|
| 10 |
+
ToolCall,
|
| 11 |
+
WaitingToolCall,
|
| 12 |
+
convertToFunctionResponse,
|
| 13 |
+
} from './coreToolScheduler.js';
|
| 14 |
+
import {
|
| 15 |
+
BaseDeclarativeTool,
|
| 16 |
+
BaseToolInvocation,
|
| 17 |
+
ToolCallConfirmationDetails,
|
| 18 |
+
ToolConfirmationOutcome,
|
| 19 |
+
ToolConfirmationPayload,
|
| 20 |
+
ToolInvocation,
|
| 21 |
+
ToolResult,
|
| 22 |
+
Config,
|
| 23 |
+
Kind,
|
| 24 |
+
ApprovalMode,
|
| 25 |
+
ToolRegistry,
|
| 26 |
+
} from '../index.js';
|
| 27 |
+
import { Part, PartListUnion } from '@google/genai';
|
| 28 |
+
import { MockModifiableTool, MockTool } from '../test-utils/tools.js';
|
| 29 |
+
|
| 30 |
+
class TestApprovalTool extends BaseDeclarativeTool<{ id: string }, ToolResult> {
|
| 31 |
+
static readonly Name = 'testApprovalTool';
|
| 32 |
+
|
| 33 |
+
constructor(private config: Config) {
|
| 34 |
+
super(
|
| 35 |
+
TestApprovalTool.Name,
|
| 36 |
+
'TestApprovalTool',
|
| 37 |
+
'A tool for testing approval logic',
|
| 38 |
+
Kind.Edit,
|
| 39 |
+
{
|
| 40 |
+
properties: { id: { type: 'string' } },
|
| 41 |
+
required: ['id'],
|
| 42 |
+
type: 'object',
|
| 43 |
+
},
|
| 44 |
+
);
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
protected createInvocation(params: {
|
| 48 |
+
id: string;
|
| 49 |
+
}): ToolInvocation<{ id: string }, ToolResult> {
|
| 50 |
+
return new TestApprovalInvocation(this.config, params);
|
| 51 |
+
}
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
class TestApprovalInvocation extends BaseToolInvocation<
|
| 55 |
+
{ id: string },
|
| 56 |
+
ToolResult
|
| 57 |
+
> {
|
| 58 |
+
constructor(
|
| 59 |
+
private config: Config,
|
| 60 |
+
params: { id: string },
|
| 61 |
+
) {
|
| 62 |
+
super(params);
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
getDescription(): string {
|
| 66 |
+
return `Test tool ${this.params.id}`;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
override async shouldConfirmExecute(): Promise<
|
| 70 |
+
ToolCallConfirmationDetails | false
|
| 71 |
+
> {
|
| 72 |
+
// Need confirmation unless approval mode is AUTO_EDIT
|
| 73 |
+
if (this.config.getApprovalMode() === ApprovalMode.AUTO_EDIT) {
|
| 74 |
+
return false;
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
return {
|
| 78 |
+
type: 'edit',
|
| 79 |
+
title: `Confirm Test Tool ${this.params.id}`,
|
| 80 |
+
fileName: `test-${this.params.id}.txt`,
|
| 81 |
+
filePath: `/test-${this.params.id}.txt`,
|
| 82 |
+
fileDiff: 'Test diff content',
|
| 83 |
+
originalContent: '',
|
| 84 |
+
newContent: 'Test content',
|
| 85 |
+
onConfirm: async (outcome: ToolConfirmationOutcome) => {
|
| 86 |
+
if (outcome === ToolConfirmationOutcome.ProceedAlways) {
|
| 87 |
+
this.config.setApprovalMode(ApprovalMode.AUTO_EDIT);
|
| 88 |
+
}
|
| 89 |
+
},
|
| 90 |
+
};
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
async execute(): Promise<ToolResult> {
|
| 94 |
+
return {
|
| 95 |
+
llmContent: `Executed test tool ${this.params.id}`,
|
| 96 |
+
returnDisplay: `Executed test tool ${this.params.id}`,
|
| 97 |
+
};
|
| 98 |
+
}
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
describe('CoreToolScheduler', () => {
|
| 102 |
+
it('should cancel a tool call if the signal is aborted before confirmation', async () => {
|
| 103 |
+
const mockTool = new MockTool();
|
| 104 |
+
mockTool.shouldConfirm = true;
|
| 105 |
+
const declarativeTool = mockTool;
|
| 106 |
+
const mockToolRegistry = {
|
| 107 |
+
getTool: () => declarativeTool,
|
| 108 |
+
getFunctionDeclarations: () => [],
|
| 109 |
+
tools: new Map(),
|
| 110 |
+
discovery: {},
|
| 111 |
+
registerTool: () => {},
|
| 112 |
+
getToolByName: () => declarativeTool,
|
| 113 |
+
getToolByDisplayName: () => declarativeTool,
|
| 114 |
+
getTools: () => [],
|
| 115 |
+
discoverTools: async () => {},
|
| 116 |
+
getAllTools: () => [],
|
| 117 |
+
getToolsByServer: () => [],
|
| 118 |
+
} as unknown as ToolRegistry;
|
| 119 |
+
|
| 120 |
+
const onAllToolCallsComplete = vi.fn();
|
| 121 |
+
const onToolCallsUpdate = vi.fn();
|
| 122 |
+
|
| 123 |
+
const mockConfig = {
|
| 124 |
+
getSessionId: () => 'test-session-id',
|
| 125 |
+
getUsageStatisticsEnabled: () => true,
|
| 126 |
+
getDebugMode: () => false,
|
| 127 |
+
getApprovalMode: () => ApprovalMode.DEFAULT,
|
| 128 |
+
getContentGeneratorConfig: () => ({
|
| 129 |
+
model: 'test-model',
|
| 130 |
+
authType: 'oauth-personal',
|
| 131 |
+
}),
|
| 132 |
+
} as unknown as Config;
|
| 133 |
+
|
| 134 |
+
const scheduler = new CoreToolScheduler({
|
| 135 |
+
config: mockConfig,
|
| 136 |
+
toolRegistry: mockToolRegistry,
|
| 137 |
+
onAllToolCallsComplete,
|
| 138 |
+
onToolCallsUpdate,
|
| 139 |
+
getPreferredEditor: () => 'vscode',
|
| 140 |
+
onEditorClose: vi.fn(),
|
| 141 |
+
});
|
| 142 |
+
|
| 143 |
+
const abortController = new AbortController();
|
| 144 |
+
const request = {
|
| 145 |
+
callId: '1',
|
| 146 |
+
name: 'mockTool',
|
| 147 |
+
args: {},
|
| 148 |
+
isClientInitiated: false,
|
| 149 |
+
prompt_id: 'prompt-id-1',
|
| 150 |
+
};
|
| 151 |
+
|
| 152 |
+
abortController.abort();
|
| 153 |
+
await scheduler.schedule([request], abortController.signal);
|
| 154 |
+
|
| 155 |
+
expect(onAllToolCallsComplete).toHaveBeenCalled();
|
| 156 |
+
const completedCalls = onAllToolCallsComplete.mock
|
| 157 |
+
.calls[0][0] as ToolCall[];
|
| 158 |
+
expect(completedCalls[0].status).toBe('cancelled');
|
| 159 |
+
});
|
| 160 |
+
});
|
| 161 |
+
|
| 162 |
+
describe('CoreToolScheduler with payload', () => {
|
| 163 |
+
it('should update args and diff and execute tool when payload is provided', async () => {
|
| 164 |
+
const mockTool = new MockModifiableTool();
|
| 165 |
+
const declarativeTool = mockTool;
|
| 166 |
+
const mockToolRegistry = {
|
| 167 |
+
getTool: () => declarativeTool,
|
| 168 |
+
getFunctionDeclarations: () => [],
|
| 169 |
+
tools: new Map(),
|
| 170 |
+
discovery: {},
|
| 171 |
+
registerTool: () => {},
|
| 172 |
+
getToolByName: () => declarativeTool,
|
| 173 |
+
getToolByDisplayName: () => declarativeTool,
|
| 174 |
+
getTools: () => [],
|
| 175 |
+
discoverTools: async () => {},
|
| 176 |
+
getAllTools: () => [],
|
| 177 |
+
getToolsByServer: () => [],
|
| 178 |
+
} as unknown as ToolRegistry;
|
| 179 |
+
|
| 180 |
+
const onAllToolCallsComplete = vi.fn();
|
| 181 |
+
const onToolCallsUpdate = vi.fn();
|
| 182 |
+
|
| 183 |
+
const mockConfig = {
|
| 184 |
+
getSessionId: () => 'test-session-id',
|
| 185 |
+
getUsageStatisticsEnabled: () => true,
|
| 186 |
+
getDebugMode: () => false,
|
| 187 |
+
getApprovalMode: () => ApprovalMode.DEFAULT,
|
| 188 |
+
getContentGeneratorConfig: () => ({
|
| 189 |
+
model: 'test-model',
|
| 190 |
+
authType: 'oauth-personal',
|
| 191 |
+
}),
|
| 192 |
+
} as unknown as Config;
|
| 193 |
+
|
| 194 |
+
const scheduler = new CoreToolScheduler({
|
| 195 |
+
config: mockConfig,
|
| 196 |
+
toolRegistry: mockToolRegistry,
|
| 197 |
+
onAllToolCallsComplete,
|
| 198 |
+
onToolCallsUpdate,
|
| 199 |
+
getPreferredEditor: () => 'vscode',
|
| 200 |
+
onEditorClose: vi.fn(),
|
| 201 |
+
});
|
| 202 |
+
|
| 203 |
+
const abortController = new AbortController();
|
| 204 |
+
const request = {
|
| 205 |
+
callId: '1',
|
| 206 |
+
name: 'mockModifiableTool',
|
| 207 |
+
args: {},
|
| 208 |
+
isClientInitiated: false,
|
| 209 |
+
prompt_id: 'prompt-id-2',
|
| 210 |
+
};
|
| 211 |
+
|
| 212 |
+
await scheduler.schedule([request], abortController.signal);
|
| 213 |
+
|
| 214 |
+
await vi.waitFor(() => {
|
| 215 |
+
const awaitingCall = onToolCallsUpdate.mock.calls.find(
|
| 216 |
+
(call) => call[0][0].status === 'awaiting_approval',
|
| 217 |
+
)?.[0][0];
|
| 218 |
+
expect(awaitingCall).toBeDefined();
|
| 219 |
+
});
|
| 220 |
+
|
| 221 |
+
const awaitingCall = onToolCallsUpdate.mock.calls.find(
|
| 222 |
+
(call) => call[0][0].status === 'awaiting_approval',
|
| 223 |
+
)?.[0][0];
|
| 224 |
+
const confirmationDetails = awaitingCall.confirmationDetails;
|
| 225 |
+
|
| 226 |
+
if (confirmationDetails) {
|
| 227 |
+
const payload: ToolConfirmationPayload = { newContent: 'final version' };
|
| 228 |
+
await confirmationDetails.onConfirm(
|
| 229 |
+
ToolConfirmationOutcome.ProceedOnce,
|
| 230 |
+
payload,
|
| 231 |
+
);
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
expect(onAllToolCallsComplete).toHaveBeenCalled();
|
| 235 |
+
const completedCalls = onAllToolCallsComplete.mock
|
| 236 |
+
.calls[0][0] as ToolCall[];
|
| 237 |
+
expect(completedCalls[0].status).toBe('success');
|
| 238 |
+
expect(mockTool.executeFn).toHaveBeenCalledWith({
|
| 239 |
+
newContent: 'final version',
|
| 240 |
+
});
|
| 241 |
+
});
|
| 242 |
+
});
|
| 243 |
+
|
| 244 |
+
describe('convertToFunctionResponse', () => {
|
| 245 |
+
const toolName = 'testTool';
|
| 246 |
+
const callId = 'call1';
|
| 247 |
+
|
| 248 |
+
it('should handle simple string llmContent', () => {
|
| 249 |
+
const llmContent = 'Simple text output';
|
| 250 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 251 |
+
expect(result).toEqual({
|
| 252 |
+
functionResponse: {
|
| 253 |
+
name: toolName,
|
| 254 |
+
id: callId,
|
| 255 |
+
response: { output: 'Simple text output' },
|
| 256 |
+
},
|
| 257 |
+
});
|
| 258 |
+
});
|
| 259 |
+
|
| 260 |
+
it('should handle llmContent as a single Part with text', () => {
|
| 261 |
+
const llmContent: Part = { text: 'Text from Part object' };
|
| 262 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 263 |
+
expect(result).toEqual({
|
| 264 |
+
functionResponse: {
|
| 265 |
+
name: toolName,
|
| 266 |
+
id: callId,
|
| 267 |
+
response: { output: 'Text from Part object' },
|
| 268 |
+
},
|
| 269 |
+
});
|
| 270 |
+
});
|
| 271 |
+
|
| 272 |
+
it('should handle llmContent as a PartListUnion array with a single text Part', () => {
|
| 273 |
+
const llmContent: PartListUnion = [{ text: 'Text from array' }];
|
| 274 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 275 |
+
expect(result).toEqual({
|
| 276 |
+
functionResponse: {
|
| 277 |
+
name: toolName,
|
| 278 |
+
id: callId,
|
| 279 |
+
response: { output: 'Text from array' },
|
| 280 |
+
},
|
| 281 |
+
});
|
| 282 |
+
});
|
| 283 |
+
|
| 284 |
+
it('should handle llmContent with inlineData', () => {
|
| 285 |
+
const llmContent: Part = {
|
| 286 |
+
inlineData: { mimeType: 'image/png', data: 'base64...' },
|
| 287 |
+
};
|
| 288 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 289 |
+
expect(result).toEqual([
|
| 290 |
+
{
|
| 291 |
+
functionResponse: {
|
| 292 |
+
name: toolName,
|
| 293 |
+
id: callId,
|
| 294 |
+
response: {
|
| 295 |
+
output: 'Binary content of type image/png was processed.',
|
| 296 |
+
},
|
| 297 |
+
},
|
| 298 |
+
},
|
| 299 |
+
llmContent,
|
| 300 |
+
]);
|
| 301 |
+
});
|
| 302 |
+
|
| 303 |
+
it('should handle llmContent with fileData', () => {
|
| 304 |
+
const llmContent: Part = {
|
| 305 |
+
fileData: { mimeType: 'application/pdf', fileUri: 'gs://...' },
|
| 306 |
+
};
|
| 307 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 308 |
+
expect(result).toEqual([
|
| 309 |
+
{
|
| 310 |
+
functionResponse: {
|
| 311 |
+
name: toolName,
|
| 312 |
+
id: callId,
|
| 313 |
+
response: {
|
| 314 |
+
output: 'Binary content of type application/pdf was processed.',
|
| 315 |
+
},
|
| 316 |
+
},
|
| 317 |
+
},
|
| 318 |
+
llmContent,
|
| 319 |
+
]);
|
| 320 |
+
});
|
| 321 |
+
|
| 322 |
+
it('should handle llmContent as an array of multiple Parts (text and inlineData)', () => {
|
| 323 |
+
const llmContent: PartListUnion = [
|
| 324 |
+
{ text: 'Some textual description' },
|
| 325 |
+
{ inlineData: { mimeType: 'image/jpeg', data: 'base64data...' } },
|
| 326 |
+
{ text: 'Another text part' },
|
| 327 |
+
];
|
| 328 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 329 |
+
expect(result).toEqual([
|
| 330 |
+
{
|
| 331 |
+
functionResponse: {
|
| 332 |
+
name: toolName,
|
| 333 |
+
id: callId,
|
| 334 |
+
response: { output: 'Tool execution succeeded.' },
|
| 335 |
+
},
|
| 336 |
+
},
|
| 337 |
+
...llmContent,
|
| 338 |
+
]);
|
| 339 |
+
});
|
| 340 |
+
|
| 341 |
+
it('should handle llmContent as an array with a single inlineData Part', () => {
|
| 342 |
+
const llmContent: PartListUnion = [
|
| 343 |
+
{ inlineData: { mimeType: 'image/gif', data: 'gifdata...' } },
|
| 344 |
+
];
|
| 345 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 346 |
+
expect(result).toEqual([
|
| 347 |
+
{
|
| 348 |
+
functionResponse: {
|
| 349 |
+
name: toolName,
|
| 350 |
+
id: callId,
|
| 351 |
+
response: {
|
| 352 |
+
output: 'Binary content of type image/gif was processed.',
|
| 353 |
+
},
|
| 354 |
+
},
|
| 355 |
+
},
|
| 356 |
+
...llmContent,
|
| 357 |
+
]);
|
| 358 |
+
});
|
| 359 |
+
|
| 360 |
+
it('should handle llmContent as a generic Part (not text, inlineData, or fileData)', () => {
|
| 361 |
+
const llmContent: Part = { functionCall: { name: 'test', args: {} } };
|
| 362 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 363 |
+
expect(result).toEqual({
|
| 364 |
+
functionResponse: {
|
| 365 |
+
name: toolName,
|
| 366 |
+
id: callId,
|
| 367 |
+
response: { output: 'Tool execution succeeded.' },
|
| 368 |
+
},
|
| 369 |
+
});
|
| 370 |
+
});
|
| 371 |
+
|
| 372 |
+
it('should handle empty string llmContent', () => {
|
| 373 |
+
const llmContent = '';
|
| 374 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 375 |
+
expect(result).toEqual({
|
| 376 |
+
functionResponse: {
|
| 377 |
+
name: toolName,
|
| 378 |
+
id: callId,
|
| 379 |
+
response: { output: '' },
|
| 380 |
+
},
|
| 381 |
+
});
|
| 382 |
+
});
|
| 383 |
+
|
| 384 |
+
it('should handle llmContent as an empty array', () => {
|
| 385 |
+
const llmContent: PartListUnion = [];
|
| 386 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 387 |
+
expect(result).toEqual([
|
| 388 |
+
{
|
| 389 |
+
functionResponse: {
|
| 390 |
+
name: toolName,
|
| 391 |
+
id: callId,
|
| 392 |
+
response: { output: 'Tool execution succeeded.' },
|
| 393 |
+
},
|
| 394 |
+
},
|
| 395 |
+
]);
|
| 396 |
+
});
|
| 397 |
+
|
| 398 |
+
it('should handle llmContent as a Part with undefined inlineData/fileData/text', () => {
|
| 399 |
+
const llmContent: Part = {}; // An empty part object
|
| 400 |
+
const result = convertToFunctionResponse(toolName, callId, llmContent);
|
| 401 |
+
expect(result).toEqual({
|
| 402 |
+
functionResponse: {
|
| 403 |
+
name: toolName,
|
| 404 |
+
id: callId,
|
| 405 |
+
response: { output: 'Tool execution succeeded.' },
|
| 406 |
+
},
|
| 407 |
+
});
|
| 408 |
+
});
|
| 409 |
+
});
|
| 410 |
+
|
| 411 |
+
class MockEditToolInvocation extends BaseToolInvocation<
|
| 412 |
+
Record<string, unknown>,
|
| 413 |
+
ToolResult
|
| 414 |
+
> {
|
| 415 |
+
constructor(params: Record<string, unknown>) {
|
| 416 |
+
super(params);
|
| 417 |
+
}
|
| 418 |
+
|
| 419 |
+
getDescription(): string {
|
| 420 |
+
return 'A mock edit tool invocation';
|
| 421 |
+
}
|
| 422 |
+
|
| 423 |
+
override async shouldConfirmExecute(
|
| 424 |
+
_abortSignal: AbortSignal,
|
| 425 |
+
): Promise<ToolCallConfirmationDetails | false> {
|
| 426 |
+
return {
|
| 427 |
+
type: 'edit',
|
| 428 |
+
title: 'Confirm Edit',
|
| 429 |
+
fileName: 'test.txt',
|
| 430 |
+
filePath: 'test.txt',
|
| 431 |
+
fileDiff:
|
| 432 |
+
'--- test.txt\n+++ test.txt\n@@ -1,1 +1,1 @@\n-old content\n+new content',
|
| 433 |
+
originalContent: 'old content',
|
| 434 |
+
newContent: 'new content',
|
| 435 |
+
onConfirm: async () => {},
|
| 436 |
+
};
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
async execute(_abortSignal: AbortSignal): Promise<ToolResult> {
|
| 440 |
+
return {
|
| 441 |
+
llmContent: 'Edited successfully',
|
| 442 |
+
returnDisplay: 'Edited successfully',
|
| 443 |
+
};
|
| 444 |
+
}
|
| 445 |
+
}
|
| 446 |
+
|
| 447 |
+
class MockEditTool extends BaseDeclarativeTool<
|
| 448 |
+
Record<string, unknown>,
|
| 449 |
+
ToolResult
|
| 450 |
+
> {
|
| 451 |
+
constructor() {
|
| 452 |
+
super('mockEditTool', 'mockEditTool', 'A mock edit tool', Kind.Edit, {});
|
| 453 |
+
}
|
| 454 |
+
|
| 455 |
+
protected createInvocation(
|
| 456 |
+
params: Record<string, unknown>,
|
| 457 |
+
): ToolInvocation<Record<string, unknown>, ToolResult> {
|
| 458 |
+
return new MockEditToolInvocation(params);
|
| 459 |
+
}
|
| 460 |
+
}
|
| 461 |
+
|
| 462 |
+
describe('CoreToolScheduler edit cancellation', () => {
|
| 463 |
+
it('should preserve diff when an edit is cancelled', async () => {
|
| 464 |
+
const mockEditTool = new MockEditTool();
|
| 465 |
+
const declarativeTool = mockEditTool;
|
| 466 |
+
const mockToolRegistry = {
|
| 467 |
+
getTool: () => declarativeTool,
|
| 468 |
+
getFunctionDeclarations: () => [],
|
| 469 |
+
tools: new Map(),
|
| 470 |
+
discovery: {},
|
| 471 |
+
registerTool: () => {},
|
| 472 |
+
getToolByName: () => declarativeTool,
|
| 473 |
+
getToolByDisplayName: () => declarativeTool,
|
| 474 |
+
getTools: () => [],
|
| 475 |
+
discoverTools: async () => {},
|
| 476 |
+
getAllTools: () => [],
|
| 477 |
+
getToolsByServer: () => [],
|
| 478 |
+
} as unknown as ToolRegistry;
|
| 479 |
+
|
| 480 |
+
const onAllToolCallsComplete = vi.fn();
|
| 481 |
+
const onToolCallsUpdate = vi.fn();
|
| 482 |
+
|
| 483 |
+
const mockConfig = {
|
| 484 |
+
getSessionId: () => 'test-session-id',
|
| 485 |
+
getUsageStatisticsEnabled: () => true,
|
| 486 |
+
getDebugMode: () => false,
|
| 487 |
+
getApprovalMode: () => ApprovalMode.DEFAULT,
|
| 488 |
+
getContentGeneratorConfig: () => ({
|
| 489 |
+
model: 'test-model',
|
| 490 |
+
authType: 'oauth-personal',
|
| 491 |
+
}),
|
| 492 |
+
} as unknown as Config;
|
| 493 |
+
|
| 494 |
+
const scheduler = new CoreToolScheduler({
|
| 495 |
+
config: mockConfig,
|
| 496 |
+
toolRegistry: mockToolRegistry,
|
| 497 |
+
onAllToolCallsComplete,
|
| 498 |
+
onToolCallsUpdate,
|
| 499 |
+
getPreferredEditor: () => 'vscode',
|
| 500 |
+
onEditorClose: vi.fn(),
|
| 501 |
+
});
|
| 502 |
+
|
| 503 |
+
const abortController = new AbortController();
|
| 504 |
+
const request = {
|
| 505 |
+
callId: '1',
|
| 506 |
+
name: 'mockEditTool',
|
| 507 |
+
args: {},
|
| 508 |
+
isClientInitiated: false,
|
| 509 |
+
prompt_id: 'prompt-id-1',
|
| 510 |
+
};
|
| 511 |
+
|
| 512 |
+
await scheduler.schedule([request], abortController.signal);
|
| 513 |
+
|
| 514 |
+
// Wait for the tool to reach awaiting_approval state
|
| 515 |
+
const awaitingCall = onToolCallsUpdate.mock.calls.find(
|
| 516 |
+
(call) => call[0][0].status === 'awaiting_approval',
|
| 517 |
+
)?.[0][0];
|
| 518 |
+
|
| 519 |
+
expect(awaitingCall).toBeDefined();
|
| 520 |
+
|
| 521 |
+
// Cancel the edit
|
| 522 |
+
const confirmationDetails = awaitingCall.confirmationDetails;
|
| 523 |
+
if (confirmationDetails) {
|
| 524 |
+
await confirmationDetails.onConfirm(ToolConfirmationOutcome.Cancel);
|
| 525 |
+
}
|
| 526 |
+
|
| 527 |
+
expect(onAllToolCallsComplete).toHaveBeenCalled();
|
| 528 |
+
const completedCalls = onAllToolCallsComplete.mock
|
| 529 |
+
.calls[0][0] as ToolCall[];
|
| 530 |
+
|
| 531 |
+
expect(completedCalls[0].status).toBe('cancelled');
|
| 532 |
+
|
| 533 |
+
// Check that the diff is preserved
|
| 534 |
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
| 535 |
+
const cancelledCall = completedCalls[0] as any;
|
| 536 |
+
expect(cancelledCall.response.resultDisplay).toBeDefined();
|
| 537 |
+
expect(cancelledCall.response.resultDisplay.fileDiff).toBe(
|
| 538 |
+
'--- test.txt\n+++ test.txt\n@@ -1,1 +1,1 @@\n-old content\n+new content',
|
| 539 |
+
);
|
| 540 |
+
expect(cancelledCall.response.resultDisplay.fileName).toBe('test.txt');
|
| 541 |
+
});
|
| 542 |
+
});
|
| 543 |
+
|
| 544 |
+
describe('CoreToolScheduler YOLO mode', () => {
|
| 545 |
+
it('should execute tool requiring confirmation directly without waiting', async () => {
|
| 546 |
+
// Arrange
|
| 547 |
+
const mockTool = new MockTool();
|
| 548 |
+
mockTool.executeFn.mockReturnValue({
|
| 549 |
+
llmContent: 'Tool executed',
|
| 550 |
+
returnDisplay: 'Tool executed',
|
| 551 |
+
});
|
| 552 |
+
// This tool would normally require confirmation.
|
| 553 |
+
mockTool.shouldConfirm = true;
|
| 554 |
+
const declarativeTool = mockTool;
|
| 555 |
+
|
| 556 |
+
const mockToolRegistry = {
|
| 557 |
+
getTool: () => declarativeTool,
|
| 558 |
+
getToolByName: () => declarativeTool,
|
| 559 |
+
// Other properties are not needed for this test but are included for type consistency.
|
| 560 |
+
getFunctionDeclarations: () => [],
|
| 561 |
+
tools: new Map(),
|
| 562 |
+
discovery: {},
|
| 563 |
+
registerTool: () => {},
|
| 564 |
+
getToolByDisplayName: () => declarativeTool,
|
| 565 |
+
getTools: () => [],
|
| 566 |
+
discoverTools: async () => {},
|
| 567 |
+
getAllTools: () => [],
|
| 568 |
+
getToolsByServer: () => [],
|
| 569 |
+
} as unknown as ToolRegistry;
|
| 570 |
+
|
| 571 |
+
const onAllToolCallsComplete = vi.fn();
|
| 572 |
+
const onToolCallsUpdate = vi.fn();
|
| 573 |
+
|
| 574 |
+
// Configure the scheduler for YOLO mode.
|
| 575 |
+
const mockConfig = {
|
| 576 |
+
getSessionId: () => 'test-session-id',
|
| 577 |
+
getUsageStatisticsEnabled: () => true,
|
| 578 |
+
getDebugMode: () => false,
|
| 579 |
+
getApprovalMode: () => ApprovalMode.YOLO,
|
| 580 |
+
getContentGeneratorConfig: () => ({
|
| 581 |
+
model: 'test-model',
|
| 582 |
+
authType: 'oauth-personal',
|
| 583 |
+
}),
|
| 584 |
+
} as unknown as Config;
|
| 585 |
+
|
| 586 |
+
const scheduler = new CoreToolScheduler({
|
| 587 |
+
config: mockConfig,
|
| 588 |
+
toolRegistry: mockToolRegistry,
|
| 589 |
+
onAllToolCallsComplete,
|
| 590 |
+
onToolCallsUpdate,
|
| 591 |
+
getPreferredEditor: () => 'vscode',
|
| 592 |
+
onEditorClose: vi.fn(),
|
| 593 |
+
});
|
| 594 |
+
|
| 595 |
+
const abortController = new AbortController();
|
| 596 |
+
const request = {
|
| 597 |
+
callId: '1',
|
| 598 |
+
name: 'mockTool',
|
| 599 |
+
args: { param: 'value' },
|
| 600 |
+
isClientInitiated: false,
|
| 601 |
+
prompt_id: 'prompt-id-yolo',
|
| 602 |
+
};
|
| 603 |
+
|
| 604 |
+
// Act
|
| 605 |
+
await scheduler.schedule([request], abortController.signal);
|
| 606 |
+
|
| 607 |
+
// Assert
|
| 608 |
+
// 1. The tool's execute method was called directly.
|
| 609 |
+
expect(mockTool.executeFn).toHaveBeenCalledWith({ param: 'value' });
|
| 610 |
+
|
| 611 |
+
// 2. The tool call status never entered 'awaiting_approval'.
|
| 612 |
+
const statusUpdates = onToolCallsUpdate.mock.calls
|
| 613 |
+
.map((call) => (call[0][0] as ToolCall)?.status)
|
| 614 |
+
.filter(Boolean);
|
| 615 |
+
expect(statusUpdates).not.toContain('awaiting_approval');
|
| 616 |
+
expect(statusUpdates).toEqual([
|
| 617 |
+
'validating',
|
| 618 |
+
'scheduled',
|
| 619 |
+
'executing',
|
| 620 |
+
'success',
|
| 621 |
+
]);
|
| 622 |
+
|
| 623 |
+
// 3. The final callback indicates the tool call was successful.
|
| 624 |
+
expect(onAllToolCallsComplete).toHaveBeenCalled();
|
| 625 |
+
const completedCalls = onAllToolCallsComplete.mock
|
| 626 |
+
.calls[0][0] as ToolCall[];
|
| 627 |
+
expect(completedCalls).toHaveLength(1);
|
| 628 |
+
const completedCall = completedCalls[0];
|
| 629 |
+
expect(completedCall.status).toBe('success');
|
| 630 |
+
if (completedCall.status === 'success') {
|
| 631 |
+
expect(completedCall.response.resultDisplay).toBe('Tool executed');
|
| 632 |
+
}
|
| 633 |
+
});
|
| 634 |
+
});
|
| 635 |
+
|
| 636 |
+
describe('CoreToolScheduler request queueing', () => {
|
| 637 |
+
it('should queue a request if another is running', async () => {
|
| 638 |
+
let resolveFirstCall: (result: ToolResult) => void;
|
| 639 |
+
const firstCallPromise = new Promise<ToolResult>((resolve) => {
|
| 640 |
+
resolveFirstCall = resolve;
|
| 641 |
+
});
|
| 642 |
+
|
| 643 |
+
const mockTool = new MockTool();
|
| 644 |
+
mockTool.executeFn.mockImplementation(() => firstCallPromise);
|
| 645 |
+
const declarativeTool = mockTool;
|
| 646 |
+
|
| 647 |
+
const mockToolRegistry = {
|
| 648 |
+
getTool: () => declarativeTool,
|
| 649 |
+
getToolByName: () => declarativeTool,
|
| 650 |
+
getFunctionDeclarations: () => [],
|
| 651 |
+
tools: new Map(),
|
| 652 |
+
discovery: {},
|
| 653 |
+
registerTool: () => {},
|
| 654 |
+
getToolByDisplayName: () => declarativeTool,
|
| 655 |
+
getTools: () => [],
|
| 656 |
+
discoverTools: async () => {},
|
| 657 |
+
getAllTools: () => [],
|
| 658 |
+
getToolsByServer: () => [],
|
| 659 |
+
} as unknown as ToolRegistry;
|
| 660 |
+
|
| 661 |
+
const onAllToolCallsComplete = vi.fn();
|
| 662 |
+
const onToolCallsUpdate = vi.fn();
|
| 663 |
+
|
| 664 |
+
const mockConfig = {
|
| 665 |
+
getSessionId: () => 'test-session-id',
|
| 666 |
+
getUsageStatisticsEnabled: () => true,
|
| 667 |
+
getDebugMode: () => false,
|
| 668 |
+
getApprovalMode: () => ApprovalMode.YOLO, // Use YOLO to avoid confirmation prompts
|
| 669 |
+
getContentGeneratorConfig: () => ({
|
| 670 |
+
model: 'test-model',
|
| 671 |
+
authType: 'oauth-personal',
|
| 672 |
+
}),
|
| 673 |
+
} as unknown as Config;
|
| 674 |
+
|
| 675 |
+
const scheduler = new CoreToolScheduler({
|
| 676 |
+
config: mockConfig,
|
| 677 |
+
toolRegistry: mockToolRegistry,
|
| 678 |
+
onAllToolCallsComplete,
|
| 679 |
+
onToolCallsUpdate,
|
| 680 |
+
getPreferredEditor: () => 'vscode',
|
| 681 |
+
onEditorClose: vi.fn(),
|
| 682 |
+
});
|
| 683 |
+
|
| 684 |
+
const abortController = new AbortController();
|
| 685 |
+
const request1 = {
|
| 686 |
+
callId: '1',
|
| 687 |
+
name: 'mockTool',
|
| 688 |
+
args: { a: 1 },
|
| 689 |
+
isClientInitiated: false,
|
| 690 |
+
prompt_id: 'prompt-1',
|
| 691 |
+
};
|
| 692 |
+
const request2 = {
|
| 693 |
+
callId: '2',
|
| 694 |
+
name: 'mockTool',
|
| 695 |
+
args: { b: 2 },
|
| 696 |
+
isClientInitiated: false,
|
| 697 |
+
prompt_id: 'prompt-2',
|
| 698 |
+
};
|
| 699 |
+
|
| 700 |
+
// Schedule the first call, which will pause execution.
|
| 701 |
+
scheduler.schedule([request1], abortController.signal);
|
| 702 |
+
|
| 703 |
+
// Wait for the first call to be in the 'executing' state.
|
| 704 |
+
await vi.waitFor(() => {
|
| 705 |
+
const calls = onToolCallsUpdate.mock.calls.at(-1)?.[0] as ToolCall[];
|
| 706 |
+
expect(calls?.[0]?.status).toBe('executing');
|
| 707 |
+
});
|
| 708 |
+
|
| 709 |
+
// Schedule the second call while the first is "running".
|
| 710 |
+
const schedulePromise2 = scheduler.schedule(
|
| 711 |
+
[request2],
|
| 712 |
+
abortController.signal,
|
| 713 |
+
);
|
| 714 |
+
|
| 715 |
+
// Ensure the second tool call hasn't been executed yet.
|
| 716 |
+
expect(mockTool.executeFn).toHaveBeenCalledTimes(1);
|
| 717 |
+
expect(mockTool.executeFn).toHaveBeenCalledWith({ a: 1 });
|
| 718 |
+
|
| 719 |
+
// Complete the first tool call.
|
| 720 |
+
resolveFirstCall!({
|
| 721 |
+
llmContent: 'First call complete',
|
| 722 |
+
returnDisplay: 'First call complete',
|
| 723 |
+
});
|
| 724 |
+
|
| 725 |
+
// Wait for the second schedule promise to resolve.
|
| 726 |
+
await schedulePromise2;
|
| 727 |
+
|
| 728 |
+
// Wait for the second call to be in the 'executing' state.
|
| 729 |
+
await vi.waitFor(() => {
|
| 730 |
+
const calls = onToolCallsUpdate.mock.calls.at(-1)?.[0] as ToolCall[];
|
| 731 |
+
expect(calls?.[0]?.status).toBe('executing');
|
| 732 |
+
});
|
| 733 |
+
|
| 734 |
+
// Now the second tool call should have been executed.
|
| 735 |
+
expect(mockTool.executeFn).toHaveBeenCalledTimes(2);
|
| 736 |
+
expect(mockTool.executeFn).toHaveBeenCalledWith({ b: 2 });
|
| 737 |
+
|
| 738 |
+
// Let the second call finish.
|
| 739 |
+
const secondCallResult = {
|
| 740 |
+
llmContent: 'Second call complete',
|
| 741 |
+
returnDisplay: 'Second call complete',
|
| 742 |
+
};
|
| 743 |
+
// Since the mock is shared, we need to resolve the current promise.
|
| 744 |
+
// In a real scenario, a new promise would be created for the second call.
|
| 745 |
+
resolveFirstCall!(secondCallResult);
|
| 746 |
+
|
| 747 |
+
// Wait for the second completion.
|
| 748 |
+
await vi.waitFor(() => {
|
| 749 |
+
expect(onAllToolCallsComplete).toHaveBeenCalledTimes(2);
|
| 750 |
+
});
|
| 751 |
+
|
| 752 |
+
// Verify the completion callbacks were called correctly.
|
| 753 |
+
expect(onAllToolCallsComplete.mock.calls[0][0][0].status).toBe('success');
|
| 754 |
+
expect(onAllToolCallsComplete.mock.calls[1][0][0].status).toBe('success');
|
| 755 |
+
});
|
| 756 |
+
|
| 757 |
+
// Verifies that two back-to-back (synchronous) schedule() calls both run to
// completion: the second request is queued while the first is in flight and
// executed afterwards, rather than being dropped or rejected.
it('should handle two synchronous calls to schedule', async () => {
  const mockTool = new MockTool();
  const declarativeTool = mockTool;
  // Minimal registry stub: every lookup resolves to the single mock tool.
  const mockToolRegistry = {
    getTool: () => declarativeTool,
    getToolByName: () => declarativeTool,
    getFunctionDeclarations: () => [],
    tools: new Map(),
    discovery: {},
    registerTool: () => {},
    getToolByDisplayName: () => declarativeTool,
    getTools: () => [],
    discoverTools: async () => {},
    getAllTools: () => [],
    getToolsByServer: () => [],
  } as unknown as ToolRegistry;

  const onAllToolCallsComplete = vi.fn();
  const onToolCallsUpdate = vi.fn();

  const mockConfig = {
    getSessionId: () => 'test-session-id',
    getUsageStatisticsEnabled: () => true,
    getDebugMode: () => false,
    // YOLO mode skips confirmation prompts so both calls execute directly.
    getApprovalMode: () => ApprovalMode.YOLO,
    getContentGeneratorConfig: () => ({
      model: 'test-model',
      authType: 'oauth-personal',
    }),
  } as unknown as Config;

  const scheduler = new CoreToolScheduler({
    config: mockConfig,
    toolRegistry: mockToolRegistry,
    onAllToolCallsComplete,
    onToolCallsUpdate,
    getPreferredEditor: () => 'vscode',
    onEditorClose: vi.fn(),
  });

  const abortController = new AbortController();
  const request1 = {
    callId: '1',
    name: 'mockTool',
    args: { a: 1 },
    isClientInitiated: false,
    prompt_id: 'prompt-1',
  };
  const request2 = {
    callId: '2',
    name: 'mockTool',
    args: { b: 2 },
    isClientInitiated: false,
    prompt_id: 'prompt-2',
  };

  // Schedule two calls synchronously (no await between them).
  const schedulePromise1 = scheduler.schedule(
    [request1],
    abortController.signal,
  );
  const schedulePromise2 = scheduler.schedule(
    [request2],
    abortController.signal,
  );

  // Wait for both promises to resolve.
  await Promise.all([schedulePromise1, schedulePromise2]);

  // Ensure the tool was called twice with the correct arguments.
  expect(mockTool.executeFn).toHaveBeenCalledTimes(2);
  expect(mockTool.executeFn).toHaveBeenCalledWith({ a: 1 });
  expect(mockTool.executeFn).toHaveBeenCalledWith({ b: 2 });

  // Ensure completion callbacks were called twice (once per batch).
  expect(onAllToolCallsComplete).toHaveBeenCalledTimes(2);
});
|
| 834 |
+
|
| 835 |
+
// Verifies that approving the FIRST awaiting tool call with ProceedAlways
// flips the approval mode and causes the remaining queued confirmations to be
// auto-approved without further user interaction.
it('should auto-approve remaining tool calls when first tool call is approved with ProceedAlways', async () => {
  // Mutable approval mode lets the scheduler's setApprovalMode() take effect.
  let approvalMode = ApprovalMode.DEFAULT;
  const mockConfig = {
    getSessionId: () => 'test-session-id',
    getUsageStatisticsEnabled: () => true,
    getDebugMode: () => false,
    getApprovalMode: () => approvalMode,
    setApprovalMode: (mode: ApprovalMode) => {
      approvalMode = mode;
    },
  } as unknown as Config;

  const testTool = new TestApprovalTool(mockConfig);
  const toolRegistry = {
    getTool: () => testTool,
    getFunctionDeclarations: () => [],
    getFunctionDeclarationsFiltered: () => [],
    registerTool: () => {},
    discoverAllTools: async () => {},
    discoverMcpTools: async () => {},
    discoverToolsForServer: async () => {},
    removeMcpToolsByServer: () => {},
    getAllTools: () => [],
    getToolsByServer: () => [],
    tools: new Map(),
    config: mockConfig,
    mcpClientManager: undefined,
    getToolByName: () => testTool,
    getToolByDisplayName: () => testTool,
    getTools: () => [],
    discoverTools: async () => {},
    discovery: {},
  };

  const onAllToolCallsComplete = vi.fn();
  const onToolCallsUpdate = vi.fn();
  // Collects each distinct onConfirm handler so the test can drive approvals.
  const pendingConfirmations: Array<
    (outcome: ToolConfirmationOutcome) => void
  > = [];

  const scheduler = new CoreToolScheduler({
    config: mockConfig,
    toolRegistry: toolRegistry as unknown as ToolRegistry,
    onAllToolCallsComplete,
    onToolCallsUpdate: (toolCalls) => {
      onToolCallsUpdate(toolCalls);
      // Capture confirmation handlers for awaiting_approval tools
      toolCalls.forEach((call) => {
        if (call.status === 'awaiting_approval') {
          const waitingCall = call as WaitingToolCall;
          if (waitingCall.confirmationDetails?.onConfirm) {
            // Deduplicate: each handler is recorded only once even though
            // updates fire repeatedly for the same call.
            const originalHandler = pendingConfirmations.find(
              (h) => h === waitingCall.confirmationDetails.onConfirm,
            );
            if (!originalHandler) {
              pendingConfirmations.push(
                waitingCall.confirmationDetails.onConfirm,
              );
            }
          }
        }
      });
    },
    getPreferredEditor: () => 'vscode',
    onEditorClose: vi.fn(),
  });

  const abortController = new AbortController();

  // Schedule multiple tools that need confirmation
  const requests = [
    {
      callId: '1',
      name: 'testApprovalTool',
      args: { id: 'first' },
      isClientInitiated: false,
      prompt_id: 'prompt-1',
    },
    {
      callId: '2',
      name: 'testApprovalTool',
      args: { id: 'second' },
      isClientInitiated: false,
      prompt_id: 'prompt-2',
    },
    {
      callId: '3',
      name: 'testApprovalTool',
      args: { id: 'third' },
      isClientInitiated: false,
      prompt_id: 'prompt-3',
    },
  ];

  await scheduler.schedule(requests, abortController.signal);

  // Wait for all tools to be awaiting approval
  await vi.waitFor(() => {
    const calls = onToolCallsUpdate.mock.calls.at(-1)?.[0] as ToolCall[];
    expect(calls?.length).toBe(3);
    expect(calls?.every((call) => call.status === 'awaiting_approval')).toBe(
      true,
    );
  });

  expect(pendingConfirmations.length).toBe(3);

  // Approve the first tool with ProceedAlways
  const firstConfirmation = pendingConfirmations[0];
  firstConfirmation(ToolConfirmationOutcome.ProceedAlways);

  // Wait for all tools to be completed
  await vi.waitFor(() => {
    expect(onAllToolCallsComplete).toHaveBeenCalled();
    const completedCalls = onAllToolCallsComplete.mock.calls.at(
      -1,
    )?.[0] as ToolCall[];
    expect(completedCalls?.length).toBe(3);
    expect(completedCalls?.every((call) => call.status === 'success')).toBe(
      true,
    );
  });

  // Verify approval mode was changed.
  // NOTE(review): AUTO_EDIT appears to be what ProceedAlways maps to for this
  // tool — confirm against TestApprovalTool's confirmation implementation.
  expect(approvalMode).toBe(ApprovalMode.AUTO_EDIT);
});
|
| 961 |
+
});
|
projects/ui/qwen-code/packages/core/src/core/coreToolScheduler.ts
ADDED
|
@@ -0,0 +1,966 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import {
|
| 8 |
+
ToolCallRequestInfo,
|
| 9 |
+
ToolCallResponseInfo,
|
| 10 |
+
ToolConfirmationOutcome,
|
| 11 |
+
ToolCallConfirmationDetails,
|
| 12 |
+
ToolResult,
|
| 13 |
+
ToolResultDisplay,
|
| 14 |
+
ToolRegistry,
|
| 15 |
+
ApprovalMode,
|
| 16 |
+
EditorType,
|
| 17 |
+
Config,
|
| 18 |
+
logToolCall,
|
| 19 |
+
ToolCallEvent,
|
| 20 |
+
ToolConfirmationPayload,
|
| 21 |
+
ToolErrorType,
|
| 22 |
+
AnyDeclarativeTool,
|
| 23 |
+
AnyToolInvocation,
|
| 24 |
+
} from '../index.js';
|
| 25 |
+
import { Part, PartListUnion } from '@google/genai';
|
| 26 |
+
import { getResponseTextFromParts } from '../utils/generateContentResponseUtilities.js';
|
| 27 |
+
import {
|
| 28 |
+
isModifiableDeclarativeTool,
|
| 29 |
+
ModifyContext,
|
| 30 |
+
modifyWithEditor,
|
| 31 |
+
} from '../tools/modifiable-tool.js';
|
| 32 |
+
import * as Diff from 'diff';
|
| 33 |
+
|
| 34 |
+
/**
 * A call whose arguments are being validated and whose invocation has been
 * built, before it is scheduled or sent for user approval.
 */
export type ValidatingToolCall = {
  status: 'validating';
  request: ToolCallRequestInfo;
  tool: AnyDeclarativeTool;
  invocation: AnyToolInvocation;
  // Epoch millis when the call entered the pipeline; used to compute durationMs.
  startTime?: number;
  // How the user resolved a confirmation prompt, if one was shown.
  outcome?: ToolConfirmationOutcome;
};

/** A call that has passed validation/approval and is queued for execution. */
export type ScheduledToolCall = {
  status: 'scheduled';
  request: ToolCallRequestInfo;
  tool: AnyDeclarativeTool;
  invocation: AnyToolInvocation;
  startTime?: number;
  outcome?: ToolConfirmationOutcome;
};

/**
 * A call that failed (validation, execution, or cancellation-as-error).
 * `tool` is optional because the failure may occur before tool lookup succeeds.
 */
export type ErroredToolCall = {
  status: 'error';
  request: ToolCallRequestInfo;
  response: ToolCallResponseInfo;
  tool?: AnyDeclarativeTool;
  durationMs?: number;
  outcome?: ToolConfirmationOutcome;
};

/** A call that executed to completion; `response` carries the model payload. */
export type SuccessfulToolCall = {
  status: 'success';
  request: ToolCallRequestInfo;
  tool: AnyDeclarativeTool;
  response: ToolCallResponseInfo;
  invocation: AnyToolInvocation;
  durationMs?: number;
  outcome?: ToolConfirmationOutcome;
};

/** A call currently running; `liveOutput` holds streamed partial output. */
export type ExecutingToolCall = {
  status: 'executing';
  request: ToolCallRequestInfo;
  tool: AnyDeclarativeTool;
  invocation: AnyToolInvocation;
  liveOutput?: string;
  startTime?: number;
  outcome?: ToolConfirmationOutcome;
};

/** A call aborted by the user or a signal; `response` reports the cancellation. */
export type CancelledToolCall = {
  status: 'cancelled';
  request: ToolCallRequestInfo;
  response: ToolCallResponseInfo;
  tool: AnyDeclarativeTool;
  invocation: AnyToolInvocation;
  durationMs?: number;
  outcome?: ToolConfirmationOutcome;
};

/** A call paused until the user approves or rejects it via confirmationDetails. */
export type WaitingToolCall = {
  status: 'awaiting_approval';
  request: ToolCallRequestInfo;
  tool: AnyDeclarativeTool;
  invocation: AnyToolInvocation;
  confirmationDetails: ToolCallConfirmationDetails;
  startTime?: number;
  outcome?: ToolConfirmationOutcome;
};

/** Union of every status string a ToolCall can be in. */
export type Status = ToolCall['status'];

/** Discriminated union over all tool-call lifecycle states (tag: `status`). */
export type ToolCall =
  | ValidatingToolCall
  | ScheduledToolCall
  | ErroredToolCall
  | SuccessfulToolCall
  | ExecutingToolCall
  | CancelledToolCall
  | WaitingToolCall;

/** Terminal states only — what `onAllToolCallsComplete` receives. */
export type CompletedToolCall =
  | SuccessfulToolCall
  | CancelledToolCall
  | ErroredToolCall;

/** Asks the user (or caller) to resolve an awaiting-approval call. */
export type ConfirmHandler = (
  toolCall: WaitingToolCall,
) => Promise<ToolConfirmationOutcome>;

/** Receives streamed output chunks for a specific executing call. */
export type OutputUpdateHandler = (
  toolCallId: string,
  outputChunk: string,
) => void;

/** Fired once every call in a scheduled batch has reached a terminal state. */
export type AllToolCallsCompleteHandler = (
  completedToolCalls: CompletedToolCall[],
) => Promise<void>;

/** Fired whenever any call in the current batch changes state. */
export type ToolCallsUpdateHandler = (toolCalls: ToolCall[]) => void;
|
| 131 |
+
|
| 132 |
+
/**
|
| 133 |
+
* Formats tool output for a Gemini FunctionResponse.
|
| 134 |
+
*/
|
| 135 |
+
function createFunctionResponsePart(
|
| 136 |
+
callId: string,
|
| 137 |
+
toolName: string,
|
| 138 |
+
output: string,
|
| 139 |
+
): Part {
|
| 140 |
+
return {
|
| 141 |
+
functionResponse: {
|
| 142 |
+
id: callId,
|
| 143 |
+
name: toolName,
|
| 144 |
+
response: { output },
|
| 145 |
+
},
|
| 146 |
+
};
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
export function convertToFunctionResponse(
|
| 150 |
+
toolName: string,
|
| 151 |
+
callId: string,
|
| 152 |
+
llmContent: PartListUnion,
|
| 153 |
+
): PartListUnion {
|
| 154 |
+
const contentToProcess =
|
| 155 |
+
Array.isArray(llmContent) && llmContent.length === 1
|
| 156 |
+
? llmContent[0]
|
| 157 |
+
: llmContent;
|
| 158 |
+
|
| 159 |
+
if (typeof contentToProcess === 'string') {
|
| 160 |
+
return createFunctionResponsePart(callId, toolName, contentToProcess);
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
if (Array.isArray(contentToProcess)) {
|
| 164 |
+
const functionResponse = createFunctionResponsePart(
|
| 165 |
+
callId,
|
| 166 |
+
toolName,
|
| 167 |
+
'Tool execution succeeded.',
|
| 168 |
+
);
|
| 169 |
+
return [functionResponse, ...contentToProcess];
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
// After this point, contentToProcess is a single Part object.
|
| 173 |
+
if (contentToProcess.functionResponse) {
|
| 174 |
+
if (contentToProcess.functionResponse.response?.['content']) {
|
| 175 |
+
const stringifiedOutput =
|
| 176 |
+
getResponseTextFromParts(
|
| 177 |
+
contentToProcess.functionResponse.response['content'] as Part[],
|
| 178 |
+
) || '';
|
| 179 |
+
return createFunctionResponsePart(callId, toolName, stringifiedOutput);
|
| 180 |
+
}
|
| 181 |
+
// It's a functionResponse that we should pass through as is.
|
| 182 |
+
return contentToProcess;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
if (contentToProcess.inlineData || contentToProcess.fileData) {
|
| 186 |
+
const mimeType =
|
| 187 |
+
contentToProcess.inlineData?.mimeType ||
|
| 188 |
+
contentToProcess.fileData?.mimeType ||
|
| 189 |
+
'unknown';
|
| 190 |
+
const functionResponse = createFunctionResponsePart(
|
| 191 |
+
callId,
|
| 192 |
+
toolName,
|
| 193 |
+
`Binary content of type ${mimeType} was processed.`,
|
| 194 |
+
);
|
| 195 |
+
return [functionResponse, contentToProcess];
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
if (contentToProcess.text !== undefined) {
|
| 199 |
+
return createFunctionResponsePart(callId, toolName, contentToProcess.text);
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
// Default case for other kinds of parts.
|
| 203 |
+
return createFunctionResponsePart(
|
| 204 |
+
callId,
|
| 205 |
+
toolName,
|
| 206 |
+
'Tool execution succeeded.',
|
| 207 |
+
);
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
const createErrorResponse = (
|
| 211 |
+
request: ToolCallRequestInfo,
|
| 212 |
+
error: Error,
|
| 213 |
+
errorType: ToolErrorType | undefined,
|
| 214 |
+
): ToolCallResponseInfo => ({
|
| 215 |
+
callId: request.callId,
|
| 216 |
+
error,
|
| 217 |
+
responseParts: {
|
| 218 |
+
functionResponse: {
|
| 219 |
+
id: request.callId,
|
| 220 |
+
name: request.name,
|
| 221 |
+
response: { error: error.message },
|
| 222 |
+
},
|
| 223 |
+
},
|
| 224 |
+
resultDisplay: error.message,
|
| 225 |
+
errorType,
|
| 226 |
+
});
|
| 227 |
+
|
| 228 |
+
/** Constructor options for CoreToolScheduler. */
interface CoreToolSchedulerOptions {
  // Source of tool lookups for incoming call requests.
  toolRegistry: ToolRegistry;
  // Optional sink for streamed live output from executing tools.
  outputUpdateHandler?: OutputUpdateHandler;
  // Invoked once every call in a batch reaches a terminal state.
  onAllToolCallsComplete?: AllToolCallsCompleteHandler;
  // Invoked on every state change of any call in the current batch.
  onToolCallsUpdate?: ToolCallsUpdateHandler;
  // Supplies the editor used for "modify with editor" confirmation flows.
  getPreferredEditor: () => EditorType | undefined;
  config: Config;
  // Invoked when an external editor session is closed.
  onEditorClose: () => void;
}
|
| 237 |
+
|
| 238 |
+
export class CoreToolScheduler {
|
| 239 |
+
private toolRegistry: ToolRegistry;
// Calls belonging to the batch currently being processed.
private toolCalls: ToolCall[] = [];
private outputUpdateHandler?: OutputUpdateHandler;
private onAllToolCallsComplete?: AllToolCallsCompleteHandler;
private onToolCallsUpdate?: ToolCallsUpdateHandler;
private getPreferredEditor: () => EditorType | undefined;
private config: Config;
private onEditorClose: () => void;
// True while completed calls are being flushed to onAllToolCallsComplete.
private isFinalizingToolCalls = false;
// True while a schedule() invocation is actively setting up a batch.
private isScheduling = false;
// Requests received while busy; drained one entry at a time when idle.
private requestQueue: Array<{
  request: ToolCallRequestInfo | ToolCallRequestInfo[];
  signal: AbortSignal;
  resolve: () => void;
  reject: (reason?: Error) => void;
}> = [];
|
| 255 |
+
|
| 256 |
+
/** Wires the scheduler to its configuration, registry, and callbacks. */
constructor(options: CoreToolSchedulerOptions) {
  this.config = options.config;
  this.toolRegistry = options.toolRegistry;
  this.outputUpdateHandler = options.outputUpdateHandler;
  this.onAllToolCallsComplete = options.onAllToolCallsComplete;
  this.onToolCallsUpdate = options.onToolCallsUpdate;
  this.getPreferredEditor = options.getPreferredEditor;
  this.onEditorClose = options.onEditorClose;
}
|
| 265 |
+
|
| 266 |
+
// Overload signatures tie each target status to the auxiliary payload that
// transition requires (response, confirmation details, or cancel reason).
private setStatusInternal(
  targetCallId: string,
  status: 'success',
  response: ToolCallResponseInfo,
): void;
private setStatusInternal(
  targetCallId: string,
  status: 'awaiting_approval',
  confirmationDetails: ToolCallConfirmationDetails,
): void;
private setStatusInternal(
  targetCallId: string,
  status: 'error',
  response: ToolCallResponseInfo,
): void;
private setStatusInternal(
  targetCallId: string,
  status: 'cancelled',
  reason: string,
): void;
private setStatusInternal(
  targetCallId: string,
  status: 'executing' | 'scheduled' | 'validating',
): void;
/**
 * Transitions the call with `targetCallId` to `newStatus`, rebuilding its
 * state object. Calls already in a terminal state (success/error/cancelled)
 * are never transitioned again. Notifies listeners afterwards.
 */
private setStatusInternal(
  targetCallId: string,
  newStatus: Status,
  auxiliaryData?: unknown,
): void {
  this.toolCalls = this.toolCalls.map((currentCall) => {
    // Leave untouched: other calls, and calls already in a terminal state.
    if (
      currentCall.request.callId !== targetCallId ||
      currentCall.status === 'success' ||
      currentCall.status === 'error' ||
      currentCall.status === 'cancelled'
    ) {
      return currentCall;
    }

    // currentCall is a non-terminal state here and should have startTime and tool.
    const existingStartTime = currentCall.startTime;
    const toolInstance = currentCall.tool;
    const invocation = currentCall.invocation;

    const outcome = currentCall.outcome;

    switch (newStatus) {
      case 'success': {
        const durationMs = existingStartTime
          ? Date.now() - existingStartTime
          : undefined;
        return {
          request: currentCall.request,
          tool: toolInstance,
          invocation,
          status: 'success',
          response: auxiliaryData as ToolCallResponseInfo,
          durationMs,
          outcome,
        } as SuccessfulToolCall;
      }
      case 'error': {
        const durationMs = existingStartTime
          ? Date.now() - existingStartTime
          : undefined;
        return {
          request: currentCall.request,
          status: 'error',
          tool: toolInstance,
          response: auxiliaryData as ToolCallResponseInfo,
          durationMs,
          outcome,
        } as ErroredToolCall;
      }
      case 'awaiting_approval':
        return {
          request: currentCall.request,
          tool: toolInstance,
          status: 'awaiting_approval',
          confirmationDetails: auxiliaryData as ToolCallConfirmationDetails,
          startTime: existingStartTime,
          outcome,
          invocation,
        } as WaitingToolCall;
      case 'scheduled':
        return {
          request: currentCall.request,
          tool: toolInstance,
          status: 'scheduled',
          startTime: existingStartTime,
          outcome,
          invocation,
        } as ScheduledToolCall;
      case 'cancelled': {
        const durationMs = existingStartTime
          ? Date.now() - existingStartTime
          : undefined;

        // Preserve diff for cancelled edit operations so the UI can still
        // show what the edit would have changed.
        let resultDisplay: ToolResultDisplay | undefined = undefined;
        if (currentCall.status === 'awaiting_approval') {
          const waitingCall = currentCall as WaitingToolCall;
          if (waitingCall.confirmationDetails.type === 'edit') {
            resultDisplay = {
              fileDiff: waitingCall.confirmationDetails.fileDiff,
              fileName: waitingCall.confirmationDetails.fileName,
              originalContent:
                waitingCall.confirmationDetails.originalContent,
              newContent: waitingCall.confirmationDetails.newContent,
            };
          }
        }

        return {
          request: currentCall.request,
          tool: toolInstance,
          invocation,
          status: 'cancelled',
          // Synthesize a functionResponse so the model learns the call was
          // cancelled rather than silently dropped (auxiliaryData = reason).
          response: {
            callId: currentCall.request.callId,
            responseParts: {
              functionResponse: {
                id: currentCall.request.callId,
                name: currentCall.request.name,
                response: {
                  error: `[Operation Cancelled] Reason: ${auxiliaryData}`,
                },
              },
            },
            resultDisplay,
            error: undefined,
            errorType: undefined,
          },
          durationMs,
          outcome,
        } as CancelledToolCall;
      }
      case 'validating':
        return {
          request: currentCall.request,
          tool: toolInstance,
          status: 'validating',
          startTime: existingStartTime,
          outcome,
          invocation,
        } as ValidatingToolCall;
      case 'executing':
        return {
          request: currentCall.request,
          tool: toolInstance,
          status: 'executing',
          startTime: existingStartTime,
          outcome,
          invocation,
        } as ExecutingToolCall;
      default: {
        // Compile-time exhaustiveness check over Status.
        const exhaustiveCheck: never = newStatus;
        return exhaustiveCheck;
      }
    }
  });
  this.notifyToolCallsUpdate();
  this.checkAndNotifyCompletion();
}
|
| 430 |
+
|
| 431 |
+
/**
 * Replaces the arguments of the call identified by `targetCallId` (e.g. after
 * a user edits a proposed change) and rebuilds its invocation from the new
 * args. If the new args fail validation, the call is converted to an error.
 */
private setArgsInternal(targetCallId: string, args: unknown): void {
  this.toolCalls = this.toolCalls.map((call) => {
    // We should never be asked to set args on an ErroredToolCall, but
    // we guard for the case anyways.
    if (call.request.callId !== targetCallId || call.status === 'error') {
      return call;
    }

    const invocationOrError = this.buildInvocation(
      call.tool,
      args as Record<string, unknown>,
    );
    if (invocationOrError instanceof Error) {
      // New args are invalid: surface the failure as an errored call.
      const response = createErrorResponse(
        call.request,
        invocationOrError,
        ToolErrorType.INVALID_TOOL_PARAMS,
      );
      return {
        request: { ...call.request, args: args as Record<string, unknown> },
        status: 'error',
        tool: call.tool,
        response,
      } as ErroredToolCall;
    }

    // Keep the call's current status; swap in the new args and invocation.
    return {
      ...call,
      request: { ...call.request, args: args as Record<string, unknown> },
      invocation: invocationOrError,
    };
  });
}
|
| 464 |
+
|
| 465 |
+
private isRunning(): boolean {
|
| 466 |
+
return (
|
| 467 |
+
this.isFinalizingToolCalls ||
|
| 468 |
+
this.toolCalls.some(
|
| 469 |
+
(call) =>
|
| 470 |
+
call.status === 'executing' || call.status === 'awaiting_approval',
|
| 471 |
+
)
|
| 472 |
+
);
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
private buildInvocation(
|
| 476 |
+
tool: AnyDeclarativeTool,
|
| 477 |
+
args: object,
|
| 478 |
+
): AnyToolInvocation | Error {
|
| 479 |
+
try {
|
| 480 |
+
return tool.build(args);
|
| 481 |
+
} catch (e) {
|
| 482 |
+
if (e instanceof Error) {
|
| 483 |
+
return e;
|
| 484 |
+
}
|
| 485 |
+
return new Error(String(e));
|
| 486 |
+
}
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
schedule(
|
| 490 |
+
request: ToolCallRequestInfo | ToolCallRequestInfo[],
|
| 491 |
+
signal: AbortSignal,
|
| 492 |
+
): Promise<void> {
|
| 493 |
+
if (this.isRunning() || this.isScheduling) {
|
| 494 |
+
return new Promise((resolve, reject) => {
|
| 495 |
+
const abortHandler = () => {
|
| 496 |
+
// Find and remove the request from the queue
|
| 497 |
+
const index = this.requestQueue.findIndex(
|
| 498 |
+
(item) => item.request === request,
|
| 499 |
+
);
|
| 500 |
+
if (index > -1) {
|
| 501 |
+
this.requestQueue.splice(index, 1);
|
| 502 |
+
reject(new Error('Tool call cancelled while in queue.'));
|
| 503 |
+
}
|
| 504 |
+
};
|
| 505 |
+
|
| 506 |
+
signal.addEventListener('abort', abortHandler, { once: true });
|
| 507 |
+
|
| 508 |
+
this.requestQueue.push({
|
| 509 |
+
request,
|
| 510 |
+
signal,
|
| 511 |
+
resolve: () => {
|
| 512 |
+
signal.removeEventListener('abort', abortHandler);
|
| 513 |
+
resolve();
|
| 514 |
+
},
|
| 515 |
+
reject: (reason?: Error) => {
|
| 516 |
+
signal.removeEventListener('abort', abortHandler);
|
| 517 |
+
reject(reason);
|
| 518 |
+
},
|
| 519 |
+
});
|
| 520 |
+
});
|
| 521 |
+
}
|
| 522 |
+
return this._schedule(request, signal);
|
| 523 |
+
}
|
| 524 |
+
|
| 525 |
+
private async _schedule(
|
| 526 |
+
request: ToolCallRequestInfo | ToolCallRequestInfo[],
|
| 527 |
+
signal: AbortSignal,
|
| 528 |
+
): Promise<void> {
|
| 529 |
+
this.isScheduling = true;
|
| 530 |
+
try {
|
| 531 |
+
if (this.isRunning()) {
|
| 532 |
+
throw new Error(
|
| 533 |
+
'Cannot schedule new tool calls while other tool calls are actively running (executing or awaiting approval).',
|
| 534 |
+
);
|
| 535 |
+
}
|
| 536 |
+
const requestsToProcess = Array.isArray(request) ? request : [request];
|
| 537 |
+
|
| 538 |
+
const newToolCalls: ToolCall[] = requestsToProcess.map(
|
| 539 |
+
(reqInfo): ToolCall => {
|
| 540 |
+
const toolInstance = this.toolRegistry.getTool(reqInfo.name);
|
| 541 |
+
if (!toolInstance) {
|
| 542 |
+
return {
|
| 543 |
+
status: 'error',
|
| 544 |
+
request: reqInfo,
|
| 545 |
+
response: createErrorResponse(
|
| 546 |
+
reqInfo,
|
| 547 |
+
new Error(`Tool "${reqInfo.name}" not found in registry.`),
|
| 548 |
+
ToolErrorType.TOOL_NOT_REGISTERED,
|
| 549 |
+
),
|
| 550 |
+
durationMs: 0,
|
| 551 |
+
};
|
| 552 |
+
}
|
| 553 |
+
|
| 554 |
+
const invocationOrError = this.buildInvocation(
|
| 555 |
+
toolInstance,
|
| 556 |
+
reqInfo.args,
|
| 557 |
+
);
|
| 558 |
+
if (invocationOrError instanceof Error) {
|
| 559 |
+
return {
|
| 560 |
+
status: 'error',
|
| 561 |
+
request: reqInfo,
|
| 562 |
+
tool: toolInstance,
|
| 563 |
+
response: createErrorResponse(
|
| 564 |
+
reqInfo,
|
| 565 |
+
invocationOrError,
|
| 566 |
+
ToolErrorType.INVALID_TOOL_PARAMS,
|
| 567 |
+
),
|
| 568 |
+
durationMs: 0,
|
| 569 |
+
};
|
| 570 |
+
}
|
| 571 |
+
|
| 572 |
+
return {
|
| 573 |
+
status: 'validating',
|
| 574 |
+
request: reqInfo,
|
| 575 |
+
tool: toolInstance,
|
| 576 |
+
invocation: invocationOrError,
|
| 577 |
+
startTime: Date.now(),
|
| 578 |
+
};
|
| 579 |
+
},
|
| 580 |
+
);
|
| 581 |
+
|
| 582 |
+
this.toolCalls = this.toolCalls.concat(newToolCalls);
|
| 583 |
+
this.notifyToolCallsUpdate();
|
| 584 |
+
|
| 585 |
+
for (const toolCall of newToolCalls) {
|
| 586 |
+
if (toolCall.status !== 'validating') {
|
| 587 |
+
continue;
|
| 588 |
+
}
|
| 589 |
+
|
| 590 |
+
const { request: reqInfo, invocation } = toolCall;
|
| 591 |
+
|
| 592 |
+
try {
|
| 593 |
+
if (signal.aborted) {
|
| 594 |
+
this.setStatusInternal(
|
| 595 |
+
reqInfo.callId,
|
| 596 |
+
'cancelled',
|
| 597 |
+
'Tool call cancelled by user.',
|
| 598 |
+
);
|
| 599 |
+
continue;
|
| 600 |
+
}
|
| 601 |
+
if (this.config.getApprovalMode() === ApprovalMode.YOLO) {
|
| 602 |
+
this.setToolCallOutcome(
|
| 603 |
+
reqInfo.callId,
|
| 604 |
+
ToolConfirmationOutcome.ProceedAlways,
|
| 605 |
+
);
|
| 606 |
+
this.setStatusInternal(reqInfo.callId, 'scheduled');
|
| 607 |
+
} else {
|
| 608 |
+
const confirmationDetails =
|
| 609 |
+
await invocation.shouldConfirmExecute(signal);
|
| 610 |
+
|
| 611 |
+
if (confirmationDetails) {
|
| 612 |
+
// Allow IDE to resolve confirmation
|
| 613 |
+
if (
|
| 614 |
+
confirmationDetails.type === 'edit' &&
|
| 615 |
+
confirmationDetails.ideConfirmation
|
| 616 |
+
) {
|
| 617 |
+
confirmationDetails.ideConfirmation.then((resolution) => {
|
| 618 |
+
if (resolution.status === 'accepted') {
|
| 619 |
+
this.handleConfirmationResponse(
|
| 620 |
+
reqInfo.callId,
|
| 621 |
+
confirmationDetails.onConfirm,
|
| 622 |
+
ToolConfirmationOutcome.ProceedOnce,
|
| 623 |
+
signal,
|
| 624 |
+
);
|
| 625 |
+
} else {
|
| 626 |
+
this.handleConfirmationResponse(
|
| 627 |
+
reqInfo.callId,
|
| 628 |
+
confirmationDetails.onConfirm,
|
| 629 |
+
ToolConfirmationOutcome.Cancel,
|
| 630 |
+
signal,
|
| 631 |
+
);
|
| 632 |
+
}
|
| 633 |
+
});
|
| 634 |
+
}
|
| 635 |
+
|
| 636 |
+
const originalOnConfirm = confirmationDetails.onConfirm;
|
| 637 |
+
const wrappedConfirmationDetails: ToolCallConfirmationDetails = {
|
| 638 |
+
...confirmationDetails,
|
| 639 |
+
onConfirm: (
|
| 640 |
+
outcome: ToolConfirmationOutcome,
|
| 641 |
+
payload?: ToolConfirmationPayload,
|
| 642 |
+
) =>
|
| 643 |
+
this.handleConfirmationResponse(
|
| 644 |
+
reqInfo.callId,
|
| 645 |
+
originalOnConfirm,
|
| 646 |
+
outcome,
|
| 647 |
+
signal,
|
| 648 |
+
payload,
|
| 649 |
+
),
|
| 650 |
+
};
|
| 651 |
+
this.setStatusInternal(
|
| 652 |
+
reqInfo.callId,
|
| 653 |
+
'awaiting_approval',
|
| 654 |
+
wrappedConfirmationDetails,
|
| 655 |
+
);
|
| 656 |
+
} else {
|
| 657 |
+
this.setToolCallOutcome(
|
| 658 |
+
reqInfo.callId,
|
| 659 |
+
ToolConfirmationOutcome.ProceedAlways,
|
| 660 |
+
);
|
| 661 |
+
this.setStatusInternal(reqInfo.callId, 'scheduled');
|
| 662 |
+
}
|
| 663 |
+
}
|
| 664 |
+
} catch (error) {
|
| 665 |
+
this.setStatusInternal(
|
| 666 |
+
reqInfo.callId,
|
| 667 |
+
'error',
|
| 668 |
+
createErrorResponse(
|
| 669 |
+
reqInfo,
|
| 670 |
+
error instanceof Error ? error : new Error(String(error)),
|
| 671 |
+
ToolErrorType.UNHANDLED_EXCEPTION,
|
| 672 |
+
),
|
| 673 |
+
);
|
| 674 |
+
}
|
| 675 |
+
}
|
| 676 |
+
this.attemptExecutionOfScheduledCalls(signal);
|
| 677 |
+
void this.checkAndNotifyCompletion();
|
| 678 |
+
} finally {
|
| 679 |
+
this.isScheduling = false;
|
| 680 |
+
}
|
| 681 |
+
}
|
| 682 |
+
|
| 683 |
+
async handleConfirmationResponse(
|
| 684 |
+
callId: string,
|
| 685 |
+
originalOnConfirm: (outcome: ToolConfirmationOutcome) => Promise<void>,
|
| 686 |
+
outcome: ToolConfirmationOutcome,
|
| 687 |
+
signal: AbortSignal,
|
| 688 |
+
payload?: ToolConfirmationPayload,
|
| 689 |
+
): Promise<void> {
|
| 690 |
+
const toolCall = this.toolCalls.find(
|
| 691 |
+
(c) => c.request.callId === callId && c.status === 'awaiting_approval',
|
| 692 |
+
);
|
| 693 |
+
|
| 694 |
+
if (toolCall && toolCall.status === 'awaiting_approval') {
|
| 695 |
+
await originalOnConfirm(outcome);
|
| 696 |
+
}
|
| 697 |
+
|
| 698 |
+
if (outcome === ToolConfirmationOutcome.ProceedAlways) {
|
| 699 |
+
await this.autoApproveCompatiblePendingTools(signal, callId);
|
| 700 |
+
}
|
| 701 |
+
|
| 702 |
+
this.setToolCallOutcome(callId, outcome);
|
| 703 |
+
|
| 704 |
+
if (outcome === ToolConfirmationOutcome.Cancel || signal.aborted) {
|
| 705 |
+
this.setStatusInternal(
|
| 706 |
+
callId,
|
| 707 |
+
'cancelled',
|
| 708 |
+
'User did not allow tool call',
|
| 709 |
+
);
|
| 710 |
+
} else if (outcome === ToolConfirmationOutcome.ModifyWithEditor) {
|
| 711 |
+
const waitingToolCall = toolCall as WaitingToolCall;
|
| 712 |
+
if (isModifiableDeclarativeTool(waitingToolCall.tool)) {
|
| 713 |
+
const modifyContext = waitingToolCall.tool.getModifyContext(signal);
|
| 714 |
+
const editorType = this.getPreferredEditor();
|
| 715 |
+
if (!editorType) {
|
| 716 |
+
return;
|
| 717 |
+
}
|
| 718 |
+
|
| 719 |
+
this.setStatusInternal(callId, 'awaiting_approval', {
|
| 720 |
+
...waitingToolCall.confirmationDetails,
|
| 721 |
+
isModifying: true,
|
| 722 |
+
} as ToolCallConfirmationDetails);
|
| 723 |
+
|
| 724 |
+
const { updatedParams, updatedDiff } = await modifyWithEditor<
|
| 725 |
+
typeof waitingToolCall.request.args
|
| 726 |
+
>(
|
| 727 |
+
waitingToolCall.request.args,
|
| 728 |
+
modifyContext as ModifyContext<typeof waitingToolCall.request.args>,
|
| 729 |
+
editorType,
|
| 730 |
+
signal,
|
| 731 |
+
this.onEditorClose,
|
| 732 |
+
);
|
| 733 |
+
this.setArgsInternal(callId, updatedParams);
|
| 734 |
+
this.setStatusInternal(callId, 'awaiting_approval', {
|
| 735 |
+
...waitingToolCall.confirmationDetails,
|
| 736 |
+
fileDiff: updatedDiff,
|
| 737 |
+
isModifying: false,
|
| 738 |
+
} as ToolCallConfirmationDetails);
|
| 739 |
+
}
|
| 740 |
+
} else {
|
| 741 |
+
// If the client provided new content, apply it before scheduling.
|
| 742 |
+
if (payload?.newContent && toolCall) {
|
| 743 |
+
await this._applyInlineModify(
|
| 744 |
+
toolCall as WaitingToolCall,
|
| 745 |
+
payload,
|
| 746 |
+
signal,
|
| 747 |
+
);
|
| 748 |
+
}
|
| 749 |
+
this.setStatusInternal(callId, 'scheduled');
|
| 750 |
+
}
|
| 751 |
+
this.attemptExecutionOfScheduledCalls(signal);
|
| 752 |
+
}
|
| 753 |
+
|
| 754 |
+
/**
|
| 755 |
+
* Applies user-provided content changes to a tool call that is awaiting confirmation.
|
| 756 |
+
* This method updates the tool's arguments and refreshes the confirmation prompt with a new diff
|
| 757 |
+
* before the tool is scheduled for execution.
|
| 758 |
+
* @private
|
| 759 |
+
*/
|
| 760 |
+
private async _applyInlineModify(
|
| 761 |
+
toolCall: WaitingToolCall,
|
| 762 |
+
payload: ToolConfirmationPayload,
|
| 763 |
+
signal: AbortSignal,
|
| 764 |
+
): Promise<void> {
|
| 765 |
+
if (
|
| 766 |
+
toolCall.confirmationDetails.type !== 'edit' ||
|
| 767 |
+
!isModifiableDeclarativeTool(toolCall.tool)
|
| 768 |
+
) {
|
| 769 |
+
return;
|
| 770 |
+
}
|
| 771 |
+
|
| 772 |
+
const modifyContext = toolCall.tool.getModifyContext(signal);
|
| 773 |
+
const currentContent = await modifyContext.getCurrentContent(
|
| 774 |
+
toolCall.request.args,
|
| 775 |
+
);
|
| 776 |
+
|
| 777 |
+
const updatedParams = modifyContext.createUpdatedParams(
|
| 778 |
+
currentContent,
|
| 779 |
+
payload.newContent,
|
| 780 |
+
toolCall.request.args,
|
| 781 |
+
);
|
| 782 |
+
const updatedDiff = Diff.createPatch(
|
| 783 |
+
modifyContext.getFilePath(toolCall.request.args),
|
| 784 |
+
currentContent,
|
| 785 |
+
payload.newContent,
|
| 786 |
+
'Current',
|
| 787 |
+
'Proposed',
|
| 788 |
+
);
|
| 789 |
+
|
| 790 |
+
this.setArgsInternal(toolCall.request.callId, updatedParams);
|
| 791 |
+
this.setStatusInternal(toolCall.request.callId, 'awaiting_approval', {
|
| 792 |
+
...toolCall.confirmationDetails,
|
| 793 |
+
fileDiff: updatedDiff,
|
| 794 |
+
});
|
| 795 |
+
}
|
| 796 |
+
|
| 797 |
+
private attemptExecutionOfScheduledCalls(signal: AbortSignal): void {
|
| 798 |
+
const allCallsFinalOrScheduled = this.toolCalls.every(
|
| 799 |
+
(call) =>
|
| 800 |
+
call.status === 'scheduled' ||
|
| 801 |
+
call.status === 'cancelled' ||
|
| 802 |
+
call.status === 'success' ||
|
| 803 |
+
call.status === 'error',
|
| 804 |
+
);
|
| 805 |
+
|
| 806 |
+
if (allCallsFinalOrScheduled) {
|
| 807 |
+
const callsToExecute = this.toolCalls.filter(
|
| 808 |
+
(call) => call.status === 'scheduled',
|
| 809 |
+
);
|
| 810 |
+
|
| 811 |
+
callsToExecute.forEach((toolCall) => {
|
| 812 |
+
if (toolCall.status !== 'scheduled') return;
|
| 813 |
+
|
| 814 |
+
const scheduledCall = toolCall;
|
| 815 |
+
const { callId, name: toolName } = scheduledCall.request;
|
| 816 |
+
const invocation = scheduledCall.invocation;
|
| 817 |
+
this.setStatusInternal(callId, 'executing');
|
| 818 |
+
|
| 819 |
+
const liveOutputCallback =
|
| 820 |
+
scheduledCall.tool.canUpdateOutput && this.outputUpdateHandler
|
| 821 |
+
? (outputChunk: string) => {
|
| 822 |
+
if (this.outputUpdateHandler) {
|
| 823 |
+
this.outputUpdateHandler(callId, outputChunk);
|
| 824 |
+
}
|
| 825 |
+
this.toolCalls = this.toolCalls.map((tc) =>
|
| 826 |
+
tc.request.callId === callId && tc.status === 'executing'
|
| 827 |
+
? { ...tc, liveOutput: outputChunk }
|
| 828 |
+
: tc,
|
| 829 |
+
);
|
| 830 |
+
this.notifyToolCallsUpdate();
|
| 831 |
+
}
|
| 832 |
+
: undefined;
|
| 833 |
+
|
| 834 |
+
invocation
|
| 835 |
+
.execute(signal, liveOutputCallback)
|
| 836 |
+
.then(async (toolResult: ToolResult) => {
|
| 837 |
+
if (signal.aborted) {
|
| 838 |
+
this.setStatusInternal(
|
| 839 |
+
callId,
|
| 840 |
+
'cancelled',
|
| 841 |
+
'User cancelled tool execution.',
|
| 842 |
+
);
|
| 843 |
+
return;
|
| 844 |
+
}
|
| 845 |
+
|
| 846 |
+
if (toolResult.error === undefined) {
|
| 847 |
+
const response = convertToFunctionResponse(
|
| 848 |
+
toolName,
|
| 849 |
+
callId,
|
| 850 |
+
toolResult.llmContent,
|
| 851 |
+
);
|
| 852 |
+
const successResponse: ToolCallResponseInfo = {
|
| 853 |
+
callId,
|
| 854 |
+
responseParts: response,
|
| 855 |
+
resultDisplay: toolResult.returnDisplay,
|
| 856 |
+
error: undefined,
|
| 857 |
+
errorType: undefined,
|
| 858 |
+
};
|
| 859 |
+
this.setStatusInternal(callId, 'success', successResponse);
|
| 860 |
+
} else {
|
| 861 |
+
// It is a failure
|
| 862 |
+
const error = new Error(toolResult.error.message);
|
| 863 |
+
const errorResponse = createErrorResponse(
|
| 864 |
+
scheduledCall.request,
|
| 865 |
+
error,
|
| 866 |
+
toolResult.error.type,
|
| 867 |
+
);
|
| 868 |
+
this.setStatusInternal(callId, 'error', errorResponse);
|
| 869 |
+
}
|
| 870 |
+
})
|
| 871 |
+
.catch((executionError: Error) => {
|
| 872 |
+
this.setStatusInternal(
|
| 873 |
+
callId,
|
| 874 |
+
'error',
|
| 875 |
+
createErrorResponse(
|
| 876 |
+
scheduledCall.request,
|
| 877 |
+
executionError instanceof Error
|
| 878 |
+
? executionError
|
| 879 |
+
: new Error(String(executionError)),
|
| 880 |
+
ToolErrorType.UNHANDLED_EXCEPTION,
|
| 881 |
+
),
|
| 882 |
+
);
|
| 883 |
+
});
|
| 884 |
+
});
|
| 885 |
+
}
|
| 886 |
+
}
|
| 887 |
+
|
| 888 |
+
private async checkAndNotifyCompletion(): Promise<void> {
|
| 889 |
+
const allCallsAreTerminal = this.toolCalls.every(
|
| 890 |
+
(call) =>
|
| 891 |
+
call.status === 'success' ||
|
| 892 |
+
call.status === 'error' ||
|
| 893 |
+
call.status === 'cancelled',
|
| 894 |
+
);
|
| 895 |
+
|
| 896 |
+
if (this.toolCalls.length > 0 && allCallsAreTerminal) {
|
| 897 |
+
const completedCalls = [...this.toolCalls] as CompletedToolCall[];
|
| 898 |
+
this.toolCalls = [];
|
| 899 |
+
|
| 900 |
+
for (const call of completedCalls) {
|
| 901 |
+
logToolCall(this.config, new ToolCallEvent(call));
|
| 902 |
+
}
|
| 903 |
+
|
| 904 |
+
if (this.onAllToolCallsComplete) {
|
| 905 |
+
this.isFinalizingToolCalls = true;
|
| 906 |
+
await this.onAllToolCallsComplete(completedCalls);
|
| 907 |
+
this.isFinalizingToolCalls = false;
|
| 908 |
+
}
|
| 909 |
+
this.notifyToolCallsUpdate();
|
| 910 |
+
// After completion, process the next item in the queue.
|
| 911 |
+
if (this.requestQueue.length > 0) {
|
| 912 |
+
const next = this.requestQueue.shift()!;
|
| 913 |
+
this._schedule(next.request, next.signal)
|
| 914 |
+
.then(next.resolve)
|
| 915 |
+
.catch(next.reject);
|
| 916 |
+
}
|
| 917 |
+
}
|
| 918 |
+
}
|
| 919 |
+
|
| 920 |
+
private notifyToolCallsUpdate(): void {
|
| 921 |
+
if (this.onToolCallsUpdate) {
|
| 922 |
+
this.onToolCallsUpdate([...this.toolCalls]);
|
| 923 |
+
}
|
| 924 |
+
}
|
| 925 |
+
|
| 926 |
+
private setToolCallOutcome(callId: string, outcome: ToolConfirmationOutcome) {
|
| 927 |
+
this.toolCalls = this.toolCalls.map((call) => {
|
| 928 |
+
if (call.request.callId !== callId) return call;
|
| 929 |
+
return {
|
| 930 |
+
...call,
|
| 931 |
+
outcome,
|
| 932 |
+
};
|
| 933 |
+
});
|
| 934 |
+
}
|
| 935 |
+
|
| 936 |
+
private async autoApproveCompatiblePendingTools(
|
| 937 |
+
signal: AbortSignal,
|
| 938 |
+
triggeringCallId: string,
|
| 939 |
+
): Promise<void> {
|
| 940 |
+
const pendingTools = this.toolCalls.filter(
|
| 941 |
+
(call) =>
|
| 942 |
+
call.status === 'awaiting_approval' &&
|
| 943 |
+
call.request.callId !== triggeringCallId,
|
| 944 |
+
) as WaitingToolCall[];
|
| 945 |
+
|
| 946 |
+
for (const pendingTool of pendingTools) {
|
| 947 |
+
try {
|
| 948 |
+
const stillNeedsConfirmation =
|
| 949 |
+
await pendingTool.invocation.shouldConfirmExecute(signal);
|
| 950 |
+
|
| 951 |
+
if (!stillNeedsConfirmation) {
|
| 952 |
+
this.setToolCallOutcome(
|
| 953 |
+
pendingTool.request.callId,
|
| 954 |
+
ToolConfirmationOutcome.ProceedAlways,
|
| 955 |
+
);
|
| 956 |
+
this.setStatusInternal(pendingTool.request.callId, 'scheduled');
|
| 957 |
+
}
|
| 958 |
+
} catch (error) {
|
| 959 |
+
console.error(
|
| 960 |
+
`Error checking confirmation for tool ${pendingTool.request.callId}:`,
|
| 961 |
+
error,
|
| 962 |
+
);
|
| 963 |
+
}
|
| 964 |
+
}
|
| 965 |
+
}
|
| 966 |
+
}
|
projects/ui/qwen-code/packages/core/src/core/geminiChat.test.ts
ADDED
|
@@ -0,0 +1,890 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* @license
|
| 3 |
+
* Copyright 2025 Google LLC
|
| 4 |
+
* SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
| 8 |
+
import {
|
| 9 |
+
Content,
|
| 10 |
+
Models,
|
| 11 |
+
GenerateContentConfig,
|
| 12 |
+
Part,
|
| 13 |
+
GenerateContentResponse,
|
| 14 |
+
} from '@google/genai';
|
| 15 |
+
import { GeminiChat, EmptyStreamError } from './geminiChat.js';
|
| 16 |
+
import { Config } from '../config/config.js';
|
| 17 |
+
import { setSimulate429 } from '../utils/testUtils.js';
|
| 18 |
+
|
| 19 |
+
// Mocks
|
| 20 |
+
const mockModelsModule = {
|
| 21 |
+
generateContent: vi.fn(),
|
| 22 |
+
generateContentStream: vi.fn(),
|
| 23 |
+
countTokens: vi.fn(),
|
| 24 |
+
embedContent: vi.fn(),
|
| 25 |
+
batchEmbedContents: vi.fn(),
|
| 26 |
+
} as unknown as Models;
|
| 27 |
+
|
| 28 |
+
const { mockLogInvalidChunk, mockLogContentRetry, mockLogContentRetryFailure } =
|
| 29 |
+
vi.hoisted(() => ({
|
| 30 |
+
mockLogInvalidChunk: vi.fn(),
|
| 31 |
+
mockLogContentRetry: vi.fn(),
|
| 32 |
+
mockLogContentRetryFailure: vi.fn(),
|
| 33 |
+
}));
|
| 34 |
+
|
| 35 |
+
vi.mock('../telemetry/loggers.js', () => ({
|
| 36 |
+
logInvalidChunk: mockLogInvalidChunk,
|
| 37 |
+
logContentRetry: mockLogContentRetry,
|
| 38 |
+
logContentRetryFailure: mockLogContentRetryFailure,
|
| 39 |
+
}));
|
| 40 |
+
|
| 41 |
+
describe('GeminiChat', () => {
|
| 42 |
+
let chat: GeminiChat;
|
| 43 |
+
let mockConfig: Config;
|
| 44 |
+
const config: GenerateContentConfig = {};
|
| 45 |
+
|
| 46 |
+
beforeEach(() => {
|
| 47 |
+
vi.clearAllMocks();
|
| 48 |
+
mockConfig = {
|
| 49 |
+
getSessionId: () => 'test-session-id',
|
| 50 |
+
getTelemetryLogPromptsEnabled: () => true,
|
| 51 |
+
getUsageStatisticsEnabled: () => true,
|
| 52 |
+
getDebugMode: () => false,
|
| 53 |
+
getContentGeneratorConfig: () => ({
|
| 54 |
+
authType: 'oauth-personal',
|
| 55 |
+
model: 'test-model',
|
| 56 |
+
}),
|
| 57 |
+
getModel: vi.fn().mockReturnValue('gemini-pro'),
|
| 58 |
+
setModel: vi.fn(),
|
| 59 |
+
getQuotaErrorOccurred: vi.fn().mockReturnValue(false),
|
| 60 |
+
setQuotaErrorOccurred: vi.fn(),
|
| 61 |
+
flashFallbackHandler: undefined,
|
| 62 |
+
} as unknown as Config;
|
| 63 |
+
|
| 64 |
+
// Disable 429 simulation for tests
|
| 65 |
+
setSimulate429(false);
|
| 66 |
+
// Reset history for each test by creating a new instance
|
| 67 |
+
chat = new GeminiChat(mockConfig, mockModelsModule, config, []);
|
| 68 |
+
});
|
| 69 |
+
|
| 70 |
+
afterEach(() => {
|
| 71 |
+
vi.restoreAllMocks();
|
| 72 |
+
vi.resetAllMocks();
|
| 73 |
+
});
|
| 74 |
+
|
| 75 |
+
describe('sendMessage', () => {
|
| 76 |
+
it('should call generateContent with the correct parameters', async () => {
|
| 77 |
+
const response = {
|
| 78 |
+
candidates: [
|
| 79 |
+
{
|
| 80 |
+
content: {
|
| 81 |
+
parts: [{ text: 'response' }],
|
| 82 |
+
role: 'model',
|
| 83 |
+
},
|
| 84 |
+
finishReason: 'STOP',
|
| 85 |
+
index: 0,
|
| 86 |
+
safetyRatings: [],
|
| 87 |
+
},
|
| 88 |
+
],
|
| 89 |
+
text: () => 'response',
|
| 90 |
+
} as unknown as GenerateContentResponse;
|
| 91 |
+
vi.mocked(mockModelsModule.generateContent).mockResolvedValue(response);
|
| 92 |
+
|
| 93 |
+
await chat.sendMessage({ message: 'hello' }, 'prompt-id-1');
|
| 94 |
+
|
| 95 |
+
expect(mockModelsModule.generateContent).toHaveBeenCalledWith(
|
| 96 |
+
{
|
| 97 |
+
model: 'gemini-pro',
|
| 98 |
+
contents: [{ role: 'user', parts: [{ text: 'hello' }] }],
|
| 99 |
+
config: {},
|
| 100 |
+
},
|
| 101 |
+
'prompt-id-1',
|
| 102 |
+
);
|
| 103 |
+
});
|
| 104 |
+
});
|
| 105 |
+
|
| 106 |
+
describe('sendMessageStream', () => {
|
| 107 |
+
it('should call generateContentStream with the correct parameters', async () => {
|
| 108 |
+
const response = (async function* () {
|
| 109 |
+
yield {
|
| 110 |
+
candidates: [
|
| 111 |
+
{
|
| 112 |
+
content: {
|
| 113 |
+
parts: [{ text: 'response' }],
|
| 114 |
+
role: 'model',
|
| 115 |
+
},
|
| 116 |
+
finishReason: 'STOP',
|
| 117 |
+
index: 0,
|
| 118 |
+
safetyRatings: [],
|
| 119 |
+
},
|
| 120 |
+
],
|
| 121 |
+
text: () => 'response',
|
| 122 |
+
} as unknown as GenerateContentResponse;
|
| 123 |
+
})();
|
| 124 |
+
vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
|
| 125 |
+
response,
|
| 126 |
+
);
|
| 127 |
+
|
| 128 |
+
const stream = await chat.sendMessageStream(
|
| 129 |
+
{ message: 'hello' },
|
| 130 |
+
'prompt-id-1',
|
| 131 |
+
);
|
| 132 |
+
for await (const _ of stream) {
|
| 133 |
+
// consume stream to trigger internal logic
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
expect(mockModelsModule.generateContentStream).toHaveBeenCalledWith(
|
| 137 |
+
{
|
| 138 |
+
model: 'gemini-pro',
|
| 139 |
+
contents: [{ role: 'user', parts: [{ text: 'hello' }] }],
|
| 140 |
+
config: {},
|
| 141 |
+
},
|
| 142 |
+
'prompt-id-1',
|
| 143 |
+
);
|
| 144 |
+
});
|
| 145 |
+
});
|
| 146 |
+
|
| 147 |
+
describe('recordHistory', () => {
|
| 148 |
+
const userInput: Content = {
|
| 149 |
+
role: 'user',
|
| 150 |
+
parts: [{ text: 'User input' }],
|
| 151 |
+
};
|
| 152 |
+
|
| 153 |
+
it('should add user input and a single model output to history', () => {
|
| 154 |
+
const modelOutput: Content[] = [
|
| 155 |
+
{ role: 'model', parts: [{ text: 'Model output' }] },
|
| 156 |
+
];
|
| 157 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 158 |
+
chat.recordHistory(userInput, modelOutput);
|
| 159 |
+
const history = chat.getHistory();
|
| 160 |
+
expect(history).toEqual([userInput, modelOutput[0]]);
|
| 161 |
+
});
|
| 162 |
+
|
| 163 |
+
it('should consolidate adjacent model outputs', () => {
|
| 164 |
+
const modelOutputParts: Content[] = [
|
| 165 |
+
{ role: 'model', parts: [{ text: 'Model part 1' }] },
|
| 166 |
+
{ role: 'model', parts: [{ text: 'Model part 2' }] },
|
| 167 |
+
];
|
| 168 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 169 |
+
chat.recordHistory(userInput, modelOutputParts);
|
| 170 |
+
const history = chat.getHistory();
|
| 171 |
+
expect(history.length).toBe(2);
|
| 172 |
+
expect(history[0]).toEqual(userInput);
|
| 173 |
+
expect(history[1].role).toBe('model');
|
| 174 |
+
expect(history[1].parts).toEqual([{ text: 'Model part 1Model part 2' }]);
|
| 175 |
+
});
|
| 176 |
+
|
| 177 |
+
it('should handle a mix of user and model roles in outputContents (though unusual)', () => {
|
| 178 |
+
const mixedOutput: Content[] = [
|
| 179 |
+
{ role: 'model', parts: [{ text: 'Model 1' }] },
|
| 180 |
+
{ role: 'user', parts: [{ text: 'Unexpected User' }] }, // This should be pushed as is
|
| 181 |
+
{ role: 'model', parts: [{ text: 'Model 2' }] },
|
| 182 |
+
];
|
| 183 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 184 |
+
chat.recordHistory(userInput, mixedOutput);
|
| 185 |
+
const history = chat.getHistory();
|
| 186 |
+
expect(history.length).toBe(4); // user, model1, user_unexpected, model2
|
| 187 |
+
expect(history[0]).toEqual(userInput);
|
| 188 |
+
expect(history[1]).toEqual(mixedOutput[0]);
|
| 189 |
+
expect(history[2]).toEqual(mixedOutput[1]);
|
| 190 |
+
expect(history[3]).toEqual(mixedOutput[2]);
|
| 191 |
+
});
|
| 192 |
+
|
| 193 |
+
it('should consolidate multiple adjacent model outputs correctly', () => {
|
| 194 |
+
const modelOutputParts: Content[] = [
|
| 195 |
+
{ role: 'model', parts: [{ text: 'M1' }] },
|
| 196 |
+
{ role: 'model', parts: [{ text: 'M2' }] },
|
| 197 |
+
{ role: 'model', parts: [{ text: 'M3' }] },
|
| 198 |
+
];
|
| 199 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 200 |
+
chat.recordHistory(userInput, modelOutputParts);
|
| 201 |
+
const history = chat.getHistory();
|
| 202 |
+
expect(history.length).toBe(2);
|
| 203 |
+
expect(history[1].parts).toEqual([{ text: 'M1M2M3' }]);
|
| 204 |
+
});
|
| 205 |
+
|
| 206 |
+
it('should not consolidate if roles are different between model outputs', () => {
|
| 207 |
+
const modelOutputParts: Content[] = [
|
| 208 |
+
{ role: 'model', parts: [{ text: 'M1' }] },
|
| 209 |
+
{ role: 'user', parts: [{ text: 'Interjecting User' }] },
|
| 210 |
+
{ role: 'model', parts: [{ text: 'M2' }] },
|
| 211 |
+
];
|
| 212 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 213 |
+
chat.recordHistory(userInput, modelOutputParts);
|
| 214 |
+
const history = chat.getHistory();
|
| 215 |
+
expect(history.length).toBe(4); // user, M1, Interjecting User, M2
|
| 216 |
+
expect(history[1].parts).toEqual([{ text: 'M1' }]);
|
| 217 |
+
expect(history[3].parts).toEqual([{ text: 'M2' }]);
|
| 218 |
+
});
|
| 219 |
+
|
| 220 |
+
it('should merge with last history entry if it is also a model output', () => {
|
| 221 |
+
// @ts-expect-error Accessing private property for test setup
|
| 222 |
+
chat.history = [
|
| 223 |
+
userInput,
|
| 224 |
+
{ role: 'model', parts: [{ text: 'Initial Model Output' }] },
|
| 225 |
+
]; // Prime the history
|
| 226 |
+
|
| 227 |
+
const newModelOutput: Content[] = [
|
| 228 |
+
{ role: 'model', parts: [{ text: 'New Model Part 1' }] },
|
| 229 |
+
{ role: 'model', parts: [{ text: 'New Model Part 2' }] },
|
| 230 |
+
];
|
| 231 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 232 |
+
chat.recordHistory(userInput, newModelOutput); // userInput here is for the *next* turn, but history is already primed
|
| 233 |
+
|
| 234 |
+
// Reset and set up a more realistic scenario for merging with existing history
|
| 235 |
+
chat = new GeminiChat(mockConfig, mockModelsModule, config, []);
|
| 236 |
+
const firstUserInput: Content = {
|
| 237 |
+
role: 'user',
|
| 238 |
+
parts: [{ text: 'First user input' }],
|
| 239 |
+
};
|
| 240 |
+
const firstModelOutput: Content[] = [
|
| 241 |
+
{ role: 'model', parts: [{ text: 'First model response' }] },
|
| 242 |
+
];
|
| 243 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 244 |
+
chat.recordHistory(firstUserInput, firstModelOutput);
|
| 245 |
+
|
| 246 |
+
const secondUserInput: Content = {
|
| 247 |
+
role: 'user',
|
| 248 |
+
parts: [{ text: 'Second user input' }],
|
| 249 |
+
};
|
| 250 |
+
const secondModelOutput: Content[] = [
|
| 251 |
+
{ role: 'model', parts: [{ text: 'Second model response part 1' }] },
|
| 252 |
+
{ role: 'model', parts: [{ text: 'Second model response part 2' }] },
|
| 253 |
+
];
|
| 254 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 255 |
+
chat.recordHistory(secondUserInput, secondModelOutput);
|
| 256 |
+
|
| 257 |
+
const finalHistory = chat.getHistory();
|
| 258 |
+
expect(finalHistory.length).toBe(4); // user1, model1, user2, model2(consolidated)
|
| 259 |
+
expect(finalHistory[0]).toEqual(firstUserInput);
|
| 260 |
+
expect(finalHistory[1]).toEqual(firstModelOutput[0]);
|
| 261 |
+
expect(finalHistory[2]).toEqual(secondUserInput);
|
| 262 |
+
expect(finalHistory[3].role).toBe('model');
|
| 263 |
+
expect(finalHistory[3].parts).toEqual([
|
| 264 |
+
{ text: 'Second model response part 1Second model response part 2' },
|
| 265 |
+
]);
|
| 266 |
+
});
|
| 267 |
+
|
| 268 |
+
it('should correctly merge consolidated new output with existing model history', () => {
|
| 269 |
+
// Setup: history ends with a model turn
|
| 270 |
+
const initialUser: Content = {
|
| 271 |
+
role: 'user',
|
| 272 |
+
parts: [{ text: 'Initial user query' }],
|
| 273 |
+
};
|
| 274 |
+
const initialModel: Content = {
|
| 275 |
+
role: 'model',
|
| 276 |
+
parts: [{ text: 'Initial model answer.' }],
|
| 277 |
+
};
|
| 278 |
+
chat = new GeminiChat(mockConfig, mockModelsModule, config, [
|
| 279 |
+
initialUser,
|
| 280 |
+
initialModel,
|
| 281 |
+
]);
|
| 282 |
+
|
| 283 |
+
// New interaction
|
| 284 |
+
const currentUserInput: Content = {
|
| 285 |
+
role: 'user',
|
| 286 |
+
parts: [{ text: 'Follow-up question' }],
|
| 287 |
+
};
|
| 288 |
+
const newModelParts: Content[] = [
|
| 289 |
+
{ role: 'model', parts: [{ text: 'Part A of new answer.' }] },
|
| 290 |
+
{ role: 'model', parts: [{ text: 'Part B of new answer.' }] },
|
| 291 |
+
];
|
| 292 |
+
|
| 293 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 294 |
+
chat.recordHistory(currentUserInput, newModelParts);
|
| 295 |
+
const history = chat.getHistory();
|
| 296 |
+
|
| 297 |
+
// Expected: initialUser, initialModel, currentUserInput, consolidatedNewModelParts
|
| 298 |
+
expect(history.length).toBe(4);
|
| 299 |
+
expect(history[0]).toEqual(initialUser);
|
| 300 |
+
expect(history[1]).toEqual(initialModel);
|
| 301 |
+
expect(history[2]).toEqual(currentUserInput);
|
| 302 |
+
expect(history[3].role).toBe('model');
|
| 303 |
+
expect(history[3].parts).toEqual([
|
| 304 |
+
{ text: 'Part A of new answer.Part B of new answer.' },
|
| 305 |
+
]);
|
| 306 |
+
});
|
| 307 |
+
|
| 308 |
+
it('should handle empty modelOutput array', () => {
|
| 309 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 310 |
+
chat.recordHistory(userInput, []);
|
| 311 |
+
const history = chat.getHistory();
|
| 312 |
+
// If modelOutput is empty, it might push a default empty model part depending on isFunctionResponse
|
| 313 |
+
// Assuming isFunctionResponse(userInput) is false for this simple text input
|
| 314 |
+
expect(history.length).toBe(2);
|
| 315 |
+
expect(history[0]).toEqual(userInput);
|
| 316 |
+
expect(history[1].role).toBe('model');
|
| 317 |
+
expect(history[1].parts).toEqual([]);
|
| 318 |
+
});
|
| 319 |
+
|
| 320 |
+
it('should handle aggregating modelOutput', () => {
|
| 321 |
+
const modelOutputUndefinedParts: Content[] = [
|
| 322 |
+
{ role: 'model', parts: [{ text: 'First model part' }] },
|
| 323 |
+
{ role: 'model', parts: [{ text: 'Second model part' }] },
|
| 324 |
+
{ role: 'model', parts: undefined as unknown as Part[] }, // Test undefined parts
|
| 325 |
+
{ role: 'model', parts: [{ text: 'Third model part' }] },
|
| 326 |
+
{ role: 'model', parts: [] }, // Test empty parts array
|
| 327 |
+
];
|
| 328 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 329 |
+
chat.recordHistory(userInput, modelOutputUndefinedParts);
|
| 330 |
+
const history = chat.getHistory();
|
| 331 |
+
expect(history.length).toBe(5);
|
| 332 |
+
expect(history[0]).toEqual(userInput);
|
| 333 |
+
expect(history[1].role).toBe('model');
|
| 334 |
+
expect(history[1].parts).toEqual([
|
| 335 |
+
{ text: 'First model partSecond model part' },
|
| 336 |
+
]);
|
| 337 |
+
expect(history[2].role).toBe('model');
|
| 338 |
+
expect(history[2].parts).toBeUndefined();
|
| 339 |
+
expect(history[3].role).toBe('model');
|
| 340 |
+
expect(history[3].parts).toEqual([{ text: 'Third model part' }]);
|
| 341 |
+
expect(history[4].role).toBe('model');
|
| 342 |
+
expect(history[4].parts).toEqual([]);
|
| 343 |
+
});
|
| 344 |
+
|
| 345 |
+
it('should handle modelOutput with parts being undefined or empty (if they pass initial every check)', () => {
|
| 346 |
+
const modelOutputUndefinedParts: Content[] = [
|
| 347 |
+
{ role: 'model', parts: [{ text: 'Text part' }] },
|
| 348 |
+
{ role: 'model', parts: undefined as unknown as Part[] }, // Test undefined parts
|
| 349 |
+
{ role: 'model', parts: [] }, // Test empty parts array
|
| 350 |
+
];
|
| 351 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 352 |
+
chat.recordHistory(userInput, modelOutputUndefinedParts);
|
| 353 |
+
const history = chat.getHistory();
|
| 354 |
+
expect(history.length).toBe(4); // userInput, model1 (text), model2 (undefined parts), model3 (empty parts)
|
| 355 |
+
expect(history[0]).toEqual(userInput);
|
| 356 |
+
expect(history[1].role).toBe('model');
|
| 357 |
+
expect(history[1].parts).toEqual([{ text: 'Text part' }]);
|
| 358 |
+
expect(history[2].role).toBe('model');
|
| 359 |
+
expect(history[2].parts).toBeUndefined();
|
| 360 |
+
expect(history[3].role).toBe('model');
|
| 361 |
+
expect(history[3].parts).toEqual([]);
|
| 362 |
+
});
|
| 363 |
+
|
| 364 |
+
it('should correctly handle automaticFunctionCallingHistory', () => {
|
| 365 |
+
const afcHistory: Content[] = [
|
| 366 |
+
{ role: 'user', parts: [{ text: 'AFC User' }] },
|
| 367 |
+
{ role: 'model', parts: [{ text: 'AFC Model' }] },
|
| 368 |
+
];
|
| 369 |
+
const modelOutput: Content[] = [
|
| 370 |
+
{ role: 'model', parts: [{ text: 'Regular Model Output' }] },
|
| 371 |
+
];
|
| 372 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 373 |
+
chat.recordHistory(userInput, modelOutput, afcHistory);
|
| 374 |
+
const history = chat.getHistory();
|
| 375 |
+
expect(history.length).toBe(3);
|
| 376 |
+
expect(history[0]).toEqual(afcHistory[0]);
|
| 377 |
+
expect(history[1]).toEqual(afcHistory[1]);
|
| 378 |
+
expect(history[2]).toEqual(modelOutput[0]);
|
| 379 |
+
});
|
| 380 |
+
|
| 381 |
+
it('should add userInput if AFC history is present but empty', () => {
|
| 382 |
+
const modelOutput: Content[] = [
|
| 383 |
+
{ role: 'model', parts: [{ text: 'Model Output' }] },
|
| 384 |
+
];
|
| 385 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 386 |
+
chat.recordHistory(userInput, modelOutput, []); // Empty AFC history
|
| 387 |
+
const history = chat.getHistory();
|
| 388 |
+
expect(history.length).toBe(2);
|
| 389 |
+
expect(history[0]).toEqual(userInput);
|
| 390 |
+
expect(history[1]).toEqual(modelOutput[0]);
|
| 391 |
+
});
|
| 392 |
+
|
| 393 |
+
it('should skip "thought" content from modelOutput', () => {
|
| 394 |
+
const modelOutputWithThought: Content[] = [
|
| 395 |
+
{ role: 'model', parts: [{ thought: true }, { text: 'Visible text' }] },
|
| 396 |
+
{ role: 'model', parts: [{ text: 'Another visible text' }] },
|
| 397 |
+
];
|
| 398 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 399 |
+
chat.recordHistory(userInput, modelOutputWithThought);
|
| 400 |
+
const history = chat.getHistory();
|
| 401 |
+
expect(history.length).toBe(2); // User input + consolidated model output
|
| 402 |
+
expect(history[0]).toEqual(userInput);
|
| 403 |
+
expect(history[1].role).toBe('model');
|
| 404 |
+
// The 'thought' part is skipped, 'Another visible text' becomes the first part.
|
| 405 |
+
expect(history[1].parts).toEqual([{ text: 'Another visible text' }]);
|
| 406 |
+
});
|
| 407 |
+
|
| 408 |
+
it('should skip "thought" content even if it is the only content', () => {
|
| 409 |
+
const modelOutputOnlyThought: Content[] = [
|
| 410 |
+
{ role: 'model', parts: [{ thought: true }] },
|
| 411 |
+
];
|
| 412 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 413 |
+
chat.recordHistory(userInput, modelOutputOnlyThought);
|
| 414 |
+
const history = chat.getHistory();
|
| 415 |
+
expect(history.length).toBe(1); // User input + default empty model part
|
| 416 |
+
expect(history[0]).toEqual(userInput);
|
| 417 |
+
});
|
| 418 |
+
|
| 419 |
+
it('should correctly consolidate text parts when a thought part is in between', () => {
|
| 420 |
+
const modelOutputMixed: Content[] = [
|
| 421 |
+
{ role: 'model', parts: [{ text: 'Part 1.' }] },
|
| 422 |
+
{
|
| 423 |
+
role: 'model',
|
| 424 |
+
parts: [{ thought: true }, { text: 'Should be skipped' }],
|
| 425 |
+
},
|
| 426 |
+
{ role: 'model', parts: [{ text: 'Part 2.' }] },
|
| 427 |
+
];
|
| 428 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 429 |
+
chat.recordHistory(userInput, modelOutputMixed);
|
| 430 |
+
const history = chat.getHistory();
|
| 431 |
+
expect(history.length).toBe(2);
|
| 432 |
+
expect(history[0]).toEqual(userInput);
|
| 433 |
+
expect(history[1].role).toBe('model');
|
| 434 |
+
expect(history[1].parts).toEqual([{ text: 'Part 1.Part 2.' }]);
|
| 435 |
+
});
|
| 436 |
+
|
| 437 |
+
it('should handle multiple thought parts correctly', () => {
|
| 438 |
+
const modelOutputMultipleThoughts: Content[] = [
|
| 439 |
+
{ role: 'model', parts: [{ thought: true }] },
|
| 440 |
+
{ role: 'model', parts: [{ text: 'Visible 1' }] },
|
| 441 |
+
{ role: 'model', parts: [{ thought: true }] },
|
| 442 |
+
{ role: 'model', parts: [{ text: 'Visible 2' }] },
|
| 443 |
+
];
|
| 444 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 445 |
+
chat.recordHistory(userInput, modelOutputMultipleThoughts);
|
| 446 |
+
const history = chat.getHistory();
|
| 447 |
+
expect(history.length).toBe(2);
|
| 448 |
+
expect(history[0]).toEqual(userInput);
|
| 449 |
+
expect(history[1].role).toBe('model');
|
| 450 |
+
expect(history[1].parts).toEqual([{ text: 'Visible 1Visible 2' }]);
|
| 451 |
+
});
|
| 452 |
+
|
| 453 |
+
it('should handle thought part at the end of outputContents', () => {
|
| 454 |
+
const modelOutputThoughtAtEnd: Content[] = [
|
| 455 |
+
{ role: 'model', parts: [{ text: 'Visible text' }] },
|
| 456 |
+
{ role: 'model', parts: [{ thought: true }] },
|
| 457 |
+
];
|
| 458 |
+
// @ts-expect-error Accessing private method for testing purposes
|
| 459 |
+
chat.recordHistory(userInput, modelOutputThoughtAtEnd);
|
| 460 |
+
const history = chat.getHistory();
|
| 461 |
+
expect(history.length).toBe(2);
|
| 462 |
+
expect(history[0]).toEqual(userInput);
|
| 463 |
+
expect(history[1].role).toBe('model');
|
| 464 |
+
expect(history[1].parts).toEqual([{ text: 'Visible text' }]);
|
| 465 |
+
});
|
| 466 |
+
});
|
| 467 |
+
|
| 468 |
+
describe('addHistory', () => {
|
| 469 |
+
it('should add a new content item to the history', () => {
|
| 470 |
+
const newContent: Content = {
|
| 471 |
+
role: 'user',
|
| 472 |
+
parts: [{ text: 'A new message' }],
|
| 473 |
+
};
|
| 474 |
+
chat.addHistory(newContent);
|
| 475 |
+
const history = chat.getHistory();
|
| 476 |
+
expect(history.length).toBe(1);
|
| 477 |
+
expect(history[0]).toEqual(newContent);
|
| 478 |
+
});
|
| 479 |
+
|
| 480 |
+
it('should add multiple items correctly', () => {
|
| 481 |
+
const content1: Content = {
|
| 482 |
+
role: 'user',
|
| 483 |
+
parts: [{ text: 'Message 1' }],
|
| 484 |
+
};
|
| 485 |
+
const content2: Content = {
|
| 486 |
+
role: 'model',
|
| 487 |
+
parts: [{ text: 'Message 2' }],
|
| 488 |
+
};
|
| 489 |
+
chat.addHistory(content1);
|
| 490 |
+
chat.addHistory(content2);
|
| 491 |
+
const history = chat.getHistory();
|
| 492 |
+
expect(history.length).toBe(2);
|
| 493 |
+
expect(history[0]).toEqual(content1);
|
| 494 |
+
expect(history[1]).toEqual(content2);
|
| 495 |
+
});
|
| 496 |
+
});
|
| 497 |
+
|
| 498 |
+
describe('sendMessageStream with retries', () => {
|
| 499 |
+
it('should retry on invalid content, succeed, and report metrics', async () => {
|
| 500 |
+
// Use mockImplementationOnce to provide a fresh, promise-wrapped generator for each attempt.
|
| 501 |
+
vi.mocked(mockModelsModule.generateContentStream)
|
| 502 |
+
.mockImplementationOnce(async () =>
|
| 503 |
+
// First call returns an invalid stream
|
| 504 |
+
(async function* () {
|
| 505 |
+
yield {
|
| 506 |
+
candidates: [{ content: { parts: [{ text: '' }] } }], // Invalid empty text part
|
| 507 |
+
} as unknown as GenerateContentResponse;
|
| 508 |
+
})(),
|
| 509 |
+
)
|
| 510 |
+
.mockImplementationOnce(async () =>
|
| 511 |
+
// Second call returns a valid stream
|
| 512 |
+
(async function* () {
|
| 513 |
+
yield {
|
| 514 |
+
candidates: [
|
| 515 |
+
{ content: { parts: [{ text: 'Successful response' }] } },
|
| 516 |
+
],
|
| 517 |
+
} as unknown as GenerateContentResponse;
|
| 518 |
+
})(),
|
| 519 |
+
);
|
| 520 |
+
|
| 521 |
+
const stream = await chat.sendMessageStream(
|
| 522 |
+
{ message: 'test' },
|
| 523 |
+
'prompt-id-retry-success',
|
| 524 |
+
);
|
| 525 |
+
const chunks = [];
|
| 526 |
+
for await (const chunk of stream) {
|
| 527 |
+
chunks.push(chunk);
|
| 528 |
+
}
|
| 529 |
+
|
| 530 |
+
// Assertions
|
| 531 |
+
expect(mockLogInvalidChunk).toHaveBeenCalledTimes(1);
|
| 532 |
+
expect(mockLogContentRetry).toHaveBeenCalledTimes(1);
|
| 533 |
+
expect(mockLogContentRetryFailure).not.toHaveBeenCalled();
|
| 534 |
+
expect(mockModelsModule.generateContentStream).toHaveBeenCalledTimes(2);
|
| 535 |
+
expect(
|
| 536 |
+
chunks.some(
|
| 537 |
+
(c) =>
|
| 538 |
+
c.candidates?.[0]?.content?.parts?.[0]?.text ===
|
| 539 |
+
'Successful response',
|
| 540 |
+
),
|
| 541 |
+
).toBe(true);
|
| 542 |
+
|
| 543 |
+
// Check that history was recorded correctly once, with no duplicates.
|
| 544 |
+
const history = chat.getHistory();
|
| 545 |
+
expect(history.length).toBe(2);
|
| 546 |
+
expect(history[0]).toEqual({
|
| 547 |
+
role: 'user',
|
| 548 |
+
parts: [{ text: 'test' }],
|
| 549 |
+
});
|
| 550 |
+
expect(history[1]).toEqual({
|
| 551 |
+
role: 'model',
|
| 552 |
+
parts: [{ text: 'Successful response' }],
|
| 553 |
+
});
|
| 554 |
+
});
|
| 555 |
+
|
| 556 |
+
it('should fail after all retries on persistent invalid content and report metrics', async () => {
|
| 557 |
+
vi.mocked(mockModelsModule.generateContentStream).mockImplementation(
|
| 558 |
+
async () =>
|
| 559 |
+
(async function* () {
|
| 560 |
+
yield {
|
| 561 |
+
candidates: [
|
| 562 |
+
{
|
| 563 |
+
content: {
|
| 564 |
+
parts: [{ text: '' }],
|
| 565 |
+
role: 'model',
|
| 566 |
+
},
|
| 567 |
+
},
|
| 568 |
+
],
|
| 569 |
+
} as unknown as GenerateContentResponse;
|
| 570 |
+
})(),
|
| 571 |
+
);
|
| 572 |
+
|
| 573 |
+
// This helper function consumes the stream and allows us to test for rejection.
|
| 574 |
+
async function consumeStreamAndExpectError() {
|
| 575 |
+
const stream = await chat.sendMessageStream(
|
| 576 |
+
{ message: 'test' },
|
| 577 |
+
'prompt-id-retry-fail',
|
| 578 |
+
);
|
| 579 |
+
for await (const _ of stream) {
|
| 580 |
+
// Must loop to trigger the internal logic that throws.
|
| 581 |
+
}
|
| 582 |
+
}
|
| 583 |
+
|
| 584 |
+
await expect(consumeStreamAndExpectError()).rejects.toThrow(
|
| 585 |
+
EmptyStreamError,
|
| 586 |
+
);
|
| 587 |
+
|
| 588 |
+
// Should be called 3 times (initial + 2 retries)
|
| 589 |
+
expect(mockModelsModule.generateContentStream).toHaveBeenCalledTimes(3);
|
| 590 |
+
expect(mockLogInvalidChunk).toHaveBeenCalledTimes(3);
|
| 591 |
+
expect(mockLogContentRetry).toHaveBeenCalledTimes(2);
|
| 592 |
+
expect(mockLogContentRetryFailure).toHaveBeenCalledTimes(1);
|
| 593 |
+
|
| 594 |
+
// History should be clean, as if the failed turn never happened.
|
| 595 |
+
const history = chat.getHistory();
|
| 596 |
+
expect(history.length).toBe(0);
|
| 597 |
+
});
|
| 598 |
+
});
|
| 599 |
+
it('should correctly retry and append to an existing history mid-conversation', async () => {
|
| 600 |
+
// 1. Setup
|
| 601 |
+
const initialHistory: Content[] = [
|
| 602 |
+
{ role: 'user', parts: [{ text: 'First question' }] },
|
| 603 |
+
{ role: 'model', parts: [{ text: 'First answer' }] },
|
| 604 |
+
];
|
| 605 |
+
chat.setHistory(initialHistory);
|
| 606 |
+
|
| 607 |
+
// 2. Mock the API to fail once with an empty stream, then succeed.
|
| 608 |
+
vi.mocked(mockModelsModule.generateContentStream)
|
| 609 |
+
.mockImplementationOnce(async () =>
|
| 610 |
+
(async function* () {
|
| 611 |
+
yield {
|
| 612 |
+
candidates: [{ content: { parts: [{ text: '' }] } }],
|
| 613 |
+
} as unknown as GenerateContentResponse;
|
| 614 |
+
})(),
|
| 615 |
+
)
|
| 616 |
+
.mockImplementationOnce(async () =>
|
| 617 |
+
// Second attempt succeeds
|
| 618 |
+
(async function* () {
|
| 619 |
+
yield {
|
| 620 |
+
candidates: [{ content: { parts: [{ text: 'Second answer' }] } }],
|
| 621 |
+
} as unknown as GenerateContentResponse;
|
| 622 |
+
})(),
|
| 623 |
+
);
|
| 624 |
+
|
| 625 |
+
// 3. Send a new message
|
| 626 |
+
const stream = await chat.sendMessageStream(
|
| 627 |
+
{ message: 'Second question' },
|
| 628 |
+
'prompt-id-retry-existing',
|
| 629 |
+
);
|
| 630 |
+
for await (const _ of stream) {
|
| 631 |
+
// consume stream
|
| 632 |
+
}
|
| 633 |
+
|
| 634 |
+
// 4. Assert the final history and metrics
|
| 635 |
+
const history = chat.getHistory();
|
| 636 |
+
expect(history.length).toBe(4);
|
| 637 |
+
|
| 638 |
+
// Assert that the correct metrics were reported for one empty-stream retry
|
| 639 |
+
expect(mockLogContentRetry).toHaveBeenCalledTimes(1);
|
| 640 |
+
|
| 641 |
+
// Explicitly verify the structure of each part to satisfy TypeScript
|
| 642 |
+
const turn1 = history[0];
|
| 643 |
+
if (!turn1?.parts?.[0] || !('text' in turn1.parts[0])) {
|
| 644 |
+
throw new Error('Test setup error: First turn is not a valid text part.');
|
| 645 |
+
}
|
| 646 |
+
expect(turn1.parts[0].text).toBe('First question');
|
| 647 |
+
|
| 648 |
+
const turn2 = history[1];
|
| 649 |
+
if (!turn2?.parts?.[0] || !('text' in turn2.parts[0])) {
|
| 650 |
+
throw new Error(
|
| 651 |
+
'Test setup error: Second turn is not a valid text part.',
|
| 652 |
+
);
|
| 653 |
+
}
|
| 654 |
+
expect(turn2.parts[0].text).toBe('First answer');
|
| 655 |
+
|
| 656 |
+
const turn3 = history[2];
|
| 657 |
+
if (!turn3?.parts?.[0] || !('text' in turn3.parts[0])) {
|
| 658 |
+
throw new Error('Test setup error: Third turn is not a valid text part.');
|
| 659 |
+
}
|
| 660 |
+
expect(turn3.parts[0].text).toBe('Second question');
|
| 661 |
+
|
| 662 |
+
const turn4 = history[3];
|
| 663 |
+
if (!turn4?.parts?.[0] || !('text' in turn4.parts[0])) {
|
| 664 |
+
throw new Error(
|
| 665 |
+
'Test setup error: Fourth turn is not a valid text part.',
|
| 666 |
+
);
|
| 667 |
+
}
|
| 668 |
+
expect(turn4.parts[0].text).toBe('Second answer');
|
| 669 |
+
});
|
| 670 |
+
|
| 671 |
+
describe('concurrency control', () => {
|
| 672 |
+
it('should queue a subsequent sendMessage call until the first one completes', async () => {
|
| 673 |
+
// 1. Create promises to manually control when the API calls resolve
|
| 674 |
+
let firstCallResolver: (value: GenerateContentResponse) => void;
|
| 675 |
+
const firstCallPromise = new Promise<GenerateContentResponse>(
|
| 676 |
+
(resolve) => {
|
| 677 |
+
firstCallResolver = resolve;
|
| 678 |
+
},
|
| 679 |
+
);
|
| 680 |
+
|
| 681 |
+
let secondCallResolver: (value: GenerateContentResponse) => void;
|
| 682 |
+
const secondCallPromise = new Promise<GenerateContentResponse>(
|
| 683 |
+
(resolve) => {
|
| 684 |
+
secondCallResolver = resolve;
|
| 685 |
+
},
|
| 686 |
+
);
|
| 687 |
+
|
| 688 |
+
// A standard response body for the mock
|
| 689 |
+
const mockResponse = {
|
| 690 |
+
candidates: [
|
| 691 |
+
{
|
| 692 |
+
content: { parts: [{ text: 'response' }], role: 'model' },
|
| 693 |
+
},
|
| 694 |
+
],
|
| 695 |
+
} as unknown as GenerateContentResponse;
|
| 696 |
+
|
| 697 |
+
// 2. Mock the API to return our controllable promises in order
|
| 698 |
+
vi.mocked(mockModelsModule.generateContent)
|
| 699 |
+
.mockReturnValueOnce(firstCallPromise)
|
| 700 |
+
.mockReturnValueOnce(secondCallPromise);
|
| 701 |
+
|
| 702 |
+
// 3. Start the first message call. Do not await it yet.
|
| 703 |
+
const firstMessagePromise = chat.sendMessage(
|
| 704 |
+
{ message: 'first' },
|
| 705 |
+
'prompt-1',
|
| 706 |
+
);
|
| 707 |
+
|
| 708 |
+
// Give the event loop a chance to run the async call up to the `await`
|
| 709 |
+
await new Promise(process.nextTick);
|
| 710 |
+
|
| 711 |
+
// 4. While the first call is "in-flight", start the second message call.
|
| 712 |
+
const secondMessagePromise = chat.sendMessage(
|
| 713 |
+
{ message: 'second' },
|
| 714 |
+
'prompt-2',
|
| 715 |
+
);
|
| 716 |
+
|
| 717 |
+
// 5. CRUCIAL CHECK: At this point, only the first API call should have been made.
|
| 718 |
+
// The second call should be waiting on `sendPromise`.
|
| 719 |
+
expect(mockModelsModule.generateContent).toHaveBeenCalledTimes(1);
|
| 720 |
+
expect(mockModelsModule.generateContent).toHaveBeenCalledWith(
|
| 721 |
+
expect.objectContaining({
|
| 722 |
+
contents: expect.arrayContaining([
|
| 723 |
+
expect.objectContaining({ parts: [{ text: 'first' }] }),
|
| 724 |
+
]),
|
| 725 |
+
}),
|
| 726 |
+
'prompt-1',
|
| 727 |
+
);
|
| 728 |
+
|
| 729 |
+
// 6. Unblock the first API call and wait for the first message to fully complete.
|
| 730 |
+
firstCallResolver!(mockResponse);
|
| 731 |
+
await firstMessagePromise;
|
| 732 |
+
|
| 733 |
+
// Give the event loop a chance to unblock and run the second call.
|
| 734 |
+
await new Promise(process.nextTick);
|
| 735 |
+
|
| 736 |
+
// 7. CRUCIAL CHECK: Now, the second API call should have been made.
|
| 737 |
+
expect(mockModelsModule.generateContent).toHaveBeenCalledTimes(2);
|
| 738 |
+
expect(mockModelsModule.generateContent).toHaveBeenCalledWith(
|
| 739 |
+
expect.objectContaining({
|
| 740 |
+
contents: expect.arrayContaining([
|
| 741 |
+
expect.objectContaining({ parts: [{ text: 'second' }] }),
|
| 742 |
+
]),
|
| 743 |
+
}),
|
| 744 |
+
'prompt-2',
|
| 745 |
+
);
|
| 746 |
+
|
| 747 |
+
// 8. Clean up by resolving the second call.
|
| 748 |
+
secondCallResolver!(mockResponse);
|
| 749 |
+
await secondMessagePromise;
|
| 750 |
+
});
|
| 751 |
+
});
|
| 752 |
+
it('should retry if the model returns a completely empty stream (no chunks)', async () => {
|
| 753 |
+
// 1. Mock the API to return an empty stream first, then a valid one.
|
| 754 |
+
vi.mocked(mockModelsModule.generateContentStream)
|
| 755 |
+
.mockImplementationOnce(
|
| 756 |
+
// First call resolves to an async generator that yields nothing.
|
| 757 |
+
async () => (async function* () {})(),
|
| 758 |
+
)
|
| 759 |
+
.mockImplementationOnce(
|
| 760 |
+
// Second call returns a valid stream.
|
| 761 |
+
async () =>
|
| 762 |
+
(async function* () {
|
| 763 |
+
yield {
|
| 764 |
+
candidates: [
|
| 765 |
+
{
|
| 766 |
+
content: {
|
| 767 |
+
parts: [{ text: 'Successful response after empty' }],
|
| 768 |
+
},
|
| 769 |
+
},
|
| 770 |
+
],
|
| 771 |
+
} as unknown as GenerateContentResponse;
|
| 772 |
+
})(),
|
| 773 |
+
);
|
| 774 |
+
|
| 775 |
+
// 2. Call the method and consume the stream.
|
| 776 |
+
const stream = await chat.sendMessageStream(
|
| 777 |
+
{ message: 'test empty stream' },
|
| 778 |
+
'prompt-id-empty-stream',
|
| 779 |
+
);
|
| 780 |
+
const chunks = [];
|
| 781 |
+
for await (const chunk of stream) {
|
| 782 |
+
chunks.push(chunk);
|
| 783 |
+
}
|
| 784 |
+
|
| 785 |
+
// 3. Assert the results.
|
| 786 |
+
expect(mockModelsModule.generateContentStream).toHaveBeenCalledTimes(2);
|
| 787 |
+
expect(
|
| 788 |
+
chunks.some(
|
| 789 |
+
(c) =>
|
| 790 |
+
c.candidates?.[0]?.content?.parts?.[0]?.text ===
|
| 791 |
+
'Successful response after empty',
|
| 792 |
+
),
|
| 793 |
+
).toBe(true);
|
| 794 |
+
|
| 795 |
+
const history = chat.getHistory();
|
| 796 |
+
expect(history.length).toBe(2);
|
| 797 |
+
|
| 798 |
+
// Explicitly verify the structure of each part to satisfy TypeScript
|
| 799 |
+
const turn1 = history[0];
|
| 800 |
+
if (!turn1?.parts?.[0] || !('text' in turn1.parts[0])) {
|
| 801 |
+
throw new Error('Test setup error: First turn is not a valid text part.');
|
| 802 |
+
}
|
| 803 |
+
expect(turn1.parts[0].text).toBe('test empty stream');
|
| 804 |
+
|
| 805 |
+
const turn2 = history[1];
|
| 806 |
+
if (!turn2?.parts?.[0] || !('text' in turn2.parts[0])) {
|
| 807 |
+
throw new Error(
|
| 808 |
+
'Test setup error: Second turn is not a valid text part.',
|
| 809 |
+
);
|
| 810 |
+
}
|
| 811 |
+
expect(turn2.parts[0].text).toBe('Successful response after empty');
|
| 812 |
+
});
|
| 813 |
+
it('should queue a subsequent sendMessageStream call until the first stream is fully consumed', async () => {
|
| 814 |
+
// 1. Create a promise to manually control the stream's lifecycle
|
| 815 |
+
let continueFirstStream: () => void;
|
| 816 |
+
const firstStreamContinuePromise = new Promise<void>((resolve) => {
|
| 817 |
+
continueFirstStream = resolve;
|
| 818 |
+
});
|
| 819 |
+
|
| 820 |
+
// 2. Mock the API to return controllable async generators
|
| 821 |
+
const firstStreamGenerator = (async function* () {
|
| 822 |
+
yield {
|
| 823 |
+
candidates: [
|
| 824 |
+
{ content: { parts: [{ text: 'first response part 1' }] } },
|
| 825 |
+
],
|
| 826 |
+
} as unknown as GenerateContentResponse;
|
| 827 |
+
await firstStreamContinuePromise; // Pause the stream
|
| 828 |
+
yield {
|
| 829 |
+
candidates: [{ content: { parts: [{ text: ' part 2' }] } }],
|
| 830 |
+
} as unknown as GenerateContentResponse;
|
| 831 |
+
})();
|
| 832 |
+
|
| 833 |
+
const secondStreamGenerator = (async function* () {
|
| 834 |
+
yield {
|
| 835 |
+
candidates: [{ content: { parts: [{ text: 'second response' }] } }],
|
| 836 |
+
} as unknown as GenerateContentResponse;
|
| 837 |
+
})();
|
| 838 |
+
|
| 839 |
+
vi.mocked(mockModelsModule.generateContentStream)
|
| 840 |
+
.mockResolvedValueOnce(firstStreamGenerator)
|
| 841 |
+
.mockResolvedValueOnce(secondStreamGenerator);
|
| 842 |
+
|
| 843 |
+
// 3. Start the first stream and consume only the first chunk to pause it
|
| 844 |
+
const firstStream = await chat.sendMessageStream(
|
| 845 |
+
{ message: 'first' },
|
| 846 |
+
'prompt-1',
|
| 847 |
+
);
|
| 848 |
+
const firstStreamIterator = firstStream[Symbol.asyncIterator]();
|
| 849 |
+
await firstStreamIterator.next();
|
| 850 |
+
|
| 851 |
+
// 4. While the first stream is paused, start the second call. It will block.
|
| 852 |
+
const secondStreamPromise = chat.sendMessageStream(
|
| 853 |
+
{ message: 'second' },
|
| 854 |
+
'prompt-2',
|
| 855 |
+
);
|
| 856 |
+
|
| 857 |
+
// 5. Assert that only one API call has been made so far.
|
| 858 |
+
expect(mockModelsModule.generateContentStream).toHaveBeenCalledTimes(1);
|
| 859 |
+
|
| 860 |
+
// 6. Unblock and fully consume the first stream to completion.
|
| 861 |
+
continueFirstStream!();
|
| 862 |
+
await firstStreamIterator.next(); // Consume the rest of the stream
|
| 863 |
+
await firstStreamIterator.next(); // Finish the iterator
|
| 864 |
+
|
| 865 |
+
// 7. Now that the first stream is done, await the second promise to get its generator.
|
| 866 |
+
const secondStream = await secondStreamPromise;
|
| 867 |
+
|
| 868 |
+
// 8. Start consuming the second stream, which triggers its internal API call.
|
| 869 |
+
const secondStreamIterator = secondStream[Symbol.asyncIterator]();
|
| 870 |
+
await secondStreamIterator.next();
|
| 871 |
+
|
| 872 |
+
// 9. The second API call should now have been made.
|
| 873 |
+
expect(mockModelsModule.generateContentStream).toHaveBeenCalledTimes(2);
|
| 874 |
+
|
| 875 |
+
// 10. FIX: Fully consume the second stream to ensure recordHistory is called.
|
| 876 |
+
await secondStreamIterator.next(); // This finishes the iterator.
|
| 877 |
+
|
| 878 |
+
// 11. Final check on history.
|
| 879 |
+
const history = chat.getHistory();
|
| 880 |
+
expect(history.length).toBe(4);
|
| 881 |
+
|
| 882 |
+
const turn4 = history[3];
|
| 883 |
+
if (!turn4?.parts?.[0] || !('text' in turn4.parts[0])) {
|
| 884 |
+
throw new Error(
|
| 885 |
+
'Test setup error: Fourth turn is not a valid text part.',
|
| 886 |
+
);
|
| 887 |
+
}
|
| 888 |
+
expect(turn4.parts[0].text).toBe('second response');
|
| 889 |
+
});
|
| 890 |
+
});
|