ADAPT-Chase committed on
Commit
b3315f2
·
verified ·
1 Parent(s): 9ffac69

Add files using upload-large-folder tool

Browse files
projects/ui/qwen-code/packages/core/src/core/geminiChat.ts ADDED
@@ -0,0 +1,763 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ // DISCLAIMER: This is a copied version of https://github.com/googleapis/js-genai/blob/main/src/chats.ts with the intention of working around a key bug
8
+ // where function responses are not treated as "valid" responses: https://b.corp.google.com/issues/420354090
9
+
10
+ import {
11
+ GenerateContentResponse,
12
+ Content,
13
+ GenerateContentConfig,
14
+ SendMessageParameters,
15
+ createUserContent,
16
+ Part,
17
+ Tool,
18
+ } from '@google/genai';
19
+ import { retryWithBackoff } from '../utils/retry.js';
20
+ import { isFunctionResponse } from '../utils/messageInspectors.js';
21
+ import { ContentGenerator, AuthType } from './contentGenerator.js';
22
+ import { Config } from '../config/config.js';
23
+ import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
24
+ import { hasCycleInSchema } from '../tools/tools.js';
25
+ import { StructuredError } from './turn.js';
26
+ import {
27
+ logContentRetry,
28
+ logContentRetryFailure,
29
+ logInvalidChunk,
30
+ } from '../telemetry/loggers.js';
31
+ import {
32
+ ContentRetryEvent,
33
+ ContentRetryFailureEvent,
34
+ InvalidChunkEvent,
35
+ } from '../telemetry/types.js';
36
+
37
+ /**
38
+ * Options for retrying due to invalid content from the model.
39
+ */
40
+ interface ContentRetryOptions {
41
+ /** Total number of attempts to make (1 initial + N retries). */
42
+ maxAttempts: number;
43
+ /** The base delay in milliseconds for linear backoff. */
44
+ initialDelayMs: number;
45
+ }
46
+
47
+ const INVALID_CONTENT_RETRY_OPTIONS: ContentRetryOptions = {
48
+ maxAttempts: 3, // 1 initial call + 2 retries
49
+ initialDelayMs: 500,
50
+ };
51
+ /**
52
+ * Returns true if the response is valid, false otherwise.
53
+ */
54
+ function isValidResponse(response: GenerateContentResponse): boolean {
55
+ // The Dashscope provider returns empty content with usage metadata at the end of the stream
56
+ if (response.usageMetadata) {
57
+ return true;
58
+ }
59
+
60
+ if (response.candidates === undefined || response.candidates.length === 0) {
61
+ return false;
62
+ }
63
+
64
+ const content = response.candidates[0]?.content;
65
+ return content !== undefined && isValidContent(content);
66
+ }
67
+
68
+ function isValidContent(content: Content): boolean {
69
+ if (content.parts === undefined || content.parts.length === 0) {
70
+ return false;
71
+ }
72
+ for (const part of content.parts) {
73
+ if (part === undefined || Object.keys(part).length === 0) {
74
+ return false;
75
+ }
76
+ if (
77
+ !part.thought &&
78
+ part.text !== undefined &&
79
+ part.text === '' &&
80
+ part.functionCall === undefined
81
+ ) {
82
+ return false;
83
+ }
84
+ }
85
+ return true;
86
+ }
87
+
88
+ /**
89
+ * Validates the history contains the correct roles.
90
+ *
91
+ * @throws Error if the history does not start with a user turn.
92
+ * @throws Error if the history contains an invalid role.
93
+ */
94
+ function validateHistory(history: Content[]) {
95
+ for (const content of history) {
96
+ if (content.role !== 'user' && content.role !== 'model') {
97
+ throw new Error(`Role must be user or model, but got ${content.role}.`);
98
+ }
99
+ }
100
+ }
101
+
102
+ /**
103
+ * Extracts the curated (valid) history from a comprehensive history.
104
+ *
105
+ * @remarks
106
+ * The model may sometimes generate invalid or empty contents(e.g., due to safety
107
+ * filters or recitation). Extracting valid turns from the history
108
+ * ensures that subsequent requests could be accepted by the model.
109
+ */
110
+ function extractCuratedHistory(comprehensiveHistory: Content[]): Content[] {
111
+ if (comprehensiveHistory === undefined || comprehensiveHistory.length === 0) {
112
+ return [];
113
+ }
114
+ const curatedHistory: Content[] = [];
115
+ const length = comprehensiveHistory.length;
116
+ let i = 0;
117
+ while (i < length) {
118
+ if (comprehensiveHistory[i].role === 'user') {
119
+ curatedHistory.push(comprehensiveHistory[i]);
120
+ i++;
121
+ } else {
122
+ const modelOutput: Content[] = [];
123
+ let isValid = true;
124
+ while (i < length && comprehensiveHistory[i].role === 'model') {
125
+ modelOutput.push(comprehensiveHistory[i]);
126
+ if (isValid && !isValidContent(comprehensiveHistory[i])) {
127
+ isValid = false;
128
+ }
129
+ i++;
130
+ }
131
+ if (isValid) {
132
+ curatedHistory.push(...modelOutput);
133
+ }
134
+ }
135
+ }
136
+ return curatedHistory;
137
+ }
138
+
139
+ /**
140
+ * Custom error to signal that a stream completed without valid content,
141
+ * which should trigger a retry.
142
+ */
143
+ export class EmptyStreamError extends Error {
144
+ constructor(message: string) {
145
+ super(message);
146
+ this.name = 'EmptyStreamError';
147
+ }
148
+ }
149
+
150
/**
 * Chat session that enables sending messages to the model with previous
 * conversation context.
 *
 * @remarks
 * The session maintains all the turns between user and model. Message sends
 * are serialized through an internal promise so that concurrent calls cannot
 * interleave their history updates.
 */
export class GeminiChat {
  // A promise to represent the current state of the message being sent to the
  // model. Awaited at the start of every send so calls run one at a time.
  private sendPromise: Promise<void> = Promise.resolve();

  constructor(
    private readonly config: Config,
    private readonly contentGenerator: ContentGenerator,
    private readonly generationConfig: GenerateContentConfig = {},
    private history: Content[] = [],
  ) {
    validateHistory(history);
  }

  /**
   * Handles falling back to Flash model when persistent 429 errors occur for OAuth users.
   * Uses a fallback handler if provided by the config; otherwise, returns null.
   *
   * @param authType - the active auth mechanism; Qwen OAuth and Google login
   *   receive special handling, all others return null.
   * @param error - the error that triggered the fallback check.
   * @returns the fallback model name to continue with, or null.
   */
  private async handleFlashFallback(
    authType?: string,
    error?: unknown,
  ): Promise<string | null> {
    // Handle different auth types
    if (authType === AuthType.QWEN_OAUTH) {
      return this.handleQwenOAuthError(error);
    }

    // Only handle fallback for OAuth users
    if (authType !== AuthType.LOGIN_WITH_GOOGLE) {
      return null;
    }

    const currentModel = this.config.getModel();
    const fallbackModel = DEFAULT_GEMINI_FLASH_MODEL;

    // Don't fallback if already using Flash model
    if (currentModel === fallbackModel) {
      return null;
    }

    // Check if config has a fallback handler (set by CLI package)
    const fallbackHandler = this.config.flashFallbackHandler;
    if (typeof fallbackHandler === 'function') {
      try {
        const accepted = await fallbackHandler(
          currentModel,
          fallbackModel,
          error,
        );
        if (accepted !== false && accepted !== null) {
          this.config.setModel(fallbackModel);
          this.config.setFallbackMode(true);
          return fallbackModel;
        }
        // Check if the model was switched manually in the handler
        if (this.config.getModel() === fallbackModel) {
          return null; // Model was switched but don't continue with current prompt
        }
      } catch (error) {
        console.warn('Flash fallback handler failed:', error);
      }
    }

    return null;
  }

  /** Sets the system instruction used for subsequent requests. */
  setSystemInstruction(sysInstr: string) {
    this.generationConfig.systemInstruction = sysInstr;
  }
  /**
   * Sends a message to the model and returns the response.
   *
   * @remarks
   * This method will wait for the previous message to be processed before
   * sending the next message.
   *
   * @see {@link Chat#sendMessageStream} for streaming method.
   * @param params - parameters for sending messages within a chat session.
   * @returns The model's response.
   *
   * @example
   * ```ts
   * const chat = ai.chats.create({model: 'gemini-2.0-flash'});
   * const response = await chat.sendMessage({
   *   message: 'Why is the sky blue?'
   * });
   * console.log(response.text);
   * ```
   */
  async sendMessage(
    params: SendMessageParameters,
    prompt_id: string,
  ): Promise<GenerateContentResponse> {
    await this.sendPromise;
    const userContent = createUserContent(params.message);
    const requestContents = this.getHistory(true).concat(userContent);

    let response: GenerateContentResponse;

    try {
      const apiCall = () => {
        const modelToUse = this.config.getModel() || DEFAULT_GEMINI_FLASH_MODEL;

        // Prevent Flash model calls immediately after quota error
        if (
          this.config.getQuotaErrorOccurred() &&
          modelToUse === DEFAULT_GEMINI_FLASH_MODEL
        ) {
          throw new Error(
            'Please submit a new query to continue with the Flash model.',
          );
        }

        return this.contentGenerator.generateContent(
          {
            model: modelToUse,
            contents: requestContents,
            config: { ...this.generationConfig, ...params.config },
          },
          prompt_id,
        );
      };

      response = await retryWithBackoff(apiCall, {
        shouldRetry: (error: unknown) => {
          // Check for known error messages and codes.
          if (error instanceof Error && error.message) {
            if (isSchemaDepthError(error.message)) return false;
            if (error.message.includes('429')) return true;
            if (error.message.match(/5\d{2}/)) return true;
          }
          return false; // Don't retry other errors by default
        },
        onPersistent429: async (authType?: string, error?: unknown) =>
          await this.handleFlashFallback(authType, error),
        authType: this.config.getContentGeneratorConfig()?.authType,
      });

      this.sendPromise = (async () => {
        const outputContent = response.candidates?.[0]?.content;
        // Because the AFC input contains the entire curated chat history in
        // addition to the new user input, we need to truncate the AFC history
        // to deduplicate the existing chat history.
        const fullAutomaticFunctionCallingHistory =
          response.automaticFunctionCallingHistory;
        const index = this.getHistory(true).length;
        let automaticFunctionCallingHistory: Content[] = [];
        if (fullAutomaticFunctionCallingHistory != null) {
          automaticFunctionCallingHistory =
            fullAutomaticFunctionCallingHistory.slice(index) ?? [];
        }
        const modelOutput = outputContent ? [outputContent] : [];
        this.recordHistory(
          userContent,
          modelOutput,
          automaticFunctionCallingHistory,
        );
      })();
      await this.sendPromise.catch(() => {
        // Resets sendPromise to avoid subsequent calls failing
        this.sendPromise = Promise.resolve();
      });
      return response;
    } catch (error) {
      this.sendPromise = Promise.resolve();
      throw error;
    }
  }

  /**
   * Sends a message to the model and returns the response in chunks.
   *
   * @remarks
   * This method will wait for the previous message to be processed before
   * sending the next message.
   *
   * @see {@link Chat#sendMessage} for non-streaming method.
   * @param params - parameters for sending the message.
   * @return The model's response.
   *
   * @example
   * ```ts
   * const chat = ai.chats.create({model: 'gemini-2.0-flash'});
   * const response = await chat.sendMessageStream({
   *   message: 'Why is the sky blue?'
   * });
   * for await (const chunk of response) {
   *   console.log(chunk.text);
   * }
   * ```
   */
  async sendMessageStream(
    params: SendMessageParameters,
    prompt_id: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    await this.sendPromise;

    // The returned generator resolves this promise when it finishes (or
    // fails), releasing the next queued send.
    let streamDoneResolver: () => void;
    const streamDonePromise = new Promise<void>((resolve) => {
      streamDoneResolver = resolve;
    });
    this.sendPromise = streamDonePromise;

    const userContent = createUserContent(params.message);

    // Add user content to history ONCE before any attempts.
    this.history.push(userContent);
    const requestContents = this.getHistory(true);

    // eslint-disable-next-line @typescript-eslint/no-this-alias
    const self = this;
    return (async function* () {
      try {
        let lastError: unknown = new Error('Request failed after all retries.');

        // Retry loop for EmptyStreamError with linear backoff; other errors
        // break out immediately.
        for (
          let attempt = 0;
          attempt < INVALID_CONTENT_RETRY_OPTIONS.maxAttempts;
          attempt++
        ) {
          try {
            const stream = await self.makeApiCallAndProcessStream(
              requestContents,
              params,
              prompt_id,
              userContent,
            );

            for await (const chunk of stream) {
              yield chunk;
            }

            lastError = null;
            break;
          } catch (error) {
            lastError = error;
            const isContentError = error instanceof EmptyStreamError;

            if (isContentError) {
              // Check if we have more attempts left.
              if (attempt < INVALID_CONTENT_RETRY_OPTIONS.maxAttempts - 1) {
                logContentRetry(
                  self.config,
                  new ContentRetryEvent(
                    attempt,
                    'EmptyStreamError',
                    INVALID_CONTENT_RETRY_OPTIONS.initialDelayMs,
                  ),
                );
                // Linear backoff: delay grows with the attempt number.
                await new Promise((res) =>
                  setTimeout(
                    res,
                    INVALID_CONTENT_RETRY_OPTIONS.initialDelayMs *
                      (attempt + 1),
                  ),
                );
                continue;
              }
            }
            break;
          }
        }

        if (lastError) {
          if (lastError instanceof EmptyStreamError) {
            logContentRetryFailure(
              self.config,
              new ContentRetryFailureEvent(
                INVALID_CONTENT_RETRY_OPTIONS.maxAttempts,
                'EmptyStreamError',
              ),
            );
          }
          // If the stream fails, remove the user message that was added.
          if (self.history[self.history.length - 1] === userContent) {
            self.history.pop();
          }
          throw lastError;
        }
      } finally {
        streamDoneResolver!();
      }
    })();
  }

  /**
   * Issues the streaming API call (with 429/5xx retry) and wraps the raw
   * stream in processStreamResponse, which validates chunks and records
   * history when the stream completes.
   */
  private async makeApiCallAndProcessStream(
    requestContents: Content[],
    params: SendMessageParameters,
    prompt_id: string,
    userContent: Content,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const apiCall = () => {
      const modelToUse = this.config.getModel();

      // Prevent Flash model calls immediately after quota error
      if (
        this.config.getQuotaErrorOccurred() &&
        modelToUse === DEFAULT_GEMINI_FLASH_MODEL
      ) {
        throw new Error(
          'Please submit a new query to continue with the Flash model.',
        );
      }

      return this.contentGenerator.generateContentStream(
        {
          model: modelToUse,
          contents: requestContents,
          config: { ...this.generationConfig, ...params.config },
        },
        prompt_id,
      );
    };

    const streamResponse = await retryWithBackoff(apiCall, {
      shouldRetry: (error: unknown) => {
        // Retry on rate limits (429) and server errors (5xx) only.
        if (error instanceof Error && error.message) {
          if (isSchemaDepthError(error.message)) return false;
          if (error.message.includes('429')) return true;
          if (error.message.match(/5\d{2}/)) return true;
        }
        return false;
      },
      onPersistent429: async (authType?: string, error?: unknown) =>
        await this.handleFlashFallback(authType, error),
      authType: this.config.getContentGeneratorConfig()?.authType,
    });

    return this.processStreamResponse(streamResponse, userContent);
  }

  /**
   * Returns the chat history.
   *
   * @remarks
   * The history is a list of contents alternating between user and model.
   *
   * There are two types of history:
   * - The `curated history` contains only the valid turns between user and
   * model, which will be included in the subsequent requests sent to the model.
   * - The `comprehensive history` contains all turns, including invalid or
   * empty model outputs, providing a complete record of the history.
   *
   * The history is updated after receiving the response from the model,
   * for streaming response, it means receiving the last chunk of the response.
   *
   * The `comprehensive history` is returned by default. To get the `curated
   * history`, set the `curated` parameter to `true`.
   *
   * @param curated - whether to return the curated history or the comprehensive
   * history.
   * @return History contents alternating between user and model for the entire
   * chat session.
   */
  getHistory(curated: boolean = false): Content[] {
    const history = curated
      ? extractCuratedHistory(this.history)
      : this.history;
    // Deep copy the history to avoid mutating the history outside of the
    // chat session.
    return structuredClone(history);
  }

  /**
   * Clears the chat history.
   */
  clearHistory(): void {
    this.history = [];
  }

  /**
   * Adds a new entry to the chat history.
   */
  addHistory(content: Content): void {
    this.history.push(content);
  }
  /** Replaces the entire chat history (no validation is performed here). */
  setHistory(history: Content[]): void {
    this.history = history;
  }

  /** Sets the tool declarations sent with subsequent requests. */
  setTools(tools: Tool[]): void {
    this.generationConfig.tools = tools;
  }

  /**
   * On schema-depth / invalid-argument errors, appends a hint listing tools
   * whose schemas contain cycles, since those are the likely cause.
   */
  async maybeIncludeSchemaDepthContext(error: StructuredError): Promise<void> {
    // Check for potentially problematic cyclic tools with cyclic schemas
    // and include a recommendation to remove potentially problematic tools.
    if (
      isSchemaDepthError(error.message) ||
      isInvalidArgumentError(error.message)
    ) {
      const tools = this.config.getToolRegistry().getAllTools();
      const cyclicSchemaTools: string[] = [];
      for (const tool of tools) {
        if (
          (tool.schema.parametersJsonSchema &&
            hasCycleInSchema(tool.schema.parametersJsonSchema)) ||
          (tool.schema.parameters && hasCycleInSchema(tool.schema.parameters))
        ) {
          cyclicSchemaTools.push(tool.displayName);
        }
      }
      if (cyclicSchemaTools.length > 0) {
        const extraDetails =
          `\n\nThis error was probably caused by cyclic schema references in one of the following tools, try disabling them with excludeTools:\n\n - ` +
          cyclicSchemaTools.join(`\n - `) +
          `\n`;
        error.message += extraDetails;
      }
    }
  }

  /**
   * Re-yields every chunk from the raw stream while accumulating valid,
   * non-thought parts; when the stream ends it either records the turn in
   * history or throws EmptyStreamError (empty/invalid stream) to trigger a
   * retry in sendMessageStream.
   */
  private async *processStreamResponse(
    streamResponse: AsyncGenerator<GenerateContentResponse>,
    userInput: Content,
  ): AsyncGenerator<GenerateContentResponse> {
    const modelResponseParts: Part[] = [];
    let isStreamInvalid = false;
    let hasReceivedAnyChunk = false;

    for await (const chunk of streamResponse) {
      hasReceivedAnyChunk = true;
      if (isValidResponse(chunk)) {
        const content = chunk.candidates?.[0]?.content;
        if (content) {
          // Filter out thought parts from being added to history.
          if (!this.isThoughtContent(content) && content.parts) {
            modelResponseParts.push(...content.parts);
          }
        }
      } else {
        logInvalidChunk(
          this.config,
          new InvalidChunkEvent('Invalid chunk received from stream.'),
        );
        isStreamInvalid = true;
      }
      yield chunk; // Yield every chunk to the UI immediately.
    }

    // Now that the stream is finished, make a decision.
    // Throw an error if the stream was invalid OR if it was completely empty.
    if (isStreamInvalid || !hasReceivedAnyChunk) {
      throw new EmptyStreamError(
        'Model stream was invalid or completed without valid content.',
      );
    }

    // Use recordHistory to correctly save the conversation turn.
    const modelOutput: Content[] = [
      { role: 'model', parts: modelResponseParts },
    ];
    this.recordHistory(userInput, modelOutput);
  }

  /**
   * Appends one user/model turn to the history, deduplicating the streaming
   * user entry, filtering thought contents, and merging adjacent text-only
   * model contents into a single entry.
   */
  private recordHistory(
    userInput: Content,
    modelOutput: Content[],
    automaticFunctionCallingHistory?: Content[],
  ) {
    const newHistoryEntries: Content[] = [];

    // Part 1: Handle the user's part of the turn.
    if (
      automaticFunctionCallingHistory &&
      automaticFunctionCallingHistory.length > 0
    ) {
      newHistoryEntries.push(
        ...extractCuratedHistory(automaticFunctionCallingHistory),
      );
    } else {
      // Guard for streaming calls where the user input might already be in the history.
      if (
        this.history.length === 0 ||
        this.history[this.history.length - 1] !== userInput
      ) {
        newHistoryEntries.push(userInput);
      }
    }

    // Part 2: Handle the model's part of the turn, filtering out thoughts.
    const nonThoughtModelOutput = modelOutput.filter(
      (content) => !this.isThoughtContent(content),
    );

    let outputContents: Content[] = [];
    if (nonThoughtModelOutput.length > 0) {
      outputContents = nonThoughtModelOutput;
    } else if (
      modelOutput.length === 0 &&
      !isFunctionResponse(userInput) &&
      !automaticFunctionCallingHistory
    ) {
      // Add an empty model response if the model truly returned nothing.
      outputContents.push({ role: 'model', parts: [] } as Content);
    }

    // Part 3: Consolidate the parts of this turn's model response.
    const consolidatedOutputContents: Content[] = [];
    if (outputContents.length > 0) {
      for (const content of outputContents) {
        const lastContent =
          consolidatedOutputContents[consolidatedOutputContents.length - 1];
        if (this.hasTextContent(lastContent) && this.hasTextContent(content)) {
          // Merge consecutive text contents by concatenating their first
          // text parts and appending any remaining parts.
          lastContent.parts[0].text += content.parts[0].text || '';
          if (content.parts.length > 1) {
            lastContent.parts.push(...content.parts.slice(1));
          }
        } else {
          consolidatedOutputContents.push(content);
        }
      }
    }

    // Part 4: Add the new turn (user and model parts) to the main history.
    this.history.push(...newHistoryEntries, ...consolidatedOutputContents);
  }

  /** True when the content is a model content whose first part has non-empty text. */
  private hasTextContent(
    content: Content | undefined,
  ): content is Content & { parts: [{ text: string }, ...Part[]] } {
    return !!(
      content &&
      content.role === 'model' &&
      content.parts &&
      content.parts.length > 0 &&
      typeof content.parts[0].text === 'string' &&
      content.parts[0].text !== ''
    );
  }

  /** True when the content is a model content whose first part is a thought. */
  private isThoughtContent(
    content: Content | undefined,
  ): content is Content & { parts: [{ thought: boolean }, ...Part[]] } {
    return !!(
      content &&
      content.role === 'model' &&
      content.parts &&
      content.parts.length > 0 &&
      typeof content.parts[0].thought === 'boolean' &&
      content.parts[0].thought === true
    );
  }

  /**
   * Handles Qwen OAuth authentication errors and rate limiting.
   * Always returns null (no model fallback); only logs diagnostics.
   */
  private async handleQwenOAuthError(error?: unknown): Promise<string | null> {
    if (!error) {
      return null;
    }

    const errorMessage =
      error instanceof Error
        ? error.message.toLowerCase()
        : String(error).toLowerCase();
    const errorCode =
      (error as { status?: number; code?: number })?.status ||
      (error as { status?: number; code?: number })?.code;

    // Check if this is an authentication/authorization error
    const isAuthError =
      errorCode === 401 ||
      errorCode === 403 ||
      errorMessage.includes('unauthorized') ||
      errorMessage.includes('forbidden') ||
      errorMessage.includes('invalid api key') ||
      errorMessage.includes('authentication') ||
      errorMessage.includes('access denied') ||
      (errorMessage.includes('token') && errorMessage.includes('expired'));

    // Check if this is a rate limiting error
    const isRateLimitError =
      errorCode === 429 ||
      errorMessage.includes('429') ||
      errorMessage.includes('rate limit') ||
      errorMessage.includes('too many requests');

    if (isAuthError) {
      console.warn('Qwen OAuth authentication error detected:', errorMessage);
      // The QwenContentGenerator should automatically handle token refresh
      // If it still fails, it likely means the refresh token is also expired
      console.log(
        'Note: If this persists, you may need to re-authenticate with Qwen OAuth',
      );
      return null;
    }

    if (isRateLimitError) {
      console.warn('Qwen API rate limit encountered:', errorMessage);
      // For rate limiting, we don't need to do anything special
      // The retry mechanism will handle the backoff
      return null;
    }

    // For other errors, don't handle them specially
    return null;
  }
}
755
+
756
+ /** Visible for Testing */
757
+ export function isSchemaDepthError(errorMessage: string): boolean {
758
+ return errorMessage.includes('maximum schema depth exceeded');
759
+ }
760
+
761
+ export function isInvalidArgumentError(errorMessage: string): boolean {
762
+ return errorMessage.includes('Request contains an invalid argument');
763
+ }
projects/ui/qwen-code/packages/core/src/core/geminiRequest.test.ts ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect } from 'vitest';
8
+ import { partListUnionToString } from './geminiRequest.js';
9
+ import { type Part } from '@google/genai';
10
+
11
// Unit tests for partListUnionToString: one case per Part variant, verifying
// the verbose placeholder rendering for non-text parts and pass-through for
// strings and text parts.
describe('partListUnionToString', () => {
  it('should return the string value if the input is a string', () => {
    const result = partListUnionToString('hello');
    expect(result).toBe('hello');
  });

  it('should return a concatenated string if the input is an array of strings', () => {
    const result = partListUnionToString(['hello', ' ', 'world']);
    expect(result).toBe('hello world');
  });

  it('should handle videoMetadata', () => {
    const part: Part = { videoMetadata: {} };
    const result = partListUnionToString(part);
    expect(result).toBe('[Video Metadata]');
  });

  it('should handle thought', () => {
    const part: Part = { thought: true };
    const result = partListUnionToString(part);
    expect(result).toBe('[Thought: true]');
  });

  it('should handle codeExecutionResult', () => {
    const part: Part = { codeExecutionResult: {} };
    const result = partListUnionToString(part);
    expect(result).toBe('[Code Execution Result]');
  });

  it('should handle executableCode', () => {
    const part: Part = { executableCode: {} };
    const result = partListUnionToString(part);
    expect(result).toBe('[Executable Code]');
  });

  it('should handle fileData', () => {
    const part: Part = {
      fileData: { mimeType: 'text/plain', fileUri: 'file.txt' },
    };
    const result = partListUnionToString(part);
    expect(result).toBe('[File Data]');
  });

  it('should handle functionCall', () => {
    const part: Part = { functionCall: { name: 'myFunction' } };
    const result = partListUnionToString(part);
    expect(result).toBe('[Function Call: myFunction]');
  });

  it('should handle functionResponse', () => {
    const part: Part = {
      functionResponse: { name: 'myFunction', response: {} },
    };
    const result = partListUnionToString(part);
    expect(result).toBe('[Function Response: myFunction]');
  });

  it('should handle inlineData', () => {
    const part: Part = { inlineData: { mimeType: 'image/png', data: '...' } };
    const result = partListUnionToString(part);
    expect(result).toBe('<image/png>');
  });

  it('should handle text', () => {
    const part: Part = { text: 'hello' };
    const result = partListUnionToString(part);
    expect(result).toBe('hello');
  });

  it('should return an empty string for an unknown part type', () => {
    const part: Part = {};
    const result = partListUnionToString(part);
    expect(result).toBe('');
  });
});
projects/ui/qwen-code/packages/core/src/core/geminiRequest.ts ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { type PartListUnion } from '@google/genai';
8
+ import { partToString } from '../utils/partUtils.js';
9
+
10
/**
 * Represents a request to be sent to the Gemini API.
 * For now, it's an alias to PartListUnion as the primary content.
 * This can be expanded later to include other request parameters
 * (e.g. generation config) without changing call sites that only pass content.
 */
export type GeminiCodeRequest = PartListUnion;
16
+
17
+ export function partListUnionToString(value: PartListUnion): string {
18
+ return partToString(value, { verbose: true });
19
+ }
projects/ui/qwen-code/packages/core/src/core/logger.test.ts ADDED
@@ -0,0 +1,727 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import {
8
+ describe,
9
+ it,
10
+ expect,
11
+ vi,
12
+ beforeEach,
13
+ afterEach,
14
+ afterAll,
15
+ } from 'vitest';
16
+ import {
17
+ Logger,
18
+ MessageSenderType,
19
+ LogEntry,
20
+ encodeTagName,
21
+ decodeTagName,
22
+ } from './logger.js';
23
+ import { promises as fs, existsSync } from 'node:fs';
24
+ import path from 'node:path';
25
+ import { Content } from '@google/genai';
26
+
27
+ import crypto from 'node:crypto';
28
+ import os from 'node:os';
29
+
30
// Directory/file names that make up the logger's on-disk layout.
const GEMINI_DIR_NAME = '.qwen';
const TMP_DIR_NAME = 'tmp';
const LOG_FILE_NAME = 'logs.json';
const CHECKPOINT_FILE_NAME = 'checkpoint.json';

// The logger keys its temp directory on a sha256 hash of the project path.
// NOTE(review): this layout presumably mirrors getProjectTempDir() in
// ../utils/paths.js — keep the two in sync, and confirm if tests break.
const projectDir = process.cwd();
const hash = crypto.createHash('sha256').update(projectDir).digest('hex');
const TEST_GEMINI_DIR = path.join(
  os.homedir(),
  GEMINI_DIR_NAME,
  TMP_DIR_NAME,
  hash,
);

// Absolute paths the tests read/write directly, bypassing the Logger API.
const TEST_LOG_FILE_PATH = path.join(TEST_GEMINI_DIR, LOG_FILE_NAME);
const TEST_CHECKPOINT_FILE_PATH = path.join(
  TEST_GEMINI_DIR,
  CHECKPOINT_FILE_NAME,
);
49
+
50
+ async function cleanupLogAndCheckpointFiles() {
51
+ try {
52
+ await fs.rm(TEST_GEMINI_DIR, { recursive: true, force: true });
53
+ } catch (_error) {
54
+ // Ignore errors, as the directory may not exist, which is fine.
55
+ }
56
+ }
57
+
58
+ async function readLogFile(): Promise<LogEntry[]> {
59
+ try {
60
+ const content = await fs.readFile(TEST_LOG_FILE_PATH, 'utf-8');
61
+ return JSON.parse(content) as LogEntry[];
62
+ } catch (error) {
63
+ if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
64
+ return [];
65
+ }
66
+ throw error;
67
+ }
68
+ }
69
+
70
+ vi.mock('../utils/session.js', () => ({
71
+ sessionId: 'test-session-id',
72
+ }));
73
+
74
+ describe('Logger', () => {
75
+ let logger: Logger;
76
+ const testSessionId = 'test-session-id';
77
+
78
+ beforeEach(async () => {
79
+ vi.resetAllMocks();
80
+ vi.useFakeTimers();
81
+ vi.setSystemTime(new Date('2025-01-01T12:00:00.000Z'));
82
+ // Clean up before the test
83
+ await cleanupLogAndCheckpointFiles();
84
+ // Ensure the directory exists for the test
85
+ await fs.mkdir(TEST_GEMINI_DIR, { recursive: true });
86
+ logger = new Logger(testSessionId);
87
+ await logger.initialize();
88
+ });
89
+
90
+ afterEach(async () => {
91
+ if (logger) {
92
+ logger.close();
93
+ }
94
+ // Clean up after the test
95
+ await cleanupLogAndCheckpointFiles();
96
+ vi.useRealTimers();
97
+ vi.restoreAllMocks();
98
+ });
99
+
100
+ afterAll(async () => {
101
+ // Final cleanup
102
+ await cleanupLogAndCheckpointFiles();
103
+ });
104
+
105
+ describe('initialize', () => {
106
+ it('should create .gemini directory and an empty log file if none exist', async () => {
107
+ const dirExists = await fs
108
+ .access(TEST_GEMINI_DIR)
109
+ .then(() => true)
110
+ .catch(() => false);
111
+ expect(dirExists).toBe(true);
112
+
113
+ const fileExists = await fs
114
+ .access(TEST_LOG_FILE_PATH)
115
+ .then(() => true)
116
+ .catch(() => false);
117
+ expect(fileExists).toBe(true);
118
+
119
+ const logContent = await readLogFile();
120
+ expect(logContent).toEqual([]);
121
+ });
122
+
123
+ it('should load existing logs and set correct messageId for the current session', async () => {
124
+ const currentSessionId = 'session-123';
125
+ const anotherSessionId = 'session-456';
126
+ const existingLogs: LogEntry[] = [
127
+ {
128
+ sessionId: currentSessionId,
129
+ messageId: 0,
130
+ timestamp: new Date('2025-01-01T10:00:05.000Z').toISOString(),
131
+ type: MessageSenderType.USER,
132
+ message: 'Msg1',
133
+ },
134
+ {
135
+ sessionId: anotherSessionId,
136
+ messageId: 5,
137
+ timestamp: new Date('2025-01-01T09:00:00.000Z').toISOString(),
138
+ type: MessageSenderType.USER,
139
+ message: 'OldMsg',
140
+ },
141
+ {
142
+ sessionId: currentSessionId,
143
+ messageId: 1,
144
+ timestamp: new Date('2025-01-01T10:00:10.000Z').toISOString(),
145
+ type: MessageSenderType.USER,
146
+ message: 'Msg2',
147
+ },
148
+ ];
149
+ await fs.writeFile(
150
+ TEST_LOG_FILE_PATH,
151
+ JSON.stringify(existingLogs, null, 2),
152
+ );
153
+ const newLogger = new Logger(currentSessionId);
154
+ await newLogger.initialize();
155
+ expect(newLogger['messageId']).toBe(2);
156
+ expect(newLogger['logs']).toEqual(existingLogs);
157
+ newLogger.close();
158
+ });
159
+
160
+ it('should set messageId to 0 for a new session if log file exists but has no logs for current session', async () => {
161
+ const existingLogs: LogEntry[] = [
162
+ {
163
+ sessionId: 'some-other-session',
164
+ messageId: 5,
165
+ timestamp: new Date().toISOString(),
166
+ type: MessageSenderType.USER,
167
+ message: 'OldMsg',
168
+ },
169
+ ];
170
+ await fs.writeFile(
171
+ TEST_LOG_FILE_PATH,
172
+ JSON.stringify(existingLogs, null, 2),
173
+ );
174
+ const newLogger = new Logger('a-new-session');
175
+ await newLogger.initialize();
176
+ expect(newLogger['messageId']).toBe(0);
177
+ newLogger.close();
178
+ });
179
+
180
+ it('should be idempotent', async () => {
181
+ await logger.logMessage(MessageSenderType.USER, 'test message');
182
+ const initialMessageId = logger['messageId'];
183
+ const initialLogCount = logger['logs'].length;
184
+
185
+ await logger.initialize(); // Second call should not change state
186
+
187
+ expect(logger['messageId']).toBe(initialMessageId);
188
+ expect(logger['logs'].length).toBe(initialLogCount);
189
+ const logsFromFile = await readLogFile();
190
+ expect(logsFromFile.length).toBe(1);
191
+ });
192
+
193
+ it('should handle invalid JSON in log file by backing it up and starting fresh', async () => {
194
+ await fs.writeFile(TEST_LOG_FILE_PATH, 'invalid json');
195
+ const consoleDebugSpy = vi
196
+ .spyOn(console, 'debug')
197
+ .mockImplementation(() => {});
198
+
199
+ const newLogger = new Logger(testSessionId);
200
+ await newLogger.initialize();
201
+
202
+ expect(consoleDebugSpy).toHaveBeenCalledWith(
203
+ expect.stringContaining('Invalid JSON in log file'),
204
+ expect.any(SyntaxError),
205
+ );
206
+ const logContent = await readLogFile();
207
+ expect(logContent).toEqual([]);
208
+ const dirContents = await fs.readdir(TEST_GEMINI_DIR);
209
+ expect(
210
+ dirContents.some(
211
+ (f) =>
212
+ f.startsWith(LOG_FILE_NAME + '.invalid_json') && f.endsWith('.bak'),
213
+ ),
214
+ ).toBe(true);
215
+ newLogger.close();
216
+ });
217
+
218
+ it('should handle non-array JSON in log file by backing it up and starting fresh', async () => {
219
+ await fs.writeFile(
220
+ TEST_LOG_FILE_PATH,
221
+ JSON.stringify({ not: 'an array' }),
222
+ );
223
+ const consoleDebugSpy = vi
224
+ .spyOn(console, 'debug')
225
+ .mockImplementation(() => {});
226
+
227
+ const newLogger = new Logger(testSessionId);
228
+ await newLogger.initialize();
229
+
230
+ expect(consoleDebugSpy).toHaveBeenCalledWith(
231
+ `Log file at ${TEST_LOG_FILE_PATH} is not a valid JSON array. Starting with empty logs.`,
232
+ );
233
+ const logContent = await readLogFile();
234
+ expect(logContent).toEqual([]);
235
+ const dirContents = await fs.readdir(TEST_GEMINI_DIR);
236
+ expect(
237
+ dirContents.some(
238
+ (f) =>
239
+ f.startsWith(LOG_FILE_NAME + '.malformed_array') &&
240
+ f.endsWith('.bak'),
241
+ ),
242
+ ).toBe(true);
243
+ newLogger.close();
244
+ });
245
+ });
246
+
247
+ describe('logMessage', () => {
248
+ it('should append a message to the log file and update in-memory logs', async () => {
249
+ await logger.logMessage(MessageSenderType.USER, 'Hello, world!');
250
+ const logsFromFile = await readLogFile();
251
+ expect(logsFromFile.length).toBe(1);
252
+ expect(logsFromFile[0]).toMatchObject({
253
+ sessionId: testSessionId,
254
+ messageId: 0,
255
+ type: MessageSenderType.USER,
256
+ message: 'Hello, world!',
257
+ timestamp: new Date('2025-01-01T12:00:00.000Z').toISOString(),
258
+ });
259
+ expect(logger['logs'].length).toBe(1);
260
+ expect(logger['logs'][0]).toEqual(logsFromFile[0]);
261
+ expect(logger['messageId']).toBe(1);
262
+ });
263
+
264
+ it('should correctly increment messageId for subsequent messages in the same session', async () => {
265
+ await logger.logMessage(MessageSenderType.USER, 'First');
266
+ vi.advanceTimersByTime(1000);
267
+ await logger.logMessage(MessageSenderType.USER, 'Second');
268
+ const logs = await readLogFile();
269
+ expect(logs.length).toBe(2);
270
+ expect(logs[0].messageId).toBe(0);
271
+ expect(logs[1].messageId).toBe(1);
272
+ expect(logs[1].timestamp).not.toBe(logs[0].timestamp);
273
+ expect(logger['messageId']).toBe(2);
274
+ });
275
+
276
+ it('should handle logger not initialized', async () => {
277
+ const uninitializedLogger = new Logger(testSessionId);
278
+ uninitializedLogger.close(); // Ensure it's treated as uninitialized
279
+ const consoleDebugSpy = vi
280
+ .spyOn(console, 'debug')
281
+ .mockImplementation(() => {});
282
+ await uninitializedLogger.logMessage(MessageSenderType.USER, 'test');
283
+ expect(consoleDebugSpy).toHaveBeenCalledWith(
284
+ 'Logger not initialized or session ID missing. Cannot log message.',
285
+ );
286
+ expect((await readLogFile()).length).toBe(0);
287
+ uninitializedLogger.close();
288
+ });
289
+
290
+ it('should simulate concurrent writes from different logger instances to the same file', async () => {
291
+ const concurrentSessionId = 'concurrent-session';
292
+ const logger1 = new Logger(concurrentSessionId);
293
+ await logger1.initialize();
294
+
295
+ const logger2 = new Logger(concurrentSessionId);
296
+ await logger2.initialize();
297
+ expect(logger2['sessionId']).toEqual(logger1['sessionId']);
298
+
299
+ await logger1.logMessage(MessageSenderType.USER, 'L1M1');
300
+ vi.advanceTimersByTime(10);
301
+ await logger2.logMessage(MessageSenderType.USER, 'L2M1');
302
+ vi.advanceTimersByTime(10);
303
+ await logger1.logMessage(MessageSenderType.USER, 'L1M2');
304
+ vi.advanceTimersByTime(10);
305
+ await logger2.logMessage(MessageSenderType.USER, 'L2M2');
306
+
307
+ const logsFromFile = await readLogFile();
308
+ expect(logsFromFile.length).toBe(4);
309
+ const messageIdsInFile = logsFromFile
310
+ .map((log) => log.messageId)
311
+ .sort((a, b) => a - b);
312
+ expect(messageIdsInFile).toEqual([0, 1, 2, 3]);
313
+
314
+ const messagesInFile = logsFromFile
315
+ .sort((a, b) => a.messageId - b.messageId)
316
+ .map((l) => l.message);
317
+ expect(messagesInFile).toEqual(['L1M1', 'L2M1', 'L1M2', 'L2M2']);
318
+
319
+ // Check internal state (next messageId each logger would use for that session)
320
+ expect(logger1['messageId']).toBe(3);
321
+ expect(logger2['messageId']).toBe(4);
322
+
323
+ logger1.close();
324
+ logger2.close();
325
+ });
326
+
327
+ it('should not throw, not increment messageId, and log error if writing to file fails', async () => {
328
+ vi.spyOn(fs, 'writeFile').mockRejectedValueOnce(new Error('Disk full'));
329
+ const consoleDebugSpy = vi
330
+ .spyOn(console, 'debug')
331
+ .mockImplementation(() => {});
332
+ const initialMessageId = logger['messageId'];
333
+ const initialLogCount = logger['logs'].length;
334
+
335
+ await logger.logMessage(MessageSenderType.USER, 'test fail write');
336
+
337
+ expect(consoleDebugSpy).toHaveBeenCalledWith(
338
+ 'Error writing to log file:',
339
+ expect.any(Error),
340
+ );
341
+ expect(logger['messageId']).toBe(initialMessageId); // Not incremented
342
+ expect(logger['logs'].length).toBe(initialLogCount); // Log not added to in-memory cache
343
+ });
344
+ });
345
+
346
+ describe('getPreviousUserMessages', () => {
347
+ it('should retrieve all user messages from logs, sorted newest first', async () => {
348
+ const loggerSort = new Logger('session-1');
349
+ await loggerSort.initialize();
350
+ await loggerSort.logMessage(MessageSenderType.USER, 'S1M0_ts100000');
351
+ vi.advanceTimersByTime(1000);
352
+ await loggerSort.logMessage(MessageSenderType.USER, 'S1M1_ts101000');
353
+ vi.advanceTimersByTime(1000);
354
+ // Switch to a different session to log
355
+ const loggerSort2 = new Logger('session-2');
356
+ await loggerSort2.initialize();
357
+ await loggerSort2.logMessage(MessageSenderType.USER, 'S2M0_ts102000');
358
+ vi.advanceTimersByTime(1000);
359
+ await loggerSort2.logMessage(
360
+ 'model' as MessageSenderType,
361
+ 'S2_Model_ts103000',
362
+ );
363
+ vi.advanceTimersByTime(1000);
364
+ await loggerSort2.logMessage(MessageSenderType.USER, 'S2M1_ts104000');
365
+ loggerSort.close();
366
+ loggerSort2.close();
367
+
368
+ const finalLogger = new Logger('final-session');
369
+ await finalLogger.initialize();
370
+
371
+ const messages = await finalLogger.getPreviousUserMessages();
372
+ expect(messages).toEqual([
373
+ 'S2M1_ts104000',
374
+ 'S2M0_ts102000',
375
+ 'S1M1_ts101000',
376
+ 'S1M0_ts100000',
377
+ ]);
378
+ finalLogger.close();
379
+ });
380
+
381
+ it('should return empty array if no user messages exist', async () => {
382
+ await logger.logMessage('system' as MessageSenderType, 'System boot');
383
+ const messages = await logger.getPreviousUserMessages();
384
+ expect(messages).toEqual([]);
385
+ });
386
+
387
+ it('should return empty array if logger not initialized', async () => {
388
+ const uninitializedLogger = new Logger(testSessionId);
389
+ uninitializedLogger.close();
390
+ const messages = await uninitializedLogger.getPreviousUserMessages();
391
+ expect(messages).toEqual([]);
392
+ uninitializedLogger.close();
393
+ });
394
+ });
395
+
396
+ describe('saveCheckpoint', () => {
397
+ const conversation: Content[] = [
398
+ { role: 'user', parts: [{ text: 'Hello' }] },
399
+ { role: 'model', parts: [{ text: 'Hi there' }] },
400
+ ];
401
+
402
+ it.each([
403
+ {
404
+ tag: 'test-tag',
405
+ encodedTag: 'test-tag',
406
+ },
407
+ {
408
+ tag: '你好世界',
409
+ encodedTag: '%E4%BD%A0%E5%A5%BD%E4%B8%96%E7%95%8C',
410
+ },
411
+ {
412
+ tag: 'japanese-ひらがなひらがな形声',
413
+ encodedTag:
414
+ 'japanese-%E3%81%B2%E3%82%89%E3%81%8C%E3%81%AA%E3%81%B2%E3%82%89%E3%81%8C%E3%81%AA%E5%BD%A2%E5%A3%B0',
415
+ },
416
+ {
417
+ tag: '../../secret',
418
+ encodedTag: '..%2F..%2Fsecret',
419
+ },
420
+ ])('should save a checkpoint', async ({ tag, encodedTag }) => {
421
+ await logger.saveCheckpoint(conversation, tag);
422
+ const taggedFilePath = path.join(
423
+ TEST_GEMINI_DIR,
424
+ `checkpoint-${encodedTag}.json`,
425
+ );
426
+ const fileContent = await fs.readFile(taggedFilePath, 'utf-8');
427
+ expect(JSON.parse(fileContent)).toEqual(conversation);
428
+ });
429
+
430
+ it('should not throw if logger is not initialized', async () => {
431
+ const uninitializedLogger = new Logger(testSessionId);
432
+ uninitializedLogger.close();
433
+ const consoleErrorSpy = vi
434
+ .spyOn(console, 'error')
435
+ .mockImplementation(() => {});
436
+
437
+ await expect(
438
+ uninitializedLogger.saveCheckpoint(conversation, 'tag'),
439
+ ).resolves.not.toThrow();
440
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
441
+ 'Logger not initialized or checkpoint file path not set. Cannot save a checkpoint.',
442
+ );
443
+ });
444
+ });
445
+
446
+ describe('loadCheckpoint', () => {
447
+ const conversation: Content[] = [
448
+ { role: 'user', parts: [{ text: 'Hello' }] },
449
+ { role: 'model', parts: [{ text: 'Hi there' }] },
450
+ ];
451
+
452
+ beforeEach(async () => {
453
+ await fs.writeFile(
454
+ TEST_CHECKPOINT_FILE_PATH,
455
+ JSON.stringify(conversation, null, 2),
456
+ );
457
+ });
458
+
459
+ it.each([
460
+ {
461
+ tag: 'test-tag',
462
+ encodedTag: 'test-tag',
463
+ },
464
+ {
465
+ tag: '你好世界',
466
+ encodedTag: '%E4%BD%A0%E5%A5%BD%E4%B8%96%E7%95%8C',
467
+ },
468
+ {
469
+ tag: 'japanese-ひらがなひらがな形声',
470
+ encodedTag:
471
+ 'japanese-%E3%81%B2%E3%82%89%E3%81%8C%E3%81%AA%E3%81%B2%E3%82%89%E3%81%8C%E3%81%AA%E5%BD%A2%E5%A3%B0',
472
+ },
473
+ {
474
+ tag: '../../secret',
475
+ encodedTag: '..%2F..%2Fsecret',
476
+ },
477
+ ])('should load from a checkpoint', async ({ tag, encodedTag }) => {
478
+ const taggedConversation = [
479
+ ...conversation,
480
+ { role: 'user', parts: [{ text: 'hello' }] },
481
+ ];
482
+ const taggedFilePath = path.join(
483
+ TEST_GEMINI_DIR,
484
+ `checkpoint-${encodedTag}.json`,
485
+ );
486
+ await fs.writeFile(
487
+ taggedFilePath,
488
+ JSON.stringify(taggedConversation, null, 2),
489
+ );
490
+
491
+ const loaded = await logger.loadCheckpoint(tag);
492
+ expect(loaded).toEqual(taggedConversation);
493
+ expect(encodeTagName(tag)).toBe(encodedTag);
494
+ expect(decodeTagName(encodedTag)).toBe(tag);
495
+ });
496
+
497
+ it('should return an empty array if a tagged checkpoint file does not exist', async () => {
498
+ const loaded = await logger.loadCheckpoint('nonexistent-tag');
499
+ expect(loaded).toEqual([]);
500
+ });
501
+
502
+ it('should return an empty array if the checkpoint file does not exist', async () => {
503
+ await fs.unlink(TEST_CHECKPOINT_FILE_PATH); // Ensure it's gone
504
+ const loaded = await logger.loadCheckpoint('missing');
505
+ expect(loaded).toEqual([]);
506
+ });
507
+
508
+ it('should return an empty array if the file contains invalid JSON', async () => {
509
+ const tag = 'invalid-json-tag';
510
+ const encodedTag = 'invalid-json-tag';
511
+ const taggedFilePath = path.join(
512
+ TEST_GEMINI_DIR,
513
+ `checkpoint-${encodedTag}.json`,
514
+ );
515
+ await fs.writeFile(taggedFilePath, 'invalid json');
516
+ const consoleErrorSpy = vi
517
+ .spyOn(console, 'error')
518
+ .mockImplementation(() => {});
519
+ const loadedCheckpoint = await logger.loadCheckpoint(tag);
520
+ expect(loadedCheckpoint).toEqual([]);
521
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
522
+ expect.stringContaining('Failed to read or parse checkpoint file'),
523
+ expect.any(Error),
524
+ );
525
+ });
526
+
527
+ it('should return an empty array if logger is not initialized', async () => {
528
+ const uninitializedLogger = new Logger(testSessionId);
529
+ uninitializedLogger.close();
530
+ const consoleErrorSpy = vi
531
+ .spyOn(console, 'error')
532
+ .mockImplementation(() => {});
533
+ const loadedCheckpoint = await uninitializedLogger.loadCheckpoint('tag');
534
+ expect(loadedCheckpoint).toEqual([]);
535
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
536
+ 'Logger not initialized or checkpoint file path not set. Cannot load checkpoint.',
537
+ );
538
+ });
539
+ });
540
+
541
+ describe('deleteCheckpoint', () => {
542
+ const conversation: Content[] = [
543
+ { role: 'user', parts: [{ text: 'Content to be deleted' }] },
544
+ ];
545
+ const tag = 'delete-me';
546
+ const encodedTag = 'delete-me';
547
+ let taggedFilePath: string;
548
+
549
+ beforeEach(async () => {
550
+ taggedFilePath = path.join(
551
+ TEST_GEMINI_DIR,
552
+ `checkpoint-${encodedTag}.json`,
553
+ );
554
+ // Create a file to be deleted
555
+ await fs.writeFile(taggedFilePath, JSON.stringify(conversation));
556
+ });
557
+
558
+ it('should delete the specified checkpoint file and return true', async () => {
559
+ const result = await logger.deleteCheckpoint(tag);
560
+ expect(result).toBe(true);
561
+
562
+ // Verify the file is actually gone
563
+ await expect(fs.access(taggedFilePath)).rejects.toThrow(/ENOENT/);
564
+ });
565
+
566
+ it('should delete both new and old checkpoint files if they exist', async () => {
567
+ const oldTag = 'delete-me(old)';
568
+ const oldStylePath = path.join(
569
+ TEST_GEMINI_DIR,
570
+ `checkpoint-${oldTag}.json`,
571
+ );
572
+ const newStylePath = logger['_checkpointPath'](oldTag);
573
+
574
+ // Create both files
575
+ await fs.writeFile(oldStylePath, '{}');
576
+ await fs.writeFile(newStylePath, '{}');
577
+
578
+ // Verify both files exist before deletion
579
+ expect(existsSync(oldStylePath)).toBe(true);
580
+ expect(existsSync(newStylePath)).toBe(true);
581
+
582
+ const result = await logger.deleteCheckpoint(oldTag);
583
+ expect(result).toBe(true);
584
+
585
+ // Verify both are gone
586
+ expect(existsSync(oldStylePath)).toBe(false);
587
+ expect(existsSync(newStylePath)).toBe(false);
588
+ });
589
+
590
+ it('should return false if the checkpoint file does not exist', async () => {
591
+ const result = await logger.deleteCheckpoint('non-existent-tag');
592
+ expect(result).toBe(false);
593
+ });
594
+
595
+ it('should re-throw an error if file deletion fails for reasons other than not existing', async () => {
596
+ // Simulate a different error (e.g., permission denied)
597
+ vi.spyOn(fs, 'unlink').mockRejectedValueOnce(
598
+ Object.assign(new Error('EACCES: permission denied'), {
599
+ code: 'EACCES',
600
+ }),
601
+ );
602
+ const consoleErrorSpy = vi
603
+ .spyOn(console, 'error')
604
+ .mockImplementation(() => {});
605
+
606
+ await expect(logger.deleteCheckpoint(tag)).rejects.toThrow(
607
+ 'EACCES: permission denied',
608
+ );
609
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
610
+ `Failed to delete checkpoint file ${taggedFilePath}:`,
611
+ expect.any(Error),
612
+ );
613
+ });
614
+
615
+ it('should return false if logger is not initialized', async () => {
616
+ const uninitializedLogger = new Logger(testSessionId);
617
+ uninitializedLogger.close();
618
+ const consoleErrorSpy = vi
619
+ .spyOn(console, 'error')
620
+ .mockImplementation(() => {});
621
+
622
+ const result = await uninitializedLogger.deleteCheckpoint(tag);
623
+ expect(result).toBe(false);
624
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
625
+ 'Logger not initialized or checkpoint file path not set. Cannot delete checkpoint.',
626
+ );
627
+ });
628
+ });
629
+
630
+ describe('checkpointExists', () => {
631
+ const tag = 'exists-test';
632
+ const encodedTag = 'exists-test';
633
+ let taggedFilePath: string;
634
+
635
+ beforeEach(() => {
636
+ taggedFilePath = path.join(
637
+ TEST_GEMINI_DIR,
638
+ `checkpoint-${encodedTag}.json`,
639
+ );
640
+ });
641
+
642
+ it('should return true if the checkpoint file exists', async () => {
643
+ await fs.writeFile(taggedFilePath, '{}');
644
+ const exists = await logger.checkpointExists(tag);
645
+ expect(exists).toBe(true);
646
+ });
647
+
648
+ it('should return false if the checkpoint file does not exist', async () => {
649
+ const exists = await logger.checkpointExists('non-existent-tag');
650
+ expect(exists).toBe(false);
651
+ });
652
+
653
+ it('should throw an error if logger is not initialized', async () => {
654
+ const uninitializedLogger = new Logger(testSessionId);
655
+ uninitializedLogger.close();
656
+
657
+ await expect(uninitializedLogger.checkpointExists(tag)).rejects.toThrow(
658
+ 'Logger not initialized. Cannot check for checkpoint existence.',
659
+ );
660
+ });
661
+
662
+ it('should re-throw an error if fs.access fails for reasons other than not existing', async () => {
663
+ vi.spyOn(fs, 'access').mockRejectedValueOnce(
664
+ Object.assign(new Error('EACCES: permission denied'), {
665
+ code: 'EACCES',
666
+ }),
667
+ );
668
+ const consoleErrorSpy = vi
669
+ .spyOn(console, 'error')
670
+ .mockImplementation(() => {});
671
+
672
+ await expect(logger.checkpointExists(tag)).rejects.toThrow(
673
+ 'EACCES: permission denied',
674
+ );
675
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
676
+ `Failed to check checkpoint existence for path for tag "${tag}":`,
677
+ expect.any(Error),
678
+ );
679
+ });
680
+ });
681
+
682
+ describe('Backward compatibility', () => {
683
+ const conversation: Content[] = [
684
+ { role: 'user', parts: [{ text: 'Hello' }] },
685
+ { role: 'model', parts: [{ text: 'Hi there' }] },
686
+ ];
687
+ it('should load from a checkpoint with a raw special character tag', async () => {
688
+ const taggedConversation = [
689
+ ...conversation,
690
+ { role: 'user', parts: [{ text: 'hello' }] },
691
+ ];
692
+ const tag = 'special(char)';
693
+ const taggedFilePath = path.join(
694
+ TEST_GEMINI_DIR,
695
+ `checkpoint-${tag}.json`,
696
+ );
697
+ await fs.writeFile(
698
+ taggedFilePath,
699
+ JSON.stringify(taggedConversation, null, 2),
700
+ );
701
+
702
+ const loaded = await logger.loadCheckpoint(tag);
703
+ expect(loaded).toEqual(taggedConversation);
704
+ });
705
+ });
706
+
707
  describe('close', () => {
    it('should reset logger state', async () => {
      // Log once while initialized so close() has real state to clear.
      await logger.logMessage(MessageSenderType.USER, 'A message');
      logger.close();
      const consoleDebugSpy = vi
        .spyOn(console, 'debug')
        .mockImplementation(() => {});
      // After close(), logging must be a no-op that only emits a debug note.
      await logger.logMessage(MessageSenderType.USER, 'Another message');
      expect(consoleDebugSpy).toHaveBeenCalledWith(
        'Logger not initialized or session ID missing. Cannot log message.',
      );
      const messages = await logger.getPreviousUserMessages();
      expect(messages).toEqual([]);
      // close() resets every piece of internal state back to its defaults.
      expect(logger['initialized']).toBe(false);
      expect(logger['logFilePath']).toBeUndefined();
      expect(logger['logs']).toEqual([]);
      expect(logger['sessionId']).toBeUndefined();
      expect(logger['messageId']).toBe(0);
    });
  });
projects/ui/qwen-code/packages/core/src/core/logger.ts ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import path from 'node:path';
8
+ import { promises as fs } from 'node:fs';
9
+ import { Content } from '@google/genai';
10
+ import { getProjectTempDir } from '../utils/paths.js';
11
+
12
// Name of the JSON log file kept inside the project temp directory.
const LOG_FILE_NAME = 'logs.json';

// Sender of a log entry. Only user messages are logged today; other
// sender kinds may be added later.
export enum MessageSenderType {
  USER = 'user',
}

// One persisted log record. The file on disk is a JSON array of these.
export interface LogEntry {
  // Session that produced the message.
  sessionId: string;
  // 0-based, monotonically increasing per session.
  messageId: number;
  // ISO-8601 timestamp string.
  timestamp: string;
  type: MessageSenderType;
  message: string;
}
25
+
26
/**
 * Encodes a string so it is safe to embed in a filename.
 *
 * Delegates to `encodeURIComponent`: every character outside its
 * unescaped set (A-Z a-z 0-9 and `- _ . ! ~ * ' ( )`) is replaced with
 * `%XX` escapes of its UTF-8 bytes (uppercase hex). In particular `/`
 * and `\` are escaped, so a tag such as `../../secret` cannot traverse
 * directories once used in a checkpoint filename.
 *
 * @param str The input string to encode.
 * @returns The encoded, filename-safe string.
 */
export function encodeTagName(str: string): string {
  return encodeURIComponent(str);
}
41
+
42
+ /**
43
+ * Decodes a string that was encoded with the `encode` function.
44
+ *
45
+ * It finds any percent-encoded characters and converts them back to their
46
+ * original representation.
47
+ *
48
+ * @param str The encoded string to decode.
49
+ * @returns The decoded, original string.
50
+ */
51
+ export function decodeTagName(str: string): string {
52
+ try {
53
+ return decodeURIComponent(str);
54
+ } catch (_e) {
55
+ // Fallback for old, potentially malformed encoding
56
+ return str.replace(/%([0-9A-F]{2})/g, (_, hex) =>
57
+ String.fromCharCode(parseInt(hex, 16)),
58
+ );
59
+ }
60
+ }
61
+
62
+ export class Logger {
63
+ private qwenDir: string | undefined;
64
+ private logFilePath: string | undefined;
65
+ private sessionId: string | undefined;
66
+ private messageId = 0; // Instance-specific counter for the next messageId
67
+ private initialized = false;
68
+ private logs: LogEntry[] = []; // In-memory cache, ideally reflects the last known state of the file
69
+
70
+ constructor(sessionId: string) {
71
+ this.sessionId = sessionId;
72
+ }
73
+
74
  /**
   * Reads and parses the shared log file from disk.
   *
   * Only entries that structurally match {@link LogEntry} are returned;
   * malformed records are silently dropped. A missing file yields an
   * empty list. A file whose contents are not valid JSON, or whose top
   * level is not an array, is moved aside via _backupCorruptedLogFile and
   * treated as empty. Any other I/O error is re-thrown.
   *
   * @throws If `logFilePath` has not been set yet (before initialize()).
   */
  private async _readLogFile(): Promise<LogEntry[]> {
    if (!this.logFilePath) {
      throw new Error('Log file path not set during read attempt.');
    }
    try {
      const fileContent = await fs.readFile(this.logFilePath, 'utf-8');
      const parsedLogs = JSON.parse(fileContent);
      if (!Array.isArray(parsedLogs)) {
        // Non-array top level means the file was corrupted or written by
        // something else; preserve it as a backup and start fresh.
        console.debug(
          `Log file at ${this.logFilePath} is not a valid JSON array. Starting with empty logs.`,
        );
        await this._backupCorruptedLogFile('malformed_array');
        return [];
      }
      // Keep only well-formed entries so one bad record cannot poison the
      // in-memory cache or messageId calculations.
      return parsedLogs.filter(
        (entry) =>
          typeof entry.sessionId === 'string' &&
          typeof entry.messageId === 'number' &&
          typeof entry.timestamp === 'string' &&
          typeof entry.type === 'string' &&
          typeof entry.message === 'string',
      ) as LogEntry[];
    } catch (error) {
      const nodeError = error as NodeJS.ErrnoException;
      if (nodeError.code === 'ENOENT') {
        // No log file yet — equivalent to an empty log.
        return [];
      }
      if (error instanceof SyntaxError) {
        // JSON.parse failed: back the corrupt file up and start fresh.
        console.debug(
          `Invalid JSON in log file ${this.logFilePath}. Backing up and starting fresh.`,
          error,
        );
        await this._backupCorruptedLogFile('invalid_json');
        return [];
      }
      console.debug(
        `Failed to read or parse log file ${this.logFilePath}:`,
        error,
      );
      throw error;
    }
  }
116
+
117
+ private async _backupCorruptedLogFile(reason: string): Promise<void> {
118
+ if (!this.logFilePath) return;
119
+ const backupPath = `${this.logFilePath}.${reason}.${Date.now()}.bak`;
120
+ try {
121
+ await fs.rename(this.logFilePath, backupPath);
122
+ console.debug(`Backed up corrupted log file to ${backupPath}`);
123
+ } catch (_backupError) {
124
+ // If rename fails (e.g. file doesn't exist), no need to log an error here as the primary error (e.g. invalid JSON) is already handled.
125
+ }
126
+ }
127
+
128
  /**
   * Prepares the logger for use: resolves the per-project temp directory,
   * loads (or creates) the shared log file, and derives the next
   * `messageId` for this session from what is already on disk.
   *
   * Idempotent — subsequent calls return immediately. On failure the
   * error is logged and the logger simply stays uninitialized; nothing is
   * thrown.
   */
  async initialize(): Promise<void> {
    if (this.initialized) {
      return;
    }

    this.qwenDir = getProjectTempDir(process.cwd());
    this.logFilePath = path.join(this.qwenDir, LOG_FILE_NAME);

    try {
      await fs.mkdir(this.qwenDir, { recursive: true });
      let fileExisted = true;
      try {
        await fs.access(this.logFilePath);
      } catch (_e) {
        fileExisted = false;
      }
      this.logs = await this._readLogFile();
      if (!fileExisted && this.logs.length === 0) {
        // Materialize an empty JSON array so later reads have a target.
        await fs.writeFile(this.logFilePath, '[]', 'utf-8');
      }
      // Continue numbering after the highest messageId already recorded
      // for this session (0 when the session is new).
      const sessionLogs = this.logs.filter(
        (entry) => entry.sessionId === this.sessionId,
      );
      this.messageId =
        sessionLogs.length > 0
          ? Math.max(...sessionLogs.map((entry) => entry.messageId)) + 1
          : 0;
      this.initialized = true;
    } catch (err) {
      console.error('Failed to initialize logger:', err);
      this.initialized = false;
    }
  }
161
+
162
  /**
   * Appends one entry to the log file using a read-recalculate-write
   * cycle so concurrent Logger instances sharing the same file do not
   * clobber each other's entries.
   *
   * The entry's `messageId` is reassigned from the freshest on-disk state
   * (this mutates `entryToAppend` deliberately — the caller reads it
   * back). Exact duplicates are skipped.
   *
   * @param entryToAppend Entry to persist; its `messageId` is rewritten.
   * @returns The appended entry, or `null` when an identical entry was
   *   already on disk and nothing was written.
   * @throws If the log file cannot be read or written.
   */
  private async _updateLogFile(
    entryToAppend: LogEntry,
  ): Promise<LogEntry | null> {
    if (!this.logFilePath) {
      console.debug('Log file path not set. Cannot persist log entry.');
      throw new Error('Log file path not set during update attempt.');
    }

    let currentLogsOnDisk: LogEntry[];
    try {
      currentLogsOnDisk = await this._readLogFile();
    } catch (readError) {
      console.debug(
        'Critical error reading log file before append:',
        readError,
      );
      throw readError;
    }

    // Determine the correct messageId for the new entry based on current disk state for its session
    const sessionLogsOnDisk = currentLogsOnDisk.filter(
      (e) => e.sessionId === entryToAppend.sessionId,
    );
    const nextMessageIdForSession =
      sessionLogsOnDisk.length > 0
        ? Math.max(...sessionLogsOnDisk.map((e) => e.messageId)) + 1
        : 0;

    // Update the messageId of the entry we are about to append
    entryToAppend.messageId = nextMessageIdForSession;

    // Check if this entry (same session, same *recalculated* messageId, same content) might already exist
    // This is a stricter check for true duplicates if multiple instances try to log the exact same thing
    // at the exact same calculated messageId slot.
    const entryExists = currentLogsOnDisk.some(
      (e) =>
        e.sessionId === entryToAppend.sessionId &&
        e.messageId === entryToAppend.messageId &&
        e.timestamp === entryToAppend.timestamp && // Timestamps are good for distinguishing
        e.message === entryToAppend.message,
    );

    if (entryExists) {
      console.debug(
        `Duplicate log entry detected and skipped: session ${entryToAppend.sessionId}, messageId ${entryToAppend.messageId}`,
      );
      this.logs = currentLogsOnDisk; // Ensure in-memory is synced with disk
      return null; // Indicate that no new entry was actually added
    }

    currentLogsOnDisk.push(entryToAppend);

    try {
      await fs.writeFile(
        this.logFilePath,
        JSON.stringify(currentLogsOnDisk, null, 2),
        'utf-8',
      );
      this.logs = currentLogsOnDisk;
      return entryToAppend; // Return the successfully appended entry
    } catch (error) {
      console.debug('Error writing to log file:', error);
      throw error;
    }
  }
227
+
228
+ async getPreviousUserMessages(): Promise<string[]> {
229
+ if (!this.initialized) return [];
230
+ return this.logs
231
+ .filter((entry) => entry.type === MessageSenderType.USER)
232
+ .sort((a, b) => {
233
+ const dateA = new Date(a.timestamp).getTime();
234
+ const dateB = new Date(b.timestamp).getTime();
235
+ return dateB - dateA;
236
+ })
237
+ .map((entry) => entry.message);
238
+ }
239
+
240
+ async logMessage(type: MessageSenderType, message: string): Promise<void> {
241
+ if (!this.initialized || this.sessionId === undefined) {
242
+ console.debug(
243
+ 'Logger not initialized or session ID missing. Cannot log message.',
244
+ );
245
+ return;
246
+ }
247
+
248
+ // The messageId used here is the instance's idea of the next ID.
249
+ // _updateLogFile will verify and potentially recalculate based on the file's actual state.
250
+ const newEntryObject: LogEntry = {
251
+ sessionId: this.sessionId,
252
+ messageId: this.messageId, // This will be recalculated in _updateLogFile
253
+ type,
254
+ message,
255
+ timestamp: new Date().toISOString(),
256
+ };
257
+
258
+ try {
259
+ const writtenEntry = await this._updateLogFile(newEntryObject);
260
+ if (writtenEntry) {
261
+ // If an entry was actually written (not a duplicate skip),
262
+ // then this instance can increment its idea of the next messageId for this session.
263
+ this.messageId = writtenEntry.messageId + 1;
264
+ }
265
+ } catch (_error) {
266
+ // Error already logged by _updateLogFile or _readLogFile
267
+ }
268
+ }
269
+
270
+ private _checkpointPath(tag: string): string {
271
+ if (!tag.length) {
272
+ throw new Error('No checkpoint tag specified.');
273
+ }
274
+ if (!this.qwenDir) {
275
+ throw new Error('Checkpoint file path not set.');
276
+ }
277
+ // Encode the tag to handle all special characters safely.
278
+ const encodedTag = encodeTagName(tag);
279
+ return path.join(this.qwenDir, `checkpoint-${encodedTag}.json`);
280
+ }
281
+
282
+ private async _getCheckpointPath(tag: string): Promise<string> {
283
+ // 1. Check for the new encoded path first.
284
+ const newPath = this._checkpointPath(tag);
285
+ try {
286
+ await fs.access(newPath);
287
+ return newPath; // Found it, use the new path.
288
+ } catch (error) {
289
+ const nodeError = error as NodeJS.ErrnoException;
290
+ if (nodeError.code !== 'ENOENT') {
291
+ throw error; // A real error occurred, rethrow it.
292
+ }
293
+ // It was not found, so we'll check the old path next.
294
+ }
295
+
296
+ // 2. Fallback for backward compatibility: check for the old raw path.
297
+ const oldPath = path.join(this.qwenDir!, `checkpoint-${tag}.json`);
298
+ try {
299
+ await fs.access(oldPath);
300
+ return oldPath; // Found it, use the old path.
301
+ } catch (error) {
302
+ const nodeError = error as NodeJS.ErrnoException;
303
+ if (nodeError.code !== 'ENOENT') {
304
+ throw error; // A real error occurred, rethrow it.
305
+ }
306
+ }
307
+
308
+ // 3. If neither path exists, return the new encoded path as the canonical one.
309
+ return newPath;
310
+ }
311
+
312
+ async saveCheckpoint(conversation: Content[], tag: string): Promise<void> {
313
+ if (!this.initialized) {
314
+ console.error(
315
+ 'Logger not initialized or checkpoint file path not set. Cannot save a checkpoint.',
316
+ );
317
+ return;
318
+ }
319
+ // Always save with the new encoded path.
320
+ const path = this._checkpointPath(tag);
321
+ try {
322
+ await fs.writeFile(path, JSON.stringify(conversation, null, 2), 'utf-8');
323
+ } catch (error) {
324
+ console.error('Error writing to checkpoint file:', error);
325
+ }
326
+ }
327
+
328
+ async loadCheckpoint(tag: string): Promise<Content[]> {
329
+ if (!this.initialized) {
330
+ console.error(
331
+ 'Logger not initialized or checkpoint file path not set. Cannot load checkpoint.',
332
+ );
333
+ return [];
334
+ }
335
+
336
+ const path = await this._getCheckpointPath(tag);
337
+ try {
338
+ const fileContent = await fs.readFile(path, 'utf-8');
339
+ const parsedContent = JSON.parse(fileContent);
340
+ if (!Array.isArray(parsedContent)) {
341
+ console.warn(
342
+ `Checkpoint file at ${path} is not a valid JSON array. Returning empty checkpoint.`,
343
+ );
344
+ return [];
345
+ }
346
+ return parsedContent as Content[];
347
+ } catch (error) {
348
+ const nodeError = error as NodeJS.ErrnoException;
349
+ if (nodeError.code === 'ENOENT') {
350
+ // This is okay, it just means the checkpoint doesn't exist in either format.
351
+ return [];
352
+ }
353
+ console.error(`Failed to read or parse checkpoint file ${path}:`, error);
354
+ return [];
355
+ }
356
+ }
357
+
358
+ async deleteCheckpoint(tag: string): Promise<boolean> {
359
+ if (!this.initialized || !this.qwenDir) {
360
+ console.error(
361
+ 'Logger not initialized or checkpoint file path not set. Cannot delete checkpoint.',
362
+ );
363
+ return false;
364
+ }
365
+
366
+ let deletedSomething = false;
367
+
368
+ // 1. Attempt to delete the new encoded path.
369
+ const newPath = this._checkpointPath(tag);
370
+ try {
371
+ await fs.unlink(newPath);
372
+ deletedSomething = true;
373
+ } catch (error) {
374
+ const nodeError = error as NodeJS.ErrnoException;
375
+ if (nodeError.code !== 'ENOENT') {
376
+ console.error(`Failed to delete checkpoint file ${newPath}:`, error);
377
+ throw error; // Rethrow unexpected errors
378
+ }
379
+ // It's okay if it doesn't exist.
380
+ }
381
+
382
+ // 2. Attempt to delete the old raw path for backward compatibility.
383
+ const oldPath = path.join(this.qwenDir!, `checkpoint-${tag}.json`);
384
+ if (newPath !== oldPath) {
385
+ try {
386
+ await fs.unlink(oldPath);
387
+ deletedSomething = true;
388
+ } catch (error) {
389
+ const nodeError = error as NodeJS.ErrnoException;
390
+ if (nodeError.code !== 'ENOENT') {
391
+ console.error(`Failed to delete checkpoint file ${oldPath}:`, error);
392
+ throw error; // Rethrow unexpected errors
393
+ }
394
+ // It's okay if it doesn't exist.
395
+ }
396
+ }
397
+
398
+ return deletedSomething;
399
+ }
400
+
401
+ async checkpointExists(tag: string): Promise<boolean> {
402
+ if (!this.initialized) {
403
+ throw new Error(
404
+ 'Logger not initialized. Cannot check for checkpoint existence.',
405
+ );
406
+ }
407
+ let filePath: string | undefined;
408
+ try {
409
+ filePath = await this._getCheckpointPath(tag);
410
+ // We need to check for existence again, because _getCheckpointPath
411
+ // returns a canonical path even if it doesn't exist yet.
412
+ await fs.access(filePath);
413
+ return true;
414
+ } catch (error) {
415
+ const nodeError = error as NodeJS.ErrnoException;
416
+ if (nodeError.code === 'ENOENT') {
417
+ return false; // It truly doesn't exist in either format.
418
+ }
419
+ // A different error occurred.
420
+ console.error(
421
+ `Failed to check checkpoint existence for ${
422
+ filePath ?? `path for tag "${tag}"`
423
+ }:`,
424
+ error,
425
+ );
426
+ throw error;
427
+ }
428
+ }
429
+
430
+ close(): void {
431
+ this.initialized = false;
432
+ this.logFilePath = undefined;
433
+ this.logs = [];
434
+ this.sessionId = undefined;
435
+ this.messageId = 0;
436
+ }
437
+ }
projects/ui/qwen-code/packages/core/src/core/loggingContentGenerator.ts ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import {
8
+ Content,
9
+ CountTokensParameters,
10
+ CountTokensResponse,
11
+ EmbedContentParameters,
12
+ EmbedContentResponse,
13
+ GenerateContentParameters,
14
+ GenerateContentResponseUsageMetadata,
15
+ GenerateContentResponse,
16
+ } from '@google/genai';
17
+ import {
18
+ ApiRequestEvent,
19
+ ApiResponseEvent,
20
+ ApiErrorEvent,
21
+ } from '../telemetry/types.js';
22
+ import { Config } from '../config/config.js';
23
+ import {
24
+ logApiError,
25
+ logApiRequest,
26
+ logApiResponse,
27
+ } from '../telemetry/loggers.js';
28
+ import { ContentGenerator } from './contentGenerator.js';
29
+ import { toContents } from '../code_assist/converter.js';
30
+ import { isStructuredError } from '../utils/quotaErrorDetection.js';
31
+
32
// Minimal shape used to read an HTTP status off errors that
// isStructuredError() has already vetted (see _logApiError below).
interface StructuredError {
  status: number;
}
35
+
36
+ /**
37
+ * A decorator that wraps a ContentGenerator to add logging to API calls.
38
+ */
39
+ export class LoggingContentGenerator implements ContentGenerator {
40
+ constructor(
41
+ private readonly wrapped: ContentGenerator,
42
+ private readonly config: Config,
43
+ ) {}
44
+
45
+ getWrapped(): ContentGenerator {
46
+ return this.wrapped;
47
+ }
48
+
49
+ private logApiRequest(
50
+ contents: Content[],
51
+ model: string,
52
+ promptId: string,
53
+ ): void {
54
+ const requestText = JSON.stringify(contents);
55
+ logApiRequest(
56
+ this.config,
57
+ new ApiRequestEvent(model, promptId, requestText),
58
+ );
59
+ }
60
+
61
+ private _logApiResponse(
62
+ responseId: string,
63
+ durationMs: number,
64
+ prompt_id: string,
65
+ usageMetadata?: GenerateContentResponseUsageMetadata,
66
+ responseText?: string,
67
+ ): void {
68
+ logApiResponse(
69
+ this.config,
70
+ new ApiResponseEvent(
71
+ responseId,
72
+ this.config.getModel(),
73
+ durationMs,
74
+ prompt_id,
75
+ this.config.getContentGeneratorConfig()?.authType,
76
+ usageMetadata,
77
+ responseText,
78
+ ),
79
+ );
80
+ }
81
+
82
+ private _logApiError(
83
+ responseId: string | undefined,
84
+ durationMs: number,
85
+ error: unknown,
86
+ prompt_id: string,
87
+ ): void {
88
+ const errorMessage = error instanceof Error ? error.message : String(error);
89
+ const errorType = error instanceof Error ? error.name : 'unknown';
90
+
91
+ logApiError(
92
+ this.config,
93
+ new ApiErrorEvent(
94
+ responseId,
95
+ this.config.getModel(),
96
+ errorMessage,
97
+ durationMs,
98
+ prompt_id,
99
+ this.config.getContentGeneratorConfig()?.authType,
100
+ errorType,
101
+ isStructuredError(error)
102
+ ? (error as StructuredError).status
103
+ : undefined,
104
+ ),
105
+ );
106
+ }
107
+
108
+ async generateContent(
109
+ req: GenerateContentParameters,
110
+ userPromptId: string,
111
+ ): Promise<GenerateContentResponse> {
112
+ const startTime = Date.now();
113
+ this.logApiRequest(toContents(req.contents), req.model, userPromptId);
114
+ try {
115
+ const response = await this.wrapped.generateContent(req, userPromptId);
116
+ const durationMs = Date.now() - startTime;
117
+ this._logApiResponse(
118
+ response.responseId ?? '',
119
+ durationMs,
120
+ userPromptId,
121
+ response.usageMetadata,
122
+ JSON.stringify(response),
123
+ );
124
+ return response;
125
+ } catch (error) {
126
+ const durationMs = Date.now() - startTime;
127
+ this._logApiError(undefined, durationMs, error, userPromptId);
128
+ throw error;
129
+ }
130
+ }
131
+
132
+ async generateContentStream(
133
+ req: GenerateContentParameters,
134
+ userPromptId: string,
135
+ ): Promise<AsyncGenerator<GenerateContentResponse>> {
136
+ const startTime = Date.now();
137
+ this.logApiRequest(toContents(req.contents), req.model, userPromptId);
138
+
139
+ let stream: AsyncGenerator<GenerateContentResponse>;
140
+ try {
141
+ stream = await this.wrapped.generateContentStream(req, userPromptId);
142
+ } catch (error) {
143
+ const durationMs = Date.now() - startTime;
144
+ this._logApiError(undefined, durationMs, error, userPromptId);
145
+ throw error;
146
+ }
147
+
148
+ return this.loggingStreamWrapper(stream, startTime, userPromptId);
149
+ }
150
+
151
+ private async *loggingStreamWrapper(
152
+ stream: AsyncGenerator<GenerateContentResponse>,
153
+ startTime: number,
154
+ userPromptId: string,
155
+ ): AsyncGenerator<GenerateContentResponse> {
156
+ let lastResponse: GenerateContentResponse | undefined;
157
+ let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined;
158
+ try {
159
+ for await (const response of stream) {
160
+ lastResponse = response;
161
+ if (response.usageMetadata) {
162
+ lastUsageMetadata = response.usageMetadata;
163
+ }
164
+ yield response;
165
+ }
166
+ } catch (error) {
167
+ const durationMs = Date.now() - startTime;
168
+ this._logApiError(undefined, durationMs, error, userPromptId);
169
+ throw error;
170
+ }
171
+ const durationMs = Date.now() - startTime;
172
+ if (lastResponse) {
173
+ this._logApiResponse(
174
+ lastResponse.responseId ?? '',
175
+ durationMs,
176
+ userPromptId,
177
+ lastUsageMetadata,
178
+ JSON.stringify(lastResponse),
179
+ );
180
+ }
181
+ }
182
+
183
+ async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
184
+ return this.wrapped.countTokens(req);
185
+ }
186
+
187
+ async embedContent(
188
+ req: EmbedContentParameters,
189
+ ): Promise<EmbedContentResponse> {
190
+ return this.wrapped.embedContent(req);
191
+ }
192
+ }
projects/ui/qwen-code/packages/core/src/core/nonInteractiveToolExecutor.test.ts ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, vi, beforeEach } from 'vitest';
8
+ import { executeToolCall } from './nonInteractiveToolExecutor.js';
9
+ import {
10
+ ToolRegistry,
11
+ ToolCallRequestInfo,
12
+ ToolResult,
13
+ Config,
14
+ ToolErrorType,
15
+ } from '../index.js';
16
+ import { Part } from '@google/genai';
17
+ import { MockTool } from '../test-utils/tools.js';
18
+
19
+ describe('executeToolCall', () => {
20
+ let mockToolRegistry: ToolRegistry;
21
+ let mockTool: MockTool;
22
+ let abortController: AbortController;
23
+ let mockConfig: Config;
24
+
25
+ beforeEach(() => {
26
+ mockTool = new MockTool();
27
+
28
+ mockToolRegistry = {
29
+ getTool: vi.fn(),
30
+ // Add other ToolRegistry methods if needed, or use a more complete mock
31
+ } as unknown as ToolRegistry;
32
+
33
+ mockConfig = {
34
+ getSessionId: () => 'test-session-id',
35
+ getUsageStatisticsEnabled: () => true,
36
+ getDebugMode: () => false,
37
+ getContentGeneratorConfig: () => ({
38
+ model: 'test-model',
39
+ authType: 'oauth-personal',
40
+ }),
41
+ getToolRegistry: () => mockToolRegistry,
42
+ } as unknown as Config;
43
+
44
+ abortController = new AbortController();
45
+ });
46
+
47
+ it('should execute a tool successfully', async () => {
48
+ const request: ToolCallRequestInfo = {
49
+ callId: 'call1',
50
+ name: 'testTool',
51
+ args: { param1: 'value1' },
52
+ isClientInitiated: false,
53
+ prompt_id: 'prompt-id-1',
54
+ };
55
+ const toolResult: ToolResult = {
56
+ llmContent: 'Tool executed successfully',
57
+ returnDisplay: 'Success!',
58
+ };
59
+ vi.mocked(mockToolRegistry.getTool).mockReturnValue(mockTool);
60
+ vi.spyOn(mockTool, 'validateBuildAndExecute').mockResolvedValue(toolResult);
61
+
62
+ const response = await executeToolCall(
63
+ mockConfig,
64
+ request,
65
+ abortController.signal,
66
+ );
67
+
68
+ expect(mockToolRegistry.getTool).toHaveBeenCalledWith('testTool');
69
+ expect(mockTool.validateBuildAndExecute).toHaveBeenCalledWith(
70
+ request.args,
71
+ abortController.signal,
72
+ );
73
+ expect(response.callId).toBe('call1');
74
+ expect(response.error).toBeUndefined();
75
+ expect(response.resultDisplay).toBe('Success!');
76
+ expect(response.responseParts).toEqual({
77
+ functionResponse: {
78
+ name: 'testTool',
79
+ id: 'call1',
80
+ response: { output: 'Tool executed successfully' },
81
+ },
82
+ });
83
+ });
84
+
85
+ it('should return an error if tool is not found', async () => {
86
+ const request: ToolCallRequestInfo = {
87
+ callId: 'call2',
88
+ name: 'nonexistentTool',
89
+ args: {},
90
+ isClientInitiated: false,
91
+ prompt_id: 'prompt-id-2',
92
+ };
93
+ vi.mocked(mockToolRegistry.getTool).mockReturnValue(undefined);
94
+
95
+ const response = await executeToolCall(
96
+ mockConfig,
97
+ request,
98
+ abortController.signal,
99
+ );
100
+
101
+ expect(response.callId).toBe('call2');
102
+ expect(response.error).toBeInstanceOf(Error);
103
+ expect(response.error?.message).toBe(
104
+ 'Tool "nonexistentTool" not found in registry.',
105
+ );
106
+ expect(response.resultDisplay).toBe(
107
+ 'Tool "nonexistentTool" not found in registry.',
108
+ );
109
+ expect(response.responseParts).toEqual([
110
+ {
111
+ functionResponse: {
112
+ name: 'nonexistentTool',
113
+ id: 'call2',
114
+ response: { error: 'Tool "nonexistentTool" not found in registry.' },
115
+ },
116
+ },
117
+ ]);
118
+ });
119
+
120
+ it('should return an error if tool validation fails', async () => {
121
+ const request: ToolCallRequestInfo = {
122
+ callId: 'call3',
123
+ name: 'testTool',
124
+ args: { param1: 'invalid' },
125
+ isClientInitiated: false,
126
+ prompt_id: 'prompt-id-3',
127
+ };
128
+ const validationErrorResult: ToolResult = {
129
+ llmContent: 'Error: Invalid parameters',
130
+ returnDisplay: 'Invalid parameters',
131
+ error: {
132
+ message: 'Invalid parameters',
133
+ type: ToolErrorType.INVALID_TOOL_PARAMS,
134
+ },
135
+ };
136
+ vi.mocked(mockToolRegistry.getTool).mockReturnValue(mockTool);
137
+ vi.spyOn(mockTool, 'validateBuildAndExecute').mockResolvedValue(
138
+ validationErrorResult,
139
+ );
140
+
141
+ const response = await executeToolCall(
142
+ mockConfig,
143
+ request,
144
+ abortController.signal,
145
+ );
146
+ expect(response).toStrictEqual({
147
+ callId: 'call3',
148
+ error: new Error('Invalid parameters'),
149
+ errorType: ToolErrorType.INVALID_TOOL_PARAMS,
150
+ responseParts: {
151
+ functionResponse: {
152
+ id: 'call3',
153
+ name: 'testTool',
154
+ response: {
155
+ output: 'Error: Invalid parameters',
156
+ },
157
+ },
158
+ },
159
+ resultDisplay: 'Invalid parameters',
160
+ });
161
+ });
162
+
163
+ it('should return an error if tool execution fails', async () => {
164
+ const request: ToolCallRequestInfo = {
165
+ callId: 'call4',
166
+ name: 'testTool',
167
+ args: { param1: 'value1' },
168
+ isClientInitiated: false,
169
+ prompt_id: 'prompt-id-4',
170
+ };
171
+ const executionErrorResult: ToolResult = {
172
+ llmContent: 'Error: Execution failed',
173
+ returnDisplay: 'Execution failed',
174
+ error: {
175
+ message: 'Execution failed',
176
+ type: ToolErrorType.EXECUTION_FAILED,
177
+ },
178
+ };
179
+ vi.mocked(mockToolRegistry.getTool).mockReturnValue(mockTool);
180
+ vi.spyOn(mockTool, 'validateBuildAndExecute').mockResolvedValue(
181
+ executionErrorResult,
182
+ );
183
+
184
+ const response = await executeToolCall(
185
+ mockConfig,
186
+ request,
187
+ abortController.signal,
188
+ );
189
+ expect(response).toStrictEqual({
190
+ callId: 'call4',
191
+ error: new Error('Execution failed'),
192
+ errorType: ToolErrorType.EXECUTION_FAILED,
193
+ responseParts: {
194
+ functionResponse: {
195
+ id: 'call4',
196
+ name: 'testTool',
197
+ response: {
198
+ output: 'Error: Execution failed',
199
+ },
200
+ },
201
+ },
202
+ resultDisplay: 'Execution failed',
203
+ });
204
+ });
205
+
206
+ it('should return an unhandled exception error if execution throws', async () => {
207
+ const request: ToolCallRequestInfo = {
208
+ callId: 'call5',
209
+ name: 'testTool',
210
+ args: { param1: 'value1' },
211
+ isClientInitiated: false,
212
+ prompt_id: 'prompt-id-5',
213
+ };
214
+ const executionError = new Error('Something went very wrong');
215
+ vi.mocked(mockToolRegistry.getTool).mockReturnValue(mockTool);
216
+ vi.spyOn(mockTool, 'validateBuildAndExecute').mockRejectedValue(
217
+ executionError,
218
+ );
219
+
220
+ const response = await executeToolCall(
221
+ mockConfig,
222
+ request,
223
+ abortController.signal,
224
+ );
225
+
226
+ expect(response.callId).toBe('call5');
227
+ expect(response.error).toBe(executionError);
228
+ expect(response.errorType).toBe(ToolErrorType.UNHANDLED_EXCEPTION);
229
+ expect(response.resultDisplay).toBe('Something went very wrong');
230
+ expect(response.responseParts).toEqual([
231
+ {
232
+ functionResponse: {
233
+ name: 'testTool',
234
+ id: 'call5',
235
+ response: { error: 'Something went very wrong' },
236
+ },
237
+ },
238
+ ]);
239
+ });
240
+
241
+ it('should correctly format llmContent with inlineData', async () => {
242
+ const request: ToolCallRequestInfo = {
243
+ callId: 'call6',
244
+ name: 'testTool',
245
+ args: {},
246
+ isClientInitiated: false,
247
+ prompt_id: 'prompt-id-6',
248
+ };
249
+ const imageDataPart: Part = {
250
+ inlineData: { mimeType: 'image/png', data: 'base64data' },
251
+ };
252
+ const toolResult: ToolResult = {
253
+ llmContent: [imageDataPart],
254
+ returnDisplay: 'Image processed',
255
+ };
256
+ vi.mocked(mockToolRegistry.getTool).mockReturnValue(mockTool);
257
+ vi.spyOn(mockTool, 'validateBuildAndExecute').mockResolvedValue(toolResult);
258
+
259
+ const response = await executeToolCall(
260
+ mockConfig,
261
+ request,
262
+ abortController.signal,
263
+ );
264
+
265
+ expect(response.resultDisplay).toBe('Image processed');
266
+ expect(response.responseParts).toEqual([
267
+ {
268
+ functionResponse: {
269
+ name: 'testTool',
270
+ id: 'call6',
271
+ response: {
272
+ output: 'Binary content of type image/png was processed.',
273
+ },
274
+ },
275
+ },
276
+ imageDataPart,
277
+ ]);
278
+ });
279
+ });
projects/ui/qwen-code/packages/core/src/core/nonInteractiveToolExecutor.ts ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import {
8
+ FileDiff,
9
+ logToolCall,
10
+ ToolCallRequestInfo,
11
+ ToolCallResponseInfo,
12
+ ToolErrorType,
13
+ ToolResult,
14
+ } from '../index.js';
15
+ import { DiscoveredMCPTool } from '../tools/mcp-tool.js';
16
+ import { Config } from '../config/config.js';
17
+ import { convertToFunctionResponse } from './coreToolScheduler.js';
18
+ import { ToolCallDecision } from '../telemetry/tool-call-decision.js';
19
+
20
+ /**
21
+ * Executes a single tool call non-interactively.
22
+ * It does not handle confirmations, multiple calls, or live updates.
23
+ */
24
+ export async function executeToolCall(
25
+ config: Config,
26
+ toolCallRequest: ToolCallRequestInfo,
27
+ abortSignal?: AbortSignal,
28
+ ): Promise<ToolCallResponseInfo> {
29
+ const tool = config.getToolRegistry().getTool(toolCallRequest.name);
30
+
31
+ const startTime = Date.now();
32
+ if (!tool) {
33
+ const error = new Error(
34
+ `Tool "${toolCallRequest.name}" not found in registry.`,
35
+ );
36
+ const durationMs = Date.now() - startTime;
37
+ logToolCall(config, {
38
+ 'event.name': 'tool_call',
39
+ 'event.timestamp': new Date().toISOString(),
40
+ function_name: toolCallRequest.name,
41
+ function_args: toolCallRequest.args,
42
+ duration_ms: durationMs,
43
+ success: false,
44
+ error: error.message,
45
+ prompt_id: toolCallRequest.prompt_id,
46
+ tool_type: 'native',
47
+ });
48
+ // Ensure the response structure matches what the API expects for an error
49
+ return {
50
+ callId: toolCallRequest.callId,
51
+ responseParts: [
52
+ {
53
+ functionResponse: {
54
+ id: toolCallRequest.callId,
55
+ name: toolCallRequest.name,
56
+ response: { error: error.message },
57
+ },
58
+ },
59
+ ],
60
+ resultDisplay: error.message,
61
+ error,
62
+ errorType: ToolErrorType.TOOL_NOT_REGISTERED,
63
+ };
64
+ }
65
+
66
+ try {
67
+ // Directly execute without confirmation or live output handling
68
+ const effectiveAbortSignal = abortSignal ?? new AbortController().signal;
69
+ const toolResult: ToolResult = await tool.validateBuildAndExecute(
70
+ toolCallRequest.args,
71
+ effectiveAbortSignal,
72
+ // No live output callback for non-interactive mode
73
+ );
74
+
75
+ const tool_output = toolResult.llmContent;
76
+
77
+ const tool_display = toolResult.returnDisplay;
78
+
79
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
80
+ let metadata: { [key: string]: any } = {};
81
+ if (
82
+ toolResult.error === undefined &&
83
+ typeof tool_display === 'object' &&
84
+ tool_display !== null &&
85
+ 'diffStat' in tool_display
86
+ ) {
87
+ const diffStat = (tool_display as FileDiff).diffStat;
88
+ if (diffStat) {
89
+ metadata = {
90
+ ai_added_lines: diffStat.ai_added_lines,
91
+ ai_removed_lines: diffStat.ai_removed_lines,
92
+ user_added_lines: diffStat.user_added_lines,
93
+ user_removed_lines: diffStat.user_removed_lines,
94
+ };
95
+ }
96
+ }
97
+ const durationMs = Date.now() - startTime;
98
+ logToolCall(config, {
99
+ 'event.name': 'tool_call',
100
+ 'event.timestamp': new Date().toISOString(),
101
+ function_name: toolCallRequest.name,
102
+ function_args: toolCallRequest.args,
103
+ duration_ms: durationMs,
104
+ success: toolResult.error === undefined,
105
+ error:
106
+ toolResult.error === undefined ? undefined : toolResult.error.message,
107
+ error_type:
108
+ toolResult.error === undefined ? undefined : toolResult.error.type,
109
+ prompt_id: toolCallRequest.prompt_id,
110
+ metadata,
111
+ decision: ToolCallDecision.AUTO_ACCEPT,
112
+ tool_type:
113
+ typeof tool !== 'undefined' && tool instanceof DiscoveredMCPTool
114
+ ? 'mcp'
115
+ : 'native',
116
+ });
117
+
118
+ const response = convertToFunctionResponse(
119
+ toolCallRequest.name,
120
+ toolCallRequest.callId,
121
+ tool_output,
122
+ );
123
+
124
+ return {
125
+ callId: toolCallRequest.callId,
126
+ responseParts: response,
127
+ resultDisplay: tool_display,
128
+ error:
129
+ toolResult.error === undefined
130
+ ? undefined
131
+ : new Error(toolResult.error.message),
132
+ errorType:
133
+ toolResult.error === undefined ? undefined : toolResult.error.type,
134
+ };
135
+ } catch (e) {
136
+ const error = e instanceof Error ? e : new Error(String(e));
137
+ const durationMs = Date.now() - startTime;
138
+ logToolCall(config, {
139
+ 'event.name': 'tool_call',
140
+ 'event.timestamp': new Date().toISOString(),
141
+ function_name: toolCallRequest.name,
142
+ function_args: toolCallRequest.args,
143
+ duration_ms: durationMs,
144
+ success: false,
145
+ error: error.message,
146
+ error_type: ToolErrorType.UNHANDLED_EXCEPTION,
147
+ prompt_id: toolCallRequest.prompt_id,
148
+ tool_type:
149
+ typeof tool !== 'undefined' && tool instanceof DiscoveredMCPTool
150
+ ? 'mcp'
151
+ : 'native',
152
+ });
153
+ return {
154
+ callId: toolCallRequest.callId,
155
+ responseParts: [
156
+ {
157
+ functionResponse: {
158
+ id: toolCallRequest.callId,
159
+ name: toolCallRequest.name,
160
+ response: { error: error.message },
161
+ },
162
+ },
163
+ ],
164
+ resultDisplay: error.message,
165
+ error,
166
+ errorType: ToolErrorType.UNHANDLED_EXCEPTION,
167
+ };
168
+ }
169
+ }
projects/ui/qwen-code/packages/core/src/core/openaiContentGenerator.test.ts ADDED
The diff for this file is too large to render. See raw diff
 
projects/ui/qwen-code/packages/core/src/core/openaiContentGenerator.ts ADDED
@@ -0,0 +1,1696 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Qwen
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import {
8
+ CountTokensResponse,
9
+ GenerateContentResponse,
10
+ GenerateContentParameters,
11
+ CountTokensParameters,
12
+ EmbedContentResponse,
13
+ EmbedContentParameters,
14
+ FinishReason,
15
+ Part,
16
+ Content,
17
+ Tool,
18
+ ToolListUnion,
19
+ CallableTool,
20
+ FunctionCall,
21
+ FunctionResponse,
22
+ } from '@google/genai';
23
+ import {
24
+ AuthType,
25
+ ContentGenerator,
26
+ ContentGeneratorConfig,
27
+ } from './contentGenerator.js';
28
+ import OpenAI from 'openai';
29
+ import { logApiError, logApiResponse } from '../telemetry/loggers.js';
30
+ import { ApiErrorEvent, ApiResponseEvent } from '../telemetry/types.js';
31
+ import { Config } from '../config/config.js';
32
+ import { openaiLogger } from '../utils/openaiLogger.js';
33
+ import { safeJsonParse } from '../utils/safeJsonParse.js';
34
+
35
// Extended types to support cache_control
// (DashScope accepts an ephemeral `cache_control` marker on text parts;
// the upstream OpenAI SDK types do not declare it, so we extend them here.)
interface ChatCompletionContentPartTextWithCache
  extends OpenAI.Chat.ChatCompletionContentPartText {
  // Marks this text part as cacheable on the provider side.
  cache_control?: { type: 'ephemeral' };
}

// Union of content-part types where the text variant carries cache_control.
type ChatCompletionContentPartWithCache =
  | ChatCompletionContentPartTextWithCache
  | OpenAI.Chat.ChatCompletionContentPartImage
  | OpenAI.Chat.ChatCompletionContentPartRefusal;

// OpenAI API type definitions for logging
// These mirror the chat-completion wire format for the interaction logger.
interface OpenAIToolCall {
  id: string;
  type: 'function';
  function: {
    name: string;
    // JSON-encoded argument object (see JSON.stringify of fc.args below).
    arguments: string;
  };
}

// A single text content item, optionally tagged for provider-side caching.
interface OpenAIContentItem {
  type: 'text';
  text: string;
  cache_control?: { type: 'ephemeral' };
}

interface OpenAIMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string | null | OpenAIContentItem[];
  // Present on assistant messages that invoke tools.
  tool_calls?: OpenAIToolCall[];
  // Present on role === 'tool' result messages.
  tool_call_id?: string;
}

interface OpenAIUsage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
  prompt_tokens_details?: {
    cached_tokens?: number;
  };
}

interface OpenAIChoice {
  index: number;
  message: OpenAIMessage;
  finish_reason: string;
}

// Top-level shape of a chat-completion response as written to the log.
interface OpenAIResponseFormat {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: OpenAIChoice[];
  usage?: OpenAIUsage;
}
92
+
93
+ export class OpenAIContentGenerator implements ContentGenerator {
94
+ protected client: OpenAI;
95
+ private model: string;
96
+ private contentGeneratorConfig: ContentGeneratorConfig;
97
+ private config: Config;
98
+ private streamingToolCalls: Map<
99
+ number,
100
+ {
101
+ id?: string;
102
+ name?: string;
103
+ arguments: string;
104
+ }
105
+ > = new Map();
106
+
107
  /**
   * Create a generator backed by the OpenAI SDK client.
   *
   * @param contentGeneratorConfig Model/auth/endpoint configuration
   *   (model name, API key, base URL, timeout, retry count).
   * @param gcConfig Global CLI config, used for the version string and
   *   later for telemetry/session lookups.
   */
  constructor(
    contentGeneratorConfig: ContentGeneratorConfig,
    gcConfig: Config,
  ) {
    this.model = contentGeneratorConfig.model;
    this.contentGeneratorConfig = contentGeneratorConfig;
    this.config = gcConfig;

    const version = gcConfig.getCliVersion() || 'unknown';
    const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;

    // Check if using OpenRouter and add required headers.
    // OpenRouter wants attribution headers; DashScope wants cache-control
    // and auth-type headers. The two sets are mutually exclusive.
    const isOpenRouterProvider = this.isOpenRouterProvider();
    const isDashScopeProvider = this.isDashScopeProvider();

    const defaultHeaders = {
      'User-Agent': userAgent,
      ...(isOpenRouterProvider
        ? {
            'HTTP-Referer': 'https://github.com/QwenLM/qwen-code.git',
            'X-Title': 'Qwen Code',
          }
        : isDashScopeProvider
          ? {
              'X-DashScope-CacheControl': 'enable',
              'X-DashScope-UserAgent': userAgent,
              'X-DashScope-AuthType': contentGeneratorConfig.authType,
            }
          : {}),
    };

    // Defaults: 120s request timeout, 3 retries, unless config overrides.
    this.client = new OpenAI({
      apiKey: contentGeneratorConfig.apiKey,
      baseURL: contentGeneratorConfig.baseUrl,
      timeout: contentGeneratorConfig.timeout ?? 120000,
      maxRetries: contentGeneratorConfig.maxRetries ?? 3,
      defaultHeaders,
    });
  }
146
+
147
  /**
   * Hook for subclasses to customize error handling behavior.
   *
   * Called from the catch paths of generateContent/generateContentStream
   * before the error is written to console.error.
   *
   * @param error The error that occurred
   * @param request The original request
   * @returns true if error logging should be suppressed, false otherwise
   */
  protected shouldSuppressErrorLogging(
    _error: unknown,
    _request: GenerateContentParameters,
  ): boolean {
    return false; // Default behavior: never suppress error logging
  }
159
+
160
+ /**
161
+ * Check if an error is a timeout error
162
+ */
163
+ private isTimeoutError(error: unknown): boolean {
164
+ if (!error) return false;
165
+
166
+ const errorMessage =
167
+ error instanceof Error
168
+ ? error.message.toLowerCase()
169
+ : String(error).toLowerCase();
170
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
171
+ const errorCode = (error as any)?.code;
172
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
173
+ const errorType = (error as any)?.type;
174
+
175
+ // Check for common timeout indicators
176
+ return (
177
+ errorMessage.includes('timeout') ||
178
+ errorMessage.includes('timed out') ||
179
+ errorMessage.includes('connection timeout') ||
180
+ errorMessage.includes('request timeout') ||
181
+ errorMessage.includes('read timeout') ||
182
+ errorMessage.includes('etimedout') || // Include ETIMEDOUT in message check
183
+ errorMessage.includes('esockettimedout') || // Include ESOCKETTIMEDOUT in message check
184
+ errorCode === 'ETIMEDOUT' ||
185
+ errorCode === 'ESOCKETTIMEDOUT' ||
186
+ errorType === 'timeout' ||
187
+ // OpenAI specific timeout indicators
188
+ errorMessage.includes('request timed out') ||
189
+ errorMessage.includes('deadline exceeded')
190
+ );
191
+ }
192
+
193
+ private isOpenRouterProvider(): boolean {
194
+ const baseURL = this.contentGeneratorConfig.baseUrl || '';
195
+ return baseURL.includes('openrouter.ai');
196
+ }
197
+
198
+ /**
199
+ * Determine if this is a DashScope provider.
200
+ * DashScope providers include QWEN_OAUTH auth type or specific DashScope base URLs.
201
+ *
202
+ * @returns true if this is a DashScope provider, false otherwise
203
+ */
204
+ private isDashScopeProvider(): boolean {
205
+ const authType = this.contentGeneratorConfig.authType;
206
+ const baseUrl = this.contentGeneratorConfig.baseUrl;
207
+
208
+ return (
209
+ authType === AuthType.QWEN_OAUTH ||
210
+ baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1' ||
211
+ baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1'
212
+ );
213
+ }
214
+
215
+ /**
216
+ * Build metadata object for OpenAI API requests.
217
+ *
218
+ * @param userPromptId The user prompt ID to include in metadata
219
+ * @returns metadata object if shouldIncludeMetadata() returns true, undefined otherwise
220
+ */
221
+ private buildMetadata(
222
+ userPromptId: string,
223
+ ): { metadata: { sessionId?: string; promptId: string } } | undefined {
224
+ if (!this.isDashScopeProvider()) {
225
+ return undefined;
226
+ }
227
+
228
+ return {
229
+ metadata: {
230
+ sessionId: this.config.getSessionId?.(),
231
+ promptId: userPromptId,
232
+ },
233
+ };
234
+ }
235
+
236
  /**
   * Assemble the OpenAI `chat.completions.create` parameter object for a
   * Gemini-style request: converted messages, sampling parameters,
   * optional DashScope metadata/cache-control, converted tools, and
   * streaming flags.
   *
   * @param request The incoming Gemini-format request.
   * @param userPromptId Prompt id forwarded into DashScope metadata.
   * @param streaming When true, sets `stream` and requests usage in the
   *   final chunk via `stream_options.include_usage`.
   */
  private async buildCreateParams(
    request: GenerateContentParameters,
    userPromptId: string,
    streaming: boolean = false,
  ): Promise<Parameters<typeof this.client.chat.completions.create>[0]> {
    let messages = this.convertToOpenAIFormat(request);

    // Add cache control to system and last messages for DashScope providers
    // Only add cache control to system message for non-streaming requests
    if (this.isDashScopeProvider()) {
      messages = this.addDashScopeCacheControl(
        messages,
        streaming ? 'both' : 'system',
      );
    }

    // Build sampling parameters with clear priority:
    // 1. Request-level parameters (highest priority)
    // 2. Config-level sampling parameters (medium priority)
    // 3. Default values (lowest priority)
    const samplingParams = this.buildSamplingParameters(request);

    const createParams: Parameters<
      typeof this.client.chat.completions.create
    >[0] = {
      model: this.model,
      messages,
      ...samplingParams,
      // buildMetadata returns undefined for non-DashScope; spread nothing.
      ...(this.buildMetadata(userPromptId) || {}),
    };

    if (request.config?.tools) {
      createParams.tools = await this.convertGeminiToolsToOpenAI(
        request.config.tools,
      );
    }

    if (streaming) {
      createParams.stream = true;
      createParams.stream_options = { include_usage: true };
    }

    return createParams;
  }
280
+
281
  /**
   * Non-streaming content generation.
   *
   * Sends the converted request to the OpenAI-compatible endpoint,
   * converts the completion back to Gemini format, and emits telemetry
   * (ApiResponseEvent / ApiErrorEvent) plus optional request/response
   * logging via openaiLogger.
   *
   * @param request Gemini-format generation request.
   * @param userPromptId Id used to correlate telemetry events.
   * @returns The response converted to Gemini format.
   * @throws A descriptive Error on timeout, otherwise rethrows the
   *   original API error.
   */
  async generateContent(
    request: GenerateContentParameters,
    userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const startTime = Date.now();
    const createParams = await this.buildCreateParams(
      request,
      userPromptId,
      false,
    );

    try {
      const completion = (await this.client.chat.completions.create(
        createParams,
      )) as OpenAI.Chat.ChatCompletion;

      const response = this.convertToGeminiFormat(completion);
      const durationMs = Date.now() - startTime;

      // Log API response event for UI telemetry
      const responseEvent = new ApiResponseEvent(
        response.responseId || 'unknown',
        this.model,
        durationMs,
        userPromptId,
        this.contentGeneratorConfig.authType,
        response.usageMetadata,
      );

      logApiResponse(this.config, responseEvent);

      // Log interaction if enabled
      if (this.contentGeneratorConfig.enableOpenAILogging) {
        const openaiRequest = createParams;
        const openaiResponse = this.convertGeminiResponseToOpenAI(response);
        await openaiLogger.logInteraction(openaiRequest, openaiResponse);
      }

      return response;
    } catch (error) {
      const durationMs = Date.now() - startTime;

      // Identify timeout errors specifically so the user gets an
      // actionable message instead of a raw network error.
      const isTimeoutError = this.isTimeoutError(error);
      const errorMessage = isTimeoutError
        ? `Request timeout after ${Math.round(durationMs / 1000)}s. Try reducing input length or increasing timeout in config.`
        : error instanceof Error
          ? error.message
          : String(error);

      // Log API error event for UI telemetry
      const errorEvent = new ApiErrorEvent(
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).requestID || 'unknown',
        this.model,
        errorMessage,
        durationMs,
        userPromptId,
        this.contentGeneratorConfig.authType,
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).type,
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).code,
      );
      logApiError(this.config, errorEvent);

      // Log error interaction if enabled
      if (this.contentGeneratorConfig.enableOpenAILogging) {
        await openaiLogger.logInteraction(
          createParams,
          undefined,
          error as Error,
        );
      }

      // Allow subclasses to suppress error logging for specific scenarios
      if (!this.shouldSuppressErrorLogging(error, request)) {
        console.error('OpenAI API Error:', errorMessage);
      }

      // Provide helpful timeout-specific error message
      if (isTimeoutError) {
        throw new Error(
          `${errorMessage}\n\nTroubleshooting tips:\n` +
            `- Reduce input length or complexity\n` +
            `- Increase timeout in config: contentGenerator.timeout\n` +
            `- Check network connectivity\n` +
            `- Consider using streaming mode for long responses`,
        );
      }

      throw error;
    }
  }
375
+
376
  /**
   * Streaming content generation.
   *
   * Opens a streaming chat completion and returns an async generator of
   * Gemini-format responses. Telemetry and optional interaction logging
   * happen once, after the stream completes (not per chunk).
   *
   * Two distinct error paths exist:
   *  - errors while setting up the stream (outer catch), and
   *  - errors raised mid-stream inside the wrapped generator (inner catch);
   * both emit an ApiErrorEvent and produce timeout-specific messages.
   *
   * @param request Gemini-format generation request.
   * @param userPromptId Id used to correlate telemetry events.
   */
  async generateContentStream(
    request: GenerateContentParameters,
    userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const startTime = Date.now();
    const createParams = await this.buildCreateParams(
      request,
      userPromptId,
      true,
    );

    try {
      const stream = (await this.client.chat.completions.create(
        createParams,
      )) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;

      const originalStream = this.streamGenerator(stream);

      // Collect all responses for final logging (don't log during streaming)
      const responses: GenerateContentResponse[] = [];

      // Return a new generator that both yields responses and collects them.
      // Declared as a bound `function*` so `this` refers to the generator
      // instance inside the closure.
      const wrappedGenerator = async function* (this: OpenAIContentGenerator) {
        try {
          for await (const response of originalStream) {
            responses.push(response);
            yield response;
          }

          const durationMs = Date.now() - startTime;

          // Get final usage metadata from the last response that has it
          const finalUsageMetadata = responses
            .slice()
            .reverse()
            .find((r) => r.usageMetadata)?.usageMetadata;

          // Log API response event for UI telemetry
          const responseEvent = new ApiResponseEvent(
            responses[responses.length - 1]?.responseId || 'unknown',
            this.model,
            durationMs,
            userPromptId,
            this.contentGeneratorConfig.authType,
            finalUsageMetadata,
          );

          logApiResponse(this.config, responseEvent);

          // Log interaction if enabled (same as generateContent method)
          if (this.contentGeneratorConfig.enableOpenAILogging) {
            const openaiRequest = createParams;
            // For streaming, we combine all responses into a single response for logging
            const combinedResponse =
              this.combineStreamResponsesForLogging(responses);
            const openaiResponse =
              this.convertGeminiResponseToOpenAI(combinedResponse);
            await openaiLogger.logInteraction(openaiRequest, openaiResponse);
          }
        } catch (error) {
          const durationMs = Date.now() - startTime;

          // Identify timeout errors specifically for streaming
          const isTimeoutError = this.isTimeoutError(error);
          const errorMessage = isTimeoutError
            ? `Streaming request timeout after ${Math.round(durationMs / 1000)}s. Try reducing input length or increasing timeout in config.`
            : error instanceof Error
              ? error.message
              : String(error);

          // Log API error event for UI telemetry
          const errorEvent = new ApiErrorEvent(
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            (error as any).requestID || 'unknown',
            this.model,
            errorMessage,
            durationMs,
            userPromptId,
            this.contentGeneratorConfig.authType,
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            (error as any).type,
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            (error as any).code,
          );
          logApiError(this.config, errorEvent);

          // Log error interaction if enabled
          if (this.contentGeneratorConfig.enableOpenAILogging) {
            await openaiLogger.logInteraction(
              createParams,
              undefined,
              error as Error,
            );
          }

          // Provide helpful timeout-specific error message for streaming
          if (isTimeoutError) {
            throw new Error(
              `${errorMessage}\n\nStreaming timeout troubleshooting:\n` +
                `- Reduce input length or complexity\n` +
                `- Increase timeout in config: contentGenerator.timeout\n` +
                `- Check network stability for streaming connections\n` +
                `- Consider using non-streaming mode for very long inputs`,
            );
          }

          throw error;
        }
      }.bind(this);

      return wrappedGenerator();
    } catch (error) {
      const durationMs = Date.now() - startTime;

      // Identify timeout errors specifically for streaming setup
      const isTimeoutError = this.isTimeoutError(error);
      const errorMessage = isTimeoutError
        ? `Streaming setup timeout after ${Math.round(durationMs / 1000)}s. Try reducing input length or increasing timeout in config.`
        : error instanceof Error
          ? error.message
          : String(error);

      // Log API error event for UI telemetry
      const errorEvent = new ApiErrorEvent(
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).requestID || 'unknown',
        this.model,
        errorMessage,
        durationMs,
        userPromptId,
        this.contentGeneratorConfig.authType,
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).type,
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).code,
      );
      logApiError(this.config, errorEvent);

      // Allow subclasses to suppress error logging for specific scenarios
      if (!this.shouldSuppressErrorLogging(error, request)) {
        console.error('OpenAI API Streaming Error:', errorMessage);
      }

      // Provide helpful timeout-specific error message for streaming setup
      if (isTimeoutError) {
        throw new Error(
          `${errorMessage}\n\nStreaming setup timeout troubleshooting:\n` +
            `- Reduce input length or complexity\n` +
            `- Increase timeout in config: contentGenerator.timeout\n` +
            `- Check network connectivity and firewall settings\n` +
            `- Consider using non-streaming mode for very long inputs`,
        );
      }

      throw error;
    }
  }
533
+
534
+ private async *streamGenerator(
535
+ stream: AsyncIterable<OpenAI.Chat.ChatCompletionChunk>,
536
+ ): AsyncGenerator<GenerateContentResponse> {
537
+ // Reset the accumulator for each new stream
538
+ this.streamingToolCalls.clear();
539
+
540
+ for await (const chunk of stream) {
541
+ const response = this.convertStreamChunkToGeminiFormat(chunk);
542
+
543
+ // Ignore empty responses, which would cause problems with downstream code
544
+ // that expects a valid response.
545
+ if (
546
+ response.candidates?.[0]?.content?.parts?.length === 0 &&
547
+ !response.usageMetadata
548
+ ) {
549
+ continue;
550
+ }
551
+
552
+ yield response;
553
+ }
554
+ }
555
+
556
+ /**
557
+ * Combine streaming responses for logging purposes
558
+ */
559
+ private combineStreamResponsesForLogging(
560
+ responses: GenerateContentResponse[],
561
+ ): GenerateContentResponse {
562
+ if (responses.length === 0) {
563
+ return new GenerateContentResponse();
564
+ }
565
+
566
+ const lastResponse = responses[responses.length - 1];
567
+
568
+ // Find the last response with usage metadata
569
+ const finalUsageMetadata = responses
570
+ .slice()
571
+ .reverse()
572
+ .find((r) => r.usageMetadata)?.usageMetadata;
573
+
574
+ // Combine all text content from the stream
575
+ const combinedParts: Part[] = [];
576
+ let combinedText = '';
577
+ const functionCalls: Part[] = [];
578
+
579
+ for (const response of responses) {
580
+ if (response.candidates?.[0]?.content?.parts) {
581
+ for (const part of response.candidates[0].content.parts) {
582
+ if ('text' in part && part.text) {
583
+ combinedText += part.text;
584
+ } else if ('functionCall' in part && part.functionCall) {
585
+ functionCalls.push(part);
586
+ }
587
+ }
588
+ }
589
+ }
590
+
591
+ // Add combined text if any
592
+ if (combinedText) {
593
+ combinedParts.push({ text: combinedText });
594
+ }
595
+
596
+ // Add function calls
597
+ combinedParts.push(...functionCalls);
598
+
599
+ // Create combined response
600
+ const combinedResponse = new GenerateContentResponse();
601
+ combinedResponse.candidates = [
602
+ {
603
+ content: {
604
+ parts: combinedParts,
605
+ role: 'model' as const,
606
+ },
607
+ finishReason:
608
+ responses[responses.length - 1]?.candidates?.[0]?.finishReason ||
609
+ FinishReason.FINISH_REASON_UNSPECIFIED,
610
+ index: 0,
611
+ safetyRatings: [],
612
+ },
613
+ ];
614
+ combinedResponse.responseId = lastResponse?.responseId;
615
+ combinedResponse.createTime = lastResponse?.createTime;
616
+ combinedResponse.modelVersion = this.model;
617
+ combinedResponse.promptFeedback = { safetyRatings: [] };
618
+ combinedResponse.usageMetadata = finalUsageMetadata;
619
+
620
+ return combinedResponse;
621
+ }
622
+
623
+ async countTokens(
624
+ request: CountTokensParameters,
625
+ ): Promise<CountTokensResponse> {
626
+ // Use tiktoken for accurate token counting
627
+ const content = JSON.stringify(request.contents);
628
+ let totalTokens = 0;
629
+
630
+ try {
631
+ const { get_encoding } = await import('tiktoken');
632
+ const encoding = get_encoding('cl100k_base'); // GPT-4 encoding, but estimate for qwen
633
+ totalTokens = encoding.encode(content).length;
634
+ encoding.free();
635
+ } catch (error) {
636
+ console.warn(
637
+ 'Failed to load tiktoken, falling back to character approximation:',
638
+ error,
639
+ );
640
+ // Fallback: rough approximation using character count
641
+ totalTokens = Math.ceil(content.length / 4); // Rough estimate: 1 token ≈ 4 characters
642
+ }
643
+
644
+ return {
645
+ totalTokens,
646
+ };
647
+ }
648
+
649
+ async embedContent(
650
+ request: EmbedContentParameters,
651
+ ): Promise<EmbedContentResponse> {
652
+ // Extract text from contents
653
+ let text = '';
654
+ if (Array.isArray(request.contents)) {
655
+ text = request.contents
656
+ .map((content) => {
657
+ if (typeof content === 'string') return content;
658
+ if ('parts' in content && content.parts) {
659
+ return content.parts
660
+ .map((part) =>
661
+ typeof part === 'string'
662
+ ? part
663
+ : 'text' in part
664
+ ? (part as { text?: string }).text || ''
665
+ : '',
666
+ )
667
+ .join(' ');
668
+ }
669
+ return '';
670
+ })
671
+ .join(' ');
672
+ } else if (request.contents) {
673
+ if (typeof request.contents === 'string') {
674
+ text = request.contents;
675
+ } else if ('parts' in request.contents && request.contents.parts) {
676
+ text = request.contents.parts
677
+ .map((part: Part) =>
678
+ typeof part === 'string' ? part : 'text' in part ? part.text : '',
679
+ )
680
+ .join(' ');
681
+ }
682
+ }
683
+
684
+ try {
685
+ const embedding = await this.client.embeddings.create({
686
+ model: 'text-embedding-ada-002', // Default embedding model
687
+ input: text,
688
+ });
689
+
690
+ return {
691
+ embeddings: [
692
+ {
693
+ values: embedding.data[0].embedding,
694
+ },
695
+ ],
696
+ };
697
+ } catch (error) {
698
+ console.error('OpenAI API Embedding Error:', error);
699
+ throw new Error(
700
+ `OpenAI API error: ${error instanceof Error ? error.message : String(error)}`,
701
+ );
702
+ }
703
+ }
704
+
705
+ private convertGeminiParametersToOpenAI(
706
+ parameters: Record<string, unknown>,
707
+ ): Record<string, unknown> | undefined {
708
+ if (!parameters || typeof parameters !== 'object') {
709
+ return parameters;
710
+ }
711
+
712
+ const converted = JSON.parse(JSON.stringify(parameters));
713
+
714
+ const convertTypes = (obj: unknown): unknown => {
715
+ if (typeof obj !== 'object' || obj === null) {
716
+ return obj;
717
+ }
718
+
719
+ if (Array.isArray(obj)) {
720
+ return obj.map(convertTypes);
721
+ }
722
+
723
+ const result: Record<string, unknown> = {};
724
+ for (const [key, value] of Object.entries(obj)) {
725
+ if (key === 'type' && typeof value === 'string') {
726
+ // Convert Gemini types to OpenAI JSON Schema types
727
+ const lowerValue = value.toLowerCase();
728
+ if (lowerValue === 'integer') {
729
+ result[key] = 'integer';
730
+ } else if (lowerValue === 'number') {
731
+ result[key] = 'number';
732
+ } else {
733
+ result[key] = lowerValue;
734
+ }
735
+ } else if (
736
+ key === 'minimum' ||
737
+ key === 'maximum' ||
738
+ key === 'multipleOf'
739
+ ) {
740
+ // Ensure numeric constraints are actual numbers, not strings
741
+ if (typeof value === 'string' && !isNaN(Number(value))) {
742
+ result[key] = Number(value);
743
+ } else {
744
+ result[key] = value;
745
+ }
746
+ } else if (
747
+ key === 'minLength' ||
748
+ key === 'maxLength' ||
749
+ key === 'minItems' ||
750
+ key === 'maxItems'
751
+ ) {
752
+ // Ensure length constraints are integers, not strings
753
+ if (typeof value === 'string' && !isNaN(Number(value))) {
754
+ result[key] = parseInt(value, 10);
755
+ } else {
756
+ result[key] = value;
757
+ }
758
+ } else if (typeof value === 'object') {
759
+ result[key] = convertTypes(value);
760
+ } else {
761
+ result[key] = value;
762
+ }
763
+ }
764
+ return result;
765
+ };
766
+
767
+ return convertTypes(converted) as Record<string, unknown> | undefined;
768
+ }
769
+
770
+ /**
771
+ * Converts Gemini tools to OpenAI format for API compatibility.
772
+ * Handles both Gemini tools (using 'parameters' field) and MCP tools (using 'parametersJsonSchema' field).
773
+ *
774
+ * Gemini tools use a custom parameter format that needs conversion to OpenAI JSON Schema format.
775
+ * MCP tools already use JSON Schema format in the parametersJsonSchema field and can be used directly.
776
+ *
777
+ * @param geminiTools - Array of Gemini tools to convert
778
+ * @returns Promise resolving to array of OpenAI-compatible tools
779
+ */
780
+ private async convertGeminiToolsToOpenAI(
781
+ geminiTools: ToolListUnion,
782
+ ): Promise<OpenAI.Chat.ChatCompletionTool[]> {
783
+ const openAITools: OpenAI.Chat.ChatCompletionTool[] = [];
784
+
785
+ for (const tool of geminiTools) {
786
+ let actualTool: Tool;
787
+
788
+ // Handle CallableTool vs Tool
789
+ if ('tool' in tool) {
790
+ // This is a CallableTool
791
+ actualTool = await (tool as CallableTool).tool();
792
+ } else {
793
+ // This is already a Tool
794
+ actualTool = tool as Tool;
795
+ }
796
+
797
+ if (actualTool.functionDeclarations) {
798
+ for (const func of actualTool.functionDeclarations) {
799
+ if (func.name && func.description) {
800
+ let parameters: Record<string, unknown> | undefined;
801
+
802
+ // Handle both Gemini tools (parameters) and MCP tools (parametersJsonSchema)
803
+ if (func.parametersJsonSchema) {
804
+ // MCP tool format - use parametersJsonSchema directly
805
+ if (func.parametersJsonSchema) {
806
+ // Create a shallow copy to avoid mutating the original object
807
+ const paramsCopy = {
808
+ ...(func.parametersJsonSchema as Record<string, unknown>),
809
+ };
810
+ parameters = paramsCopy;
811
+ }
812
+ } else if (func.parameters) {
813
+ // Gemini tool format - convert parameters to OpenAI format
814
+ parameters = this.convertGeminiParametersToOpenAI(
815
+ func.parameters as Record<string, unknown>,
816
+ );
817
+ }
818
+
819
+ openAITools.push({
820
+ type: 'function',
821
+ function: {
822
+ name: func.name,
823
+ description: func.description,
824
+ parameters,
825
+ },
826
+ });
827
+ }
828
+ }
829
+ }
830
+ }
831
+
832
+ // console.log(
833
+ // 'OpenAI Tools Parameters:',
834
+ // JSON.stringify(openAITools, null, 2),
835
+ // );
836
+ return openAITools;
837
+ }
838
+
839
  /**
   * Convert a Gemini-format request into the OpenAI chat message list.
   *
   * Handles, in order:
   *  - `config.systemInstruction` (string, Content, or array of either)
   *    flattened into one `system` message;
   *  - each content entry: function responses become `tool` messages,
   *    model turns with function calls become `assistant` messages with
   *    `tool_calls`, everything else becomes plain user/assistant text;
   *  - a final pass that removes orphaned tool calls and merges
   *    consecutive assistant messages (helpers defined elsewhere in this
   *    class).
   */
  private convertToOpenAIFormat(
    request: GenerateContentParameters,
  ): OpenAI.Chat.ChatCompletionMessageParam[] {
    const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];

    // Handle system instruction from config
    if (request.config?.systemInstruction) {
      const systemInstruction = request.config.systemInstruction;
      let systemText = '';

      if (Array.isArray(systemInstruction)) {
        systemText = systemInstruction
          .map((content) => {
            if (typeof content === 'string') return content;
            if ('parts' in content) {
              const contentObj = content as Content;
              return (
                contentObj.parts
                  ?.map((p: Part) =>
                    typeof p === 'string' ? p : 'text' in p ? p.text : '',
                  )
                  .join('\n') || ''
              );
            }
            return '';
          })
          .join('\n');
      } else if (typeof systemInstruction === 'string') {
        systemText = systemInstruction;
      } else if (
        typeof systemInstruction === 'object' &&
        'parts' in systemInstruction
      ) {
        const systemContent = systemInstruction as Content;
        systemText =
          systemContent.parts
            ?.map((p: Part) =>
              typeof p === 'string' ? p : 'text' in p ? p.text : '',
            )
            .join('\n') || '';
      }

      if (systemText) {
        messages.push({
          role: 'system' as const,
          content: systemText,
        });
      }
    }

    // Handle contents
    if (Array.isArray(request.contents)) {
      for (const content of request.contents) {
        if (typeof content === 'string') {
          messages.push({ role: 'user' as const, content });
        } else if ('role' in content && 'parts' in content) {
          // Check if this content has function calls or responses
          const functionCalls: FunctionCall[] = [];
          const functionResponses: FunctionResponse[] = [];
          const textParts: string[] = [];

          for (const part of content.parts || []) {
            if (typeof part === 'string') {
              textParts.push(part);
            } else if ('text' in part && part.text) {
              textParts.push(part.text);
            } else if ('functionCall' in part && part.functionCall) {
              functionCalls.push(part.functionCall);
            } else if ('functionResponse' in part && part.functionResponse) {
              functionResponses.push(part.functionResponse);
            }
          }

          // Handle function responses (tool results)
          if (functionResponses.length > 0) {
            for (const funcResponse of functionResponses) {
              messages.push({
                role: 'tool' as const,
                tool_call_id: funcResponse.id || '',
                content:
                  typeof funcResponse.response === 'string'
                    ? funcResponse.response
                    : JSON.stringify(funcResponse.response),
              });
            }
          }
          // Handle model messages with function calls
          else if (content.role === 'model' && functionCalls.length > 0) {
            const toolCalls = functionCalls.map((fc, index) => ({
              // Synthesize an id when the model omitted one.
              id: fc.id || `call_${index}`,
              type: 'function' as const,
              function: {
                name: fc.name || '',
                arguments: JSON.stringify(fc.args || {}),
              },
            }));

            messages.push({
              role: 'assistant' as const,
              // OpenAI expects null (not '') for content-less tool calls.
              content: textParts.join('') || null,
              tool_calls: toolCalls,
            });
          }
          // Handle regular text messages
          else {
            const role =
              content.role === 'model'
                ? ('assistant' as const)
                : ('user' as const);
            const text = textParts.join('');
            if (text) {
              messages.push({ role, content: text });
            }
          }
        }
      }
    } else if (request.contents) {
      if (typeof request.contents === 'string') {
        messages.push({ role: 'user' as const, content: request.contents });
      } else if ('role' in request.contents && 'parts' in request.contents) {
        const content = request.contents;
        const role =
          content.role === 'model' ? ('assistant' as const) : ('user' as const);
        const text =
          content.parts
            ?.map((p: Part) =>
              typeof p === 'string' ? p : 'text' in p ? p.text : '',
            )
            .join('\n') || '';
        messages.push({ role, content: text });
      }
    }

    // Clean up orphaned tool calls and merge consecutive assistant messages
    const cleanedMessages = this.cleanOrphanedToolCalls(messages);
    const mergedMessages =
      this.mergeConsecutiveAssistantMessages(cleanedMessages);

    return mergedMessages;
  }
979
+
980
+ /**
981
+ * Add cache control flag to specified message(s) for DashScope providers
982
+ */
983
+ private addDashScopeCacheControl(
984
+ messages: OpenAI.Chat.ChatCompletionMessageParam[],
985
+ target: 'system' | 'last' | 'both' = 'both',
986
+ ): OpenAI.Chat.ChatCompletionMessageParam[] {
987
+ if (!this.isDashScopeProvider() || messages.length === 0) {
988
+ return messages;
989
+ }
990
+
991
+ let updatedMessages = [...messages];
992
+
993
+ // Add cache control to system message if requested
994
+ if (target === 'system' || target === 'both') {
995
+ updatedMessages = this.addCacheControlToMessage(
996
+ updatedMessages,
997
+ 'system',
998
+ );
999
+ }
1000
+
1001
+ // Add cache control to last message if requested
1002
+ if (target === 'last' || target === 'both') {
1003
+ updatedMessages = this.addCacheControlToMessage(updatedMessages, 'last');
1004
+ }
1005
+
1006
+ return updatedMessages;
1007
+ }
1008
+
1009
+ /**
1010
+ * Helper method to add cache control to a specific message
1011
+ */
1012
+ private addCacheControlToMessage(
1013
+ messages: OpenAI.Chat.ChatCompletionMessageParam[],
1014
+ target: 'system' | 'last',
1015
+ ): OpenAI.Chat.ChatCompletionMessageParam[] {
1016
+ const updatedMessages = [...messages];
1017
+ let messageIndex: number;
1018
+
1019
+ if (target === 'system') {
1020
+ // Find the first system message
1021
+ messageIndex = messages.findIndex((msg) => msg.role === 'system');
1022
+ if (messageIndex === -1) {
1023
+ return updatedMessages;
1024
+ }
1025
+ } else {
1026
+ // Get the last message
1027
+ messageIndex = messages.length - 1;
1028
+ }
1029
+
1030
+ const message = updatedMessages[messageIndex];
1031
+
1032
+ // Only process messages that have content
1033
+ if ('content' in message && message.content !== null) {
1034
+ if (typeof message.content === 'string') {
1035
+ // Convert string content to array format with cache control
1036
+ const messageWithArrayContent = {
1037
+ ...message,
1038
+ content: [
1039
+ {
1040
+ type: 'text',
1041
+ text: message.content,
1042
+ cache_control: { type: 'ephemeral' },
1043
+ } as ChatCompletionContentPartTextWithCache,
1044
+ ],
1045
+ };
1046
+ updatedMessages[messageIndex] =
1047
+ messageWithArrayContent as OpenAI.Chat.ChatCompletionMessageParam;
1048
+ } else if (Array.isArray(message.content)) {
1049
+ // If content is already an array, add cache_control to the last item
1050
+ const contentArray = [
1051
+ ...message.content,
1052
+ ] as ChatCompletionContentPartWithCache[];
1053
+ if (contentArray.length > 0) {
1054
+ const lastItem = contentArray[contentArray.length - 1];
1055
+ if (lastItem.type === 'text') {
1056
+ // Add cache_control to the last text item
1057
+ contentArray[contentArray.length - 1] = {
1058
+ ...lastItem,
1059
+ cache_control: { type: 'ephemeral' },
1060
+ } as ChatCompletionContentPartTextWithCache;
1061
+ } else {
1062
+ // If the last item is not text, add a new text item with cache_control
1063
+ contentArray.push({
1064
+ type: 'text',
1065
+ text: '',
1066
+ cache_control: { type: 'ephemeral' },
1067
+ } as ChatCompletionContentPartTextWithCache);
1068
+ }
1069
+
1070
+ const messageWithCache = {
1071
+ ...message,
1072
+ content: contentArray,
1073
+ };
1074
+ updatedMessages[messageIndex] =
1075
+ messageWithCache as OpenAI.Chat.ChatCompletionMessageParam;
1076
+ }
1077
+ }
1078
+ }
1079
+
1080
+ return updatedMessages;
1081
+ }
1082
+
1083
+ /**
1084
+ * Clean up orphaned tool calls from message history to prevent OpenAI API errors
1085
+ */
1086
+ private cleanOrphanedToolCalls(
1087
+ messages: OpenAI.Chat.ChatCompletionMessageParam[],
1088
+ ): OpenAI.Chat.ChatCompletionMessageParam[] {
1089
+ const cleaned: OpenAI.Chat.ChatCompletionMessageParam[] = [];
1090
+ const toolCallIds = new Set<string>();
1091
+ const toolResponseIds = new Set<string>();
1092
+
1093
+ // First pass: collect all tool call IDs and tool response IDs
1094
+ for (const message of messages) {
1095
+ if (
1096
+ message.role === 'assistant' &&
1097
+ 'tool_calls' in message &&
1098
+ message.tool_calls
1099
+ ) {
1100
+ for (const toolCall of message.tool_calls) {
1101
+ if (toolCall.id) {
1102
+ toolCallIds.add(toolCall.id);
1103
+ }
1104
+ }
1105
+ } else if (
1106
+ message.role === 'tool' &&
1107
+ 'tool_call_id' in message &&
1108
+ message.tool_call_id
1109
+ ) {
1110
+ toolResponseIds.add(message.tool_call_id);
1111
+ }
1112
+ }
1113
+
1114
+ // Second pass: filter out orphaned messages
1115
+ for (const message of messages) {
1116
+ if (
1117
+ message.role === 'assistant' &&
1118
+ 'tool_calls' in message &&
1119
+ message.tool_calls
1120
+ ) {
1121
+ // Filter out tool calls that don't have corresponding responses
1122
+ const validToolCalls = message.tool_calls.filter(
1123
+ (toolCall) => toolCall.id && toolResponseIds.has(toolCall.id),
1124
+ );
1125
+
1126
+ if (validToolCalls.length > 0) {
1127
+ // Keep the message but only with valid tool calls
1128
+ const cleanedMessage = { ...message };
1129
+ (
1130
+ cleanedMessage as OpenAI.Chat.ChatCompletionMessageParam & {
1131
+ tool_calls?: OpenAI.Chat.ChatCompletionMessageToolCall[];
1132
+ }
1133
+ ).tool_calls = validToolCalls;
1134
+ cleaned.push(cleanedMessage);
1135
+ } else if (
1136
+ typeof message.content === 'string' &&
1137
+ message.content.trim()
1138
+ ) {
1139
+ // Keep the message if it has text content, but remove tool calls
1140
+ const cleanedMessage = { ...message };
1141
+ delete (
1142
+ cleanedMessage as OpenAI.Chat.ChatCompletionMessageParam & {
1143
+ tool_calls?: OpenAI.Chat.ChatCompletionMessageToolCall[];
1144
+ }
1145
+ ).tool_calls;
1146
+ cleaned.push(cleanedMessage);
1147
+ }
1148
+ // If no valid tool calls and no content, skip the message entirely
1149
+ } else if (
1150
+ message.role === 'tool' &&
1151
+ 'tool_call_id' in message &&
1152
+ message.tool_call_id
1153
+ ) {
1154
+ // Only keep tool responses that have corresponding tool calls
1155
+ if (toolCallIds.has(message.tool_call_id)) {
1156
+ cleaned.push(message);
1157
+ }
1158
+ } else {
1159
+ // Keep all other messages as-is
1160
+ cleaned.push(message);
1161
+ }
1162
+ }
1163
+
1164
+ // Final validation: ensure every assistant message with tool_calls has corresponding tool responses
1165
+ const finalCleaned: OpenAI.Chat.ChatCompletionMessageParam[] = [];
1166
+ const finalToolCallIds = new Set<string>();
1167
+
1168
+ // Collect all remaining tool call IDs
1169
+ for (const message of cleaned) {
1170
+ if (
1171
+ message.role === 'assistant' &&
1172
+ 'tool_calls' in message &&
1173
+ message.tool_calls
1174
+ ) {
1175
+ for (const toolCall of message.tool_calls) {
1176
+ if (toolCall.id) {
1177
+ finalToolCallIds.add(toolCall.id);
1178
+ }
1179
+ }
1180
+ }
1181
+ }
1182
+
1183
+ // Verify all tool calls have responses
1184
+ const finalToolResponseIds = new Set<string>();
1185
+ for (const message of cleaned) {
1186
+ if (
1187
+ message.role === 'tool' &&
1188
+ 'tool_call_id' in message &&
1189
+ message.tool_call_id
1190
+ ) {
1191
+ finalToolResponseIds.add(message.tool_call_id);
1192
+ }
1193
+ }
1194
+
1195
+ // Remove any remaining orphaned tool calls
1196
+ for (const message of cleaned) {
1197
+ if (
1198
+ message.role === 'assistant' &&
1199
+ 'tool_calls' in message &&
1200
+ message.tool_calls
1201
+ ) {
1202
+ const finalValidToolCalls = message.tool_calls.filter(
1203
+ (toolCall) => toolCall.id && finalToolResponseIds.has(toolCall.id),
1204
+ );
1205
+
1206
+ if (finalValidToolCalls.length > 0) {
1207
+ const cleanedMessage = { ...message };
1208
+ (
1209
+ cleanedMessage as OpenAI.Chat.ChatCompletionMessageParam & {
1210
+ tool_calls?: OpenAI.Chat.ChatCompletionMessageToolCall[];
1211
+ }
1212
+ ).tool_calls = finalValidToolCalls;
1213
+ finalCleaned.push(cleanedMessage);
1214
+ } else if (
1215
+ typeof message.content === 'string' &&
1216
+ message.content.trim()
1217
+ ) {
1218
+ const cleanedMessage = { ...message };
1219
+ delete (
1220
+ cleanedMessage as OpenAI.Chat.ChatCompletionMessageParam & {
1221
+ tool_calls?: OpenAI.Chat.ChatCompletionMessageToolCall[];
1222
+ }
1223
+ ).tool_calls;
1224
+ finalCleaned.push(cleanedMessage);
1225
+ }
1226
+ } else {
1227
+ finalCleaned.push(message);
1228
+ }
1229
+ }
1230
+
1231
+ return finalCleaned;
1232
+ }
1233
+
1234
+ /**
1235
+ * Merge consecutive assistant messages to combine split text and tool calls
1236
+ */
1237
+ private mergeConsecutiveAssistantMessages(
1238
+ messages: OpenAI.Chat.ChatCompletionMessageParam[],
1239
+ ): OpenAI.Chat.ChatCompletionMessageParam[] {
1240
+ const merged: OpenAI.Chat.ChatCompletionMessageParam[] = [];
1241
+
1242
+ for (const message of messages) {
1243
+ if (message.role === 'assistant' && merged.length > 0) {
1244
+ const lastMessage = merged[merged.length - 1];
1245
+
1246
+ // If the last message is also an assistant message, merge them
1247
+ if (lastMessage.role === 'assistant') {
1248
+ // Combine content
1249
+ const combinedContent = [
1250
+ typeof lastMessage.content === 'string' ? lastMessage.content : '',
1251
+ typeof message.content === 'string' ? message.content : '',
1252
+ ]
1253
+ .filter(Boolean)
1254
+ .join('');
1255
+
1256
+ // Combine tool calls
1257
+ const lastToolCalls =
1258
+ 'tool_calls' in lastMessage ? lastMessage.tool_calls || [] : [];
1259
+ const currentToolCalls =
1260
+ 'tool_calls' in message ? message.tool_calls || [] : [];
1261
+ const combinedToolCalls = [...lastToolCalls, ...currentToolCalls];
1262
+
1263
+ // Update the last message with combined data
1264
+ (
1265
+ lastMessage as OpenAI.Chat.ChatCompletionMessageParam & {
1266
+ content: string | null;
1267
+ tool_calls?: OpenAI.Chat.ChatCompletionMessageToolCall[];
1268
+ }
1269
+ ).content = combinedContent || null;
1270
+ if (combinedToolCalls.length > 0) {
1271
+ (
1272
+ lastMessage as OpenAI.Chat.ChatCompletionMessageParam & {
1273
+ content: string | null;
1274
+ tool_calls?: OpenAI.Chat.ChatCompletionMessageToolCall[];
1275
+ }
1276
+ ).tool_calls = combinedToolCalls;
1277
+ }
1278
+
1279
+ continue; // Skip adding the current message since it's been merged
1280
+ }
1281
+ }
1282
+
1283
+ // Add the message as-is if no merging is needed
1284
+ merged.push(message);
1285
+ }
1286
+
1287
+ return merged;
1288
+ }
1289
+
1290
+ private convertToGeminiFormat(
1291
+ openaiResponse: OpenAI.Chat.ChatCompletion,
1292
+ ): GenerateContentResponse {
1293
+ const choice = openaiResponse.choices[0];
1294
+ const response = new GenerateContentResponse();
1295
+
1296
+ const parts: Part[] = [];
1297
+
1298
+ // Handle text content
1299
+ if (choice.message.content) {
1300
+ parts.push({ text: choice.message.content });
1301
+ }
1302
+
1303
+ // Handle tool calls
1304
+ if (choice.message.tool_calls) {
1305
+ for (const toolCall of choice.message.tool_calls) {
1306
+ if (toolCall.function) {
1307
+ let args: Record<string, unknown> = {};
1308
+ if (toolCall.function.arguments) {
1309
+ args = safeJsonParse(toolCall.function.arguments, {});
1310
+ }
1311
+
1312
+ parts.push({
1313
+ functionCall: {
1314
+ id: toolCall.id,
1315
+ name: toolCall.function.name,
1316
+ args,
1317
+ },
1318
+ });
1319
+ }
1320
+ }
1321
+ }
1322
+
1323
+ response.responseId = openaiResponse.id;
1324
+ response.createTime = openaiResponse.created
1325
+ ? openaiResponse.created.toString()
1326
+ : new Date().getTime().toString();
1327
+
1328
+ response.candidates = [
1329
+ {
1330
+ content: {
1331
+ parts,
1332
+ role: 'model' as const,
1333
+ },
1334
+ finishReason: this.mapFinishReason(choice.finish_reason || 'stop'),
1335
+ index: 0,
1336
+ safetyRatings: [],
1337
+ },
1338
+ ];
1339
+
1340
+ response.modelVersion = this.model;
1341
+ response.promptFeedback = { safetyRatings: [] };
1342
+
1343
+ // Add usage metadata if available
1344
+ if (openaiResponse.usage) {
1345
+ const usage = openaiResponse.usage as OpenAIUsage;
1346
+
1347
+ const promptTokens = usage.prompt_tokens || 0;
1348
+ const completionTokens = usage.completion_tokens || 0;
1349
+ const totalTokens = usage.total_tokens || 0;
1350
+ const cachedTokens = usage.prompt_tokens_details?.cached_tokens || 0;
1351
+
1352
+ // If we only have total tokens but no breakdown, estimate the split
1353
+ // Typically input is ~70% and output is ~30% for most conversations
1354
+ let finalPromptTokens = promptTokens;
1355
+ let finalCompletionTokens = completionTokens;
1356
+
1357
+ if (totalTokens > 0 && promptTokens === 0 && completionTokens === 0) {
1358
+ // Estimate: assume 70% input, 30% output
1359
+ finalPromptTokens = Math.round(totalTokens * 0.7);
1360
+ finalCompletionTokens = Math.round(totalTokens * 0.3);
1361
+ }
1362
+
1363
+ response.usageMetadata = {
1364
+ promptTokenCount: finalPromptTokens,
1365
+ candidatesTokenCount: finalCompletionTokens,
1366
+ totalTokenCount: totalTokens,
1367
+ cachedContentTokenCount: cachedTokens,
1368
+ };
1369
+ }
1370
+
1371
+ return response;
1372
+ }
1373
+
1374
+ private convertStreamChunkToGeminiFormat(
1375
+ chunk: OpenAI.Chat.ChatCompletionChunk,
1376
+ ): GenerateContentResponse {
1377
+ const choice = chunk.choices?.[0];
1378
+ const response = new GenerateContentResponse();
1379
+
1380
+ if (choice) {
1381
+ const parts: Part[] = [];
1382
+
1383
+ // Handle text content
1384
+ if (choice.delta?.content) {
1385
+ if (typeof choice.delta.content === 'string') {
1386
+ parts.push({ text: choice.delta.content });
1387
+ }
1388
+ }
1389
+
1390
+ // Handle tool calls - only accumulate during streaming, emit when complete
1391
+ if (choice.delta?.tool_calls) {
1392
+ for (const toolCall of choice.delta.tool_calls) {
1393
+ const index = toolCall.index ?? 0;
1394
+
1395
+ // Get or create the tool call accumulator for this index
1396
+ let accumulatedCall = this.streamingToolCalls.get(index);
1397
+ if (!accumulatedCall) {
1398
+ accumulatedCall = { arguments: '' };
1399
+ this.streamingToolCalls.set(index, accumulatedCall);
1400
+ }
1401
+
1402
+ // Update accumulated data
1403
+ if (toolCall.id) {
1404
+ accumulatedCall.id = toolCall.id;
1405
+ }
1406
+ if (toolCall.function?.name) {
1407
+ // If this is a new function name, reset the arguments
1408
+ if (accumulatedCall.name !== toolCall.function.name) {
1409
+ accumulatedCall.arguments = '';
1410
+ }
1411
+ accumulatedCall.name = toolCall.function.name;
1412
+ }
1413
+ if (toolCall.function?.arguments) {
1414
+ // Check if we already have a complete JSON object
1415
+ const currentArgs = accumulatedCall.arguments;
1416
+ const newArgs = toolCall.function.arguments;
1417
+
1418
+ // If current arguments already form a complete JSON and new arguments start a new object,
1419
+ // this indicates a new tool call with the same name
1420
+ let shouldReset = false;
1421
+ if (currentArgs && newArgs.trim().startsWith('{')) {
1422
+ try {
1423
+ JSON.parse(currentArgs);
1424
+ // If we can parse current arguments as complete JSON and new args start with {,
1425
+ // this is likely a new tool call
1426
+ shouldReset = true;
1427
+ } catch {
1428
+ // Current arguments are not complete JSON, continue accumulating
1429
+ }
1430
+ }
1431
+
1432
+ if (shouldReset) {
1433
+ accumulatedCall.arguments = newArgs;
1434
+ } else {
1435
+ accumulatedCall.arguments += newArgs;
1436
+ }
1437
+ }
1438
+ }
1439
+ }
1440
+
1441
+ // Only emit function calls when streaming is complete (finish_reason is present)
1442
+ if (choice.finish_reason) {
1443
+ for (const [, accumulatedCall] of this.streamingToolCalls) {
1444
+ // TODO: Add back id once we have a way to generate tool_call_id from the VLLM parser.
1445
+ // if (accumulatedCall.id && accumulatedCall.name) {
1446
+ if (accumulatedCall.name) {
1447
+ let args: Record<string, unknown> = {};
1448
+ if (accumulatedCall.arguments) {
1449
+ args = safeJsonParse(accumulatedCall.arguments, {});
1450
+ }
1451
+
1452
+ parts.push({
1453
+ functionCall: {
1454
+ id:
1455
+ accumulatedCall.id ||
1456
+ `call_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`,
1457
+ name: accumulatedCall.name,
1458
+ args,
1459
+ },
1460
+ });
1461
+ }
1462
+ }
1463
+ // Clear all accumulated tool calls
1464
+ this.streamingToolCalls.clear();
1465
+ }
1466
+
1467
+ response.candidates = [
1468
+ {
1469
+ content: {
1470
+ parts,
1471
+ role: 'model' as const,
1472
+ },
1473
+ finishReason: choice.finish_reason
1474
+ ? this.mapFinishReason(choice.finish_reason)
1475
+ : FinishReason.FINISH_REASON_UNSPECIFIED,
1476
+ index: 0,
1477
+ safetyRatings: [],
1478
+ },
1479
+ ];
1480
+ } else {
1481
+ response.candidates = [];
1482
+ }
1483
+
1484
+ response.responseId = chunk.id;
1485
+ response.createTime = chunk.created
1486
+ ? chunk.created.toString()
1487
+ : new Date().getTime().toString();
1488
+
1489
+ response.modelVersion = this.model;
1490
+ response.promptFeedback = { safetyRatings: [] };
1491
+
1492
+ // Add usage metadata if available in the chunk
1493
+ if (chunk.usage) {
1494
+ const usage = chunk.usage as OpenAIUsage;
1495
+
1496
+ const promptTokens = usage.prompt_tokens || 0;
1497
+ const completionTokens = usage.completion_tokens || 0;
1498
+ const totalTokens = usage.total_tokens || 0;
1499
+ const cachedTokens = usage.prompt_tokens_details?.cached_tokens || 0;
1500
+
1501
+ // If we only have total tokens but no breakdown, estimate the split
1502
+ // Typically input is ~70% and output is ~30% for most conversations
1503
+ let finalPromptTokens = promptTokens;
1504
+ let finalCompletionTokens = completionTokens;
1505
+
1506
+ if (totalTokens > 0 && promptTokens === 0 && completionTokens === 0) {
1507
+ // Estimate: assume 70% input, 30% output
1508
+ finalPromptTokens = Math.round(totalTokens * 0.7);
1509
+ finalCompletionTokens = Math.round(totalTokens * 0.3);
1510
+ }
1511
+
1512
+ response.usageMetadata = {
1513
+ promptTokenCount: finalPromptTokens,
1514
+ candidatesTokenCount: finalCompletionTokens,
1515
+ totalTokenCount: totalTokens,
1516
+ cachedContentTokenCount: cachedTokens,
1517
+ };
1518
+ }
1519
+
1520
+ return response;
1521
+ }
1522
+
1523
+ /**
1524
+ * Build sampling parameters with clear priority:
1525
+ * 1. Config-level sampling parameters (highest priority)
1526
+ * 2. Request-level parameters (medium priority)
1527
+ * 3. Default values (lowest priority)
1528
+ */
1529
+ private buildSamplingParameters(
1530
+ request: GenerateContentParameters,
1531
+ ): Record<string, unknown> {
1532
+ const configSamplingParams = this.contentGeneratorConfig.samplingParams;
1533
+
1534
+ const params = {
1535
+ // Temperature: config > request > default
1536
+ temperature:
1537
+ configSamplingParams?.temperature !== undefined
1538
+ ? configSamplingParams.temperature
1539
+ : request.config?.temperature !== undefined
1540
+ ? request.config.temperature
1541
+ : 0.0,
1542
+
1543
+ // Max tokens: config > request > undefined
1544
+ ...(configSamplingParams?.max_tokens !== undefined
1545
+ ? { max_tokens: configSamplingParams.max_tokens }
1546
+ : request.config?.maxOutputTokens !== undefined
1547
+ ? { max_tokens: request.config.maxOutputTokens }
1548
+ : {}),
1549
+
1550
+ // Top-p: config > request > default
1551
+ top_p:
1552
+ configSamplingParams?.top_p !== undefined
1553
+ ? configSamplingParams.top_p
1554
+ : request.config?.topP !== undefined
1555
+ ? request.config.topP
1556
+ : 1.0,
1557
+
1558
+ // Top-k: config only (not available in request)
1559
+ ...(configSamplingParams?.top_k !== undefined
1560
+ ? { top_k: configSamplingParams.top_k }
1561
+ : {}),
1562
+
1563
+ // Repetition penalty: config only
1564
+ ...(configSamplingParams?.repetition_penalty !== undefined
1565
+ ? { repetition_penalty: configSamplingParams.repetition_penalty }
1566
+ : {}),
1567
+
1568
+ // Presence penalty: config only
1569
+ ...(configSamplingParams?.presence_penalty !== undefined
1570
+ ? { presence_penalty: configSamplingParams.presence_penalty }
1571
+ : {}),
1572
+
1573
+ // Frequency penalty: config only
1574
+ ...(configSamplingParams?.frequency_penalty !== undefined
1575
+ ? { frequency_penalty: configSamplingParams.frequency_penalty }
1576
+ : {}),
1577
+ };
1578
+
1579
+ return params;
1580
+ }
1581
+
1582
+ private mapFinishReason(openaiReason: string | null): FinishReason {
1583
+ if (!openaiReason) return FinishReason.FINISH_REASON_UNSPECIFIED;
1584
+ const mapping: Record<string, FinishReason> = {
1585
+ stop: FinishReason.STOP,
1586
+ length: FinishReason.MAX_TOKENS,
1587
+ content_filter: FinishReason.SAFETY,
1588
+ function_call: FinishReason.STOP,
1589
+ tool_calls: FinishReason.STOP,
1590
+ };
1591
+ return mapping[openaiReason] || FinishReason.FINISH_REASON_UNSPECIFIED;
1592
+ }
1593
+
1594
+ /**
1595
+ * Convert Gemini response format to OpenAI chat completion format for logging
1596
+ */
1597
+ private convertGeminiResponseToOpenAI(
1598
+ response: GenerateContentResponse,
1599
+ ): OpenAIResponseFormat {
1600
+ const candidate = response.candidates?.[0];
1601
+ const content = candidate?.content;
1602
+
1603
+ let messageContent: string | null = null;
1604
+ const toolCalls: OpenAIToolCall[] = [];
1605
+
1606
+ if (content?.parts) {
1607
+ const textParts: string[] = [];
1608
+
1609
+ for (const part of content.parts) {
1610
+ if ('text' in part && part.text) {
1611
+ textParts.push(part.text);
1612
+ } else if ('functionCall' in part && part.functionCall) {
1613
+ toolCalls.push({
1614
+ id: part.functionCall.id || `call_${toolCalls.length}`,
1615
+ type: 'function' as const,
1616
+ function: {
1617
+ name: part.functionCall.name || '',
1618
+ arguments: JSON.stringify(part.functionCall.args || {}),
1619
+ },
1620
+ });
1621
+ }
1622
+ }
1623
+
1624
+ messageContent = textParts.join('').trimEnd();
1625
+ }
1626
+
1627
+ const choice: OpenAIChoice = {
1628
+ index: 0,
1629
+ message: {
1630
+ role: 'assistant',
1631
+ content: messageContent,
1632
+ },
1633
+ finish_reason: this.mapGeminiFinishReasonToOpenAI(
1634
+ candidate?.finishReason,
1635
+ ),
1636
+ };
1637
+
1638
+ if (toolCalls.length > 0) {
1639
+ choice.message.tool_calls = toolCalls;
1640
+ }
1641
+
1642
+ const openaiResponse: OpenAIResponseFormat = {
1643
+ id: response.responseId || `chatcmpl-${Date.now()}`,
1644
+ object: 'chat.completion',
1645
+ created: response.createTime
1646
+ ? Number(response.createTime)
1647
+ : Math.floor(Date.now() / 1000),
1648
+ model: this.model,
1649
+ choices: [choice],
1650
+ };
1651
+
1652
+ // Add usage metadata if available
1653
+ if (response.usageMetadata) {
1654
+ openaiResponse.usage = {
1655
+ prompt_tokens: response.usageMetadata.promptTokenCount || 0,
1656
+ completion_tokens: response.usageMetadata.candidatesTokenCount || 0,
1657
+ total_tokens: response.usageMetadata.totalTokenCount || 0,
1658
+ };
1659
+
1660
+ if (response.usageMetadata.cachedContentTokenCount) {
1661
+ openaiResponse.usage.prompt_tokens_details = {
1662
+ cached_tokens: response.usageMetadata.cachedContentTokenCount,
1663
+ };
1664
+ }
1665
+ }
1666
+
1667
+ return openaiResponse;
1668
+ }
1669
+
1670
+ /**
1671
+ * Map Gemini finish reasons to OpenAI finish reasons
1672
+ */
1673
+ private mapGeminiFinishReasonToOpenAI(geminiReason?: unknown): string {
1674
+ if (!geminiReason) return 'stop';
1675
+
1676
+ switch (geminiReason) {
1677
+ case 'STOP':
1678
+ case 1: // FinishReason.STOP
1679
+ return 'stop';
1680
+ case 'MAX_TOKENS':
1681
+ case 2: // FinishReason.MAX_TOKENS
1682
+ return 'length';
1683
+ case 'SAFETY':
1684
+ case 3: // FinishReason.SAFETY
1685
+ return 'content_filter';
1686
+ case 'RECITATION':
1687
+ case 4: // FinishReason.RECITATION
1688
+ return 'content_filter';
1689
+ case 'OTHER':
1690
+ case 5: // FinishReason.OTHER
1691
+ return 'stop';
1692
+ default:
1693
+ return 'stop';
1694
+ }
1695
+ }
1696
+ }
projects/ui/qwen-code/packages/core/src/core/prompts.test.ts ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, vi, beforeEach } from 'vitest';
8
+ import { getCoreSystemPrompt } from './prompts.js';
9
+ import { isGitRepository } from '../utils/gitUtils.js';
10
+ import fs from 'node:fs';
11
+ import os from 'node:os';
12
+ import path from 'node:path';
13
+ import { GEMINI_CONFIG_DIR } from '../tools/memoryTool.js';
14
+
15
+ // Mock tool names if they are dynamically generated or complex
16
+ vi.mock('../tools/ls', () => ({ LSTool: { Name: 'list_directory' } }));
17
+ vi.mock('../tools/edit', () => ({ EditTool: { Name: 'replace' } }));
18
+ vi.mock('../tools/glob', () => ({ GlobTool: { Name: 'glob' } }));
19
+ vi.mock('../tools/grep', () => ({ GrepTool: { Name: 'search_file_content' } }));
20
+ vi.mock('../tools/read-file', () => ({ ReadFileTool: { Name: 'read_file' } }));
21
+ vi.mock('../tools/read-many-files', () => ({
22
+ ReadManyFilesTool: { Name: 'read_many_files' },
23
+ }));
24
+ vi.mock('../tools/shell', () => ({
25
+ ShellTool: { Name: 'run_shell_command' },
26
+ }));
27
+ vi.mock('../tools/write-file', () => ({
28
+ WriteFileTool: { Name: 'write_file' },
29
+ }));
30
+ vi.mock('../utils/gitUtils', () => ({
31
+ isGitRepository: vi.fn(),
32
+ }));
33
+ vi.mock('node:fs');
34
+
35
+ describe('Core System Prompt (prompts.ts)', () => {
36
+ beforeEach(() => {
37
+ vi.resetAllMocks();
38
+ vi.stubEnv('GEMINI_SYSTEM_MD', undefined);
39
+ vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', undefined);
40
+ });
41
+
42
+ it('should return the base prompt when no userMemory is provided', () => {
43
+ vi.stubEnv('SANDBOX', undefined);
44
+ const prompt = getCoreSystemPrompt();
45
+ expect(prompt).not.toContain('---\n\n'); // Separator should not be present
46
+ expect(prompt).toContain('You are Qwen Code, an interactive CLI agent'); // Check for core content
47
+ expect(prompt).toMatchSnapshot(); // Use snapshot for base prompt structure
48
+ });
49
+
50
+ it('should return the base prompt when userMemory is empty string', () => {
51
+ vi.stubEnv('SANDBOX', undefined);
52
+ const prompt = getCoreSystemPrompt('');
53
+ expect(prompt).not.toContain('---\n\n');
54
+ expect(prompt).toContain('You are Qwen Code, an interactive CLI agent');
55
+ expect(prompt).toMatchSnapshot();
56
+ });
57
+
58
+ it('should return the base prompt when userMemory is whitespace only', () => {
59
+ vi.stubEnv('SANDBOX', undefined);
60
+ const prompt = getCoreSystemPrompt(' \n \t ');
61
+ expect(prompt).not.toContain('---\n\n');
62
+ expect(prompt).toContain('You are Qwen Code, an interactive CLI agent');
63
+ expect(prompt).toMatchSnapshot();
64
+ });
65
+
66
+ it('should append userMemory with separator when provided', () => {
67
+ vi.stubEnv('SANDBOX', undefined);
68
+ const memory = 'This is custom user memory.\nBe extra polite.';
69
+ const expectedSuffix = `\n\n---\n\n${memory}`;
70
+ const prompt = getCoreSystemPrompt(memory);
71
+
72
+ expect(prompt.endsWith(expectedSuffix)).toBe(true);
73
+ expect(prompt).toContain('You are Qwen Code, an interactive CLI agent'); // Ensure base prompt follows
74
+ expect(prompt).toMatchSnapshot(); // Snapshot the combined prompt
75
+ });
76
+
77
+ it('should include sandbox-specific instructions when SANDBOX env var is set', () => {
78
+ vi.stubEnv('SANDBOX', 'true'); // Generic sandbox value
79
+ const prompt = getCoreSystemPrompt();
80
+ expect(prompt).toContain('# Sandbox');
81
+ expect(prompt).not.toContain('# macOS Seatbelt');
82
+ expect(prompt).not.toContain('# Outside of Sandbox');
83
+ expect(prompt).toMatchSnapshot();
84
+ });
85
+
86
+ it('should include seatbelt-specific instructions when SANDBOX env var is "sandbox-exec"', () => {
87
+ vi.stubEnv('SANDBOX', 'sandbox-exec');
88
+ const prompt = getCoreSystemPrompt();
89
+ expect(prompt).toContain('# macOS Seatbelt');
90
+ expect(prompt).not.toContain('# Sandbox');
91
+ expect(prompt).not.toContain('# Outside of Sandbox');
92
+ expect(prompt).toMatchSnapshot();
93
+ });
94
+
95
+ it('should include non-sandbox instructions when SANDBOX env var is not set', () => {
96
+ vi.stubEnv('SANDBOX', undefined); // Ensure it's not set
97
+ const prompt = getCoreSystemPrompt();
98
+ expect(prompt).toContain('# Outside of Sandbox');
99
+ expect(prompt).not.toContain('# Sandbox');
100
+ expect(prompt).not.toContain('# macOS Seatbelt');
101
+ expect(prompt).toMatchSnapshot();
102
+ });
103
+
104
+ it('should include git instructions when in a git repo', () => {
105
+ vi.stubEnv('SANDBOX', undefined);
106
+ vi.mocked(isGitRepository).mockReturnValue(true);
107
+ const prompt = getCoreSystemPrompt();
108
+ expect(prompt).toContain('# Git Repository');
109
+ expect(prompt).toMatchSnapshot();
110
+ });
111
+
112
+ it('should not include git instructions when not in a git repo', () => {
113
+ vi.stubEnv('SANDBOX', undefined);
114
+ vi.mocked(isGitRepository).mockReturnValue(false);
115
+ const prompt = getCoreSystemPrompt();
116
+ expect(prompt).not.toContain('# Git Repository');
117
+ expect(prompt).toMatchSnapshot();
118
+ });
119
+
120
+ describe('GEMINI_SYSTEM_MD environment variable', () => {
121
+ it('should use default prompt when GEMINI_SYSTEM_MD is "false"', () => {
122
+ vi.stubEnv('GEMINI_SYSTEM_MD', 'false');
123
+ const prompt = getCoreSystemPrompt();
124
+ expect(fs.readFileSync).not.toHaveBeenCalled();
125
+ expect(prompt).not.toContain('custom system prompt');
126
+ });
127
+
128
+ it('should use default prompt when GEMINI_SYSTEM_MD is "0"', () => {
129
+ vi.stubEnv('GEMINI_SYSTEM_MD', '0');
130
+ const prompt = getCoreSystemPrompt();
131
+ expect(fs.readFileSync).not.toHaveBeenCalled();
132
+ expect(prompt).not.toContain('custom system prompt');
133
+ });
134
+
135
+ it('should throw error if GEMINI_SYSTEM_MD points to a non-existent file', () => {
136
+ const customPath = '/non/existent/path/system.md';
137
+ vi.stubEnv('GEMINI_SYSTEM_MD', customPath);
138
+ vi.mocked(fs.existsSync).mockReturnValue(false);
139
+ expect(() => getCoreSystemPrompt()).toThrow(
140
+ `missing system prompt file '${path.resolve(customPath)}'`,
141
+ );
142
+ });
143
+
144
+ it('should read from default path when GEMINI_SYSTEM_MD is "true"', () => {
145
+ const defaultPath = path.resolve(
146
+ path.join(GEMINI_CONFIG_DIR, 'system.md'),
147
+ );
148
+ vi.stubEnv('GEMINI_SYSTEM_MD', 'true');
149
+ vi.mocked(fs.existsSync).mockReturnValue(true);
150
+ vi.mocked(fs.readFileSync).mockReturnValue('custom system prompt');
151
+
152
+ const prompt = getCoreSystemPrompt();
153
+ expect(fs.readFileSync).toHaveBeenCalledWith(defaultPath, 'utf8');
154
+ expect(prompt).toBe('custom system prompt');
155
+ });
156
+
157
+ it('should read from default path when GEMINI_SYSTEM_MD is "1"', () => {
158
+ const defaultPath = path.resolve(
159
+ path.join(GEMINI_CONFIG_DIR, 'system.md'),
160
+ );
161
+ vi.stubEnv('GEMINI_SYSTEM_MD', '1');
162
+ vi.mocked(fs.existsSync).mockReturnValue(true);
163
+ vi.mocked(fs.readFileSync).mockReturnValue('custom system prompt');
164
+
165
+ const prompt = getCoreSystemPrompt();
166
+ expect(fs.readFileSync).toHaveBeenCalledWith(defaultPath, 'utf8');
167
+ expect(prompt).toBe('custom system prompt');
168
+ });
169
+
170
// An explicit path value is used verbatim (after resolution) — the
// implementation must not lowercase or otherwise normalize its case.
it('should read from custom path when GEMINI_SYSTEM_MD provides one, preserving case', () => {
  const customPath = path.resolve('/custom/path/SyStEm.Md');
  vi.stubEnv('GEMINI_SYSTEM_MD', customPath);
  vi.mocked(fs.existsSync).mockReturnValue(true);
  vi.mocked(fs.readFileSync).mockReturnValue('custom system prompt');

  const prompt = getCoreSystemPrompt();
  expect(fs.readFileSync).toHaveBeenCalledWith(customPath, 'utf8');
  expect(prompt).toBe('custom system prompt');
});

// Paths beginning with '~/' are expanded against os.homedir() before
// being resolved to an absolute path.
it('should expand tilde in custom path when GEMINI_SYSTEM_MD is set', () => {
  const homeDir = '/Users/test';
  vi.spyOn(os, 'homedir').mockReturnValue(homeDir);
  const customPath = '~/custom/system.md';
  const expectedPath = path.join(homeDir, 'custom/system.md');
  vi.stubEnv('GEMINI_SYSTEM_MD', customPath);
  vi.mocked(fs.existsSync).mockReturnValue(true);
  vi.mocked(fs.readFileSync).mockReturnValue('custom system prompt');

  const prompt = getCoreSystemPrompt();
  expect(fs.readFileSync).toHaveBeenCalledWith(
    path.resolve(expectedPath),
    'utf8',
  );
  expect(prompt).toBe('custom system prompt');
});
197
+ });
198
+
199
// GEMINI_WRITE_SYSTEM_MD mirrors GEMINI_SYSTEM_MD but controls WRITING
// the effective system prompt to disk instead of reading it.
describe('GEMINI_WRITE_SYSTEM_MD environment variable', () => {
  // 'false' and '0' disable writing entirely.
  it('should not write to file when GEMINI_WRITE_SYSTEM_MD is "false"', () => {
    vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', 'false');
    getCoreSystemPrompt();
    expect(fs.writeFileSync).not.toHaveBeenCalled();
  });

  it('should not write to file when GEMINI_WRITE_SYSTEM_MD is "0"', () => {
    vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', '0');
    getCoreSystemPrompt();
    expect(fs.writeFileSync).not.toHaveBeenCalled();
  });

  // 'true' and '1' write to the default <GEMINI_CONFIG_DIR>/system.md.
  it('should write to default path when GEMINI_WRITE_SYSTEM_MD is "true"', () => {
    const defaultPath = path.resolve(
      path.join(GEMINI_CONFIG_DIR, 'system.md'),
    );
    vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', 'true');
    getCoreSystemPrompt();
    expect(fs.writeFileSync).toHaveBeenCalledWith(
      defaultPath,
      expect.any(String),
    );
  });

  it('should write to default path when GEMINI_WRITE_SYSTEM_MD is "1"', () => {
    const defaultPath = path.resolve(
      path.join(GEMINI_CONFIG_DIR, 'system.md'),
    );
    vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', '1');
    getCoreSystemPrompt();
    expect(fs.writeFileSync).toHaveBeenCalledWith(
      defaultPath,
      expect.any(String),
    );
  });

  // Any other value is interpreted as a target file path.
  it('should write to custom path when GEMINI_WRITE_SYSTEM_MD provides one', () => {
    const customPath = path.resolve('/custom/path/system.md');
    vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', customPath);
    getCoreSystemPrompt();
    expect(fs.writeFileSync).toHaveBeenCalledWith(
      customPath,
      expect.any(String),
    );
  });

  // '~/...' paths are expanded against os.homedir() before resolution.
  it('should expand tilde in custom path when GEMINI_WRITE_SYSTEM_MD is set', () => {
    const homeDir = '/Users/test';
    vi.spyOn(os, 'homedir').mockReturnValue(homeDir);
    const customPath = '~/custom/system.md';
    const expectedPath = path.join(homeDir, 'custom/system.md');
    vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', customPath);
    getCoreSystemPrompt();
    expect(fs.writeFileSync).toHaveBeenCalledWith(
      path.resolve(expectedPath),
      expect.any(String),
    );
  });

  // A bare '~' expands to the home directory itself.
  it('should expand tilde in custom path when GEMINI_WRITE_SYSTEM_MD is just ~', () => {
    const homeDir = '/Users/test';
    vi.spyOn(os, 'homedir').mockReturnValue(homeDir);
    const customPath = '~';
    const expectedPath = homeDir;
    vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', customPath);
    getCoreSystemPrompt();
    expect(fs.writeFileSync).toHaveBeenCalledWith(
      path.resolve(expectedPath),
      expect.any(String),
    );
  });
});
272
+ });
273
+
274
+ describe('URL matching with trailing slash compatibility', () => {
275
+ it('should match URLs with and without trailing slash', () => {
276
+ const config = {
277
+ systemPromptMappings: [
278
+ {
279
+ baseUrls: ['https://api.example.com'],
280
+ modelNames: ['gpt-4'],
281
+ template: 'Custom template for example.com',
282
+ },
283
+ {
284
+ baseUrls: ['https://api.openai.com/'],
285
+ modelNames: ['gpt-3.5-turbo'],
286
+ template: 'Custom template for openai.com',
287
+ },
288
+ ],
289
+ };
290
+
291
+ // Simulate environment variables
292
+ const originalEnv = process.env;
293
+
294
+ // Test case 1: No trailing slash in config, actual URL has trailing slash
295
+ process.env = {
296
+ ...originalEnv,
297
+ OPENAI_BASE_URL: 'https://api.example.com/',
298
+ OPENAI_MODEL: 'gpt-4',
299
+ };
300
+
301
+ const result1 = getCoreSystemPrompt(undefined, config);
302
+ expect(result1).toContain('Custom template for example.com');
303
+
304
+ // Test case 2: Config has trailing slash, actual URL has no trailing slash
305
+ process.env = {
306
+ ...originalEnv,
307
+ OPENAI_BASE_URL: 'https://api.openai.com',
308
+ OPENAI_MODEL: 'gpt-3.5-turbo',
309
+ };
310
+
311
+ const result2 = getCoreSystemPrompt(undefined, config);
312
+ expect(result2).toContain('Custom template for openai.com');
313
+
314
+ // Test case 3: No trailing slash in config, actual URL has no trailing slash
315
+ process.env = {
316
+ ...originalEnv,
317
+ OPENAI_BASE_URL: 'https://api.example.com',
318
+ OPENAI_MODEL: 'gpt-4',
319
+ };
320
+
321
+ const result3 = getCoreSystemPrompt(undefined, config);
322
+ expect(result3).toContain('Custom template for example.com');
323
+
324
+ // Test case 4: Config has trailing slash, actual URL has trailing slash
325
+ process.env = {
326
+ ...originalEnv,
327
+ OPENAI_BASE_URL: 'https://api.openai.com/',
328
+ OPENAI_MODEL: 'gpt-3.5-turbo',
329
+ };
330
+
331
+ const result4 = getCoreSystemPrompt(undefined, config);
332
+ expect(result4).toContain('Custom template for openai.com');
333
+
334
+ // Restore original environment variables
335
+ process.env = originalEnv;
336
+ });
337
+
338
+ it('should not match when URLs are different', () => {
339
+ const config = {
340
+ systemPromptMappings: [
341
+ {
342
+ baseUrls: ['https://api.example.com'],
343
+ modelNames: ['gpt-4'],
344
+ template: 'Custom template for example.com',
345
+ },
346
+ ],
347
+ };
348
+
349
+ const originalEnv = process.env;
350
+
351
+ // Test case: URLs do not match
352
+ process.env = {
353
+ ...originalEnv,
354
+ OPENAI_BASE_URL: 'https://api.different.com',
355
+ OPENAI_MODEL: 'gpt-4',
356
+ };
357
+
358
+ const result = getCoreSystemPrompt(undefined, config);
359
+ // Should return default template, not contain custom template
360
+ expect(result).not.toContain('Custom template for example.com');
361
+
362
+ // Restore original environment variables
363
+ process.env = originalEnv;
364
+ });
365
+ });
projects/ui/qwen-code/packages/core/src/core/prompts.ts ADDED
@@ -0,0 +1,542 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import path from 'node:path';
8
+ import fs from 'node:fs';
9
+ import os from 'node:os';
10
+ import { EditTool } from '../tools/edit.js';
11
+ import { GlobTool } from '../tools/glob.js';
12
+ import { GrepTool } from '../tools/grep.js';
13
+ import { ReadFileTool } from '../tools/read-file.js';
14
+ import { ReadManyFilesTool } from '../tools/read-many-files.js';
15
+ import { ShellTool } from '../tools/shell.js';
16
+ import { WriteFileTool } from '../tools/write-file.js';
17
+ import process from 'node:process';
18
+ import { isGitRepository } from '../utils/gitUtils.js';
19
+ import { MemoryTool, GEMINI_CONFIG_DIR } from '../tools/memoryTool.js';
20
+ import { TodoWriteTool } from '../tools/todoWrite.js';
21
+
22
/**
 * A single mapping entry that selects an alternative system-prompt
 * template based on the active OpenAI-compatible endpoint and/or model.
 */
export interface ModelTemplateMapping {
  // Base URLs matched against OPENAI_BASE_URL (trailing slash ignored).
  baseUrls?: string[];
  // Model names matched against OPENAI_MODEL (exact match).
  modelNames?: string[];
  // Template text used in place of the default prompt when this entry matches.
  template?: string;
}

/**
 * Configuration hook for overriding the core system prompt per
 * endpoint/model combination. The first matching mapping wins.
 */
export interface SystemPromptConfig {
  // Ordered list of endpoint/model → template mappings.
  systemPromptMappings?: ModelTemplateMapping[];
}
31
+
32
+ /**
33
+ * Normalizes a URL by removing trailing slash for consistent comparison
34
+ */
35
+ function normalizeUrl(url: string): string {
36
+ return url.endsWith('/') ? url.slice(0, -1) : url;
37
+ }
38
+
39
+ /**
40
+ * Checks if a URL matches any URL in the array, ignoring trailing slashes
41
+ */
42
+ function urlMatches(urlArray: string[], targetUrl: string): boolean {
43
+ const normalizedTarget = normalizeUrl(targetUrl);
44
+ return urlArray.some((url) => normalizeUrl(url) === normalizedTarget);
45
+ }
46
+
47
+ export function getCoreSystemPrompt(
48
+ userMemory?: string,
49
+ config?: SystemPromptConfig,
50
+ ): string {
51
+ // if GEMINI_SYSTEM_MD is set (and not 0|false), override system prompt from file
52
+ // default path is .gemini/system.md but can be modified via custom path in GEMINI_SYSTEM_MD
53
+ let systemMdEnabled = false;
54
+ let systemMdPath = path.resolve(path.join(GEMINI_CONFIG_DIR, 'system.md'));
55
+ const systemMdVar = process.env['GEMINI_SYSTEM_MD'];
56
+ if (systemMdVar) {
57
+ const systemMdVarLower = systemMdVar.toLowerCase();
58
+ if (!['0', 'false'].includes(systemMdVarLower)) {
59
+ systemMdEnabled = true; // enable system prompt override
60
+ if (!['1', 'true'].includes(systemMdVarLower)) {
61
+ let customPath = systemMdVar;
62
+ if (customPath.startsWith('~/')) {
63
+ customPath = path.join(os.homedir(), customPath.slice(2));
64
+ } else if (customPath === '~') {
65
+ customPath = os.homedir();
66
+ }
67
+ systemMdPath = path.resolve(customPath); // use custom path from GEMINI_SYSTEM_MD
68
+ }
69
+ // require file to exist when override is enabled
70
+ if (!fs.existsSync(systemMdPath)) {
71
+ throw new Error(`missing system prompt file '${systemMdPath}'`);
72
+ }
73
+ }
74
+ }
75
+
76
+ // Check for system prompt mappings from global config
77
+ if (config?.systemPromptMappings) {
78
+ const currentModel = process.env['OPENAI_MODEL'] || '';
79
+ const currentBaseUrl = process.env['OPENAI_BASE_URL'] || '';
80
+
81
+ const matchedMapping = config.systemPromptMappings.find((mapping) => {
82
+ const { baseUrls, modelNames } = mapping;
83
+ // Check if baseUrl matches (when specified)
84
+ if (
85
+ baseUrls &&
86
+ modelNames &&
87
+ urlMatches(baseUrls, currentBaseUrl) &&
88
+ modelNames.includes(currentModel)
89
+ ) {
90
+ return true;
91
+ }
92
+
93
+ if (baseUrls && urlMatches(baseUrls, currentBaseUrl) && !modelNames) {
94
+ return true;
95
+ }
96
+ if (modelNames && modelNames.includes(currentModel) && !baseUrls) {
97
+ return true;
98
+ }
99
+
100
+ return false;
101
+ });
102
+
103
+ if (matchedMapping?.template) {
104
+ const isGitRepo = isGitRepository(process.cwd());
105
+
106
+ // Replace placeholders in template
107
+ let template = matchedMapping.template;
108
+ template = template.replace(
109
+ '{RUNTIME_VARS_IS_GIT_REPO}',
110
+ String(isGitRepo),
111
+ );
112
+ template = template.replace(
113
+ '{RUNTIME_VARS_SANDBOX}',
114
+ process.env['SANDBOX'] || '',
115
+ );
116
+
117
+ return template;
118
+ }
119
+ }
120
+
121
+ const basePrompt = systemMdEnabled
122
+ ? fs.readFileSync(systemMdPath, 'utf8')
123
+ : `
124
+ You are Qwen Code, an interactive CLI agent developed by Alibaba Group, specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
125
+
126
+ # Core Mandates
127
+
128
+ - **Conventions:** Rigorously adhere to existing project conventions when reading or modifying code. Analyze surrounding code, tests, and configuration first.
129
+ - **Libraries/Frameworks:** NEVER assume a library/framework is available or appropriate. Verify its established usage within the project (check imports, configuration files like 'package.json', 'Cargo.toml', 'requirements.txt', 'build.gradle', etc., or observe neighboring files) before employing it.
130
+ - **Style & Structure:** Mimic the style (formatting, naming), structure, framework choices, typing, and architectural patterns of existing code in the project.
131
+ - **Idiomatic Changes:** When editing, understand the local context (imports, functions/classes) to ensure your changes integrate naturally and idiomatically.
132
+ - **Comments:** Add code comments sparingly. Focus on *why* something is done, especially for complex logic, rather than *what* is done. Only add high-value comments if necessary for clarity or if requested by the user. Do not edit comments that are separate from the code you are changing. *NEVER* talk to the user or describe your changes through comments.
133
+ - **Proactiveness:** Fulfill the user's request thoroughly, including reasonable, directly implied follow-up actions.
134
+ - **Confirm Ambiguity/Expansion:** Do not take significant actions beyond the clear scope of the request without confirming with the user. If asked *how* to do something, explain first, don't just do it.
135
+ - **Explaining Changes:** After completing a code modification or file operation *do not* provide summaries unless asked.
136
+ - **Path Construction:** Before using any file system tool (e.g., ${ReadFileTool.Name}' or '${WriteFileTool.Name}'), you must construct the full absolute path for the file_path argument. Always combine the absolute path of the project's root directory with the file's path relative to the root. For example, if the project root is /path/to/project/ and the file is foo/bar/baz.txt, the final path you must use is /path/to/project/foo/bar/baz.txt. If the user provides a relative path, you must resolve it against the root directory to create an absolute path.
137
+ - **Do Not revert changes:** Do not revert changes to the codebase unless asked to do so by the user. Only revert changes made by you if they have resulted in an error or if the user has explicitly asked you to revert the changes.
138
+
139
+ # Task Management
140
+ You have access to the ${TodoWriteTool.Name} tool to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.
141
+ These tools are also EXTREMELY helpful for planning tasks, and for breaking down larger complex tasks into smaller steps. If you do not use this tool when planning, you may forget to do important tasks - and that is unacceptable.
142
+
143
+ It is critical that you mark todos as completed as soon as you are done with a task. Do not batch up multiple tasks before marking them as completed.
144
+
145
+ Examples:
146
+
147
+ <example>
148
+ user: Run the build and fix any type errors
149
+ assistant: I'm going to use the ${TodoWriteTool.Name} tool to write the following items to the todo list:
150
+ - Run the build
151
+ - Fix any type errors
152
+
153
+ I'm now going to run the build using Bash.
154
+
155
+ Looks like I found 10 type errors. I'm going to use the ${TodoWriteTool.Name} tool to write 10 items to the todo list.
156
+
157
+ marking the first todo as in_progress
158
+
159
+ Let me start working on the first item...
160
+
161
+ The first item has been fixed, let me mark the first todo as completed, and move on to the second item...
162
+ ..
163
+ ..
164
+ </example>
165
+ In the above example, the assistant completes all the tasks, including the 10 error fixes and running the build and fixing all errors.
166
+
167
+ <example>
168
+ user: Help me write a new feature that allows users to track their usage metrics and export them to various formats
169
+
170
+ A: I'll help you implement a usage metrics tracking and export feature. Let me first use the ${TodoWriteTool.Name} tool to plan this task.
171
+ Adding the following todos to the todo list:
172
+ 1. Research existing metrics tracking in the codebase
173
+ 2. Design the metrics collection system
174
+ 3. Implement core metrics tracking functionality
175
+ 4. Create export functionality for different formats
176
+
177
+ Let me start by researching the existing codebase to understand what metrics we might already be tracking and how we can build on that.
178
+
179
+ I'm going to search for any existing metrics or telemetry code in the project.
180
+
181
+ I've found some existing telemetry code. Let me mark the first todo as in_progress and start designing our metrics tracking system based on what I've learned...
182
+
183
+ [Assistant continues implementing the feature step by step, marking todos as in_progress and completed as they go]
184
+ </example>
185
+
186
+
187
+ # Primary Workflows
188
+
189
+ ## Software Engineering Tasks
190
+ When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
191
+ - **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the '${TodoWriteTool.Name}' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
192
+ - **Implement:** Begin implementing the plan while gathering additional context as needed. Use '${GrepTool.Name}', '${GlobTool.Name}', '${ReadFileTool.Name}', and '${ReadManyFilesTool.Name}' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., '${EditTool.Name}', '${WriteFileTool.Name}' '${ShellTool.Name}' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
193
+ - **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
194
+ - **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
195
+ - **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
196
+
197
+ **Key Principle:** Start with a reasonable plan based on available information, then adapt as you learn. Users prefer seeing progress quickly rather than waiting for perfect understanding.
198
+
199
+ - Tool results and user messages may include <system-reminder> tags. <system-reminder> tags contain useful information and reminders. They are NOT part of the user's provided input or the tool result.
200
+
201
+ IMPORTANT: Always use the ${TodoWriteTool.Name} tool to plan and track tasks throughout the conversation.
202
+
203
+ ## New Applications
204
+
205
+ **Goal:** Autonomously implement and deliver a visually appealing, substantially complete, and functional prototype. Utilize all tools at your disposal to implement the application. Some tools you may especially find useful are '${WriteFileTool.Name}', '${EditTool.Name}' and '${ShellTool.Name}'.
206
+
207
+ 1. **Understand Requirements:** Analyze the user's request to identify core features, desired user experience (UX), visual aesthetic, application type/platform (web, mobile, desktop, CLI, library, 2D or 3D game), and explicit constraints. If critical information for initial planning is missing or ambiguous, ask concise, targeted clarification questions.
208
+ 2. **Propose Plan:** Formulate an internal development plan. Present a clear, concise, high-level summary to the user. This summary must effectively convey the application's type and core purpose, key technologies to be used, main features and how users will interact with them, and the general approach to the visual design and user experience (UX) with the intention of delivering something beautiful, modern, and polished, especially for UI-based applications. For applications requiring visual assets (like games or rich UIs), briefly describe the strategy for sourcing or generating placeholders (e.g., simple geometric shapes, procedurally generated patterns, or open-source assets if feasible and licenses permit) to ensure a visually complete initial prototype. Ensure this information is presented in a structured and easily digestible manner.
209
+ - When key technologies aren't specified, prefer the following:
210
+ - **Websites (Frontend):** React (JavaScript/TypeScript) with Bootstrap CSS, incorporating Material Design principles for UI/UX.
211
+ - **Back-End APIs:** Node.js with Express.js (JavaScript/TypeScript) or Python with FastAPI.
212
+ - **Full-stack:** Next.js (React/Node.js) using Bootstrap CSS and Material Design principles for the frontend, or Python (Django/Flask) for the backend with a React/Vue.js frontend styled with Bootstrap CSS and Material Design principles.
213
+ - **CLIs:** Python or Go.
214
+ - **Mobile App:** Compose Multiplatform (Kotlin Multiplatform) or Flutter (Dart) using Material Design libraries and principles, when sharing code between Android and iOS. Jetpack Compose (Kotlin JVM) with Material Design principles or SwiftUI (Swift) for native apps targeted at either Android or iOS, respectively.
215
+ - **3d Games:** HTML/CSS/JavaScript with Three.js.
216
+ - **2d Games:** HTML/CSS/JavaScript.
217
+ 3. **User Approval:** Obtain user approval for the proposed plan.
218
+ 4. **Implementation:** Use the '${TodoWriteTool.Name}' tool to convert the approved plan into a structured todo list with specific, actionable tasks, then autonomously implement each task utilizing all available tools. When starting ensure you scaffold the application using '${ShellTool.Name}' for commands like 'npm init', 'npx create-react-app'. Aim for full scope completion. Proactively create or source necessary placeholder assets (e.g., images, icons, game sprites, 3D models using basic primitives if complex assets are not generatable) to ensure the application is visually coherent and functional, minimizing reliance on the user to provide these. If the model can generate simple assets (e.g., a uniformly colored square sprite, a simple 3D cube), it should do so. Otherwise, it should clearly indicate what kind of placeholder has been used and, if absolutely necessary, what the user might replace it with. Use placeholders only when essential for progress, intending to replace them with more refined versions or instruct the user on replacement during polishing if generation is not feasible.
219
+ 5. **Verify:** Review work against the original request, the approved plan. Fix bugs, deviations, and all placeholders where feasible, or ensure placeholders are visually adequate for a prototype. Ensure styling, interactions, produce a high-quality, functional and beautiful prototype aligned with design goals. Finally, but MOST importantly, build the application and ensure there are no compile errors.
220
+ 6. **Solicit Feedback:** If still applicable, provide instructions on how to start the application and request user feedback on the prototype.
221
+
222
+ # Operational Guidelines
223
+
224
+ ## Tone and Style (CLI Interaction)
225
+ - **Concise & Direct:** Adopt a professional, direct, and concise tone suitable for a CLI environment.
226
+ - **Minimal Output:** Aim for fewer than 3 lines of text output (excluding tool use/code generation) per response whenever practical. Focus strictly on the user's query.
227
+ - **Clarity over Brevity (When Needed):** While conciseness is key, prioritize clarity for essential explanations or when seeking necessary clarification if a request is ambiguous.
228
+ - **No Chitchat:** Avoid conversational filler, preambles ("Okay, I will now..."), or postambles ("I have finished the changes..."). Get straight to the action or answer.
229
+ - **Formatting:** Use GitHub-flavored Markdown. Responses will be rendered in monospace.
230
+ - **Tools vs. Text:** Use tools for actions, text output *only* for communication. Do not add explanatory comments within tool calls or code blocks unless specifically part of the required code/command itself.
231
+ - **Handling Inability:** If unable/unwilling to fulfill a request, state so briefly (1-2 sentences) without excessive justification. Offer alternatives if appropriate.
232
+
233
+ ## Security and Safety Rules
234
+ - **Explain Critical Commands:** Before executing commands with '${ShellTool.Name}' that modify the file system, codebase, or system state, you *must* provide a brief explanation of the command's purpose and potential impact. Prioritize user understanding and safety. You should not ask permission to use the tool; the user will be presented with a confirmation dialogue upon use (you do not need to tell them this).
235
+ - **Security First:** Always apply security best practices. Never introduce code that exposes, logs, or commits secrets, API keys, or other sensitive information.
236
+
237
+ ## Tool Usage
238
+ - **File Paths:** Always use absolute paths when referring to files with tools like '${ReadFileTool.Name}' or '${WriteFileTool.Name}'. Relative paths are not supported. You must provide an absolute path.
239
+ - **Parallelism:** Execute multiple independent tool calls in parallel when feasible (i.e. searching the codebase).
240
+ - **Command Execution:** Use the '${ShellTool.Name}' tool for running shell commands, remembering the safety rule to explain modifying commands first.
241
+ - **Background Processes:** Use background processes (via \`&\`) for commands that are unlikely to stop on their own, e.g. \`node server.js &\`. If unsure, ask the user.
242
+ - **Interactive Commands:** Try to avoid shell commands that are likely to require user interaction (e.g. \`git rebase -i\`). Use non-interactive versions of commands (e.g. \`npm init -y\` instead of \`npm init\`) when available, and otherwise remind the user that interactive shell commands are not supported and may cause hangs until canceled by the user.
243
+ - **Task Management:** Use the '${TodoWriteTool.Name}' tool proactively for complex, multi-step tasks to track progress and provide visibility to users. This tool helps organize work systematically and ensures no requirements are missed.
244
+ - **Remembering Facts:** Use the '${MemoryTool.Name}' tool to remember specific, *user-related* facts or preferences when the user explicitly asks, or when they state a clear, concise piece of information that would help personalize or streamline *your future interactions with them* (e.g., preferred coding style, common project paths they use, personal tool aliases). This tool is for user-specific information that should persist across sessions. Do *not* use it for general project context or information. If unsure whether to save something, you can ask the user, "Should I remember that for you?"
245
+ - **Respect User Confirmations:** Most tool calls (also denoted as 'function calls') will first require confirmation from the user, where they will either approve or cancel the function call. If a user cancels a function call, respect their choice and do _not_ try to make the function call again. It is okay to request the tool call again _only_ if the user requests that same tool call on a subsequent prompt. When a user cancels a function call, assume best intentions from the user and consider inquiring if they prefer any alternative paths forward.
246
+
247
+ ## Interaction Details
248
+ - **Help Command:** The user can use '/help' to display help information.
249
+ - **Feedback:** To report a bug or provide feedback, please use the /bug command.
250
+
251
+ ${(function () {
252
+ // Determine sandbox status based on environment variables
253
+ const isSandboxExec = process.env['SANDBOX'] === 'sandbox-exec';
254
+ const isGenericSandbox = !!process.env['SANDBOX']; // Check if SANDBOX is set to any non-empty value
255
+
256
+ if (isSandboxExec) {
257
+ return `
258
+ # macOS Seatbelt
259
+ You are running under macos seatbelt with limited access to files outside the project directory or system temp directory, and with limited access to host system resources such as ports. If you encounter failures that could be due to MacOS Seatbelt (e.g. if a command fails with 'Operation not permitted' or similar error), as you report the error to the user, also explain why you think it could be due to MacOS Seatbelt, and how the user may need to adjust their Seatbelt profile.
260
+ `;
261
+ } else if (isGenericSandbox) {
262
+ return `
263
+ # Sandbox
264
+ You are running in a sandbox container with limited access to files outside the project directory or system temp directory, and with limited access to host system resources such as ports. If you encounter failures that could be due to sandboxing (e.g. if a command fails with 'Operation not permitted' or similar error), when you report the error to the user, also explain why you think it could be due to sandboxing, and how the user may need to adjust their sandbox configuration.
265
+ `;
266
+ } else {
267
+ return `
268
+ # Outside of Sandbox
269
+ You are running outside of a sandbox container, directly on the user's system. For critical commands that are particularly likely to modify the user's system outside of the project directory or system temp directory, as you explain the command to the user (per the Explain Critical Commands rule above), also remind the user to consider enabling sandboxing.
270
+ `;
271
+ }
272
+ })()}
273
+
274
+ ${(function () {
275
+ if (isGitRepository(process.cwd())) {
276
+ return `
277
+ # Git Repository
278
+ - The current working (project) directory is being managed by a git repository.
279
+ - When asked to commit changes or prepare a commit, always start by gathering information using shell commands:
280
+ - \`git status\` to ensure that all relevant files are tracked and staged, using \`git add ...\` as needed.
281
+ - \`git diff HEAD\` to review all changes (including unstaged changes) to tracked files in work tree since last commit.
282
+ - \`git diff --staged\` to review only staged changes when a partial commit makes sense or was requested by the user.
283
+ - \`git log -n 3\` to review recent commit messages and match their style (verbosity, formatting, signature line, etc.)
284
+ - Combine shell commands whenever possible to save time/steps, e.g. \`git status && git diff HEAD && git log -n 3\`.
285
+ - Always propose a draft commit message. Never just ask the user to give you the full commit message.
286
+ - Prefer commit messages that are clear, concise, and focused more on "why" and less on "what".
287
+ - Keep the user informed and ask for clarification or confirmation where needed.
288
+ - After each commit, confirm that it was successful by running \`git status\`.
289
+ - If a commit fails, never attempt to work around the issues without being asked to do so.
290
+ - Never push changes to a remote repository without being asked explicitly by the user.
291
+ `;
292
+ }
293
+ return '';
294
+ })()}
295
+
296
+ # Examples (Illustrating Tone and Workflow)
297
+ <example>
298
+ user: 1 + 2
299
+ model: 3
300
+ </example>
301
+
302
+ <example>
303
+ user: is 13 a prime number?
304
+ model: true
305
+ </example>
306
+
307
+ <example>
308
+ user: start the server implemented in server.js
309
+ model:
310
+ <tool_call>
311
+ <function=run_shell_command>
312
+ <parameter=command>
313
+ node server.js &
314
+ </parameter>
315
+ </function>
316
+ </tool_call>
317
+ </example>
318
+
319
+ <example>
320
+ user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib.
321
+ model: Okay, I can refactor 'src/auth.py'.
322
+ First, I'll analyze the code and check for a test safety net before planning any changes.
323
+ <tool_call>
324
+ <function=glob>
325
+ <parameter=path>
326
+ tests/test_auth.py
327
+ </parameter>
328
+ </function>
329
+ </tool_call>
330
+ <tool_call>
331
+ <function=read_file>
332
+ <parameter=path>
333
+ /path/to/tests/test_auth.py
334
+ </parameter>
335
+ <parameter=offset>
336
+ 0
337
+ </parameter>
338
+ <parameter=limit>
339
+ 10
340
+ </parameter>
341
+ </function>
342
+ </tool_call>
343
+ (After analysis)
344
+ Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring.
345
+ I'll also confirm 'requests' is a dependency.
346
+ <tool_call>
347
+ <function=read_file>
348
+ <parameter=path>
349
+ /path/to/requirements.txt
350
+ </parameter>
351
+ </function>
352
+ </tool_call>
353
+ (After analysis)
354
+ Looks good, 'requests' is available.
355
+
356
+ Here's the plan:
357
+ 1. Replace the 'urllib' calls with 'requests'.
358
+ 2. Add proper 'try...except' error handling for the new network calls.
359
+ 3. Remove the old 'urllib' import.
360
+ 4. Run the project's linter and tests to verify the changes.
361
+
362
+ <tool_call>
363
+ <function=replace>
364
+ <parameter=path>
365
+ src/auth.py
366
+ </parameter>
367
+ <parameter=old_content>
368
+ (old code content)
369
+ </parameter>
370
+ <parameter=new_content>
371
+ (new code content)
372
+ </parameter>
373
+ </function>
374
+ </tool_call>
375
+ Refactoring complete. Running verification...
376
+ <tool_call>
377
+ <function=run_shell_command>
378
+ <parameter=command>
379
+ ruff check src/auth.py && pytest
380
+ </parameter>
381
+ </function>
382
+ </tool_call>
383
+ (After verification passes)
384
+ All checks passed. This is a stable checkpoint.
385
+ Would you like me to write a commit message and commit these changes?
386
+ </example>
387
+
388
+ <example>
389
+ user: Delete the temp directory.
390
+ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete the directory and all its contents.
391
+ </example>
392
+
393
+ <example>
394
+ user: Write tests for someFile.ts
395
+ model:
396
+ Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
397
+ <tool_call>
398
+ <function=read_file>
399
+ <parameter=path>
400
+ /path/to/someFile.ts
401
+ </parameter>
402
+ </function>
403
+ </tool_call>
404
+ Now I'll look for existing or related test files to understand current testing conventions and dependencies.
405
+ <tool_call>
406
+ <function>read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] (assuming someFile.ts is in the src directory)
407
+ </tool_call>
408
+ (After reviewing existing tests and the file content)
409
+ <tool_call>
410
+ <function=write_file>
411
+ <parameter=path>
412
+ /path/to/someFile.test.ts
413
+ </parameter>
414
+ </function>
415
+ </tool_call>
416
+ I've written the tests. Now I'll run the project's test command to verify them.
417
+ <tool_call>
418
+ <function=run_shell_command>
419
+ <parameter=command>
420
+ npm run test
421
+ </parameter>
422
+ </function>
423
+ </tool_call>
424
+ (After verification passes)
425
+ All checks passed. This is a stable checkpoint.
426
+ </example>
427
+
428
+ <example>
429
+ user: Where are all the 'app.config' files in this project? I need to check their settings.
430
+ model:
431
+ <tool_call>
432
+ <function=glob>
433
+ <parameter=pattern>
434
+ ./**/app.config
435
+ </parameter>
436
+ </function>
437
+ </tool_call>
438
+ (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config'])
439
+ I found the following 'app.config' files:
440
+ - /path/to/moduleA/app.config
441
+ - /path/to/moduleB/app.config
442
+ To help you check their settings, I can read their contents. Which one would you like to start with, or should I read all of them?
443
+ </example>
444
+
445
+ # Final Reminder
446
+ Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use '${ReadFileTool.Name}' or '${ReadManyFilesTool.Name}' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved.
447
+ `.trim();
448
+
449
+ // if GEMINI_WRITE_SYSTEM_MD is set (and not 0|false), write base system prompt to file
450
+ const writeSystemMdVar = process.env['GEMINI_WRITE_SYSTEM_MD'];
451
+ if (writeSystemMdVar) {
452
+ const writeSystemMdVarLower = writeSystemMdVar.toLowerCase();
453
+ if (!['0', 'false'].includes(writeSystemMdVarLower)) {
454
+ if (['1', 'true'].includes(writeSystemMdVarLower)) {
455
+ fs.mkdirSync(path.dirname(systemMdPath), { recursive: true });
456
+ fs.writeFileSync(systemMdPath, basePrompt); // write to default path, can be modified via GEMINI_SYSTEM_MD
457
+ } else {
458
+ let customPath = writeSystemMdVar;
459
+ if (customPath.startsWith('~/')) {
460
+ customPath = path.join(os.homedir(), customPath.slice(2));
461
+ } else if (customPath === '~') {
462
+ customPath = os.homedir();
463
+ }
464
+ const resolvedPath = path.resolve(customPath);
465
+ fs.mkdirSync(path.dirname(resolvedPath), { recursive: true });
466
+ fs.writeFileSync(resolvedPath, basePrompt); // write to custom path from GEMINI_WRITE_SYSTEM_MD
467
+ }
468
+ }
469
+ }
470
+
471
+ const memorySuffix =
472
+ userMemory && userMemory.trim().length > 0
473
+ ? `\n\n---\n\n${userMemory.trim()}`
474
+ : '';
475
+
476
+ return `${basePrompt}${memorySuffix}`;
477
+ }
478
+
479
+ /**
480
+ * Provides the system prompt for the history compression process.
481
+ * This prompt instructs the model to act as a specialized state manager,
482
+ * think in a scratchpad, and produce a structured XML summary.
483
+ */
484
+ export function getCompressionPrompt(): string {
485
+ return `
486
+ You are the component that summarizes internal chat history into a given structure.
487
+
488
+ When the conversation history grows too large, you will be invoked to distill the entire history into a concise, structured XML snapshot. This snapshot is CRITICAL, as it will become the agent's *only* memory of the past. The agent will resume its work based solely on this snapshot. All crucial details, plans, errors, and user directives MUST be preserved.
489
+
490
+ First, you will think through the entire history in a private <scratchpad>. Review the user's overall goal, the agent's actions, tool outputs, file modifications, and any unresolved questions. Identify every piece of information that is essential for future actions.
491
+
492
+ After your reasoning is complete, generate the final <state_snapshot> XML object. Be incredibly dense with information. Omit any irrelevant conversational filler.
493
+
494
+ The structure MUST be as follows:
495
+
496
+ <state_snapshot>
497
+ <overall_goal>
498
+ <!-- A single, concise sentence describing the user's high-level objective. -->
499
+ <!-- Example: "Refactor the authentication service to use a new JWT library." -->
500
+ </overall_goal>
501
+
502
+ <key_knowledge>
503
+ <!-- Crucial facts, conventions, and constraints the agent must remember based on the conversation history and interaction with the user. Use bullet points. -->
504
+ <!-- Example:
505
+ - Build Command: \`npm run build\`
506
+ - Testing: Tests are run with \`npm test\`. Test files must end in \`.test.ts\`.
507
+ - API Endpoint: The primary API endpoint is \`https://api.example.com/v2\`.
508
+
509
+ -->
510
+ </key_knowledge>
511
+
512
+ <file_system_state>
513
+ <!-- List files that have been created, read, modified, or deleted. Note their status and critical learnings. -->
514
+ <!-- Example:
515
+ - CWD: \`/home/user/project/src\`
516
+ - READ: \`package.json\` - Confirmed 'axios' is a dependency.
517
+ - MODIFIED: \`services/auth.ts\` - Replaced 'jsonwebtoken' with 'jose'.
518
+ - CREATED: \`tests/new-feature.test.ts\` - Initial test structure for the new feature.
519
+ -->
520
+ </file_system_state>
521
+
522
+ <recent_actions>
523
+ <!-- A summary of the last few significant agent actions and their outcomes. Focus on facts. -->
524
+ <!-- Example:
525
+ - Ran \`grep 'old_function'\` which returned 3 results in 2 files.
526
+ - Ran \`npm run test\`, which failed due to a snapshot mismatch in \`UserProfile.test.ts\`.
527
+ - Ran \`ls -F static/\` and discovered image assets are stored as \`.webp\`.
528
+ -->
529
+ </recent_actions>
530
+
531
+ <current_plan>
532
+ <!-- The agent's step-by-step plan. Mark completed steps. -->
533
+ <!-- Example:
534
+ 1. [DONE] Identify all files using the deprecated 'UserAPI'.
535
+ 2. [IN PROGRESS] Refactor \`src/components/UserProfile.tsx\` to use the new 'ProfileAPI'.
536
+ 3. [TODO] Refactor the remaining files.
537
+ 4. [TODO] Update tests to reflect the API change.
538
+ -->
539
+ </current_plan>
540
+ </state_snapshot>
541
+ `.trim();
542
+ }
projects/ui/qwen-code/packages/core/src/core/subagent.test.ts ADDED
@@ -0,0 +1,813 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { vi, describe, it, expect, beforeEach, Mock, afterEach } from 'vitest';
8
+ import {
9
+ ContextState,
10
+ SubAgentScope,
11
+ SubagentTerminateMode,
12
+ PromptConfig,
13
+ ModelConfig,
14
+ RunConfig,
15
+ OutputConfig,
16
+ ToolConfig,
17
+ } from './subagent.js';
18
+ import { Config, ConfigParameters } from '../config/config.js';
19
+ import { GeminiChat } from './geminiChat.js';
20
+ import { createContentGenerator } from './contentGenerator.js';
21
+ import { getEnvironmentContext } from '../utils/environmentContext.js';
22
+ import { executeToolCall } from './nonInteractiveToolExecutor.js';
23
+ import { ToolRegistry } from '../tools/tool-registry.js';
24
+ import { DEFAULT_GEMINI_MODEL } from '../config/models.js';
25
+ import {
26
+ Content,
27
+ FunctionCall,
28
+ FunctionDeclaration,
29
+ GenerateContentConfig,
30
+ Type,
31
+ } from '@google/genai';
32
+ import { ToolErrorType } from '../tools/tool-error.js';
33
+
34
+ vi.mock('./geminiChat.js');
35
+ vi.mock('./contentGenerator.js');
36
+ vi.mock('../utils/environmentContext.js');
37
+ vi.mock('./nonInteractiveToolExecutor.js');
38
+ vi.mock('../ide/ide-client.js');
39
+
40
+ async function createMockConfig(
41
+ toolRegistryMocks = {},
42
+ ): Promise<{ config: Config; toolRegistry: ToolRegistry }> {
43
+ const configParams: ConfigParameters = {
44
+ sessionId: 'test-session',
45
+ model: DEFAULT_GEMINI_MODEL,
46
+ targetDir: '.',
47
+ debugMode: false,
48
+ cwd: process.cwd(),
49
+ };
50
+ const config = new Config(configParams);
51
+ await config.initialize();
52
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
53
+ await config.refreshAuth('test-auth' as any);
54
+
55
+ // Mock ToolRegistry
56
+ const mockToolRegistry = {
57
+ getTool: vi.fn(),
58
+ getFunctionDeclarationsFiltered: vi.fn().mockReturnValue([]),
59
+ ...toolRegistryMocks,
60
+ } as unknown as ToolRegistry;
61
+
62
+ vi.spyOn(config, 'getToolRegistry').mockReturnValue(mockToolRegistry);
63
+ return { config, toolRegistry: mockToolRegistry };
64
+ }
65
+
66
+ // Helper to simulate LLM responses (sequence of tool calls over multiple turns)
67
+ const createMockStream = (
68
+ functionCallsList: Array<FunctionCall[] | 'stop'>,
69
+ ) => {
70
+ let index = 0;
71
+ return vi.fn().mockImplementation(() => {
72
+ const response = functionCallsList[index] || 'stop';
73
+ index++;
74
+ return (async function* () {
75
+ if (response === 'stop') {
76
+ // When stopping, the model might return text, but the subagent logic primarily cares about the absence of functionCalls.
77
+ yield { text: 'Done.' };
78
+ } else if (response.length > 0) {
79
+ yield { functionCalls: response };
80
+ } else {
81
+ yield { text: 'Done.' }; // Handle empty array also as stop
82
+ }
83
+ })();
84
+ });
85
+ };
86
+
87
+ describe('subagent.ts', () => {
88
+ describe('ContextState', () => {
89
+ it('should set and get values correctly', () => {
90
+ const context = new ContextState();
91
+ context.set('key1', 'value1');
92
+ context.set('key2', 123);
93
+ expect(context.get('key1')).toBe('value1');
94
+ expect(context.get('key2')).toBe(123);
95
+ expect(context.get_keys()).toEqual(['key1', 'key2']);
96
+ });
97
+
98
+ it('should return undefined for missing keys', () => {
99
+ const context = new ContextState();
100
+ expect(context.get('missing')).toBeUndefined();
101
+ });
102
+ });
103
+
104
+ describe('SubAgentScope', () => {
105
+ let mockSendMessageStream: Mock;
106
+
107
+ const defaultModelConfig: ModelConfig = {
108
+ model: 'gemini-1.5-flash-latest',
109
+ temp: 0.5, // Specific temp to test override
110
+ top_p: 1,
111
+ };
112
+
113
+ const defaultRunConfig: RunConfig = {
114
+ max_time_minutes: 5,
115
+ max_turns: 10,
116
+ };
117
+
118
+ beforeEach(async () => {
119
+ vi.clearAllMocks();
120
+
121
+ vi.mocked(getEnvironmentContext).mockResolvedValue([
122
+ { text: 'Env Context' },
123
+ ]);
124
+ vi.mocked(createContentGenerator).mockResolvedValue({
125
+ getGenerativeModel: vi.fn(),
126
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
127
+ } as any);
128
+
129
+ mockSendMessageStream = vi.fn();
130
+ // We mock the implementation of the constructor.
131
+ vi.mocked(GeminiChat).mockImplementation(
132
+ () =>
133
+ ({
134
+ sendMessageStream: mockSendMessageStream,
135
+ }) as unknown as GeminiChat,
136
+ );
137
+ });
138
+
139
+ afterEach(() => {
140
+ vi.restoreAllMocks();
141
+ });
142
+
143
+ // Helper to safely access generationConfig from mock calls
144
+ const getGenerationConfigFromMock = (
145
+ callIndex = 0,
146
+ ): GenerateContentConfig & { systemInstruction?: string | Content } => {
147
+ const callArgs = vi.mocked(GeminiChat).mock.calls[callIndex];
148
+ const generationConfig = callArgs?.[2];
149
+ // Ensure it's defined before proceeding
150
+ expect(generationConfig).toBeDefined();
151
+ if (!generationConfig) throw new Error('generationConfig is undefined');
152
+ return generationConfig as GenerateContentConfig & {
153
+ systemInstruction?: string | Content;
154
+ };
155
+ };
156
+
157
+ describe('create (Tool Validation)', () => {
158
+ const promptConfig: PromptConfig = { systemPrompt: 'Test prompt' };
159
+
160
+ it('should create a SubAgentScope successfully with minimal config', async () => {
161
+ const { config } = await createMockConfig();
162
+ const scope = await SubAgentScope.create(
163
+ 'test-agent',
164
+ config,
165
+ promptConfig,
166
+ defaultModelConfig,
167
+ defaultRunConfig,
168
+ );
169
+ expect(scope).toBeInstanceOf(SubAgentScope);
170
+ });
171
+
172
+ it('should throw an error if a tool requires confirmation', async () => {
173
+ const mockTool = {
174
+ schema: { parameters: { type: Type.OBJECT, properties: {} } },
175
+ build: vi.fn().mockReturnValue({
176
+ shouldConfirmExecute: vi.fn().mockResolvedValue({
177
+ type: 'exec',
178
+ title: 'Confirm',
179
+ command: 'rm -rf /',
180
+ }),
181
+ }),
182
+ };
183
+
184
+ const { config } = await createMockConfig({
185
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
186
+ getTool: vi.fn().mockReturnValue(mockTool as any),
187
+ });
188
+
189
+ const toolConfig: ToolConfig = { tools: ['risky_tool'] };
190
+
191
+ await expect(
192
+ SubAgentScope.create(
193
+ 'test-agent',
194
+ config,
195
+ promptConfig,
196
+ defaultModelConfig,
197
+ defaultRunConfig,
198
+ toolConfig,
199
+ ),
200
+ ).rejects.toThrow(
201
+ 'Tool "risky_tool" requires user confirmation and cannot be used in a non-interactive subagent.',
202
+ );
203
+ });
204
+
205
+ it('should succeed if tools do not require confirmation', async () => {
206
+ const mockTool = {
207
+ schema: { parameters: { type: Type.OBJECT, properties: {} } },
208
+ build: vi.fn().mockReturnValue({
209
+ shouldConfirmExecute: vi.fn().mockResolvedValue(null),
210
+ }),
211
+ };
212
+ const { config } = await createMockConfig({
213
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
214
+ getTool: vi.fn().mockReturnValue(mockTool as any),
215
+ });
216
+
217
+ const toolConfig: ToolConfig = { tools: ['safe_tool'] };
218
+
219
+ const scope = await SubAgentScope.create(
220
+ 'test-agent',
221
+ config,
222
+ promptConfig,
223
+ defaultModelConfig,
224
+ defaultRunConfig,
225
+ toolConfig,
226
+ );
227
+ expect(scope).toBeInstanceOf(SubAgentScope);
228
+ });
229
+
230
+ it('should skip interactivity check and warn for tools with required parameters', async () => {
231
+ const consoleWarnSpy = vi
232
+ .spyOn(console, 'warn')
233
+ .mockImplementation(() => {});
234
+
235
+ const mockToolWithParams = {
236
+ schema: {
237
+ parameters: {
238
+ type: Type.OBJECT,
239
+ properties: {
240
+ path: { type: Type.STRING },
241
+ },
242
+ required: ['path'],
243
+ },
244
+ },
245
+ // build should not be called, but we mock it to be safe
246
+ build: vi.fn(),
247
+ };
248
+
249
+ const { config } = await createMockConfig({
250
+ getTool: vi.fn().mockReturnValue(mockToolWithParams),
251
+ });
252
+
253
+ const toolConfig: ToolConfig = { tools: ['tool_with_params'] };
254
+
255
+ // The creation should succeed without throwing
256
+ const scope = await SubAgentScope.create(
257
+ 'test-agent',
258
+ config,
259
+ promptConfig,
260
+ defaultModelConfig,
261
+ defaultRunConfig,
262
+ toolConfig,
263
+ );
264
+
265
+ expect(scope).toBeInstanceOf(SubAgentScope);
266
+
267
+ // Check that the warning was logged
268
+ expect(consoleWarnSpy).toHaveBeenCalledWith(
269
+ 'Cannot check tool "tool_with_params" for interactivity because it requires parameters. Assuming it is safe for non-interactive use.',
270
+ );
271
+
272
+ // Ensure build was never called
273
+ expect(mockToolWithParams.build).not.toHaveBeenCalled();
274
+
275
+ consoleWarnSpy.mockRestore();
276
+ });
277
+ });
278
+
279
+ describe('runNonInteractive - Initialization and Prompting', () => {
280
+ it('should correctly template the system prompt and initialize GeminiChat', async () => {
281
+ const { config } = await createMockConfig();
282
+
283
+ vi.mocked(GeminiChat).mockClear();
284
+
285
+ const promptConfig: PromptConfig = {
286
+ systemPrompt: 'Hello ${name}, your task is ${task}.',
287
+ };
288
+ const context = new ContextState();
289
+ context.set('name', 'Agent');
290
+ context.set('task', 'Testing');
291
+
292
+ // Model stops immediately
293
+ mockSendMessageStream.mockImplementation(createMockStream(['stop']));
294
+
295
+ const scope = await SubAgentScope.create(
296
+ 'test-agent',
297
+ config,
298
+ promptConfig,
299
+ defaultModelConfig,
300
+ defaultRunConfig,
301
+ );
302
+
303
+ await scope.runNonInteractive(context);
304
+
305
+ // Check if GeminiChat was initialized correctly by the subagent
306
+ expect(GeminiChat).toHaveBeenCalledTimes(1);
307
+ const callArgs = vi.mocked(GeminiChat).mock.calls[0];
308
+
309
+ // Check Generation Config
310
+ const generationConfig = getGenerationConfigFromMock();
311
+
312
+ // Check temperature override
313
+ expect(generationConfig.temperature).toBe(defaultModelConfig.temp);
314
+ expect(generationConfig.systemInstruction).toContain(
315
+ 'Hello Agent, your task is Testing.',
316
+ );
317
+ expect(generationConfig.systemInstruction).toContain(
318
+ 'Important Rules:',
319
+ );
320
+
321
+ // Check History (should include environment context)
322
+ const history = callArgs[3];
323
+ expect(history).toEqual([
324
+ { role: 'user', parts: [{ text: 'Env Context' }] },
325
+ {
326
+ role: 'model',
327
+ parts: [{ text: 'Got it. Thanks for the context!' }],
328
+ },
329
+ ]);
330
+ });
331
+
332
+ it('should include output instructions in the system prompt when outputs are defined', async () => {
333
+ const { config } = await createMockConfig();
334
+ vi.mocked(GeminiChat).mockClear();
335
+
336
+ const promptConfig: PromptConfig = { systemPrompt: 'Do the task.' };
337
+ const outputConfig: OutputConfig = {
338
+ outputs: {
339
+ result1: 'The first result',
340
+ },
341
+ };
342
+ const context = new ContextState();
343
+
344
+ // Model stops immediately
345
+ mockSendMessageStream.mockImplementation(createMockStream(['stop']));
346
+
347
+ const scope = await SubAgentScope.create(
348
+ 'test-agent',
349
+ config,
350
+ promptConfig,
351
+ defaultModelConfig,
352
+ defaultRunConfig,
353
+ undefined, // ToolConfig
354
+ outputConfig,
355
+ );
356
+
357
+ await scope.runNonInteractive(context);
358
+
359
+ const generationConfig = getGenerationConfigFromMock();
360
+ const systemInstruction = generationConfig.systemInstruction as string;
361
+
362
+ expect(systemInstruction).toContain('Do the task.');
363
+ expect(systemInstruction).toContain(
364
+ 'you MUST emit the required output variables',
365
+ );
366
+ expect(systemInstruction).toContain(
367
+ "Use 'self.emitvalue' to emit the 'result1' key",
368
+ );
369
+ });
370
+
371
+ it('should use initialMessages instead of systemPrompt if provided', async () => {
372
+ const { config } = await createMockConfig();
373
+ vi.mocked(GeminiChat).mockClear();
374
+
375
+ const initialMessages: Content[] = [
376
+ { role: 'user', parts: [{ text: 'Hi' }] },
377
+ ];
378
+ const promptConfig: PromptConfig = { initialMessages };
379
+ const context = new ContextState();
380
+
381
+ // Model stops immediately
382
+ mockSendMessageStream.mockImplementation(createMockStream(['stop']));
383
+
384
+ const scope = await SubAgentScope.create(
385
+ 'test-agent',
386
+ config,
387
+ promptConfig,
388
+ defaultModelConfig,
389
+ defaultRunConfig,
390
+ );
391
+
392
+ await scope.runNonInteractive(context);
393
+
394
+ const callArgs = vi.mocked(GeminiChat).mock.calls[0];
395
+ const generationConfig = getGenerationConfigFromMock();
396
+ const history = callArgs[3];
397
+
398
+ expect(generationConfig.systemInstruction).toBeUndefined();
399
+ expect(history).toEqual([
400
+ { role: 'user', parts: [{ text: 'Env Context' }] },
401
+ {
402
+ role: 'model',
403
+ parts: [{ text: 'Got it. Thanks for the context!' }],
404
+ },
405
+ ...initialMessages,
406
+ ]);
407
+ });
408
+
409
+ it('should throw an error if template variables are missing', async () => {
410
+ const { config } = await createMockConfig();
411
+ const promptConfig: PromptConfig = {
412
+ systemPrompt: 'Hello ${name}, you are missing ${missing}.',
413
+ };
414
+ const context = new ContextState();
415
+ context.set('name', 'Agent');
416
+ // 'missing' is not set
417
+
418
+ const scope = await SubAgentScope.create(
419
+ 'test-agent',
420
+ config,
421
+ promptConfig,
422
+ defaultModelConfig,
423
+ defaultRunConfig,
424
+ );
425
+
426
+ // The error from templating causes the runNonInteractive to reject and the terminate_reason to be ERROR.
427
+ await expect(scope.runNonInteractive(context)).rejects.toThrow(
428
+ 'Missing context values for the following keys: missing',
429
+ );
430
+ expect(scope.output.terminate_reason).toBe(SubagentTerminateMode.ERROR);
431
+ });
432
+
433
+ it('should validate that systemPrompt and initialMessages are mutually exclusive', async () => {
434
+ const { config } = await createMockConfig();
435
+ const promptConfig: PromptConfig = {
436
+ systemPrompt: 'System',
437
+ initialMessages: [{ role: 'user', parts: [{ text: 'Hi' }] }],
438
+ };
439
+ const context = new ContextState();
440
+
441
+ const agent = await SubAgentScope.create(
442
+ 'TestAgent',
443
+ config,
444
+ promptConfig,
445
+ defaultModelConfig,
446
+ defaultRunConfig,
447
+ );
448
+
449
+ await expect(agent.runNonInteractive(context)).rejects.toThrow(
450
+ 'PromptConfig cannot have both `systemPrompt` and `initialMessages` defined.',
451
+ );
452
+ expect(agent.output.terminate_reason).toBe(SubagentTerminateMode.ERROR);
453
+ });
454
+ });
455
+
456
+ describe('runNonInteractive - Execution and Tool Use', () => {
457
+ const promptConfig: PromptConfig = { systemPrompt: 'Execute task.' };
458
+
459
+ it('should terminate with GOAL if no outputs are expected and model stops', async () => {
460
+ const { config } = await createMockConfig();
461
+ // Model stops immediately
462
+ mockSendMessageStream.mockImplementation(createMockStream(['stop']));
463
+
464
+ const scope = await SubAgentScope.create(
465
+ 'test-agent',
466
+ config,
467
+ promptConfig,
468
+ defaultModelConfig,
469
+ defaultRunConfig,
470
+ // No ToolConfig, No OutputConfig
471
+ );
472
+
473
+ await scope.runNonInteractive(new ContextState());
474
+
475
+ expect(scope.output.terminate_reason).toBe(SubagentTerminateMode.GOAL);
476
+ expect(scope.output.emitted_vars).toEqual({});
477
+ expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
478
+ // Check the initial message
479
+ expect(mockSendMessageStream.mock.calls[0][0].message).toEqual([
480
+ { text: 'Get Started!' },
481
+ ]);
482
+ });
483
+
484
+ it('should handle self.emitvalue and terminate with GOAL when outputs are met', async () => {
485
+ const { config } = await createMockConfig();
486
+ const outputConfig: OutputConfig = {
487
+ outputs: { result: 'The final result' },
488
+ };
489
+
490
+ // Turn 1: Model responds with emitvalue call
491
+ // Turn 2: Model stops after receiving the tool response
492
+ mockSendMessageStream.mockImplementation(
493
+ createMockStream([
494
+ [
495
+ {
496
+ name: 'self.emitvalue',
497
+ args: {
498
+ emit_variable_name: 'result',
499
+ emit_variable_value: 'Success!',
500
+ },
501
+ },
502
+ ],
503
+ 'stop',
504
+ ]),
505
+ );
506
+
507
+ const scope = await SubAgentScope.create(
508
+ 'test-agent',
509
+ config,
510
+ promptConfig,
511
+ defaultModelConfig,
512
+ defaultRunConfig,
513
+ undefined,
514
+ outputConfig,
515
+ );
516
+
517
+ await scope.runNonInteractive(new ContextState());
518
+
519
+ expect(scope.output.terminate_reason).toBe(SubagentTerminateMode.GOAL);
520
+ expect(scope.output.emitted_vars).toEqual({ result: 'Success!' });
521
+ expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
522
+
523
+ // Check the tool response sent back in the second call
524
+ const secondCallArgs = mockSendMessageStream.mock.calls[1][0];
525
+ expect(secondCallArgs.message).toEqual([
526
+ { text: 'Emitted variable result successfully' },
527
+ ]);
528
+ });
529
+
530
+ it('should execute external tools and provide the response to the model', async () => {
531
+ const listFilesToolDef: FunctionDeclaration = {
532
+ name: 'list_files',
533
+ description: 'Lists files',
534
+ parameters: { type: Type.OBJECT, properties: {} },
535
+ };
536
+
537
+ const { config } = await createMockConfig({
538
+ getFunctionDeclarationsFiltered: vi
539
+ .fn()
540
+ .mockReturnValue([listFilesToolDef]),
541
+ });
542
+ const toolConfig: ToolConfig = { tools: ['list_files'] };
543
+
544
+ // Turn 1: Model calls the external tool
545
+ // Turn 2: Model stops
546
+ mockSendMessageStream.mockImplementation(
547
+ createMockStream([
548
+ [
549
+ {
550
+ id: 'call_1',
551
+ name: 'list_files',
552
+ args: { path: '.' },
553
+ },
554
+ ],
555
+ 'stop',
556
+ ]),
557
+ );
558
+
559
+ // Mock the tool execution result
560
+ vi.mocked(executeToolCall).mockResolvedValue({
561
+ callId: 'call_1',
562
+ responseParts: 'file1.txt\nfile2.ts',
563
+ resultDisplay: 'Listed 2 files',
564
+ error: undefined,
565
+ errorType: undefined, // Or ToolErrorType.NONE if available and appropriate
566
+ });
567
+
568
+ const scope = await SubAgentScope.create(
569
+ 'test-agent',
570
+ config,
571
+ promptConfig,
572
+ defaultModelConfig,
573
+ defaultRunConfig,
574
+ toolConfig,
575
+ );
576
+
577
+ await scope.runNonInteractive(new ContextState());
578
+
579
+ // Check tool execution
580
+ expect(executeToolCall).toHaveBeenCalledWith(
581
+ config,
582
+ expect.objectContaining({ name: 'list_files', args: { path: '.' } }),
583
+ expect.any(AbortSignal),
584
+ );
585
+
586
+ // Check the response sent back to the model
587
+ const secondCallArgs = mockSendMessageStream.mock.calls[1][0];
588
+ expect(secondCallArgs.message).toEqual([
589
+ { text: 'file1.txt\nfile2.ts' },
590
+ ]);
591
+
592
+ expect(scope.output.terminate_reason).toBe(SubagentTerminateMode.GOAL);
593
+ });
594
+
595
    it('should provide specific tool error responses to the model', async () => {
      const { config } = await createMockConfig();
      const toolConfig: ToolConfig = { tools: ['failing_tool'] };

      // Turn 1: Model calls the failing tool
      // Turn 2: Model stops after receiving the error response
      mockSendMessageStream.mockImplementation(
        createMockStream([
          [
            {
              id: 'call_fail',
              name: 'failing_tool',
              args: {},
            },
          ],
          'stop',
        ]),
      );

      // Mock the tool execution failure. Note the response carries both an
      // `error` and model-facing `responseParts` text.
      vi.mocked(executeToolCall).mockResolvedValue({
        callId: 'call_fail',
        responseParts: 'ERROR: Tool failed catastrophically', // This should be sent to the model
        resultDisplay: 'Tool failed catastrophically',
        error: new Error('Failure'),
        errorType: ToolErrorType.INVALID_TOOL_PARAMS,
      });

      const scope = await SubAgentScope.create(
        'test-agent',
        config,
        promptConfig,
        defaultModelConfig,
        defaultRunConfig,
        toolConfig,
      );

      await scope.runNonInteractive(new ContextState());

      // The agent should send the specific error message from responseParts,
      // not a generic failure notice, so the model can adapt its plan.
      const secondCallArgs = mockSendMessageStream.mock.calls[1][0];

      expect(secondCallArgs.message).toEqual([
        {
          text: 'ERROR: Tool failed catastrophically',
        },
      ]);
    });
643
+
644
    it('should nudge the model if it stops before emitting all required variables', async () => {
      const { config } = await createMockConfig();
      const outputConfig: OutputConfig = {
        outputs: { required_var: 'Must be present' },
      };

      // Turn 1: Model stops prematurely
      // Turn 2: Model responds to the nudge and emits the variable
      // Turn 3: Model stops
      mockSendMessageStream.mockImplementation(
        createMockStream([
          'stop',
          [
            {
              name: 'self.emitvalue',
              args: {
                emit_variable_name: 'required_var',
                emit_variable_value: 'Here it is',
              },
            },
          ],
          'stop',
        ]),
      );

      const scope = await SubAgentScope.create(
        'test-agent',
        config,
        promptConfig,
        defaultModelConfig,
        defaultRunConfig,
        undefined, // no toolConfig — only the scope-local emitvalue tool
        outputConfig,
      );

      await scope.runNonInteractive(new ContextState());

      // Check the nudge message sent in Turn 2
      const secondCallArgs = mockSendMessageStream.mock.calls[1][0];

      // We check that the message contains the required variable name and the nudge phrasing.
      expect(secondCallArgs.message[0].text).toContain('required_var');
      expect(secondCallArgs.message[0].text).toContain(
        'You have stopped calling tools',
      );

      // The run still ends in GOAL because the nudge recovered the output.
      expect(scope.output.terminate_reason).toBe(SubagentTerminateMode.GOAL);
      expect(scope.output.emitted_vars).toEqual({
        required_var: 'Here it is',
      });
      expect(mockSendMessageStream).toHaveBeenCalledTimes(3);
    });
+ });
696
+ });
697
+
698
  // Covers the three non-success terminal states: MAX_TURNS, TIMEOUT, ERROR.
  describe('runNonInteractive - Termination and Recovery', () => {
    const promptConfig: PromptConfig = { systemPrompt: 'Execute task.' };

    it('should terminate with MAX_TURNS if the limit is reached', async () => {
      const { config } = await createMockConfig();
      // Cap at two turns so the third scripted response must never be requested.
      const runConfig: RunConfig = { ...defaultRunConfig, max_turns: 2 };

      // Model keeps looping by calling emitvalue repeatedly
      mockSendMessageStream.mockImplementation(
        createMockStream([
          [
            {
              name: 'self.emitvalue',
              args: { emit_variable_name: 'loop', emit_variable_value: 'v1' },
            },
          ],
          [
            {
              name: 'self.emitvalue',
              args: { emit_variable_name: 'loop', emit_variable_value: 'v2' },
            },
          ],
          // This turn should not happen
          [
            {
              name: 'self.emitvalue',
              args: { emit_variable_name: 'loop', emit_variable_value: 'v3' },
            },
          ],
        ]),
      );

      const scope = await SubAgentScope.create(
        'test-agent',
        config,
        promptConfig,
        defaultModelConfig,
        runConfig,
      );

      await scope.runNonInteractive(new ContextState());

      expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
      expect(scope.output.terminate_reason).toBe(
        SubagentTerminateMode.MAX_TURNS,
      );
    });

    it('should terminate with TIMEOUT if the time limit is reached during an LLM call', async () => {
      // Use fake timers to reliably test timeouts
      vi.useFakeTimers();

      const { config } = await createMockConfig();
      const runConfig: RunConfig = { max_time_minutes: 5, max_turns: 100 };

      // We need to control the resolution of the sendMessageStream promise to advance the timer during execution.
      let resolveStream: (
        value: AsyncGenerator<unknown, void, unknown>,
      ) => void;
      const streamPromise = new Promise<
        AsyncGenerator<unknown, void, unknown>
      >((resolve) => {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        resolveStream = resolve as any;
      });

      // The LLM call will hang until we resolve the promise.
      mockSendMessageStream.mockReturnValue(streamPromise);

      const scope = await SubAgentScope.create(
        'test-agent',
        config,
        promptConfig,
        defaultModelConfig,
        runConfig,
      );

      const runPromise = scope.runNonInteractive(new ContextState());

      // Advance time beyond the limit (6 minutes) while the agent is awaiting the LLM response.
      await vi.advanceTimersByTimeAsync(6 * 60 * 1000);

      // Now resolve the stream. The model returns 'stop'.
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      resolveStream!(createMockStream(['stop'])() as any);

      await runPromise;

      // The post-stream clock check should fire even though the model stopped.
      expect(scope.output.terminate_reason).toBe(
        SubagentTerminateMode.TIMEOUT,
      );
      expect(mockSendMessageStream).toHaveBeenCalledTimes(1);

      vi.useRealTimers();
    });

    it('should terminate with ERROR if the model call throws', async () => {
      const { config } = await createMockConfig();
      mockSendMessageStream.mockRejectedValue(new Error('API Failure'));

      const scope = await SubAgentScope.create(
        'test-agent',
        config,
        promptConfig,
        defaultModelConfig,
        defaultRunConfig,
      );

      // The error is both recorded on the scope output and rethrown to the caller.
      await expect(
        scope.runNonInteractive(new ContextState()),
      ).rejects.toThrow('API Failure');
      expect(scope.output.terminate_reason).toBe(SubagentTerminateMode.ERROR);
    });
  });
812
+ });
813
+ });
projects/ui/qwen-code/packages/core/src/core/subagent.ts ADDED
@@ -0,0 +1,676 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { reportError } from '../utils/errorReporting.js';
8
+ import { Config } from '../config/config.js';
9
+ import { ToolCallRequestInfo } from './turn.js';
10
+ import { executeToolCall } from './nonInteractiveToolExecutor.js';
11
+ import { createContentGenerator } from './contentGenerator.js';
12
+ import { getEnvironmentContext } from '../utils/environmentContext.js';
13
+ import {
14
+ Content,
15
+ Part,
16
+ FunctionCall,
17
+ GenerateContentConfig,
18
+ FunctionDeclaration,
19
+ Type,
20
+ } from '@google/genai';
21
+ import { GeminiChat } from './geminiChat.js';
22
+
23
+ /**
24
+ * @fileoverview Defines the configuration interfaces for a subagent.
25
+ *
26
+ * These interfaces specify the structure for defining the subagent's prompt,
27
+ * the model parameters, and the execution settings.
28
+ */
29
+
30
/**
 * Describes the possible termination modes for a subagent.
 * This enum provides a clear indication of why a subagent's execution ended.
 */
export enum SubagentTerminateMode {
  /**
   * Indicates that the subagent's execution terminated due to an unrecoverable error.
   * Note: this is also the initial value of `OutputObject.terminate_reason`
   * before a run completes, so an aborted run reads as ERROR.
   */
  ERROR = 'ERROR',
  /**
   * Indicates that the subagent's execution terminated because it exceeded the maximum allowed working time.
   */
  TIMEOUT = 'TIMEOUT',
  /**
   * Indicates that the subagent's execution successfully completed all its defined goals
   * (including emitting every required output variable, when configured).
   */
  GOAL = 'GOAL',
  /**
   * Indicates that the subagent's execution terminated because it exceeded the maximum number of turns.
   */
  MAX_TURNS = 'MAX_TURNS',
}
52
+
53
/**
 * Represents the output structure of a subagent's execution.
 * This interface defines the data that a subagent will return upon completion,
 * including any emitted variables and the reason for its termination.
 */
export interface OutputObject {
  /**
   * Key-value pairs emitted by the subagent via the `self.emitvalue` tool
   * during its execution. These variables can be used by the calling agent.
   */
  emitted_vars: Record<string, string>;
  /**
   * Why the run ended: GOAL on success, or TIMEOUT / MAX_TURNS / ERROR.
   */
  terminate_reason: SubagentTerminateMode;
}

/**
 * Configures the initial prompt for the subagent.
 */
export interface PromptConfig {
  /**
   * A single system prompt string that defines the subagent's persona and
   * instructions. May contain `${var}` placeholders that are resolved from
   * the `ContextState` at run time.
   * Note: You should use either `systemPrompt` or `initialMessages`, but not both.
   */
  systemPrompt?: string;

  /**
   * An array of user/model content pairs to seed the chat history for
   * few-shot prompting.
   * Note: You should use either `systemPrompt` or `initialMessages`, but not both.
   */
  initialMessages?: Content[];
}

/**
 * Configures the tools available to the subagent during its execution.
 */
export interface ToolConfig {
  /**
   * A list of tool names (from the tool registry) or full function
   * declarations that the subagent is permitted to use.
   */
  tools: Array<string | FunctionDeclaration>;
}

/**
 * Configures the expected outputs for the subagent.
 */
export interface OutputConfig {
  /**
   * Maps each required output variable name to a human-readable description.
   * The subagent is prompted to emit every listed variable before terminating.
   */
  outputs: Record<string, string>;
}
109
+
110
/**
 * Configures the generative model parameters for the subagent.
 * This interface specifies the model to be used and its associated generation
 * settings, such as temperature and top-p, which are forwarded to the chat's
 * generation config.
 */
export interface ModelConfig {
  /**
   * The name or identifier of the model to be used (e.g., 'gemini-2.5-pro').
   *
   * TODO: In the future, this needs to support 'auto' or some other string to support routing use cases.
   */
  model: string;
  /**
   * Sampling temperature for the model (passed through as `temperature`).
   */
  temp: number;
  /**
   * Nucleus-sampling value (passed through as `topP`).
   */
  top_p: number;
}

/**
 * Configures the execution environment and constraints for the subagent.
 * This interface defines parameters that control the subagent's runtime behavior,
 * such as maximum execution time, to prevent infinite loops or excessive resource consumption.
 *
 * TODO: Consider adding max_tokens as a form of budgeting.
 */
export interface RunConfig {
  /** Hard wall-clock budget for the whole run, in minutes. */
  max_time_minutes: number;
  /**
   * The maximum number of conversational turns (a user message + model response)
   * before the execution is terminated. Helps prevent infinite loops.
   */
  max_turns?: number;
}
148
+
149
+ /**
150
+ * Manages the runtime context state for the subagent.
151
+ * This class provides a mechanism to store and retrieve key-value pairs
152
+ * that represent the dynamic state and variables accessible to the subagent
153
+ * during its execution.
154
+ */
155
+ export class ContextState {
156
+ private state: Record<string, unknown> = {};
157
+
158
+ /**
159
+ * Retrieves a value from the context state.
160
+ *
161
+ * @param key - The key of the value to retrieve.
162
+ * @returns The value associated with the key, or undefined if the key is not found.
163
+ */
164
+ get(key: string): unknown {
165
+ return this.state[key];
166
+ }
167
+
168
+ /**
169
+ * Sets a value in the context state.
170
+ *
171
+ * @param key - The key to set the value under.
172
+ * @param value - The value to set.
173
+ */
174
+ set(key: string, value: unknown): void {
175
+ this.state[key] = value;
176
+ }
177
+
178
+ /**
179
+ * Retrieves all keys in the context state.
180
+ *
181
+ * @returns An array of all keys in the context state.
182
+ */
183
+ get_keys(): string[] {
184
+ return Object.keys(this.state);
185
+ }
186
+ }
187
+
188
+ /**
189
+ * Replaces `${...}` placeholders in a template string with values from a context.
190
+ *
191
+ * This function identifies all placeholders in the format `${key}`, validates that
192
+ * each key exists in the provided `ContextState`, and then performs the substitution.
193
+ *
194
+ * @param template The template string containing placeholders.
195
+ * @param context The `ContextState` object providing placeholder values.
196
+ * @returns The populated string with all placeholders replaced.
197
+ * @throws {Error} if any placeholder key is not found in the context.
198
+ */
199
+ function templateString(template: string, context: ContextState): string {
200
+ const placeholderRegex = /\$\{(\w+)\}/g;
201
+
202
+ // First, find all unique keys required by the template.
203
+ const requiredKeys = new Set(
204
+ Array.from(template.matchAll(placeholderRegex), (match) => match[1]),
205
+ );
206
+
207
+ // Check if all required keys exist in the context.
208
+ const contextKeys = new Set(context.get_keys());
209
+ const missingKeys = Array.from(requiredKeys).filter(
210
+ (key) => !contextKeys.has(key),
211
+ );
212
+
213
+ if (missingKeys.length > 0) {
214
+ throw new Error(
215
+ `Missing context values for the following keys: ${missingKeys.join(
216
+ ', ',
217
+ )}`,
218
+ );
219
+ }
220
+
221
+ // Perform the replacement using a replacer function.
222
+ return template.replace(placeholderRegex, (_match, key) =>
223
+ String(context.get(key)),
224
+ );
225
+ }
226
+
227
+ /**
228
+ * Represents the scope and execution environment for a subagent.
229
+ * This class orchestrates the subagent's lifecycle, managing its chat interactions,
230
+ * runtime context, and the collection of its outputs.
231
+ */
232
+ export class SubAgentScope {
233
+ output: OutputObject = {
234
+ terminate_reason: SubagentTerminateMode.ERROR,
235
+ emitted_vars: {},
236
+ };
237
+ private readonly subagentId: string;
238
+
239
+ /**
240
+ * Constructs a new SubAgentScope instance.
241
+ * @param name - The name for the subagent, used for logging and identification.
242
+ * @param runtimeContext - The shared runtime configuration and services.
243
+ * @param promptConfig - Configuration for the subagent's prompt and behavior.
244
+ * @param modelConfig - Configuration for the generative model parameters.
245
+ * @param runConfig - Configuration for the subagent's execution environment.
246
+ * @param toolConfig - Optional configuration for tools available to the subagent.
247
+ * @param outputConfig - Optional configuration for the subagent's expected outputs.
248
+ */
249
+ private constructor(
250
+ readonly name: string,
251
+ readonly runtimeContext: Config,
252
+ private readonly promptConfig: PromptConfig,
253
+ private readonly modelConfig: ModelConfig,
254
+ private readonly runConfig: RunConfig,
255
+ private readonly toolConfig?: ToolConfig,
256
+ private readonly outputConfig?: OutputConfig,
257
+ ) {
258
+ const randomPart = Math.random().toString(36).slice(2, 8);
259
+ this.subagentId = `${this.name}-${randomPart}`;
260
+ }
261
+
262
  /**
   * Creates and validates a new SubAgentScope instance.
   * This factory method ensures that all registry-named tools provided in the
   * tool configuration are valid for non-interactive use before creating the
   * subagent instance.
   * @param {string} name - The name of the subagent.
   * @param {Config} runtimeContext - The shared runtime configuration and services.
   * @param {PromptConfig} promptConfig - Configuration for the subagent's prompt and behavior.
   * @param {ModelConfig} modelConfig - Configuration for the generative model parameters.
   * @param {RunConfig} runConfig - Configuration for the subagent's execution environment.
   * @param {ToolConfig} [toolConfig] - Optional configuration for tools.
   * @param {OutputConfig} [outputConfig] - Optional configuration for expected outputs.
   * @returns {Promise<SubAgentScope>} A promise that resolves to a valid SubAgentScope instance.
   * @throws {Error} If any parameterless tool requires user confirmation.
   */
  static async create(
    name: string,
    runtimeContext: Config,
    promptConfig: PromptConfig,
    modelConfig: ModelConfig,
    runConfig: RunConfig,
    toolConfig?: ToolConfig,
    outputConfig?: OutputConfig,
  ): Promise<SubAgentScope> {
    if (toolConfig) {
      const toolRegistry = runtimeContext.getToolRegistry();
      // Only registry-named (string) tools can be vetted here; inline
      // FunctionDeclarations are passed through to the model unchecked.
      const toolsToLoad: string[] = [];
      for (const tool of toolConfig.tools) {
        if (typeof tool === 'string') {
          toolsToLoad.push(tool);
        }
      }

      for (const toolName of toolsToLoad) {
        const tool = toolRegistry.getTool(toolName);
        if (tool) {
          const requiredParams = tool.schema.parameters?.required ?? [];
          if (requiredParams.length > 0) {
            // This check is imperfect. A tool might require parameters but still
            // be interactive (e.g., `delete_file(path)`). However, we cannot
            // build a generic invocation without knowing what dummy parameters
            // to provide. Crashing here because `build({})` fails is worse
            // than allowing a potential hang later if an interactive tool is
            // used. This is a best-effort check.
            console.warn(
              `Cannot check tool "${toolName}" for interactivity because it requires parameters. Assuming it is safe for non-interactive use.`,
            );
            continue;
          }

          // Probe a parameterless invocation: if it would ask the user for
          // confirmation, it cannot run inside a non-interactive subagent.
          const invocation = tool.build({});
          const confirmationDetails = await invocation.shouldConfirmExecute(
            new AbortController().signal,
          );
          if (confirmationDetails) {
            throw new Error(
              `Tool "${toolName}" requires user confirmation and cannot be used in a non-interactive subagent.`,
            );
          }
        }
      }
    }

    return new SubAgentScope(
      name,
      runtimeContext,
      promptConfig,
      modelConfig,
      runConfig,
      toolConfig,
      outputConfig,
    );
  }
334
+
335
  /**
   * Runs the subagent in a non-interactive mode.
   * This method orchestrates the subagent's execution loop, including prompt
   * templating, tool execution, and termination conditions. The result is
   * reported via `this.output` (emitted_vars + terminate_reason) rather than
   * a return value.
   * @param {ContextState} context - The current context state containing variables for prompt templating.
   * @returns {Promise<void>} A promise that resolves when the subagent has completed its execution.
   * @throws Re-throws any error raised during the chat loop (after recording
   *   `terminate_reason = ERROR`).
   */
  async runNonInteractive(context: ContextState): Promise<void> {
    const chat = await this.createChatObject(context);

    if (!chat) {
      // Chat construction failed (already reported via reportError); surface
      // as an ERROR result instead of throwing.
      this.output.terminate_reason = SubagentTerminateMode.ERROR;
      return;
    }

    const abortController = new AbortController();
    const toolRegistry = this.runtimeContext.getToolRegistry();

    // Prepare the list of tools available to the subagent.
    const toolsList: FunctionDeclaration[] = [];
    if (this.toolConfig) {
      const toolsToLoad: string[] = [];
      for (const tool of this.toolConfig.tools) {
        if (typeof tool === 'string') {
          toolsToLoad.push(tool);
        } else {
          // Inline declarations are forwarded to the model as-is.
          toolsList.push(tool);
        }
      }
      toolsList.push(
        ...toolRegistry.getFunctionDeclarationsFiltered(toolsToLoad),
      );
    }
    // Add local scope functions (self.emitvalue) if outputs are expected.
    if (this.outputConfig && this.outputConfig.outputs) {
      toolsList.push(...this.getScopeLocalFuncDefs());
    }

    // Seed the conversation; later turns carry tool responses or nudges.
    let currentMessages: Content[] = [
      { role: 'user', parts: [{ text: 'Get Started!' }] },
    ];

    const startTime = Date.now();
    let turnCounter = 0;
    try {
      while (true) {
        // Check termination conditions before issuing another model call.
        if (
          this.runConfig.max_turns &&
          turnCounter >= this.runConfig.max_turns
        ) {
          this.output.terminate_reason = SubagentTerminateMode.MAX_TURNS;
          break;
        }
        let durationMin = (Date.now() - startTime) / (1000 * 60);
        if (durationMin >= this.runConfig.max_time_minutes) {
          this.output.terminate_reason = SubagentTerminateMode.TIMEOUT;
          break;
        }

        // Unique prompt id per turn: session + subagent + turn counter.
        const promptId = `${this.runtimeContext.getSessionId()}#${this.subagentId}#${turnCounter++}`;
        const messageParams = {
          message: currentMessages[0]?.parts || [],
          config: {
            abortSignal: abortController.signal,
            tools: [{ functionDeclarations: toolsList }],
          },
        };

        const responseStream = await chat.sendMessageStream(
          messageParams,
          promptId,
        );

        // Drain the stream, collecting any tool calls the model issued.
        const functionCalls: FunctionCall[] = [];
        for await (const resp of responseStream) {
          if (abortController.signal.aborted) return;
          if (resp.functionCalls) functionCalls.push(...resp.functionCalls);
        }

        // Re-check the clock: the LLM call itself may have consumed the budget.
        durationMin = (Date.now() - startTime) / (1000 * 60);
        if (durationMin >= this.runConfig.max_time_minutes) {
          this.output.terminate_reason = SubagentTerminateMode.TIMEOUT;
          break;
        }

        if (functionCalls.length > 0) {
          currentMessages = await this.processFunctionCalls(
            functionCalls,
            abortController,
            promptId,
          );
        } else {
          // Model stopped calling tools. Check if goal is met.
          if (
            !this.outputConfig ||
            Object.keys(this.outputConfig.outputs).length === 0
          ) {
            this.output.terminate_reason = SubagentTerminateMode.GOAL;
            break;
          }

          const remainingVars = Object.keys(this.outputConfig.outputs).filter(
            (key) => !(key in this.output.emitted_vars),
          );

          if (remainingVars.length === 0) {
            this.output.terminate_reason = SubagentTerminateMode.GOAL;
            break;
          }

          // Required outputs are missing: nudge the model back to work
          // instead of terminating.
          const nudgeMessage = `You have stopped calling tools but have not emitted the following required variables: ${remainingVars.join(
            ', ',
          )}. Please use the 'self.emitvalue' tool to emit them now, or continue working if necessary.`;

          console.debug(nudgeMessage);

          currentMessages = [
            {
              role: 'user',
              parts: [{ text: nudgeMessage }],
            },
          ];
        }
      }
    } catch (error) {
      console.error('Error during subagent execution:', error);
      this.output.terminate_reason = SubagentTerminateMode.ERROR;
      throw error;
    }
  }
466
+
467
  /**
   * Processes a list of function calls, executing each one and collecting their responses.
   * This method iterates through the provided function calls, executes them using the
   * `executeToolCall` function (or handles `self.emitvalue` internally), and aggregates
   * their results. It also manages error reporting for failed tool executions.
   * @param {FunctionCall[]} functionCalls - An array of `FunctionCall` objects to process.
   * @param {AbortController} abortController - An `AbortController` to signal cancellation of tool executions.
   * @param {string} promptId - Identifier of the turn that produced these calls, used for attribution.
   * @returns {Promise<Content[]>} A promise that resolves to an array of `Content` parts representing the tool responses,
   * which are then used to update the chat history.
   */
  private async processFunctionCalls(
    functionCalls: FunctionCall[],
    abortController: AbortController,
    promptId: string,
  ): Promise<Content[]> {
    const toolResponseParts: Part[] = [];

    for (const functionCall of functionCalls) {
      // Fall back to a synthesized id when the model omitted one.
      const callId = functionCall.id ?? `${functionCall.name}-${Date.now()}`;
      const requestInfo: ToolCallRequestInfo = {
        callId,
        name: functionCall.name as string,
        args: (functionCall.args ?? {}) as Record<string, unknown>,
        isClientInitiated: true,
        prompt_id: promptId,
      };

      let toolResponse;

      // Handle scope-local tools first.
      if (functionCall.name === 'self.emitvalue') {
        // Record the emitted variable directly on the scope's output object.
        const valName = String(requestInfo.args['emit_variable_name']);
        const valVal = String(requestInfo.args['emit_variable_value']);
        this.output.emitted_vars[valName] = valVal;

        toolResponse = {
          callId,
          responseParts: `Emitted variable ${valName} successfully`,
          resultDisplay: `Emitted variable ${valName} successfully`,
          error: undefined,
        };
      } else {
        toolResponse = await executeToolCall(
          this.runtimeContext,
          requestInfo,
          abortController.signal,
        );
      }

      if (toolResponse.error) {
        console.error(
          `Error executing tool ${functionCall.name}: ${toolResponse.resultDisplay || toolResponse.error.message}`,
        );
      }

      // Normalize responseParts (string | Part | array) into Part objects;
      // failed calls still contribute their error text so the model sees it.
      if (toolResponse.responseParts) {
        const parts = Array.isArray(toolResponse.responseParts)
          ? toolResponse.responseParts
          : [toolResponse.responseParts];
        for (const part of parts) {
          if (typeof part === 'string') {
            toolResponseParts.push({ text: part });
          } else if (part) {
            toolResponseParts.push(part);
          }
        }
      }
    }
    // If all tool calls failed, inform the model so it can re-evaluate.
    if (functionCalls.length > 0 && toolResponseParts.length === 0) {
      toolResponseParts.push({
        text: 'All tool calls failed. Please analyze the errors and try an alternative approach.',
      });
    }

    return [{ role: 'user', parts: toolResponseParts }];
  }
545
+
546
  /**
   * Builds the GeminiChat used for this subagent run: validates the prompt
   * configuration, seeds history with environment context, and wires in the
   * model generation settings.
   * @param context - Context used to template the system prompt, if any.
   * @returns The chat instance, or `undefined` if construction failed (the
   *   failure is reported via `reportError`).
   */
  private async createChatObject(context: ContextState) {
    // Exactly one prompt source must be supplied: a templated system prompt
    // or a pre-built message history — never both, never neither.
    if (!this.promptConfig.systemPrompt && !this.promptConfig.initialMessages) {
      throw new Error(
        'PromptConfig must have either `systemPrompt` or `initialMessages` defined.',
      );
    }
    if (this.promptConfig.systemPrompt && this.promptConfig.initialMessages) {
      throw new Error(
        'PromptConfig cannot have both `systemPrompt` and `initialMessages` defined.',
      );
    }

    // Seed history with environment context followed by a canned model
    // acknowledgement so the model treats it as settled background.
    const envParts = await getEnvironmentContext(this.runtimeContext);
    const envHistory: Content[] = [
      { role: 'user', parts: envParts },
      { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
    ];

    const start_history = [
      ...envHistory,
      ...(this.promptConfig.initialMessages ?? []),
    ];

    const systemInstruction = this.promptConfig.systemPrompt
      ? this.buildChatSystemPrompt(context)
      : undefined;

    try {
      const generationConfig: GenerateContentConfig & {
        systemInstruction?: string | Content;
      } = {
        temperature: this.modelConfig.temp,
        topP: this.modelConfig.top_p,
      };

      if (systemInstruction) {
        generationConfig.systemInstruction = systemInstruction;
      }

      const contentGenerator = await createContentGenerator(
        this.runtimeContext.getContentGeneratorConfig(),
        this.runtimeContext,
        this.runtimeContext.getSessionId(),
      );

      // NOTE(review): this mutates the shared runtime Config's model for the
      // whole process, not just this subagent — confirm that is intended.
      this.runtimeContext.setModel(this.modelConfig.model);

      return new GeminiChat(
        this.runtimeContext,
        contentGenerator,
        generationConfig,
        start_history,
      );
    } catch (error) {
      await reportError(
        error,
        'Error initializing Gemini chat session.',
        start_history,
        'startChat',
      );
      // The calling function will handle the undefined return.
      return undefined;
    }
  }
610
+
611
  /**
   * Returns an array of FunctionDeclaration objects for tools that are local to the subagent's scope.
   * Currently, this includes the `self.emitvalue` tool for emitting variables.
   * These declarations are only advertised to the model; `processFunctionCalls`
   * intercepts `self.emitvalue` before it reaches the shared tool registry.
   * @returns An array of `FunctionDeclaration` objects.
   */
  private getScopeLocalFuncDefs() {
    const emitValueTool: FunctionDeclaration = {
      name: 'self.emitvalue',
      // Model-facing description; the bullet formatting is intentional.
      description: `* This tool emits A SINGLE return value from this execution, such that it can be collected and presented to the calling function.
 * You can only emit ONE VALUE each time you call this tool. You are expected to call this tool MULTIPLE TIMES if you have MULTIPLE OUTPUTS.`,
      parameters: {
        type: Type.OBJECT,
        properties: {
          emit_variable_name: {
            description: 'This is the name of the variable to be returned.',
            type: Type.STRING,
          },
          emit_variable_value: {
            description:
              'This is the _value_ to be returned for this variable.',
            type: Type.STRING,
          },
        },
        required: ['emit_variable_name', 'emit_variable_value'],
      },
    };

    return [emitValueTool];
  }
640
+
641
  /**
   * Builds the system prompt for the chat based on the provided configurations.
   * It templates the base system prompt and appends instructions for emitting
   * variables if an `OutputConfig` is provided.
   * @param {ContextState} context - The context for templating.
   * @returns {string} The complete system prompt.
   * @throws {Error} If the system prompt references a `${key}` absent from `context`
   *   (propagated from `templateString`).
   */
  private buildChatSystemPrompt(context: ContextState): string {
    if (!this.promptConfig.systemPrompt) {
      // This should ideally be caught in createChatObject, but serves as a safeguard.
      return '';
    }

    // Resolve ${...} placeholders against the runtime context.
    let finalPrompt = templateString(this.promptConfig.systemPrompt, context);

    // Add instructions for emitting variables if needed.
    if (this.outputConfig && this.outputConfig.outputs) {
      let outputInstructions =
        '\n\nAfter you have achieved all other goals, you MUST emit the required output variables. For each expected output, make one final call to the `self.emitvalue` tool.';

      for (const [key, value] of Object.entries(this.outputConfig.outputs)) {
        outputInstructions += `\n* Use 'self.emitvalue' to emit the '${key}' key, with a value described as: '${value}'`;
      }
      finalPrompt += outputInstructions;
    }

    // Add general non-interactive instructions.
    finalPrompt += `

Important Rules:
 * You are running in a non-interactive mode. You CANNOT ask the user for input or clarification. You must proceed with the information you have.
 * Once you believe all goals have been met and all required outputs have been emitted, stop calling tools.`;

    return finalPrompt;
  }
676
+ }
projects/ui/qwen-code/packages/core/src/core/tokenLimits.ts ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ type Model = string;
8
+ type TokenCount = number;
9
+
10
+ export const DEFAULT_TOKEN_LIMIT = 1_048_576;
11
+
12
+ export function tokenLimit(model: Model): TokenCount {
13
+ // Add other models as they become relevant or if specified by config
14
+ // Pulled from https://ai.google.dev/gemini-api/docs/models
15
+ switch (model) {
16
+ case 'gemini-1.5-pro':
17
+ return 2_097_152;
18
+ case 'gemini-1.5-flash':
19
+ case 'gemini-2.5-pro-preview-05-06':
20
+ case 'gemini-2.5-pro-preview-06-05':
21
+ case 'gemini-2.5-pro':
22
+ case 'gemini-2.5-flash-preview-05-20':
23
+ case 'gemini-2.5-flash':
24
+ case 'gemini-2.5-flash-lite':
25
+ case 'gemini-2.0-flash':
26
+ return 1_048_576;
27
+ case 'gemini-2.0-flash-preview-image-generation':
28
+ return 32_000;
29
+ default:
30
+ return DEFAULT_TOKEN_LIMIT;
31
+ }
32
+ }
projects/ui/qwen-code/packages/core/src/core/turn.test.ts ADDED
@@ -0,0 +1,470 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
8
+ import {
9
+ Turn,
10
+ GeminiEventType,
11
+ ServerGeminiToolCallRequestEvent,
12
+ ServerGeminiErrorEvent,
13
+ } from './turn.js';
14
+ import { GenerateContentResponse, Part, Content } from '@google/genai';
15
+ import { reportError } from '../utils/errorReporting.js';
16
+ import { GeminiChat } from './geminiChat.js';
17
+
18
// Shared mock functions for the chat methods that Turn exercises. They are
// hoisted to module scope so that both the vi.mock factory below and the
// individual tests can configure and inspect the same mock instances.
const mockSendMessageStream = vi.fn();
const mockGetHistory = vi.fn();
const mockMaybeIncludeSchemaDepthContext = vi.fn();

// Replace the real @google/genai Chat class with one backed by the mocks
// above, while keeping the rest of the module's exports intact.
vi.mock('@google/genai', async (importOriginal) => {
  const actual = await importOriginal<typeof import('@google/genai')>();
  const MockChat = vi.fn().mockImplementation(() => ({
    sendMessageStream: mockSendMessageStream,
    getHistory: mockGetHistory,
    maybeIncludeSchemaDepthContext: mockMaybeIncludeSchemaDepthContext,
  }));
  return {
    ...actual,
    Chat: MockChat,
  };
});

// Stub error reporting so tests can assert it was called without performing
// any real reporting side effects.
vi.mock('../utils/errorReporting', () => ({
  reportError: vi.fn(),
}));

// Minimal stand-in for getResponseText: joins the text of all parts of the
// first candidate, or yields undefined when there is no text.
vi.mock('../utils/generateContentResponseUtilities', () => ({
  getResponseText: (resp: GenerateContentResponse) =>
    resp.candidates?.[0]?.content?.parts?.map((part) => part.text).join('') ||
    undefined,
}));
44
+
45
// Unit tests for Turn: verifies how the streaming responses from the mocked
// chat are translated into server stream events (content, tool-call requests,
// thoughts, cancellation, errors, finish reasons).
describe('Turn', () => {
  let turn: Turn;
  // Define a type for the mocked Chat instance for clarity
  type MockedChatInstance = {
    sendMessageStream: typeof mockSendMessageStream;
    getHistory: typeof mockGetHistory;
    maybeIncludeSchemaDepthContext: typeof mockMaybeIncludeSchemaDepthContext;
  };
  let mockChatInstance: MockedChatInstance;

  beforeEach(() => {
    vi.resetAllMocks();
    // Fresh Turn per test, backed by the shared module-level mocks and with
    // benign defaults: empty history and an empty response stream.
    mockChatInstance = {
      sendMessageStream: mockSendMessageStream,
      getHistory: mockGetHistory,
      maybeIncludeSchemaDepthContext: mockMaybeIncludeSchemaDepthContext,
    };
    turn = new Turn(mockChatInstance as unknown as GeminiChat, 'prompt-id-1');
    mockGetHistory.mockReturnValue([]);
    mockSendMessageStream.mockResolvedValue((async function* () {})());
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe('constructor', () => {
    it('should initialize pendingToolCalls and debugResponses', () => {
      expect(turn.pendingToolCalls).toEqual([]);
      expect(turn.getDebugResponses()).toEqual([]);
    });
  });

  describe('run', () => {
    it('should yield content events for text parts', async () => {
      const mockResponseStream = (async function* () {
        yield {
          candidates: [{ content: { parts: [{ text: 'Hello' }] } }],
        } as unknown as GenerateContentResponse;
        yield {
          candidates: [{ content: { parts: [{ text: ' world' }] } }],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);

      const events = [];
      const reqParts: Part[] = [{ text: 'Hi' }];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      // The request must be forwarded verbatim along with the prompt id.
      expect(mockSendMessageStream).toHaveBeenCalledWith(
        {
          message: reqParts,
          config: { abortSignal: expect.any(AbortSignal) },
        },
        'prompt-id-1',
      );

      expect(events).toEqual([
        { type: GeminiEventType.Content, value: 'Hello' },
        { type: GeminiEventType.Content, value: ' world' },
      ]);
      expect(turn.getDebugResponses().length).toBe(2);
    });

    it('should yield tool_call_request events for function calls', async () => {
      const mockResponseStream = (async function* () {
        yield {
          functionCalls: [
            {
              id: 'fc1',
              name: 'tool1',
              args: { arg1: 'val1' },
              isClientInitiated: false,
            },
            { name: 'tool2', args: { arg2: 'val2' }, isClientInitiated: false }, // No ID
          ],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);

      const events = [];
      const reqParts: Part[] = [{ text: 'Use tools' }];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      expect(events.length).toBe(2);
      const event1 = events[0] as ServerGeminiToolCallRequestEvent;
      expect(event1.type).toBe(GeminiEventType.ToolCallRequest);
      expect(event1.value).toEqual(
        expect.objectContaining({
          callId: 'fc1',
          name: 'tool1',
          args: { arg1: 'val1' },
          isClientInitiated: false,
        }),
      );
      expect(turn.pendingToolCalls[0]).toEqual(event1.value);

      const event2 = events[1] as ServerGeminiToolCallRequestEvent;
      expect(event2.type).toBe(GeminiEventType.ToolCallRequest);
      expect(event2.value).toEqual(
        expect.objectContaining({
          name: 'tool2',
          args: { arg2: 'val2' },
          isClientInitiated: false,
        }),
      );
      // When the model omits an id, Turn synthesizes one of the form
      // `${name}-${timestamp}-${random hex}`.
      expect(event2.value.callId).toEqual(
        expect.stringMatching(/^tool2-\d{13}-\w{10,}$/),
      );
      expect(turn.pendingToolCalls[1]).toEqual(event2.value);
      expect(turn.getDebugResponses().length).toBe(1);
    });

    it('should yield UserCancelled event if signal is aborted', async () => {
      const abortController = new AbortController();
      // Abort mid-stream: the first chunk is processed, then the signal is
      // tripped before the second chunk is consumed.
      const mockResponseStream = (async function* () {
        yield {
          candidates: [{ content: { parts: [{ text: 'First part' }] } }],
        } as unknown as GenerateContentResponse;
        abortController.abort();
        yield {
          candidates: [
            {
              content: {
                parts: [{ text: 'Second part - should not be processed' }],
              },
            },
          ],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);

      const events = [];
      const reqParts: Part[] = [{ text: 'Test abort' }];
      for await (const event of turn.run(reqParts, abortController.signal)) {
        events.push(event);
      }
      expect(events).toEqual([
        { type: GeminiEventType.Content, value: 'First part' },
        { type: GeminiEventType.UserCancelled },
      ]);
      expect(turn.getDebugResponses().length).toBe(1);
    });

    it('should yield Error event and report if sendMessageStream throws', async () => {
      const error = new Error('API Error');
      mockSendMessageStream.mockRejectedValue(error);
      const reqParts: Part[] = [{ text: 'Trigger error' }];
      const historyContent: Content[] = [
        { role: 'model', parts: [{ text: 'Previous history' }] },
      ];
      mockGetHistory.mockReturnValue(historyContent);
      mockMaybeIncludeSchemaDepthContext.mockResolvedValue(undefined);
      const events = [];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      expect(events.length).toBe(1);
      const errorEvent = events[0] as ServerGeminiErrorEvent;
      expect(errorEvent.type).toBe(GeminiEventType.Error);
      expect(errorEvent.value).toEqual({
        error: { message: 'API Error', status: undefined },
      });
      expect(turn.getDebugResponses().length).toBe(0);
      // The failure is reported with chat history plus the failing request.
      expect(reportError).toHaveBeenCalledWith(
        error,
        'Error when talking to Gemini API',
        [...historyContent, reqParts],
        'Turn.run-sendMessageStream',
      );
    });

    it('should handle function calls with undefined name or args', async () => {
      const mockResponseStream = (async function* () {
        yield {
          functionCalls: [
            { id: 'fc1', name: undefined, args: { arg1: 'val1' } },
            { id: 'fc2', name: 'tool2', args: undefined },
            { id: 'fc3', name: undefined, args: undefined },
          ],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);
      const events = [];
      const reqParts: Part[] = [{ text: 'Test undefined tool parts' }];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      // Missing names fall back to 'undefined_tool_name'; missing args to {}.
      expect(events.length).toBe(3);
      const event1 = events[0] as ServerGeminiToolCallRequestEvent;
      expect(event1.type).toBe(GeminiEventType.ToolCallRequest);
      expect(event1.value).toEqual(
        expect.objectContaining({
          callId: 'fc1',
          name: 'undefined_tool_name',
          args: { arg1: 'val1' },
          isClientInitiated: false,
        }),
      );
      expect(turn.pendingToolCalls[0]).toEqual(event1.value);

      const event2 = events[1] as ServerGeminiToolCallRequestEvent;
      expect(event2.type).toBe(GeminiEventType.ToolCallRequest);
      expect(event2.value).toEqual(
        expect.objectContaining({
          callId: 'fc2',
          name: 'tool2',
          args: {},
          isClientInitiated: false,
        }),
      );
      expect(turn.pendingToolCalls[1]).toEqual(event2.value);

      const event3 = events[2] as ServerGeminiToolCallRequestEvent;
      expect(event3.type).toBe(GeminiEventType.ToolCallRequest);
      expect(event3.value).toEqual(
        expect.objectContaining({
          callId: 'fc3',
          name: 'undefined_tool_name',
          args: {},
          isClientInitiated: false,
        }),
      );
      expect(turn.pendingToolCalls[2]).toEqual(event3.value);
      expect(turn.getDebugResponses().length).toBe(1);
    });

    it('should yield finished event when response has finish reason', async () => {
      const mockResponseStream = (async function* () {
        yield {
          candidates: [
            {
              content: { parts: [{ text: 'Partial response' }] },
              finishReason: 'STOP',
            },
          ],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);

      const events = [];
      const reqParts: Part[] = [{ text: 'Test finish reason' }];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      expect(events).toEqual([
        { type: GeminiEventType.Content, value: 'Partial response' },
        { type: GeminiEventType.Finished, value: 'STOP' },
      ]);
    });

    it('should yield finished event for MAX_TOKENS finish reason', async () => {
      const mockResponseStream = (async function* () {
        yield {
          candidates: [
            {
              content: {
                parts: [
                  { text: 'This is a long response that was cut off...' },
                ],
              },
              finishReason: 'MAX_TOKENS',
            },
          ],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);

      const events = [];
      const reqParts: Part[] = [{ text: 'Generate long text' }];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      expect(events).toEqual([
        {
          type: GeminiEventType.Content,
          value: 'This is a long response that was cut off...',
        },
        { type: GeminiEventType.Finished, value: 'MAX_TOKENS' },
      ]);
    });

    it('should yield finished event for SAFETY finish reason', async () => {
      const mockResponseStream = (async function* () {
        yield {
          candidates: [
            {
              content: { parts: [{ text: 'Content blocked' }] },
              finishReason: 'SAFETY',
            },
          ],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);

      const events = [];
      const reqParts: Part[] = [{ text: 'Test safety' }];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      expect(events).toEqual([
        { type: GeminiEventType.Content, value: 'Content blocked' },
        { type: GeminiEventType.Finished, value: 'SAFETY' },
      ]);
    });

    it('should not yield finished event when there is no finish reason', async () => {
      const mockResponseStream = (async function* () {
        yield {
          candidates: [
            {
              content: { parts: [{ text: 'Response without finish reason' }] },
              // No finishReason property
            },
          ],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);

      const events = [];
      const reqParts: Part[] = [{ text: 'Test no finish reason' }];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      expect(events).toEqual([
        {
          type: GeminiEventType.Content,
          value: 'Response without finish reason',
        },
      ]);
      // No Finished event should be emitted
    });

    it('should handle multiple responses with different finish reasons', async () => {
      const mockResponseStream = (async function* () {
        yield {
          candidates: [
            {
              content: { parts: [{ text: 'First part' }] },
              // No finish reason on first response
            },
          ],
        } as unknown as GenerateContentResponse;
        yield {
          candidates: [
            {
              content: { parts: [{ text: 'Second part' }] },
              finishReason: 'OTHER',
            },
          ],
        } as unknown as GenerateContentResponse;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);

      const events = [];
      const reqParts: Part[] = [{ text: 'Test multiple responses' }];
      for await (const event of turn.run(
        reqParts,
        new AbortController().signal,
      )) {
        events.push(event);
      }

      expect(events).toEqual([
        { type: GeminiEventType.Content, value: 'First part' },
        { type: GeminiEventType.Content, value: 'Second part' },
        { type: GeminiEventType.Finished, value: 'OTHER' },
      ]);
    });
  });

  describe('getDebugResponses', () => {
    it('should return collected debug responses', async () => {
      const resp1 = {
        candidates: [{ content: { parts: [{ text: 'Debug 1' }] } }],
      } as unknown as GenerateContentResponse;
      const resp2 = {
        functionCalls: [{ name: 'debugTool' }],
      } as unknown as GenerateContentResponse;
      const mockResponseStream = (async function* () {
        yield resp1;
        yield resp2;
      })();
      mockSendMessageStream.mockResolvedValue(mockResponseStream);
      const reqParts: Part[] = [{ text: 'Hi' }];
      for await (const _ of turn.run(reqParts, new AbortController().signal)) {
        // consume stream
      }
      expect(turn.getDebugResponses()).toEqual([resp1, resp2]);
    });
  });
});
projects/ui/qwen-code/packages/core/src/core/turn.ts ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import {
8
+ PartListUnion,
9
+ GenerateContentResponse,
10
+ FunctionCall,
11
+ FunctionDeclaration,
12
+ FinishReason,
13
+ } from '@google/genai';
14
+ import {
15
+ ToolCallConfirmationDetails,
16
+ ToolResult,
17
+ ToolResultDisplay,
18
+ } from '../tools/tools.js';
19
+ import { ToolErrorType } from '../tools/tool-error.js';
20
+ import { getResponseText } from '../utils/generateContentResponseUtilities.js';
21
+ import { reportError } from '../utils/errorReporting.js';
22
+ import {
23
+ getErrorMessage,
24
+ UnauthorizedError,
25
+ toFriendlyError,
26
+ } from '../utils/errors.js';
27
+ import { GeminiChat } from './geminiChat.js';
28
+
29
// Define a structure for tools passed to the server
export interface ServerTool {
  name: string;
  schema: FunctionDeclaration;
  // The execute method signature might differ slightly or be wrapped
  execute(
    params: Record<string, unknown>,
    signal?: AbortSignal,
  ): Promise<ToolResult>;
  // Resolves with confirmation details when the call requires user approval,
  // or false when it may run without confirmation.
  shouldConfirmExecute(
    params: Record<string, unknown>,
    abortSignal: AbortSignal,
  ): Promise<ToolCallConfirmationDetails | false>;
}

/** Discriminant tags for every event the server-side stream can emit. */
export enum GeminiEventType {
  Content = 'content',
  ToolCallRequest = 'tool_call_request',
  ToolCallResponse = 'tool_call_response',
  ToolCallConfirmation = 'tool_call_confirmation',
  UserCancelled = 'user_cancelled',
  Error = 'error',
  ChatCompressed = 'chat_compressed',
  Thought = 'thought',
  MaxSessionTurns = 'max_session_turns',
  SessionTokenLimitExceeded = 'session_token_limit_exceeded',
  Finished = 'finished',
  LoopDetected = 'loop_detected',
}

/** Error shape surfaced to stream consumers; status is a numeric code when available. */
export interface StructuredError {
  message: string;
  status?: number;
}

/** Payload of a {@link GeminiEventType.Error} event. */
export interface GeminiErrorEventValue {
  error: StructuredError;
}

/** Payload of a {@link GeminiEventType.SessionTokenLimitExceeded} event. */
export interface SessionTokenLimitExceededValue {
  currentTokens: number;
  limit: number;
  message: string;
}

/** A request from the model to execute a tool. */
export interface ToolCallRequestInfo {
  // Unique id for this call; synthesized when the model does not supply one.
  callId: string;
  name: string;
  args: Record<string, unknown>;
  // True when the call was initiated by the client rather than the model.
  isClientInitiated: boolean;
  // Id of the prompt whose turn produced this request.
  prompt_id: string;
}

/** The outcome of executing a requested tool call. */
export interface ToolCallResponseInfo {
  callId: string;
  responseParts: PartListUnion;
  resultDisplay: ToolResultDisplay | undefined;
  error: Error | undefined;
  errorType: ToolErrorType | undefined;
}

/** A tool call request paired with the confirmation details it requires. */
export interface ServerToolCallConfirmationDetails {
  request: ToolCallRequestInfo;
  details: ToolCallConfirmationDetails;
}

/** A model "thought", split into its bold subject and remaining description. */
export type ThoughtSummary = {
  subject: string;
  description: string;
};

export type ServerGeminiContentEvent = {
  type: GeminiEventType.Content;
  value: string;
};

export type ServerGeminiThoughtEvent = {
  type: GeminiEventType.Thought;
  value: ThoughtSummary;
};

export type ServerGeminiToolCallRequestEvent = {
  type: GeminiEventType.ToolCallRequest;
  value: ToolCallRequestInfo;
};

export type ServerGeminiToolCallResponseEvent = {
  type: GeminiEventType.ToolCallResponse;
  value: ToolCallResponseInfo;
};

export type ServerGeminiToolCallConfirmationEvent = {
  type: GeminiEventType.ToolCallConfirmation;
  value: ServerToolCallConfirmationDetails;
};

export type ServerGeminiUserCancelledEvent = {
  type: GeminiEventType.UserCancelled;
};

export type ServerGeminiErrorEvent = {
  type: GeminiEventType.Error;
  value: GeminiErrorEventValue;
};

/** Token counts before and after a chat history compression. */
export interface ChatCompressionInfo {
  originalTokenCount: number;
  newTokenCount: number;
}

export type ServerGeminiChatCompressedEvent = {
  type: GeminiEventType.ChatCompressed;
  value: ChatCompressionInfo | null;
};

export type ServerGeminiMaxSessionTurnsEvent = {
  type: GeminiEventType.MaxSessionTurns;
};

export type ServerGeminiSessionTokenLimitExceededEvent = {
  type: GeminiEventType.SessionTokenLimitExceeded;
  value: SessionTokenLimitExceededValue;
};

export type ServerGeminiFinishedEvent = {
  type: GeminiEventType.Finished;
  value: FinishReason;
};

export type ServerGeminiLoopDetectedEvent = {
  type: GeminiEventType.LoopDetected;
};

// The original union type, now composed of the individual types
export type ServerGeminiStreamEvent =
  | ServerGeminiContentEvent
  | ServerGeminiToolCallRequestEvent
  | ServerGeminiToolCallResponseEvent
  | ServerGeminiToolCallConfirmationEvent
  | ServerGeminiUserCancelledEvent
  | ServerGeminiErrorEvent
  | ServerGeminiChatCompressedEvent
  | ServerGeminiThoughtEvent
  | ServerGeminiMaxSessionTurnsEvent
  | ServerGeminiSessionTokenLimitExceededEvent
  | ServerGeminiFinishedEvent
  | ServerGeminiLoopDetectedEvent;
176
+
177
// A turn manages the agentic loop turn within the server context.
export class Turn {
  // Tool call requests seen so far this turn, in arrival order.
  readonly pendingToolCalls: ToolCallRequestInfo[];
  // Every raw response chunk processed this turn, for debugging.
  private debugResponses: GenerateContentResponse[];
  // Last finish reason observed from the stream, if any.
  finishReason: FinishReason | undefined;

  constructor(
    private readonly chat: GeminiChat,
    private readonly prompt_id: string,
  ) {
    this.pendingToolCalls = [];
    this.debugResponses = [];
    this.finishReason = undefined;
  }
  // The run method yields simpler events suitable for server logic
  /**
   * Sends `req` to the chat as a streaming message and translates each raw
   * response chunk into `ServerGeminiStreamEvent`s: thoughts, text content,
   * tool-call requests, finish reasons, user cancellation, and errors.
   *
   * Aborting `signal` mid-stream yields a UserCancelled event and stops.
   * UnauthorizedError is rethrown; any other failure is reported via
   * `reportError` and surfaced as a single Error event.
   */
  async *run(
    req: PartListUnion,
    signal: AbortSignal,
  ): AsyncGenerator<ServerGeminiStreamEvent> {
    try {
      const responseStream = await this.chat.sendMessageStream(
        {
          message: req,
          config: {
            abortSignal: signal,
          },
        },
        this.prompt_id,
      );

      for await (const resp of responseStream) {
        if (signal?.aborted) {
          yield { type: GeminiEventType.UserCancelled };
          // Do not add resp to debugResponses if aborted before processing
          return;
        }
        this.debugResponses.push(resp);

        const thoughtPart = resp.candidates?.[0]?.content?.parts?.[0];
        if (thoughtPart?.thought) {
          // Thought always has a bold "subject" part enclosed in double asterisks
          // (e.g., **Subject**). The rest of the string is considered the description.
          const rawText = thoughtPart.text ?? '';
          const subjectStringMatches = rawText.match(/\*\*(.*?)\*\*/s);
          const subject = subjectStringMatches
            ? subjectStringMatches[1].trim()
            : '';
          const description = rawText.replace(/\*\*(.*?)\*\*/s, '').trim();
          const thought: ThoughtSummary = {
            subject,
            description,
          };

          yield {
            type: GeminiEventType.Thought,
            value: thought,
          };
          // A thought chunk carries no content/function calls to process.
          continue;
        }

        const text = getResponseText(resp);
        if (text) {
          yield { type: GeminiEventType.Content, value: text };
        }

        // Handle function calls (requesting tool execution)
        const functionCalls = resp.functionCalls ?? [];
        for (const fnCall of functionCalls) {
          const event = this.handlePendingFunctionCall(fnCall);
          if (event) {
            yield event;
          }
        }

        // Check if response was truncated or stopped for various reasons
        const finishReason = resp.candidates?.[0]?.finishReason;

        if (finishReason) {
          this.finishReason = finishReason;
          yield {
            type: GeminiEventType.Finished,
            value: finishReason as FinishReason,
          };
        }
      }
    } catch (e) {
      const error = toFriendlyError(e);
      if (error instanceof UnauthorizedError) {
        // Auth failures must propagate to the caller, not become stream events.
        throw error;
      }
      if (signal.aborted) {
        yield { type: GeminiEventType.UserCancelled };
        // Regular cancellation error, fail gracefully.
        return;
      }

      // Report with the curated chat history plus the request that failed.
      const contextForReport = [...this.chat.getHistory(/*curated*/ true), req];
      await reportError(
        error,
        'Error when talking to Gemini API',
        contextForReport,
        'Turn.run-sendMessageStream',
      );
      // Extract a numeric status code from the error if one is present.
      const status =
        typeof error === 'object' &&
        error !== null &&
        'status' in error &&
        typeof (error as { status: unknown }).status === 'number'
          ? (error as { status: number }).status
          : undefined;
      const structuredError: StructuredError = {
        message: getErrorMessage(error),
        status,
      };
      await this.chat.maybeIncludeSchemaDepthContext(structuredError);
      yield { type: GeminiEventType.Error, value: { error: structuredError } };
      return;
    }
  }

  /**
   * Records a model-requested function call and converts it into a
   * ToolCallRequest event. Missing ids are synthesized from the tool name,
   * a timestamp, and a random hex suffix; missing names fall back to
   * 'undefined_tool_name' and missing args to an empty object.
   */
  private handlePendingFunctionCall(
    fnCall: FunctionCall,
  ): ServerGeminiStreamEvent | null {
    const callId =
      fnCall.id ??
      `${fnCall.name}-${Date.now()}-${Math.random().toString(16).slice(2)}`;
    const name = fnCall.name || 'undefined_tool_name';
    const args = (fnCall.args || {}) as Record<string, unknown>;

    const toolCallRequest: ToolCallRequestInfo = {
      callId,
      name,
      args,
      isClientInitiated: false,
      prompt_id: this.prompt_id,
    };

    this.pendingToolCalls.push(toolCallRequest);

    // Yield a request for the tool call, not the pending/confirming status
    return { type: GeminiEventType.ToolCallRequest, value: toolCallRequest };
  }

  /** Returns the raw response chunks collected during this turn. */
  getDebugResponses(): GenerateContentResponse[] {
    return this.debugResponses;
  }
}
projects/ui/qwen-code/packages/core/src/ide/constants.ts ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
// Display name of the companion IDE extension used for IDE integration.
export const QWEN_CODE_COMPANION_EXTENSION_NAME = 'Qwen Code Companion';
projects/ui/qwen-code/packages/core/src/ide/detect-ide.test.ts ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, afterEach, vi } from 'vitest';
8
+ import { detectIde, DetectedIde } from './detect-ide.js';
9
+
10
// Unit tests for detectIde: each case stubs only the environment variables
// relevant to one IDE and checks the detection result.
describe('detectIde', () => {
  afterEach(() => {
    vi.unstubAllEnvs();
  });

  it.each([
    {
      env: {},
      expected: DetectedIde.VSCode,
    },
    {
      env: { __COG_BASHRC_SOURCED: '1' },
      expected: DetectedIde.Devin,
    },
    {
      env: { REPLIT_USER: 'test' },
      expected: DetectedIde.Replit,
    },
    {
      env: { CURSOR_TRACE_ID: 'test' },
      expected: DetectedIde.Cursor,
    },
    {
      env: { CODESPACES: 'true' },
      expected: DetectedIde.Codespaces,
    },
    {
      env: { EDITOR_IN_CLOUD_SHELL: 'true' },
      expected: DetectedIde.CloudShell,
    },
    {
      env: { CLOUD_SHELL: 'true' },
      expected: DetectedIde.CloudShell,
    },
    {
      env: { TERM_PRODUCT: 'Trae' },
      expected: DetectedIde.Trae,
    },
    {
      env: { FIREBASE_DEPLOY_AGENT: 'true' },
      expected: DetectedIde.FirebaseStudio,
    },
    {
      env: { MONOSPACE_ENV: 'true' },
      expected: DetectedIde.FirebaseStudio,
    },
  ])('detects the IDE for $expected', ({ env, expected }) => {
    // Clear all environment variables first
    vi.unstubAllEnvs();

    // Set TERM_PROGRAM to vscode (required for all IDE detection)
    vi.stubEnv('TERM_PROGRAM', 'vscode');

    // Explicitly stub all environment variables that detectIde() checks to undefined
    // This ensures no real environment variables interfere with the tests
    vi.stubEnv('__COG_BASHRC_SOURCED', undefined);
    vi.stubEnv('REPLIT_USER', undefined);
    vi.stubEnv('CURSOR_TRACE_ID', undefined);
    vi.stubEnv('CODESPACES', undefined);
    vi.stubEnv('EDITOR_IN_CLOUD_SHELL', undefined);
    vi.stubEnv('CLOUD_SHELL', undefined);
    vi.stubEnv('TERM_PRODUCT', undefined);
    vi.stubEnv('FIREBASE_DEPLOY_AGENT', undefined);
    vi.stubEnv('MONOSPACE_ENV', undefined);

    // Set only the specific environment variables for this test case
    for (const [key, value] of Object.entries(env)) {
      vi.stubEnv(key, value);
    }

    expect(detectIde()).toBe(expected);
  });

  it('returns undefined for non-vscode', () => {
    // Clear all environment variables first
    vi.unstubAllEnvs();

    // Set TERM_PROGRAM to something other than vscode
    vi.stubEnv('TERM_PROGRAM', 'definitely-not-vscode');

    expect(detectIde()).toBeUndefined();
  });
});
projects/ui/qwen-code/packages/core/src/ide/detect-ide.ts ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
/**
 * IDE / editor environments that this module can recognize.
 *
 * Each member's value is the stable string identifier for that IDE;
 * user-facing display names are provided separately by getIdeInfo().
 */
export enum DetectedIde {
  Devin = 'devin',
  Replit = 'replit',
  VSCode = 'vscode',
  Cursor = 'cursor',
  CloudShell = 'cloudshell',
  Codespaces = 'codespaces',
  FirebaseStudio = 'firebasestudio',
  Trae = 'trae',
}
17
+
18
/**
 * Display metadata for a detected IDE.
 */
export interface IdeInfo {
  /** Human-readable name shown to the user, e.g. 'VS Code' (see getIdeInfo). */
  displayName: string;
}
21
+
22
+ export function getIdeInfo(ide: DetectedIde): IdeInfo {
23
+ switch (ide) {
24
+ case DetectedIde.Devin:
25
+ return {
26
+ displayName: 'Devin',
27
+ };
28
+ case DetectedIde.Replit:
29
+ return {
30
+ displayName: 'Replit',
31
+ };
32
+ case DetectedIde.VSCode:
33
+ return {
34
+ displayName: 'VS Code',
35
+ };
36
+ case DetectedIde.Cursor:
37
+ return {
38
+ displayName: 'Cursor',
39
+ };
40
+ case DetectedIde.CloudShell:
41
+ return {
42
+ displayName: 'Cloud Shell',
43
+ };
44
+ case DetectedIde.Codespaces:
45
+ return {
46
+ displayName: 'GitHub Codespaces',
47
+ };
48
+ case DetectedIde.FirebaseStudio:
49
+ return {
50
+ displayName: 'Firebase Studio',
51
+ };
52
+ case DetectedIde.Trae:
53
+ return {
54
+ displayName: 'Trae',
55
+ };
56
+ default: {
57
+ // This ensures that if a new IDE is added to the enum, we get a compile-time error.
58
+ const exhaustiveCheck: never = ide;
59
+ return exhaustiveCheck;
60
+ }
61
+ }
62
+ }
63
+
64
+ export function detectIde(): DetectedIde | undefined {
65
+ // Only VSCode-based integrations are currently supported.
66
+ if (process.env['TERM_PROGRAM'] !== 'vscode') {
67
+ return undefined;
68
+ }
69
+ if (process.env['__COG_BASHRC_SOURCED']) {
70
+ return DetectedIde.Devin;
71
+ }
72
+ if (process.env['REPLIT_USER']) {
73
+ return DetectedIde.Replit;
74
+ }
75
+ if (process.env['CURSOR_TRACE_ID']) {
76
+ return DetectedIde.Cursor;
77
+ }
78
+ if (process.env['CODESPACES']) {
79
+ return DetectedIde.Codespaces;
80
+ }
81
+ if (process.env['EDITOR_IN_CLOUD_SHELL'] || process.env['CLOUD_SHELL']) {
82
+ return DetectedIde.CloudShell;
83
+ }
84
+ if (process.env['TERM_PRODUCT'] === 'Trae') {
85
+ return DetectedIde.Trae;
86
+ }
87
+ if (process.env['FIREBASE_DEPLOY_AGENT'] || process.env['MONOSPACE_ENV']) {
88
+ return DetectedIde.FirebaseStudio;
89
+ }
90
+ return DetectedIde.VSCode;
91
+ }