ADAPT-Chase commited on
Commit
1c71d54
·
verified ·
1 Parent(s): b62a639

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. projects/ui/qwen-code/packages/core/src/__mocks__/fs/promises.ts +48 -0
  2. projects/ui/qwen-code/packages/core/src/core/__snapshots__/prompts.test.ts.snap +0 -0
  3. projects/ui/qwen-code/packages/core/src/core/__tests__/openaiTimeoutHandling.test.ts +350 -0
  4. projects/ui/qwen-code/packages/core/src/core/__tests__/orphanedToolCallsTest.ts +125 -0
  5. projects/ui/qwen-code/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts +494 -0
  6. projects/ui/qwen-code/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +860 -0
  7. projects/ui/qwen-code/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts +318 -0
  8. projects/ui/qwen-code/packages/core/src/telemetry/qwen-logger/event-types.ts +84 -0
  9. projects/ui/qwen-code/packages/core/src/telemetry/qwen-logger/qwen-logger.test.ts +407 -0
  10. projects/ui/qwen-code/packages/core/src/telemetry/qwen-logger/qwen-logger.ts +718 -0
  11. projects/ui/qwen-code/packages/core/src/tools/__snapshots__/shell.test.ts.snap +73 -0
  12. projects/ui/qwen-code/packages/core/src/utils/filesearch/crawlCache.test.ts +123 -0
  13. projects/ui/qwen-code/packages/core/src/utils/filesearch/crawlCache.ts +69 -0
  14. projects/ui/qwen-code/packages/core/src/utils/filesearch/crawler.test.ts +573 -0
  15. projects/ui/qwen-code/packages/core/src/utils/filesearch/crawler.ts +85 -0
  16. projects/ui/qwen-code/packages/core/src/utils/filesearch/fileSearch.test.ts +662 -0
  17. projects/ui/qwen-code/packages/core/src/utils/filesearch/fileSearch.ts +237 -0
  18. projects/ui/qwen-code/packages/core/src/utils/filesearch/ignore.test.ts +160 -0
  19. projects/ui/qwen-code/packages/core/src/utils/filesearch/ignore.ts +131 -0
  20. projects/ui/qwen-code/packages/core/src/utils/filesearch/result-cache.test.ts +55 -0
projects/ui/qwen-code/packages/core/src/__mocks__/fs/promises.ts ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { vi } from 'vitest';
8
+ import * as actualFsPromises from 'node:fs/promises';
9
+
10
+ const readFileMock = vi.fn();
11
+
12
+ // Export a control object so tests can access and manipulate the mock
13
+ export const mockControl = {
14
+ mockReadFile: readFileMock,
15
+ };
16
+
17
+ // Export all other functions from the actual fs/promises module
18
+ export const {
19
+ access,
20
+ appendFile,
21
+ chmod,
22
+ chown,
23
+ copyFile,
24
+ cp,
25
+ lchmod,
26
+ lchown,
27
+ link,
28
+ lstat,
29
+ mkdir,
30
+ open,
31
+ opendir,
32
+ readdir,
33
+ readlink,
34
+ realpath,
35
+ rename,
36
+ rmdir,
37
+ rm,
38
+ stat,
39
+ symlink,
40
+ truncate,
41
+ unlink,
42
+ utimes,
43
+ watch,
44
+ writeFile,
45
+ } = actualFsPromises;
46
+
47
+ // Override readFile with our mock
48
+ export const readFile = readFileMock;
projects/ui/qwen-code/packages/core/src/core/__snapshots__/prompts.test.ts.snap ADDED
The diff for this file is too large to render. See raw diff
 
projects/ui/qwen-code/packages/core/src/core/__tests__/openaiTimeoutHandling.test.ts ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
8
+ import { OpenAIContentGenerator } from '../openaiContentGenerator.js';
9
+ import { Config } from '../../config/config.js';
10
+ import { AuthType } from '../contentGenerator.js';
11
+ import OpenAI from 'openai';
12
+
13
+ // Mock OpenAI
14
+ vi.mock('openai');
15
+
16
+ // Mock logger modules
17
+ vi.mock('../../telemetry/loggers.js', () => ({
18
+ logApiResponse: vi.fn(),
19
+ logApiError: vi.fn(),
20
+ }));
21
+
22
+ vi.mock('../../utils/openaiLogger.js', () => ({
23
+ openaiLogger: {
24
+ logInteraction: vi.fn(),
25
+ },
26
+ }));
27
+
28
+ describe('OpenAIContentGenerator Timeout Handling', () => {
29
+ let generator: OpenAIContentGenerator;
30
+ let mockConfig: Config;
31
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
32
+ let mockOpenAIClient: any;
33
+
34
+ beforeEach(() => {
35
+ // Reset mocks
36
+ vi.clearAllMocks();
37
+
38
+ // Mock environment variables
39
+ vi.stubEnv('OPENAI_BASE_URL', '');
40
+
41
+ // Mock config
42
+ mockConfig = {
43
+ getContentGeneratorConfig: vi.fn().mockReturnValue({
44
+ authType: 'openai',
45
+ }),
46
+ getCliVersion: vi.fn().mockReturnValue('1.0.0'),
47
+ } as unknown as Config;
48
+
49
+ // Mock OpenAI client
50
+ mockOpenAIClient = {
51
+ chat: {
52
+ completions: {
53
+ create: vi.fn(),
54
+ },
55
+ },
56
+ };
57
+
58
+ vi.mocked(OpenAI).mockImplementation(() => mockOpenAIClient);
59
+
60
+ // Create generator instance
61
+ const contentGeneratorConfig = {
62
+ model: 'gpt-4',
63
+ apiKey: 'test-key',
64
+ authType: AuthType.USE_OPENAI,
65
+ };
66
+ generator = new OpenAIContentGenerator(contentGeneratorConfig, mockConfig);
67
+ });
68
+
69
+ afterEach(() => {
70
+ vi.restoreAllMocks();
71
+ });
72
+
73
+ describe('timeout error identification through actual requests', () => {
74
+ it('should handle various timeout error formats correctly', async () => {
75
+ const timeoutErrors = [
76
+ new Error('Request timeout'),
77
+ new Error('Connection timed out'),
78
+ new Error('ETIMEDOUT'),
79
+ Object.assign(new Error('Network error'), { code: 'ETIMEDOUT' }),
80
+ Object.assign(new Error('Socket error'), { code: 'ESOCKETTIMEDOUT' }),
81
+ Object.assign(new Error('API error'), { type: 'timeout' }),
82
+ new Error('request timed out'),
83
+ new Error('deadline exceeded'),
84
+ ];
85
+
86
+ const request = {
87
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello' }] }],
88
+ model: 'gpt-4',
89
+ };
90
+
91
+ for (const error of timeoutErrors) {
92
+ mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error);
93
+
94
+ try {
95
+ await generator.generateContent(request, 'test-prompt-id');
96
+ } catch (thrownError: unknown) {
97
+ // Should contain timeout-specific messaging and troubleshooting tips
98
+ const errorMessage =
99
+ thrownError instanceof Error
100
+ ? thrownError.message
101
+ : String(thrownError);
102
+ expect(errorMessage).toMatch(
103
+ /timeout after \d+s|Troubleshooting tips:/,
104
+ );
105
+ }
106
+ }
107
+ });
108
+
109
+ it('should handle non-timeout errors without timeout messaging', async () => {
110
+ const nonTimeoutErrors = [
111
+ new Error('Invalid API key'),
112
+ new Error('Rate limit exceeded'),
113
+ new Error('Model not found'),
114
+ Object.assign(new Error('Auth error'), { code: 'INVALID_REQUEST' }),
115
+ Object.assign(new Error('API error'), { type: 'authentication_error' }),
116
+ ];
117
+
118
+ const request = {
119
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello' }] }],
120
+ model: 'gpt-4',
121
+ };
122
+
123
+ for (const error of nonTimeoutErrors) {
124
+ mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error);
125
+
126
+ try {
127
+ await generator.generateContent(request, 'test-prompt-id');
128
+ } catch (thrownError: unknown) {
129
+ // Should NOT contain timeout-specific messaging
130
+ const errorMessage =
131
+ thrownError instanceof Error
132
+ ? thrownError.message
133
+ : String(thrownError);
134
+ expect(errorMessage).not.toMatch(/timeout after \d+s/);
135
+ expect(errorMessage).not.toMatch(/Troubleshooting tips:/);
136
+ // Should preserve the original error message
137
+ expect(errorMessage).toMatch(new RegExp(error.message));
138
+ }
139
+ }
140
+ });
141
+ });
142
+
143
+ describe('generateContent timeout handling', () => {
144
+ it('should handle timeout errors with helpful message', async () => {
145
+ // Mock timeout error
146
+ const timeoutError = new Error('Request timeout');
147
+ mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);
148
+
149
+ const request = {
150
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello' }] }],
151
+ model: 'gpt-4',
152
+ };
153
+
154
+ await expect(
155
+ generator.generateContent(request, 'test-prompt-id'),
156
+ ).rejects.toThrow(
157
+ /Request timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
158
+ );
159
+ });
160
+
161
+ it('should handle non-timeout errors normally', async () => {
162
+ // Mock non-timeout error
163
+ const apiError = new Error('Invalid API key');
164
+ mockOpenAIClient.chat.completions.create.mockRejectedValue(apiError);
165
+
166
+ const request = {
167
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello' }] }],
168
+ model: 'gpt-4',
169
+ };
170
+
171
+ await expect(
172
+ generator.generateContent(request, 'test-prompt-id'),
173
+ ).rejects.toThrow('Invalid API key');
174
+ });
175
+
176
+ it('should include troubleshooting tips for timeout errors', async () => {
177
+ const timeoutError = new Error('Connection timed out');
178
+ mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);
179
+
180
+ const request = {
181
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello' }] }],
182
+ model: 'gpt-4',
183
+ };
184
+
185
+ try {
186
+ await generator.generateContent(request, 'test-prompt-id');
187
+ } catch (error: unknown) {
188
+ const errorMessage =
189
+ error instanceof Error ? error.message : String(error);
190
+ expect(errorMessage).toContain('Troubleshooting tips:');
191
+ expect(errorMessage).toContain('Reduce input length or complexity');
192
+ expect(errorMessage).toContain('Increase timeout in config');
193
+ expect(errorMessage).toContain('Check network connectivity');
194
+ expect(errorMessage).toContain('Consider using streaming mode');
195
+ }
196
+ });
197
+ });
198
+
199
+ describe('generateContentStream timeout handling', () => {
200
+ it('should handle streaming timeout errors', async () => {
201
+ const timeoutError = new Error('Streaming timeout');
202
+ mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);
203
+
204
+ const request = {
205
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello' }] }],
206
+ model: 'gpt-4',
207
+ };
208
+
209
+ await expect(
210
+ generator.generateContentStream(request, 'test-prompt-id'),
211
+ ).rejects.toThrow(
212
+ /Streaming setup timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
213
+ );
214
+ });
215
+
216
+ it('should include streaming-specific troubleshooting tips', async () => {
217
+ const timeoutError = new Error('request timed out');
218
+ mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);
219
+
220
+ const request = {
221
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello' }] }],
222
+ model: 'gpt-4',
223
+ };
224
+
225
+ try {
226
+ await generator.generateContentStream(request, 'test-prompt-id');
227
+ } catch (error: unknown) {
228
+ const errorMessage =
229
+ error instanceof Error ? error.message : String(error);
230
+ expect(errorMessage).toContain(
231
+ 'Streaming setup timeout troubleshooting:',
232
+ );
233
+ expect(errorMessage).toContain(
234
+ 'Check network connectivity and firewall settings',
235
+ );
236
+ expect(errorMessage).toContain('Consider using non-streaming mode');
237
+ }
238
+ });
239
+ });
240
+
241
+ describe('timeout configuration', () => {
242
+ it('should use default timeout configuration', () => {
243
+ const contentGeneratorConfig = {
244
+ model: 'gpt-4',
245
+ apiKey: 'test-key',
246
+ authType: AuthType.USE_OPENAI,
247
+ baseUrl: 'http://localhost:8080',
248
+ };
249
+ new OpenAIContentGenerator(contentGeneratorConfig, mockConfig);
250
+
251
+ // Verify OpenAI client was created with timeout config
252
+ expect(OpenAI).toHaveBeenCalledWith({
253
+ apiKey: 'test-key',
254
+ baseURL: 'http://localhost:8080',
255
+ timeout: 120000,
256
+ maxRetries: 3,
257
+ defaultHeaders: {
258
+ 'User-Agent': expect.stringMatching(/^QwenCode/),
259
+ },
260
+ });
261
+ });
262
+
263
+ it('should use custom timeout from config', () => {
264
+ const customConfig = {
265
+ getContentGeneratorConfig: vi.fn().mockReturnValue({}),
266
+ getCliVersion: vi.fn().mockReturnValue('1.0.0'),
267
+ } as unknown as Config;
268
+
269
+ const contentGeneratorConfig = {
270
+ model: 'gpt-4',
271
+ apiKey: 'test-key',
272
+ baseUrl: 'http://localhost:8080',
273
+ authType: AuthType.USE_OPENAI,
274
+ timeout: 300000,
275
+ maxRetries: 5,
276
+ };
277
+ new OpenAIContentGenerator(contentGeneratorConfig, customConfig);
278
+
279
+ expect(OpenAI).toHaveBeenCalledWith({
280
+ apiKey: 'test-key',
281
+ baseURL: 'http://localhost:8080',
282
+ timeout: 300000,
283
+ maxRetries: 5,
284
+ defaultHeaders: {
285
+ 'User-Agent': expect.stringMatching(/^QwenCode/),
286
+ },
287
+ });
288
+ });
289
+
290
+ it('should handle missing timeout config gracefully', () => {
291
+ const noTimeoutConfig = {
292
+ getContentGeneratorConfig: vi.fn().mockReturnValue({}),
293
+ getCliVersion: vi.fn().mockReturnValue('1.0.0'),
294
+ } as unknown as Config;
295
+
296
+ const contentGeneratorConfig = {
297
+ model: 'gpt-4',
298
+ apiKey: 'test-key',
299
+ authType: AuthType.USE_OPENAI,
300
+ baseUrl: 'http://localhost:8080',
301
+ };
302
+ new OpenAIContentGenerator(contentGeneratorConfig, noTimeoutConfig);
303
+
304
+ expect(OpenAI).toHaveBeenCalledWith({
305
+ apiKey: 'test-key',
306
+ baseURL: 'http://localhost:8080',
307
+ timeout: 120000, // default
308
+ maxRetries: 3, // default
309
+ defaultHeaders: {
310
+ 'User-Agent': expect.stringMatching(/^QwenCode/),
311
+ },
312
+ });
313
+ });
314
+ });
315
+
316
+ describe('token estimation on timeout', () => {
317
+ it('should surface a clear timeout error when request times out', async () => {
318
+ const timeoutError = new Error('Request timeout');
319
+ mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);
320
+
321
+ const request = {
322
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello world' }] }],
323
+ model: 'gpt-4',
324
+ };
325
+
326
+ await expect(
327
+ generator.generateContent(request, 'test-prompt-id'),
328
+ ).rejects.toThrow(/Request timeout after \d+s/);
329
+ });
330
+
331
+ it('should fall back to character-based estimation if countTokens fails', async () => {
332
+ const timeoutError = new Error('Request timeout');
333
+ mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);
334
+
335
+ // Mock countTokens to throw error
336
+ const mockCountTokens = vi.spyOn(generator, 'countTokens');
337
+ mockCountTokens.mockRejectedValue(new Error('Count tokens failed'));
338
+
339
+ const request = {
340
+ contents: [{ role: 'user' as const, parts: [{ text: 'Hello world' }] }],
341
+ model: 'gpt-4',
342
+ };
343
+
344
+ // Should not throw due to token counting failure
345
+ await expect(
346
+ generator.generateContent(request, 'test-prompt-id'),
347
+ ).rejects.toThrow(/Request timeout after \d+s/);
348
+ });
349
+ });
350
+ });
projects/ui/qwen-code/packages/core/src/core/__tests__/orphanedToolCallsTest.ts ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ /**
8
+ * Test cases for orphaned tool calls cleanup
9
+ */
10
+
11
+ export const createTestMessages = () => [
12
+ // System message
13
+ {
14
+ role: 'system' as const,
15
+ content: 'You are a helpful assistant.',
16
+ },
17
+ // User message
18
+ {
19
+ role: 'user' as const,
20
+ content: 'Please use a tool to help me.',
21
+ },
22
+ // Assistant message with tool calls (some will be orphaned)
23
+ {
24
+ role: 'assistant' as const,
25
+ content: 'I will help you with that.',
26
+ tool_calls: [
27
+ {
28
+ id: 'call_1',
29
+ type: 'function' as const,
30
+ function: {
31
+ name: 'search_web',
32
+ arguments: '{"query": "test"}',
33
+ },
34
+ },
35
+ {
36
+ id: 'call_2',
37
+ type: 'function' as const,
38
+ function: {
39
+ name: 'calculate',
40
+ arguments: '{"expression": "2+2"}',
41
+ },
42
+ },
43
+ {
44
+ id: 'call_3', // This will be orphaned
45
+ type: 'function' as const,
46
+ function: {
47
+ name: 'send_email',
48
+ arguments: '{"to": "test@example.com"}',
49
+ },
50
+ },
51
+ ],
52
+ },
53
+ // Tool response for call_1
54
+ {
55
+ role: 'tool' as const,
56
+ tool_call_id: 'call_1',
57
+ content: 'Search results: Found relevant information.',
58
+ },
59
+ // Tool response for call_2
60
+ {
61
+ role: 'tool' as const,
62
+ tool_call_id: 'call_2',
63
+ content: 'Calculation result: 4',
64
+ },
65
+ // Note: No tool response for call_3 (this creates the orphaned tool call issue)
66
+
67
+ // User continues conversation
68
+ {
69
+ role: 'user' as const,
70
+ content: 'Thank you, that was helpful.',
71
+ },
72
+ ];
73
+
74
+ export const expectedCleanedMessages = () => [
75
+ // System message (unchanged)
76
+ {
77
+ role: 'system' as const,
78
+ content: 'You are a helpful assistant.',
79
+ },
80
+ // User message (unchanged)
81
+ {
82
+ role: 'user' as const,
83
+ content: 'Please use a tool to help me.',
84
+ },
85
+ // Assistant message with only valid tool calls
86
+ {
87
+ role: 'assistant' as const,
88
+ content: 'I will help you with that.',
89
+ tool_calls: [
90
+ {
91
+ id: 'call_1',
92
+ type: 'function' as const,
93
+ function: {
94
+ name: 'search_web',
95
+ arguments: '{"query": "test"}',
96
+ },
97
+ },
98
+ {
99
+ id: 'call_2',
100
+ type: 'function' as const,
101
+ function: {
102
+ name: 'calculate',
103
+ arguments: '{"expression": "2+2"}',
104
+ },
105
+ },
106
+ // call_3 removed because it has no response
107
+ ],
108
+ },
109
+ // Tool responses (unchanged because they have corresponding calls)
110
+ {
111
+ role: 'tool' as const,
112
+ tool_call_id: 'call_1',
113
+ content: 'Search results: Found relevant information.',
114
+ },
115
+ {
116
+ role: 'tool' as const,
117
+ tool_call_id: 'call_2',
118
+ content: 'Calculation result: 4',
119
+ },
120
+ // User message (unchanged)
121
+ {
122
+ role: 'user' as const,
123
+ content: 'Thank you, that was helpful.',
124
+ },
125
+ ];
projects/ui/qwen-code/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts ADDED
@@ -0,0 +1,494 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import 'vitest';
8
+ import {
9
+ vi,
10
+ describe,
11
+ it,
12
+ expect,
13
+ afterEach,
14
+ beforeAll,
15
+ afterAll,
16
+ } from 'vitest';
17
+ import {
18
+ ClearcutLogger,
19
+ LogEvent,
20
+ LogEventEntry,
21
+ EventNames,
22
+ TEST_ONLY,
23
+ } from './clearcut-logger.js';
24
+ import { ConfigParameters } from '../../config/config.js';
25
+ import * as userAccount from '../../utils/user_account.js';
26
+ import * as userId from '../../utils/user_id.js';
27
+ import { EventMetadataKey } from './event-metadata-key.js';
28
+ import { makeFakeConfig } from '../../test-utils/config.js';
29
+ import { http, HttpResponse } from 'msw';
30
+ import { server } from '../../mocks/msw.js';
31
+ import { makeChatCompressionEvent } from '../types.js';
32
+
33
+ interface CustomMatchers<R = unknown> {
34
+ toHaveMetadataValue: ([key, value]: [EventMetadataKey, string]) => R;
35
+ toHaveEventName: (name: EventNames) => R;
36
+ }
37
+
38
+ declare module 'vitest' {
39
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-empty-object-type
40
+ interface Matchers<T = any> extends CustomMatchers<T> {}
41
+ }
42
+
43
+ expect.extend({
44
+ toHaveEventName(received: LogEventEntry[], name: EventNames) {
45
+ const { isNot } = this;
46
+ const event = JSON.parse(received[0].source_extension_json) as LogEvent;
47
+ const pass = event.event_name === (name as unknown as string);
48
+ return {
49
+ pass,
50
+ message: () =>
51
+ `event name ${event.event_name} does${isNot ? ' not ' : ''} match ${name}}`,
52
+ };
53
+ },
54
+
55
+ toHaveMetadataValue(
56
+ received: LogEventEntry[],
57
+ [key, value]: [EventMetadataKey, string],
58
+ ) {
59
+ const { isNot } = this;
60
+ const event = JSON.parse(received[0].source_extension_json) as LogEvent;
61
+ const metadata = event['event_metadata'][0];
62
+ const data = metadata.find((m) => m.gemini_cli_key === key)?.value;
63
+
64
+ const pass = data !== undefined && data === value;
65
+
66
+ return {
67
+ pass,
68
+ message: () =>
69
+ `event ${received} does${isNot ? ' not' : ''} have ${value}}`,
70
+ };
71
+ },
72
+ });
73
+
74
+ vi.mock('../../utils/user_account');
75
+ vi.mock('../../utils/user_id');
76
+
77
+ const mockUserAccount = vi.mocked(userAccount);
78
+ const mockUserId = vi.mocked(userId);
79
+
80
+ // TODO(richieforeman): Consider moving this to test setup globally.
81
+ beforeAll(() => {
82
+ server.listen({});
83
+ });
84
+
85
+ afterEach(() => {
86
+ server.resetHandlers();
87
+ });
88
+
89
+ afterAll(() => {
90
+ server.close();
91
+ });
92
+
93
+ describe('ClearcutLogger', () => {
94
+ const NEXT_WAIT_MS = 1234;
95
+ const CLEARCUT_URL = 'https://play.googleapis.com/log';
96
+ const MOCK_DATE = new Date('2025-01-02T00:00:00.000Z');
97
+ const EXAMPLE_RESPONSE = `["${NEXT_WAIT_MS}",null,[[["ANDROID_BACKUP",0],["BATTERY_STATS",0],["SMART_SETUP",0],["TRON",0]],-3334737594024971225],[]]`;
98
+
99
+ // A helper to get the internal events array for testing
100
+ const getEvents = (l: ClearcutLogger): LogEventEntry[][] =>
101
+ l['events'].toArray() as LogEventEntry[][];
102
+
103
+ const getEventsSize = (l: ClearcutLogger): number => l['events'].size;
104
+
105
+ const requeueFailedEvents = (l: ClearcutLogger, events: LogEventEntry[][]) =>
106
+ l['requeueFailedEvents'](events);
107
+
108
+ afterEach(() => {
109
+ vi.unstubAllEnvs();
110
+ });
111
+
112
+ function setup({
113
+ config = {} as Partial<ConfigParameters>,
114
+ lifetimeGoogleAccounts = 1,
115
+ cachedGoogleAccount = 'test@google.com',
116
+ installationId = 'test-installation-id',
117
+ } = {}) {
118
+ server.resetHandlers(
119
+ http.post(CLEARCUT_URL, () => HttpResponse.text(EXAMPLE_RESPONSE)),
120
+ );
121
+
122
+ vi.useFakeTimers();
123
+ vi.setSystemTime(MOCK_DATE);
124
+
125
+ const loggerConfig = makeFakeConfig({
126
+ ...config,
127
+ });
128
+ ClearcutLogger.clearInstance();
129
+
130
+ mockUserAccount.getCachedGoogleAccount.mockReturnValue(cachedGoogleAccount);
131
+ mockUserAccount.getLifetimeGoogleAccounts.mockReturnValue(
132
+ lifetimeGoogleAccounts,
133
+ );
134
+ mockUserId.getInstallationId.mockReturnValue(installationId);
135
+
136
+ const logger = ClearcutLogger.getInstance(loggerConfig);
137
+
138
+ return { logger, loggerConfig };
139
+ }
140
+
141
+ afterEach(() => {
142
+ ClearcutLogger.clearInstance();
143
+ vi.useRealTimers();
144
+ vi.restoreAllMocks();
145
+ });
146
+
147
+ describe('getInstance', () => {
148
+ it.each([
149
+ { usageStatisticsEnabled: false, expectedValue: undefined },
150
+ {
151
+ usageStatisticsEnabled: true,
152
+ expectedValue: expect.any(ClearcutLogger),
153
+ },
154
+ ])(
155
+ 'returns an instance if usage statistics are enabled',
156
+ ({ usageStatisticsEnabled, expectedValue }) => {
157
+ ClearcutLogger.clearInstance();
158
+ const { logger } = setup({
159
+ config: {
160
+ usageStatisticsEnabled,
161
+ },
162
+ });
163
+ expect(logger).toEqual(expectedValue);
164
+ },
165
+ );
166
+
167
+ it('is a singleton', () => {
168
+ ClearcutLogger.clearInstance();
169
+ const { loggerConfig } = setup();
170
+ const logger1 = ClearcutLogger.getInstance(loggerConfig);
171
+ const logger2 = ClearcutLogger.getInstance(loggerConfig);
172
+ expect(logger1).toBe(logger2);
173
+ });
174
+ });
175
+
176
+ describe('createLogEvent', () => {
177
+ it('logs the total number of google accounts', () => {
178
+ const { logger } = setup({
179
+ lifetimeGoogleAccounts: 9001,
180
+ });
181
+
182
+ const event = logger?.createLogEvent(EventNames.API_ERROR, []);
183
+
184
+ expect(event?.event_metadata[0]).toContainEqual({
185
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT,
186
+ value: '9001',
187
+ });
188
+ });
189
+
190
+ it('logs the current surface from a github action', () => {
191
+ const { logger } = setup({});
192
+
193
+ vi.stubEnv('GITHUB_SHA', '8675309');
194
+
195
+ const event = logger?.createLogEvent(EventNames.CHAT_COMPRESSION, []);
196
+
197
+ expect(event?.event_metadata[0]).toContainEqual({
198
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
199
+ value: 'GitHub',
200
+ });
201
+ });
202
+
203
+ it('logs the current surface', () => {
204
+ const { logger } = setup({});
205
+
206
+ vi.stubEnv('TERM_PROGRAM', 'vscode');
207
+ vi.stubEnv('SURFACE', 'ide-1234');
208
+
209
+ const event = logger?.createLogEvent(EventNames.API_ERROR, []);
210
+
211
+ expect(event?.event_metadata[0]).toContainEqual({
212
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
213
+ value: 'ide-1234',
214
+ });
215
+ });
216
+
217
+ it.each([
218
+ {
219
+ env: {
220
+ CURSOR_TRACE_ID: 'abc123',
221
+ GITHUB_SHA: undefined,
222
+ },
223
+ expectedValue: 'cursor',
224
+ },
225
+ {
226
+ env: {
227
+ TERM_PROGRAM: 'vscode',
228
+ GITHUB_SHA: undefined,
229
+ },
230
+ expectedValue: 'vscode',
231
+ },
232
+ {
233
+ env: {
234
+ MONOSPACE_ENV: 'true',
235
+ GITHUB_SHA: undefined,
236
+ },
237
+ expectedValue: 'firebasestudio',
238
+ },
239
+ {
240
+ env: {
241
+ __COG_BASHRC_SOURCED: 'true',
242
+ GITHUB_SHA: undefined,
243
+ },
244
+ expectedValue: 'devin',
245
+ },
246
+ {
247
+ env: {
248
+ CLOUD_SHELL: 'true',
249
+ GITHUB_SHA: undefined,
250
+ },
251
+ expectedValue: 'cloudshell',
252
+ },
253
+ ])(
254
+ 'logs the current surface as $expectedValue, preempting vscode detection',
255
+ ({ env, expectedValue }) => {
256
+ const { logger } = setup({});
257
+
258
+ // Clear all environment variables that could interfere with surface detection
259
+ vi.stubEnv('SURFACE', undefined);
260
+ vi.stubEnv('GITHUB_SHA', undefined);
261
+ vi.stubEnv('CURSOR_TRACE_ID', undefined);
262
+ vi.stubEnv('__COG_BASHRC_SOURCED', undefined);
263
+ vi.stubEnv('REPLIT_USER', undefined);
264
+ vi.stubEnv('CODESPACES', undefined);
265
+ vi.stubEnv('EDITOR_IN_CLOUD_SHELL', undefined);
266
+ vi.stubEnv('CLOUD_SHELL', undefined);
267
+ vi.stubEnv('TERM_PRODUCT', undefined);
268
+ vi.stubEnv('FIREBASE_DEPLOY_AGENT', undefined);
269
+ vi.stubEnv('MONOSPACE_ENV', undefined);
270
+
271
+ // Set the specific environment variables for this test case
272
+ for (const [key, value] of Object.entries(env)) {
273
+ vi.stubEnv(key, value);
274
+ }
275
+ vi.stubEnv('TERM_PROGRAM', 'vscode');
276
+ const event = logger?.createLogEvent(EventNames.API_ERROR, []);
277
+ expect(event?.event_metadata[0][3]).toEqual({
278
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
279
+ value: expectedValue,
280
+ });
281
+ },
282
+ );
283
+ });
284
+
285
+ describe('logChatCompressionEvent', () => {
286
+ it('logs an event with proper fields', () => {
287
+ const { logger } = setup();
288
+ logger?.logChatCompressionEvent(
289
+ makeChatCompressionEvent({
290
+ tokens_before: 9001,
291
+ tokens_after: 8000,
292
+ }),
293
+ );
294
+
295
+ const events = getEvents(logger!);
296
+ expect(events.length).toBe(1);
297
+ expect(events[0]).toHaveEventName(EventNames.CHAT_COMPRESSION);
298
+ expect(events[0]).toHaveMetadataValue([
299
+ EventMetadataKey.GEMINI_CLI_COMPRESSION_TOKENS_BEFORE,
300
+ '9001',
301
+ ]);
302
+ expect(events[0]).toHaveMetadataValue([
303
+ EventMetadataKey.GEMINI_CLI_COMPRESSION_TOKENS_AFTER,
304
+ '8000',
305
+ ]);
306
+ });
307
+ });
308
+
309
+ describe('enqueueLogEvent', () => {
310
+ it('should add events to the queue', () => {
311
+ const { logger } = setup();
312
+ logger!.enqueueLogEvent(logger!.createLogEvent(EventNames.API_ERROR));
313
+ expect(getEventsSize(logger!)).toBe(1);
314
+ });
315
+
316
+ it('should evict the oldest event when the queue is full', () => {
317
+ const { logger } = setup();
318
+
319
+ for (let i = 0; i < TEST_ONLY.MAX_EVENTS; i++) {
320
+ logger!.enqueueLogEvent(
321
+ logger!.createLogEvent(EventNames.API_ERROR, [
322
+ {
323
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_AI_ADDED_LINES,
324
+ value: `${i}`,
325
+ },
326
+ ]),
327
+ );
328
+ }
329
+
330
+ let events = getEvents(logger!);
331
+ expect(events.length).toBe(TEST_ONLY.MAX_EVENTS);
332
+ expect(events[0]).toHaveMetadataValue([
333
+ EventMetadataKey.GEMINI_CLI_AI_ADDED_LINES,
334
+ '0',
335
+ ]);
336
+
337
+ // This should push out the first event
338
+ logger!.enqueueLogEvent(
339
+ logger!.createLogEvent(EventNames.API_ERROR, [
340
+ {
341
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_AI_ADDED_LINES,
342
+ value: `${TEST_ONLY.MAX_EVENTS}`,
343
+ },
344
+ ]),
345
+ );
346
+ events = getEvents(logger!);
347
+ expect(events.length).toBe(TEST_ONLY.MAX_EVENTS);
348
+ expect(events[0]).toHaveMetadataValue([
349
+ EventMetadataKey.GEMINI_CLI_AI_ADDED_LINES,
350
+ '1',
351
+ ]);
352
+
353
+ expect(events.at(TEST_ONLY.MAX_EVENTS - 1)).toHaveMetadataValue([
354
+ EventMetadataKey.GEMINI_CLI_AI_ADDED_LINES,
355
+ `${TEST_ONLY.MAX_EVENTS}`,
356
+ ]);
357
+ });
358
+ });
359
+
360
+ describe('flushToClearcut', () => {
361
+ it('allows for usage with a configured proxy agent', async () => {
362
+ const { logger } = setup({
363
+ config: {
364
+ proxy: 'http://mycoolproxy.whatever.com:3128',
365
+ },
366
+ });
367
+
368
+ logger!.enqueueLogEvent(logger!.createLogEvent(EventNames.API_ERROR));
369
+
370
+ const response = await logger!.flushToClearcut();
371
+
372
+ expect(response.nextRequestWaitMs).toBe(NEXT_WAIT_MS);
373
+ });
374
+
375
+ it('should clear events on successful flush', async () => {
376
+ const { logger } = setup();
377
+
378
+ logger!.enqueueLogEvent(logger!.createLogEvent(EventNames.API_ERROR));
379
+ const response = await logger!.flushToClearcut();
380
+
381
+ expect(getEvents(logger!)).toEqual([]);
382
+ expect(response.nextRequestWaitMs).toBe(NEXT_WAIT_MS);
383
+ });
384
+
385
+ it('should handle a network error and requeue events', async () => {
386
+ const { logger } = setup();
387
+
388
+ server.resetHandlers(http.post(CLEARCUT_URL, () => HttpResponse.error()));
389
+ logger!.enqueueLogEvent(logger!.createLogEvent(EventNames.API_REQUEST));
390
+ logger!.enqueueLogEvent(logger!.createLogEvent(EventNames.API_ERROR));
391
+ expect(getEventsSize(logger!)).toBe(2);
392
+
393
+ const x = logger!.flushToClearcut();
394
+ await x;
395
+
396
+ expect(getEventsSize(logger!)).toBe(2);
397
+ const events = getEvents(logger!);
398
+
399
+ expect(events.length).toBe(2);
400
+ expect(events[0]).toHaveEventName(EventNames.API_REQUEST);
401
+ });
402
+
403
+ it('should handle an HTTP error and requeue events', async () => {
404
+ const { logger } = setup();
405
+
406
+ server.resetHandlers(
407
+ http.post(
408
+ CLEARCUT_URL,
409
+ () =>
410
+ new HttpResponse(
411
+ { 'the system is down': true },
412
+ {
413
+ status: 500,
414
+ },
415
+ ),
416
+ ),
417
+ );
418
+
419
+ logger!.enqueueLogEvent(logger!.createLogEvent(EventNames.API_REQUEST));
420
+ logger!.enqueueLogEvent(logger!.createLogEvent(EventNames.API_ERROR));
421
+
422
+ expect(getEvents(logger!).length).toBe(2);
423
+ await logger!.flushToClearcut();
424
+
425
+ const events = getEvents(logger!);
426
+
427
+ expect(events[0]).toHaveEventName(EventNames.API_REQUEST);
428
+ });
429
+ });
430
+
431
+ describe('requeueFailedEvents logic', () => {
432
+ it('should limit the number of requeued events to max_retry_events', () => {
433
+ const { logger } = setup();
434
+ const eventsToLogCount = TEST_ONLY.MAX_RETRY_EVENTS + 5;
435
+ const eventsToSend: LogEventEntry[][] = [];
436
+ for (let i = 0; i < eventsToLogCount; i++) {
437
+ eventsToSend.push([
438
+ {
439
+ event_time_ms: Date.now(),
440
+ source_extension_json: JSON.stringify({ event_id: i }),
441
+ },
442
+ ]);
443
+ }
444
+
445
+ requeueFailedEvents(logger!, eventsToSend);
446
+
447
+ expect(getEventsSize(logger!)).toBe(TEST_ONLY.MAX_RETRY_EVENTS);
448
+ const firstRequeuedEvent = JSON.parse(
449
+ getEvents(logger!)[0][0].source_extension_json,
450
+ ) as { event_id: string };
451
+ // The last `maxRetryEvents` are kept. The oldest of those is at index `eventsToLogCount - maxRetryEvents`.
452
+ expect(firstRequeuedEvent.event_id).toBe(
453
+ eventsToLogCount - TEST_ONLY.MAX_RETRY_EVENTS,
454
+ );
455
+ });
456
+
457
+ it('should not requeue more events than available space in the queue', () => {
458
+ const { logger } = setup();
459
+ const maxEvents = TEST_ONLY.MAX_EVENTS;
460
+ const spaceToLeave = 5;
461
+ const initialEventCount = maxEvents - spaceToLeave;
462
+ for (let i = 0; i < initialEventCount; i++) {
463
+ logger!.enqueueLogEvent(logger!.createLogEvent(EventNames.API_ERROR));
464
+ }
465
+ expect(getEventsSize(logger!)).toBe(initialEventCount);
466
+
467
+ const failedEventsCount = 10; // More than spaceToLeave
468
+ const eventsToSend: LogEventEntry[][] = [];
469
+ for (let i = 0; i < failedEventsCount; i++) {
470
+ eventsToSend.push([
471
+ {
472
+ event_time_ms: Date.now(),
473
+ source_extension_json: JSON.stringify({ event_id: `failed_${i}` }),
474
+ },
475
+ ]);
476
+ }
477
+
478
+ requeueFailedEvents(logger!, eventsToSend);
479
+
480
+ // availableSpace is 5. eventsToRequeue is min(10, 5) = 5.
481
+ // Total size should be initialEventCount + 5 = maxEvents.
482
+ expect(getEventsSize(logger!)).toBe(maxEvents);
483
+
484
+ // The requeued events are the *last* 5 of the failed events.
485
+ // startIndex = max(0, 10 - 5) = 5.
486
+ // Loop unshifts events from index 9 down to 5.
487
+ // The first element in the deque is the one with id 'failed_5'.
488
+ const firstRequeuedEvent = JSON.parse(
489
+ getEvents(logger!)[0][0].source_extension_json,
490
+ ) as { event_id: string };
491
+ expect(firstRequeuedEvent.event_id).toBe('failed_5');
492
+ });
493
+ });
494
+ });
projects/ui/qwen-code/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts ADDED
@@ -0,0 +1,860 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { HttpsProxyAgent } from 'https-proxy-agent';
8
+ import {
9
+ StartSessionEvent,
10
+ UserPromptEvent,
11
+ ToolCallEvent,
12
+ ApiRequestEvent,
13
+ ApiResponseEvent,
14
+ ApiErrorEvent,
15
+ LoopDetectedEvent,
16
+ NextSpeakerCheckEvent,
17
+ SlashCommandEvent,
18
+ MalformedJsonResponseEvent,
19
+ IdeConnectionEvent,
20
+ KittySequenceOverflowEvent,
21
+ ChatCompressionEvent,
22
+ InvalidChunkEvent,
23
+ ContentRetryEvent,
24
+ ContentRetryFailureEvent,
25
+ } from '../types.js';
26
+ import { EventMetadataKey } from './event-metadata-key.js';
27
+ import { Config } from '../../config/config.js';
28
+ import { safeJsonStringify } from '../../utils/safeJsonStringify.js';
29
+ import {
30
+ getCachedGoogleAccount,
31
+ getLifetimeGoogleAccounts,
32
+ } from '../../utils/user_account.js';
33
+ import { getInstallationId } from '../../utils/user_id.js';
34
+ import { FixedDeque } from 'mnemonist';
35
+ import { GIT_COMMIT_INFO, CLI_VERSION } from '../../generated/git-commit.js';
36
+ import { DetectedIde, detectIde } from '../../ide/detect-ide.js';
37
+
38
/**
 * Names of the telemetry events emitted by the CLI. Each value is the
 * wire-format string placed in `LogEvent.event_name` when the event is
 * serialized and flushed to Clearcut.
 */
export enum EventNames {
  START_SESSION = 'start_session',
  NEW_PROMPT = 'new_prompt',
  TOOL_CALL = 'tool_call',
  API_REQUEST = 'api_request',
  API_RESPONSE = 'api_response',
  API_ERROR = 'api_error',
  END_SESSION = 'end_session',
  FLASH_FALLBACK = 'flash_fallback',
  LOOP_DETECTED = 'loop_detected',
  NEXT_SPEAKER_CHECK = 'next_speaker_check',
  SLASH_COMMAND = 'slash_command',
  MALFORMED_JSON_RESPONSE = 'malformed_json_response',
  IDE_CONNECTION = 'ide_connection',
  KITTY_SEQUENCE_OVERFLOW = 'kitty_sequence_overflow',
  CHAT_COMPRESSION = 'chat_compression',
  INVALID_CHUNK = 'invalid_chunk',
  CONTENT_RETRY = 'content_retry',
  CONTENT_RETRY_FAILURE = 'content_retry_failure',
}
58
+
59
/** Parsed response from a successful Clearcut flush. */
export interface LogResponse {
  /** Server-advised minimum wait (ms) before sending the next log request. */
  nextRequestWaitMs?: number;
}

/** One serialized event as buffered in the queue and sent on the wire. */
export interface LogEventEntry {
  /** Epoch milliseconds at which the event was enqueued. */
  event_time_ms: number;
  /** The JSON-serialized {@link LogEvent} payload. */
  source_extension_json: string;
}

/** A single key/value metadata pair attached to a log event. */
export interface EventValue {
  gemini_cli_key: EventMetadataKey;
  value: string;
}

/** Event body that gets serialized into `LogEventEntry.source_extension_json`. */
export interface LogEvent {
  console_type: 'GEMINI_CLI';
  application: number;
  event_name: string;
  event_metadata: EventValue[][];
  /** Set when a cached Google account email is available (mutually exclusive with install id). */
  client_email?: string;
  /** Set when no account email is available; the anonymous installation id. */
  client_install_id?: string;
}

/** Top-level request envelope POSTed to the Clearcut endpoint. */
export interface LogRequest {
  log_source_name: 'CONCORD';
  request_time_ms: number;
  log_event: LogEventEntry[][];
}
87
+
88
+ /**
89
+ * Determine the surface that the user is currently using. Surface is effectively the
90
+ * distribution channel in which the user is using Gemini CLI. Gemini CLI comes bundled
91
+ * w/ Firebase Studio and Cloud Shell. Users that manually download themselves will
92
+ * likely be "SURFACE_NOT_SET".
93
+ *
94
+ * This is computed based upon a series of environment variables these distribution
95
+ * methods might have in their runtimes.
96
+ */
97
+ function determineSurface(): string {
98
+ if (process.env['SURFACE']) {
99
+ return process.env['SURFACE'];
100
+ } else if (process.env['GITHUB_SHA']) {
101
+ return 'GitHub';
102
+ } else if (process.env['TERM_PROGRAM'] === 'vscode') {
103
+ return detectIde() || DetectedIde.VSCode;
104
+ } else {
105
+ return 'SURFACE_NOT_SET';
106
+ }
107
+ }
108
+
109
/**
 * Clearcut URL to send logging events to.
 */
const CLEARCUT_URL = 'https://play.googleapis.com/log?format=json&hasfast=true';

/**
 * Interval in which buffered events are sent to clearcut.
 */
const FLUSH_INTERVAL_MS = 1000 * 60;

/**
 * Maximum amount of events to keep in memory. Events added after this amount
 * are dropped until the next flush to clearcut, which happens periodically as
 * defined by {@link FLUSH_INTERVAL_MS}.
 */
const MAX_EVENTS = 1000;

/**
 * Maximum number of events to re-queue for retry after a failed clearcut
 * flush; older failed events beyond this limit are dropped.
 */
const MAX_RETRY_EVENTS = 100;
130
+
131
+ // Singleton class for batch posting log events to Clearcut. When a new event comes in, the elapsed time
132
+ // is checked and events are flushed to Clearcut if at least a minute has passed since the last flush.
133
+ export class ClearcutLogger {
134
+ private static instance: ClearcutLogger;
135
+ private config?: Config;
136
+ private sessionData: EventValue[] = [];
137
+ private promptId: string = '';
138
+
139
+ /**
140
+ * Queue of pending events that need to be flushed to the server. New events
141
+ * are added to this queue and then flushed on demand (via `flushToClearcut`)
142
+ */
143
+ private readonly events: FixedDeque<LogEventEntry[]>;
144
+
145
+ /**
146
+ * The last time that the events were successfully flushed to the server.
147
+ */
148
+ private lastFlushTime: number = Date.now();
149
+
150
+ /**
151
+ * the value is true when there is a pending flush happening. This prevents
152
+ * concurrent flush operations.
153
+ */
154
+ private flushing: boolean = false;
155
+
156
+ /**
157
+ * This value is true when a flush was requested during an ongoing flush.
158
+ */
159
+ private pendingFlush: boolean = false;
160
+
161
+ private constructor(config?: Config) {
162
+ this.config = config;
163
+ this.events = new FixedDeque<LogEventEntry[]>(Array, MAX_EVENTS);
164
+ this.promptId = config?.getSessionId() ?? '';
165
+ }
166
+
167
+ static getInstance(config?: Config): ClearcutLogger | undefined {
168
+ if (config === undefined || !config?.getUsageStatisticsEnabled())
169
+ return undefined;
170
+ if (!ClearcutLogger.instance) {
171
+ ClearcutLogger.instance = new ClearcutLogger(config);
172
+ }
173
+ return ClearcutLogger.instance;
174
+ }
175
+
176
+ /** For testing purposes only. */
177
+ static clearInstance(): void {
178
+ // @ts-expect-error - ClearcutLogger is a singleton, but we need to clear it for tests.
179
+ ClearcutLogger.instance = undefined;
180
+ }
181
+
182
+ enqueueLogEvent(event: LogEvent): void {
183
+ try {
184
+ // Manually handle overflow for FixedDeque, which throws when full.
185
+ const wasAtCapacity = this.events.size >= MAX_EVENTS;
186
+
187
+ if (wasAtCapacity) {
188
+ this.events.shift(); // Evict oldest element to make space.
189
+ }
190
+
191
+ this.events.push([
192
+ {
193
+ event_time_ms: Date.now(),
194
+ source_extension_json: safeJsonStringify(event),
195
+ },
196
+ ]);
197
+
198
+ if (wasAtCapacity && this.config?.getDebugMode()) {
199
+ console.debug(
200
+ `ClearcutLogger: Dropped old event to prevent memory leak (queue size: ${this.events.size})`,
201
+ );
202
+ }
203
+ } catch (error) {
204
+ if (this.config?.getDebugMode()) {
205
+ console.error('ClearcutLogger: Failed to enqueue log event.', error);
206
+ }
207
+ }
208
+ }
209
+
210
+ createLogEvent(eventName: EventNames, data: EventValue[] = []): LogEvent {
211
+ const email = getCachedGoogleAccount();
212
+
213
+ if (eventName !== EventNames.START_SESSION) {
214
+ data.push(...this.sessionData);
215
+ }
216
+ data = this.addDefaultFields(data);
217
+
218
+ const logEvent: LogEvent = {
219
+ console_type: 'GEMINI_CLI',
220
+ application: 102, // GEMINI_CLI
221
+ event_name: eventName as string,
222
+ event_metadata: [data],
223
+ };
224
+
225
+ // Should log either email or install ID, not both. See go/cloudmill-1p-oss-instrumentation#define-sessionable-id
226
+ if (email) {
227
+ logEvent.client_email = email;
228
+ } else {
229
+ logEvent.client_install_id = getInstallationId();
230
+ }
231
+
232
+ return logEvent;
233
+ }
234
+
235
+ flushIfNeeded(): void {
236
+ if (Date.now() - this.lastFlushTime < FLUSH_INTERVAL_MS) {
237
+ return;
238
+ }
239
+
240
+ this.flushToClearcut().catch((error) => {
241
+ console.debug('Error flushing to Clearcut:', error);
242
+ });
243
+ }
244
+
245
+ async flushToClearcut(): Promise<LogResponse> {
246
+ if (this.flushing) {
247
+ if (this.config?.getDebugMode()) {
248
+ console.debug(
249
+ 'ClearcutLogger: Flush already in progress, marking pending flush.',
250
+ );
251
+ }
252
+ this.pendingFlush = true;
253
+ return Promise.resolve({});
254
+ }
255
+ this.flushing = true;
256
+
257
+ if (this.config?.getDebugMode()) {
258
+ console.log('Flushing log events to Clearcut.');
259
+ }
260
+ const eventsToSend = this.events.toArray() as LogEventEntry[][];
261
+ this.events.clear();
262
+
263
+ const request: LogRequest[] = [
264
+ {
265
+ log_source_name: 'CONCORD',
266
+ request_time_ms: Date.now(),
267
+ log_event: eventsToSend,
268
+ },
269
+ ];
270
+
271
+ let result: LogResponse = {};
272
+
273
+ try {
274
+ const response = await fetch(CLEARCUT_URL, {
275
+ method: 'POST',
276
+ body: safeJsonStringify(request),
277
+ headers: {
278
+ 'Content-Type': 'application/json',
279
+ },
280
+ });
281
+
282
+ const responseBody = await response.text();
283
+
284
+ if (response.status >= 200 && response.status < 300) {
285
+ this.lastFlushTime = Date.now();
286
+ const nextRequestWaitMs = Number(JSON.parse(responseBody)[0]);
287
+ result = {
288
+ ...result,
289
+ nextRequestWaitMs,
290
+ };
291
+ } else {
292
+ if (this.config?.getDebugMode()) {
293
+ console.error(
294
+ `Error flushing log events: HTTP ${response.status}: ${response.statusText}`,
295
+ );
296
+ }
297
+
298
+ // Re-queue failed events for retry
299
+ this.requeueFailedEvents(eventsToSend);
300
+ }
301
+ } catch (e: unknown) {
302
+ if (this.config?.getDebugMode()) {
303
+ console.error('Error flushing log events:', e as Error);
304
+ }
305
+
306
+ // Re-queue failed events for retry
307
+ this.requeueFailedEvents(eventsToSend);
308
+ }
309
+
310
+ this.flushing = false;
311
+
312
+ // If a flush was requested while we were flushing, flush again
313
+ if (this.pendingFlush) {
314
+ this.pendingFlush = false;
315
+ // Fire and forget the pending flush
316
+ this.flushToClearcut().catch((error) => {
317
+ if (this.config?.getDebugMode()) {
318
+ console.debug('Error in pending flush to Clearcut:', error);
319
+ }
320
+ });
321
+ }
322
+
323
+ return result;
324
+ }
325
+
326
+ logStartSessionEvent(event: StartSessionEvent): void {
327
+ const data: EventValue[] = [
328
+ {
329
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_START_SESSION_MODEL,
330
+ value: event.model,
331
+ },
332
+ {
333
+ gemini_cli_key:
334
+ EventMetadataKey.GEMINI_CLI_START_SESSION_EMBEDDING_MODEL,
335
+ value: event.embedding_model,
336
+ },
337
+ {
338
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_START_SESSION_SANDBOX,
339
+ value: event.sandbox_enabled.toString(),
340
+ },
341
+ {
342
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_START_SESSION_CORE_TOOLS,
343
+ value: event.core_tools_enabled,
344
+ },
345
+ {
346
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_START_SESSION_APPROVAL_MODE,
347
+ value: event.approval_mode,
348
+ },
349
+ {
350
+ gemini_cli_key:
351
+ EventMetadataKey.GEMINI_CLI_START_SESSION_API_KEY_ENABLED,
352
+ value: event.api_key_enabled.toString(),
353
+ },
354
+ {
355
+ gemini_cli_key:
356
+ EventMetadataKey.GEMINI_CLI_START_SESSION_VERTEX_API_ENABLED,
357
+ value: event.vertex_ai_enabled.toString(),
358
+ },
359
+ {
360
+ gemini_cli_key:
361
+ EventMetadataKey.GEMINI_CLI_START_SESSION_DEBUG_MODE_ENABLED,
362
+ value: event.debug_enabled.toString(),
363
+ },
364
+ {
365
+ gemini_cli_key:
366
+ EventMetadataKey.GEMINI_CLI_START_SESSION_VERTEX_API_ENABLED,
367
+ value: event.vertex_ai_enabled.toString(),
368
+ },
369
+ {
370
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_START_SESSION_MCP_SERVERS,
371
+ value: event.mcp_servers,
372
+ },
373
+ {
374
+ gemini_cli_key:
375
+ EventMetadataKey.GEMINI_CLI_START_SESSION_VERTEX_API_ENABLED,
376
+ value: event.vertex_ai_enabled.toString(),
377
+ },
378
+ {
379
+ gemini_cli_key:
380
+ EventMetadataKey.GEMINI_CLI_START_SESSION_TELEMETRY_ENABLED,
381
+ value: event.telemetry_enabled.toString(),
382
+ },
383
+ {
384
+ gemini_cli_key:
385
+ EventMetadataKey.GEMINI_CLI_START_SESSION_TELEMETRY_LOG_USER_PROMPTS_ENABLED,
386
+ value: event.telemetry_log_user_prompts_enabled.toString(),
387
+ },
388
+ ];
389
+ this.sessionData = data;
390
+
391
+ // Flush start event immediately
392
+ this.enqueueLogEvent(this.createLogEvent(EventNames.START_SESSION, data));
393
+ this.flushToClearcut().catch((error) => {
394
+ console.debug('Error flushing to Clearcut:', error);
395
+ });
396
+ }
397
+
398
+ logNewPromptEvent(event: UserPromptEvent): void {
399
+ this.promptId = event.prompt_id;
400
+ const data: EventValue[] = [
401
+ {
402
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_USER_PROMPT_LENGTH,
403
+ value: JSON.stringify(event.prompt_length),
404
+ },
405
+ ];
406
+
407
+ this.enqueueLogEvent(this.createLogEvent(EventNames.NEW_PROMPT, data));
408
+ this.flushIfNeeded();
409
+ }
410
+
411
+ logToolCallEvent(event: ToolCallEvent): void {
412
+ const data: EventValue[] = [
413
+ {
414
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_TOOL_CALL_NAME,
415
+ value: JSON.stringify(event.function_name),
416
+ },
417
+ {
418
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_TOOL_CALL_DECISION,
419
+ value: JSON.stringify(event.decision),
420
+ },
421
+ {
422
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_TOOL_CALL_SUCCESS,
423
+ value: JSON.stringify(event.success),
424
+ },
425
+ {
426
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_TOOL_CALL_DURATION_MS,
427
+ value: JSON.stringify(event.duration_ms),
428
+ },
429
+ {
430
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_TOOL_ERROR_MESSAGE,
431
+ value: JSON.stringify(event.error),
432
+ },
433
+ {
434
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_TOOL_CALL_ERROR_TYPE,
435
+ value: JSON.stringify(event.error_type),
436
+ },
437
+ {
438
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_TOOL_TYPE,
439
+ value: JSON.stringify(event.tool_type),
440
+ },
441
+ ];
442
+
443
+ if (event.metadata) {
444
+ const metadataMapping: { [key: string]: EventMetadataKey } = {
445
+ ai_added_lines: EventMetadataKey.GEMINI_CLI_AI_ADDED_LINES,
446
+ ai_removed_lines: EventMetadataKey.GEMINI_CLI_AI_REMOVED_LINES,
447
+ user_added_lines: EventMetadataKey.GEMINI_CLI_USER_ADDED_LINES,
448
+ user_removed_lines: EventMetadataKey.GEMINI_CLI_USER_REMOVED_LINES,
449
+ };
450
+
451
+ for (const [key, gemini_cli_key] of Object.entries(metadataMapping)) {
452
+ if (event.metadata[key] !== undefined) {
453
+ data.push({
454
+ gemini_cli_key,
455
+ value: JSON.stringify(event.metadata[key]),
456
+ });
457
+ }
458
+ }
459
+ }
460
+
461
+ const logEvent = this.createLogEvent(EventNames.TOOL_CALL, data);
462
+ this.enqueueLogEvent(logEvent);
463
+ this.flushIfNeeded();
464
+ }
465
+
466
+ logApiRequestEvent(event: ApiRequestEvent): void {
467
+ const data: EventValue[] = [
468
+ {
469
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_REQUEST_MODEL,
470
+ value: JSON.stringify(event.model),
471
+ },
472
+ ];
473
+
474
+ this.enqueueLogEvent(this.createLogEvent(EventNames.API_REQUEST, data));
475
+ this.flushIfNeeded();
476
+ }
477
+
478
+ logApiResponseEvent(event: ApiResponseEvent): void {
479
+ const data: EventValue[] = [
480
+ {
481
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_RESPONSE_MODEL,
482
+ value: JSON.stringify(event.model),
483
+ },
484
+ {
485
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_RESPONSE_STATUS_CODE,
486
+ value: JSON.stringify(event.status_code),
487
+ },
488
+ {
489
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_RESPONSE_DURATION_MS,
490
+ value: JSON.stringify(event.duration_ms),
491
+ },
492
+ {
493
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_ERROR_MESSAGE,
494
+ value: JSON.stringify(event.error),
495
+ },
496
+ {
497
+ gemini_cli_key:
498
+ EventMetadataKey.GEMINI_CLI_API_RESPONSE_INPUT_TOKEN_COUNT,
499
+ value: JSON.stringify(event.input_token_count),
500
+ },
501
+ {
502
+ gemini_cli_key:
503
+ EventMetadataKey.GEMINI_CLI_API_RESPONSE_OUTPUT_TOKEN_COUNT,
504
+ value: JSON.stringify(event.output_token_count),
505
+ },
506
+ {
507
+ gemini_cli_key:
508
+ EventMetadataKey.GEMINI_CLI_API_RESPONSE_CACHED_TOKEN_COUNT,
509
+ value: JSON.stringify(event.cached_content_token_count),
510
+ },
511
+ {
512
+ gemini_cli_key:
513
+ EventMetadataKey.GEMINI_CLI_API_RESPONSE_THINKING_TOKEN_COUNT,
514
+ value: JSON.stringify(event.thoughts_token_count),
515
+ },
516
+ {
517
+ gemini_cli_key:
518
+ EventMetadataKey.GEMINI_CLI_API_RESPONSE_TOOL_TOKEN_COUNT,
519
+ value: JSON.stringify(event.tool_token_count),
520
+ },
521
+ ];
522
+
523
+ this.enqueueLogEvent(this.createLogEvent(EventNames.API_RESPONSE, data));
524
+ this.flushIfNeeded();
525
+ }
526
+
527
+ logApiErrorEvent(event: ApiErrorEvent): void {
528
+ const data: EventValue[] = [
529
+ {
530
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_ERROR_MODEL,
531
+ value: JSON.stringify(event.model),
532
+ },
533
+ {
534
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_ERROR_TYPE,
535
+ value: JSON.stringify(event.error_type),
536
+ },
537
+ {
538
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_ERROR_STATUS_CODE,
539
+ value: JSON.stringify(event.status_code),
540
+ },
541
+ {
542
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_API_ERROR_DURATION_MS,
543
+ value: JSON.stringify(event.duration_ms),
544
+ },
545
+ ];
546
+
547
+ this.enqueueLogEvent(this.createLogEvent(EventNames.API_ERROR, data));
548
+ this.flushIfNeeded();
549
+ }
550
+
551
+ logChatCompressionEvent(event: ChatCompressionEvent): void {
552
+ const data: EventValue[] = [
553
+ {
554
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_COMPRESSION_TOKENS_BEFORE,
555
+ value: `${event.tokens_before}`,
556
+ },
557
+ {
558
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_COMPRESSION_TOKENS_AFTER,
559
+ value: `${event.tokens_after}`,
560
+ },
561
+ ];
562
+
563
+ this.enqueueLogEvent(
564
+ this.createLogEvent(EventNames.CHAT_COMPRESSION, data),
565
+ );
566
+ }
567
+
568
+ logFlashFallbackEvent(): void {
569
+ this.enqueueLogEvent(this.createLogEvent(EventNames.FLASH_FALLBACK, []));
570
+ this.flushToClearcut().catch((error) => {
571
+ console.debug('Error flushing to Clearcut:', error);
572
+ });
573
+ }
574
+
575
+ logLoopDetectedEvent(event: LoopDetectedEvent): void {
576
+ const data: EventValue[] = [
577
+ {
578
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_LOOP_DETECTED_TYPE,
579
+ value: JSON.stringify(event.loop_type),
580
+ },
581
+ ];
582
+
583
+ this.enqueueLogEvent(this.createLogEvent(EventNames.LOOP_DETECTED, data));
584
+ this.flushIfNeeded();
585
+ }
586
+
587
+ logNextSpeakerCheck(event: NextSpeakerCheckEvent): void {
588
+ const data: EventValue[] = [
589
+ {
590
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_RESPONSE_FINISH_REASON,
591
+ value: JSON.stringify(event.finish_reason),
592
+ },
593
+ {
594
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_NEXT_SPEAKER_CHECK_RESULT,
595
+ value: JSON.stringify(event.result),
596
+ },
597
+ ];
598
+
599
+ this.enqueueLogEvent(
600
+ this.createLogEvent(EventNames.NEXT_SPEAKER_CHECK, data),
601
+ );
602
+ this.flushIfNeeded();
603
+ }
604
+
605
+ logSlashCommandEvent(event: SlashCommandEvent): void {
606
+ const data: EventValue[] = [
607
+ {
608
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SLASH_COMMAND_NAME,
609
+ value: JSON.stringify(event.command),
610
+ },
611
+ ];
612
+
613
+ if (event.subcommand) {
614
+ data.push({
615
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SLASH_COMMAND_SUBCOMMAND,
616
+ value: JSON.stringify(event.subcommand),
617
+ });
618
+ }
619
+
620
+ if (event.status) {
621
+ data.push({
622
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SLASH_COMMAND_STATUS,
623
+ value: JSON.stringify(event.status),
624
+ });
625
+ }
626
+
627
+ this.enqueueLogEvent(this.createLogEvent(EventNames.SLASH_COMMAND, data));
628
+ this.flushIfNeeded();
629
+ }
630
+
631
+ logMalformedJsonResponseEvent(event: MalformedJsonResponseEvent): void {
632
+ const data: EventValue[] = [
633
+ {
634
+ gemini_cli_key:
635
+ EventMetadataKey.GEMINI_CLI_MALFORMED_JSON_RESPONSE_MODEL,
636
+ value: JSON.stringify(event.model),
637
+ },
638
+ ];
639
+
640
+ this.enqueueLogEvent(
641
+ this.createLogEvent(EventNames.MALFORMED_JSON_RESPONSE, data),
642
+ );
643
+ this.flushIfNeeded();
644
+ }
645
+
646
+ logIdeConnectionEvent(event: IdeConnectionEvent): void {
647
+ const data: EventValue[] = [
648
+ {
649
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_IDE_CONNECTION_TYPE,
650
+ value: JSON.stringify(event.connection_type),
651
+ },
652
+ ];
653
+
654
+ this.enqueueLogEvent(this.createLogEvent(EventNames.IDE_CONNECTION, data));
655
+ this.flushIfNeeded();
656
+ }
657
+
658
+ logKittySequenceOverflowEvent(event: KittySequenceOverflowEvent): void {
659
+ const data: EventValue[] = [
660
+ {
661
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_KITTY_SEQUENCE_LENGTH,
662
+ value: event.sequence_length.toString(),
663
+ },
664
+ {
665
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_KITTY_TRUNCATED_SEQUENCE,
666
+ value: event.truncated_sequence,
667
+ },
668
+ ];
669
+
670
+ this.enqueueLogEvent(
671
+ this.createLogEvent(EventNames.KITTY_SEQUENCE_OVERFLOW, data),
672
+ );
673
+ this.flushIfNeeded();
674
+ }
675
+
676
+ logEndSessionEvent(): void {
677
+ // Flush immediately on session end.
678
+ this.enqueueLogEvent(this.createLogEvent(EventNames.END_SESSION, []));
679
+ this.flushToClearcut().catch((error) => {
680
+ console.debug('Error flushing to Clearcut:', error);
681
+ });
682
+ }
683
+
684
+ logInvalidChunkEvent(event: InvalidChunkEvent): void {
685
+ const data: EventValue[] = [];
686
+
687
+ if (event.error_message) {
688
+ data.push({
689
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_INVALID_CHUNK_ERROR_MESSAGE,
690
+ value: event.error_message,
691
+ });
692
+ }
693
+
694
+ this.enqueueLogEvent(this.createLogEvent(EventNames.INVALID_CHUNK, data));
695
+ this.flushIfNeeded();
696
+ }
697
+
698
+ logContentRetryEvent(event: ContentRetryEvent): void {
699
+ const data: EventValue[] = [
700
+ {
701
+ gemini_cli_key:
702
+ EventMetadataKey.GEMINI_CLI_CONTENT_RETRY_ATTEMPT_NUMBER,
703
+ value: String(event.attempt_number),
704
+ },
705
+ {
706
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_CONTENT_RETRY_ERROR_TYPE,
707
+ value: event.error_type,
708
+ },
709
+ {
710
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_CONTENT_RETRY_DELAY_MS,
711
+ value: String(event.retry_delay_ms),
712
+ },
713
+ ];
714
+
715
+ this.enqueueLogEvent(this.createLogEvent(EventNames.CONTENT_RETRY, data));
716
+ this.flushIfNeeded();
717
+ }
718
+
719
+ logContentRetryFailureEvent(event: ContentRetryFailureEvent): void {
720
+ const data: EventValue[] = [
721
+ {
722
+ gemini_cli_key:
723
+ EventMetadataKey.GEMINI_CLI_CONTENT_RETRY_FAILURE_TOTAL_ATTEMPTS,
724
+ value: String(event.total_attempts),
725
+ },
726
+ {
727
+ gemini_cli_key:
728
+ EventMetadataKey.GEMINI_CLI_CONTENT_RETRY_FAILURE_FINAL_ERROR_TYPE,
729
+ value: event.final_error_type,
730
+ },
731
+ ];
732
+
733
+ if (event.total_duration_ms) {
734
+ data.push({
735
+ gemini_cli_key:
736
+ EventMetadataKey.GEMINI_CLI_CONTENT_RETRY_FAILURE_TOTAL_DURATION_MS,
737
+ value: String(event.total_duration_ms),
738
+ });
739
+ }
740
+
741
+ this.enqueueLogEvent(
742
+ this.createLogEvent(EventNames.CONTENT_RETRY_FAILURE, data),
743
+ );
744
+ this.flushIfNeeded();
745
+ }
746
+
747
+ /**
748
+ * Adds default fields to data, and returns a new data array. This fields
749
+ * should exist on all log events.
750
+ */
751
+ addDefaultFields(data: EventValue[]): EventValue[] {
752
+ const totalAccounts = getLifetimeGoogleAccounts();
753
+ const surface = determineSurface();
754
+
755
+ const defaultLogMetadata: EventValue[] = [
756
+ {
757
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SESSION_ID,
758
+ value: this.config?.getSessionId() ?? '',
759
+ },
760
+ {
761
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_AUTH_TYPE,
762
+ value: JSON.stringify(
763
+ this.config?.getContentGeneratorConfig()?.authType,
764
+ ),
765
+ },
766
+ {
767
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT,
768
+ value: `${totalAccounts}`,
769
+ },
770
+ {
771
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
772
+ value: surface,
773
+ },
774
+ {
775
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_VERSION,
776
+ value: CLI_VERSION,
777
+ },
778
+ {
779
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_GIT_COMMIT_HASH,
780
+ value: GIT_COMMIT_INFO,
781
+ },
782
+ {
783
+ gemini_cli_key: EventMetadataKey.GEMINI_CLI_PROMPT_ID,
784
+ value: this.promptId,
785
+ },
786
+ ];
787
+ return [...data, ...defaultLogMetadata];
788
+ }
789
+
790
+ getProxyAgent() {
791
+ const proxyUrl = this.config?.getProxy();
792
+ if (!proxyUrl) return undefined;
793
+ // undici which is widely used in the repo can only support http & https proxy protocol,
794
+ // https://github.com/nodejs/undici/issues/2224
795
+ if (proxyUrl.startsWith('http')) {
796
+ return new HttpsProxyAgent(proxyUrl);
797
+ } else {
798
+ throw new Error('Unsupported proxy type');
799
+ }
800
+ }
801
+
802
+ shutdown() {
803
+ this.logEndSessionEvent();
804
+ }
805
+
806
+ private requeueFailedEvents(eventsToSend: LogEventEntry[][]): void {
807
+ // Add the events back to the front of the queue to be retried, but limit retry queue size
808
+ const eventsToRetry = eventsToSend.slice(-MAX_RETRY_EVENTS); // Keep only the most recent events
809
+
810
+ // Log a warning if we're dropping events
811
+ if (eventsToSend.length > MAX_RETRY_EVENTS && this.config?.getDebugMode()) {
812
+ console.warn(
813
+ `ClearcutLogger: Dropping ${
814
+ eventsToSend.length - MAX_RETRY_EVENTS
815
+ } events due to retry queue limit. Total events: ${
816
+ eventsToSend.length
817
+ }, keeping: ${MAX_RETRY_EVENTS}`,
818
+ );
819
+ }
820
+
821
+ // Determine how many events can be re-queued
822
+ const availableSpace = MAX_EVENTS - this.events.size;
823
+ const numEventsToRequeue = Math.min(eventsToRetry.length, availableSpace);
824
+
825
+ if (numEventsToRequeue === 0) {
826
+ if (this.config?.getDebugMode()) {
827
+ console.debug(
828
+ `ClearcutLogger: No events re-queued (queue size: ${this.events.size})`,
829
+ );
830
+ }
831
+ return;
832
+ }
833
+
834
+ // Get the most recent events to re-queue
835
+ const eventsToRequeue = eventsToRetry.slice(
836
+ eventsToRetry.length - numEventsToRequeue,
837
+ );
838
+
839
+ // Prepend events to the front of the deque to be retried first.
840
+ // We iterate backwards to maintain the original order of the failed events.
841
+ for (let i = eventsToRequeue.length - 1; i >= 0; i--) {
842
+ this.events.unshift(eventsToRequeue[i]);
843
+ }
844
+ // Clear any potential overflow
845
+ while (this.events.size > MAX_EVENTS) {
846
+ this.events.pop();
847
+ }
848
+
849
+ if (this.config?.getDebugMode()) {
850
+ console.debug(
851
+ `ClearcutLogger: Re-queued ${numEventsToRequeue} events for retry (queue size: ${this.events.size})`,
852
+ );
853
+ }
854
+ }
855
+ }
856
+
857
/**
 * Implementation constants re-exported exclusively for unit tests.
 * Not part of the public API surface.
 */
export const TEST_ONLY = {
  MAX_RETRY_EVENTS,
  MAX_EVENTS,
};
projects/ui/qwen-code/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
// Defines the valid event-metadata keys attached to logged telemetry events.
//
// NOTE(review): the numeric values appear to be stable wire identifiers —
// the numbering has deliberate gaps and out-of-order entries (e.g. 38, 51,
// 54-57 appear outside their sections), which suggests values are appended
// over time and must never be renumbered or reused. Confirm before editing.
export enum EventMetadataKey {
  GEMINI_CLI_KEY_UNKNOWN = 0,

  // ==========================================================================
  // Start Session Event Keys
  // ===========================================================================

  // Logs the model id used in the session.
  GEMINI_CLI_START_SESSION_MODEL = 1,

  // Logs the embedding model id used in the session.
  GEMINI_CLI_START_SESSION_EMBEDDING_MODEL = 2,

  // Logs the sandbox that was used in the session.
  GEMINI_CLI_START_SESSION_SANDBOX = 3,

  // Logs the core tools that were enabled in the session.
  GEMINI_CLI_START_SESSION_CORE_TOOLS = 4,

  // Logs the approval mode that was used in the session.
  GEMINI_CLI_START_SESSION_APPROVAL_MODE = 5,

  // Logs whether an API key was used in the session.
  GEMINI_CLI_START_SESSION_API_KEY_ENABLED = 6,

  // Logs whether the Vertex API was used in the session.
  GEMINI_CLI_START_SESSION_VERTEX_API_ENABLED = 7,

  // Logs whether debug mode was enabled in the session.
  GEMINI_CLI_START_SESSION_DEBUG_MODE_ENABLED = 8,

  // Logs the MCP servers that were enabled in the session.
  GEMINI_CLI_START_SESSION_MCP_SERVERS = 9,

  // Logs whether user-collected telemetry was enabled in the session.
  GEMINI_CLI_START_SESSION_TELEMETRY_ENABLED = 10,

  // Logs whether prompt collection was enabled for user-collected telemetry.
  GEMINI_CLI_START_SESSION_TELEMETRY_LOG_USER_PROMPTS_ENABLED = 11,

  // Logs whether the session was configured to respect gitignore files.
  GEMINI_CLI_START_SESSION_RESPECT_GITIGNORE = 12,

  // ==========================================================================
  // User Prompt Event Keys
  // ===========================================================================

  // Logs the length of the prompt.
  GEMINI_CLI_USER_PROMPT_LENGTH = 13,

  // ==========================================================================
  // Tool Call Event Keys
  // ===========================================================================

  // Logs the function name.
  GEMINI_CLI_TOOL_CALL_NAME = 14,

  // Logs the user's decision about how to handle the tool call.
  GEMINI_CLI_TOOL_CALL_DECISION = 15,

  // Logs whether the tool call succeeded.
  GEMINI_CLI_TOOL_CALL_SUCCESS = 16,

  // Logs the tool call duration in milliseconds.
  GEMINI_CLI_TOOL_CALL_DURATION_MS = 17,

  // Logs the tool call error message, if any.
  GEMINI_CLI_TOOL_ERROR_MESSAGE = 18,

  // Logs the tool call error type, if any.
  GEMINI_CLI_TOOL_CALL_ERROR_TYPE = 19,

  // ==========================================================================
  // GenAI API Request Event Keys
  // ===========================================================================

  // Logs the model id of the request.
  GEMINI_CLI_API_REQUEST_MODEL = 20,

  // ==========================================================================
  // GenAI API Response Event Keys
  // ===========================================================================

  // Logs the model id of the API call.
  GEMINI_CLI_API_RESPONSE_MODEL = 21,

  // Logs the status code of the response.
  GEMINI_CLI_API_RESPONSE_STATUS_CODE = 22,

  // Logs the duration of the API call in milliseconds.
  GEMINI_CLI_API_RESPONSE_DURATION_MS = 23,

  // Logs the error message of the API call, if any.
  GEMINI_CLI_API_ERROR_MESSAGE = 24,

  // Logs the input token count of the API call.
  GEMINI_CLI_API_RESPONSE_INPUT_TOKEN_COUNT = 25,

  // Logs the output token count of the API call.
  GEMINI_CLI_API_RESPONSE_OUTPUT_TOKEN_COUNT = 26,

  // Logs the cached token count of the API call.
  GEMINI_CLI_API_RESPONSE_CACHED_TOKEN_COUNT = 27,

  // Logs the thinking token count of the API call.
  GEMINI_CLI_API_RESPONSE_THINKING_TOKEN_COUNT = 28,

  // Logs the tool use token count of the API call.
  GEMINI_CLI_API_RESPONSE_TOOL_TOKEN_COUNT = 29,

  // ==========================================================================
  // GenAI API Error Event Keys
  // ===========================================================================

  // Logs the model id of the API call.
  GEMINI_CLI_API_ERROR_MODEL = 30,

  // Logs the error type.
  GEMINI_CLI_API_ERROR_TYPE = 31,

  // Logs the status code of the error response.
  GEMINI_CLI_API_ERROR_STATUS_CODE = 32,

  // Logs the duration of the API call in milliseconds.
  GEMINI_CLI_API_ERROR_DURATION_MS = 33,

  // ==========================================================================
  // End Session Event Keys
  // ===========================================================================

  // Logs the end of a session.
  GEMINI_CLI_END_SESSION_ID = 34,

  // ==========================================================================
  // Shared Keys
  // ===========================================================================

  // Logs the Prompt Id
  GEMINI_CLI_PROMPT_ID = 35,

  // Logs the Auth type for the prompt, api responses and errors.
  GEMINI_CLI_AUTH_TYPE = 36,

  // Logs the total number of Google accounts ever used.
  GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT = 37,

  // Logs the Surface from where the Gemini CLI was invoked, eg: VSCode.
  GEMINI_CLI_SURFACE = 39,

  // Logs the session id
  GEMINI_CLI_SESSION_ID = 40,

  // Logs the Gemini CLI version
  GEMINI_CLI_VERSION = 54,

  // Logs the Gemini CLI Git commit hash
  GEMINI_CLI_GIT_COMMIT_HASH = 55,

  // ==========================================================================
  // Loop Detected Event Keys
  // ===========================================================================

  // Logs the type of loop detected.
  GEMINI_CLI_LOOP_DETECTED_TYPE = 38,

  // ==========================================================================
  // Slash Command Event Keys
  // ===========================================================================

  // Logs the name of the slash command.
  GEMINI_CLI_SLASH_COMMAND_NAME = 41,

  // Logs the subcommand of the slash command.
  GEMINI_CLI_SLASH_COMMAND_SUBCOMMAND = 42,

  // Logs the status of the slash command (e.g. 'success', 'error')
  GEMINI_CLI_SLASH_COMMAND_STATUS = 51,

  // ==========================================================================
  // Next Speaker Check Event Keys
  // ===========================================================================

  // Logs the finish reason of the previous streamGenerateContent response
  GEMINI_CLI_RESPONSE_FINISH_REASON = 43,

  // Logs the result of the next speaker check
  GEMINI_CLI_NEXT_SPEAKER_CHECK_RESULT = 44,

  // ==========================================================================
  // Malformed JSON Response Event Keys
  // ==========================================================================

  // Logs the model that produced the malformed JSON response.
  GEMINI_CLI_MALFORMED_JSON_RESPONSE_MODEL = 45,

  // ==========================================================================
  // IDE Connection Event Keys
  // ===========================================================================

  // Logs the type of the IDE connection.
  GEMINI_CLI_IDE_CONNECTION_TYPE = 46,

  // Logs AI added lines in edit/write tool response.
  GEMINI_CLI_AI_ADDED_LINES = 47,

  // Logs AI removed lines in edit/write tool response.
  GEMINI_CLI_AI_REMOVED_LINES = 48,

  // Logs user added lines in edit/write tool response.
  GEMINI_CLI_USER_ADDED_LINES = 49,

  // Logs user removed lines in edit/write tool response.
  GEMINI_CLI_USER_REMOVED_LINES = 50,

  // ==========================================================================
  // Kitty Sequence Overflow Event Keys
  // ===========================================================================

  // Logs the truncated kitty sequence.
  GEMINI_CLI_KITTY_TRUNCATED_SEQUENCE = 52,

  // Logs the length of the kitty sequence that overflowed.
  GEMINI_CLI_KITTY_SEQUENCE_LENGTH = 53,

  // ==========================================================================
  // Conversation Finished Event Keys
  // ===========================================================================

  // Logs the approval mode of the session.
  GEMINI_CLI_APPROVAL_MODE = 58,

  // Logs the number of turns
  GEMINI_CLI_CONVERSATION_TURN_COUNT = 59,

  // Logs the number of tokens before context window compression.
  GEMINI_CLI_COMPRESSION_TOKENS_BEFORE = 60,

  // Logs the number of tokens after context window compression.
  GEMINI_CLI_COMPRESSION_TOKENS_AFTER = 61,

  // Logs tool type whether it is mcp or native.
  GEMINI_CLI_TOOL_TYPE = 62,

  // Logs name of MCP tools as comma separated string
  GEMINI_CLI_START_SESSION_MCP_TOOLS = 65,

  // ==========================================================================
  // Research Event Keys
  // ===========================================================================

  // Logs the research opt-in status (true/false)
  GEMINI_CLI_RESEARCH_OPT_IN_STATUS = 66,

  // Logs the contact email for research participation
  GEMINI_CLI_RESEARCH_CONTACT_EMAIL = 67,

  // Logs the user ID for research events
  GEMINI_CLI_RESEARCH_USER_ID = 68,

  // Logs the type of research feedback
  GEMINI_CLI_RESEARCH_FEEDBACK_TYPE = 69,

  // Logs the content of research feedback
  GEMINI_CLI_RESEARCH_FEEDBACK_CONTENT = 70,

  // Logs survey responses for research feedback (JSON stringified)
  GEMINI_CLI_RESEARCH_SURVEY_RESPONSES = 71,

  // ==========================================================================
  // File Operation Event Keys
  // ===========================================================================

  // Logs the programming language of the project.
  GEMINI_CLI_PROGRAMMING_LANGUAGE = 56,

  // Logs the operation type of the file operation.
  GEMINI_CLI_FILE_OPERATION_TYPE = 57,

  // Logs the number of lines in the file operation.
  GEMINI_CLI_FILE_OPERATION_LINES = 72,

  // Logs the mimetype of the file in the file operation.
  GEMINI_CLI_FILE_OPERATION_MIMETYPE = 73,

  // Logs the extension of the file in the file operation.
  GEMINI_CLI_FILE_OPERATION_EXTENSION = 74,

  // ==========================================================================
  // Content Streaming Event Keys
  // ===========================================================================

  // Logs the error message for an invalid chunk.
  GEMINI_CLI_INVALID_CHUNK_ERROR_MESSAGE = 75,

  // Logs the attempt number for a content retry.
  GEMINI_CLI_CONTENT_RETRY_ATTEMPT_NUMBER = 76,

  // Logs the error type for a content retry.
  GEMINI_CLI_CONTENT_RETRY_ERROR_TYPE = 77,

  // Logs the delay in milliseconds for a content retry.
  GEMINI_CLI_CONTENT_RETRY_DELAY_MS = 78,

  // Logs the total number of attempts for a content retry failure.
  GEMINI_CLI_CONTENT_RETRY_FAILURE_TOTAL_ATTEMPTS = 79,

  // Logs the final error type for a content retry failure.
  GEMINI_CLI_CONTENT_RETRY_FAILURE_FINAL_ERROR_TYPE = 80,

  // Logs the total duration in milliseconds for a content retry failure.
  GEMINI_CLI_CONTENT_RETRY_FAILURE_TOTAL_DURATION_MS = 81,
}
projects/ui/qwen-code/packages/core/src/telemetry/qwen-logger/event-types.ts ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// RUM Protocol Data Structures
//
// Type-only definitions describing the payload shape sent to the RUM
// (Real User Monitoring) collection endpoint. No runtime code lives here.

/** Application descriptor attached to every RUM payload. */
export interface RumApp {
  id: string;
  env: string;
  version: string;
  type: 'cli' | 'extension';
}

/** User identity for the payload — presumably the installation id; confirm against the logger. */
export interface RumUser {
  id: string;
}

/** The CLI session this payload belongs to. */
export interface RumSession {
  id: string;
}

/** Logical "view" context for the events in a payload. */
export interface RumView {
  id: string;
  name: string;
}

/** Base shape shared by all RUM events; specialised by the interfaces below. */
export interface RumEvent {
  timestamp?: number;
  event_type?: 'view' | 'action' | 'exception' | 'resource';
  type: string; // Event type
  name: string; // Event name
  snapshots?: string; // JSON string of event snapshots
  properties?: Record<string, unknown>;
  // [key: string]: unknown;
}

/** A 'view' event: time spent in a rendered view. */
export interface RumViewEvent extends RumEvent {
  view_type?: string; // View rendering type
  time_spent?: number; // Time spent on current view in ms
}

/** An 'action' event: a discrete user or system interaction. */
export interface RumActionEvent extends RumEvent {
  target_name?: string; // Element user interacted with (for auto-collected actions only)
  duration?: number; // Action duration in ms
  method_info?: string; // Action callback, e.g.: onClick()
}

/** An 'exception' event: an error with optional source-location detail. */
export interface RumExceptionEvent extends RumEvent {
  source?: string; // Error source, e.g.: console, event
  file?: string; // Error file
  subtype?: string; // Secondary classification of error type
  message?: string; // Concise, readable message explaining the event
  stack?: string; // Stack trace or supplemental information about the error
  caused_by?: string; // Exception cause
  line?: number; // Line number where exception occurred
  column?: number; // Column number where exception occurred
  thread_id?: string; // Thread ID
  binary_images?: string; // Error source
}

/** A 'resource' event: a network request with detailed timing breakdown. */
export interface RumResourceEvent extends RumEvent {
  method?: string; // HTTP request method: POST, GET, etc.
  status_code?: string; // Resource status code
  message?: string; // Error message content, corresponds to resource.error_msg
  url?: string; // Resource URL
  provider_type?: string; // Resource provider type: first-party, cdn, ad, analytics
  trace_id?: string; // Resource request TraceID
  success?: number; // Resource loading success: 1 (default) success, 0 failure
  duration?: number; // Total time spent loading resource in ms (responseEnd - redirectStart)
  size?: number; // Resource size in bytes, corresponds to decodedBodySize
  connect_duration?: number; // Time spent establishing connection to server in ms (connectEnd - connectStart)
  ssl_duration?: number; // Time spent on TLS handshake in ms (connectEnd - secureConnectionStart), 0 if no SSL
  dns_duration?: number; // Time spent resolving DNS name in ms (domainLookupEnd - domainLookupStart)
  redirect_duration?: number; // Time spent on HTTP redirects in ms (redirectEnd - redirectStart)
  first_byte_duration?: number; // Time waiting for first byte of response in ms (responseStart - requestStart)
  download_duration?: number; // Time spent downloading response in ms (responseEnd - responseStart)
  timing_data?: string; // JSON string of PerformanceResourceTiming
  trace_data?: string; // Trace information snapshot JSON string
}

/** Top-level envelope POSTed to the RUM endpoint. */
export interface RumPayload {
  app: RumApp;
  user: RumUser;
  session: RumSession;
  view: RumView;
  events: RumEvent[];
  properties?: Record<string, unknown>;
  _v: string; // payload schema version string
}
projects/ui/qwen-code/packages/core/src/telemetry/qwen-logger/qwen-logger.test.ts ADDED
@@ -0,0 +1,407 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import {
8
+ describe,
9
+ it,
10
+ expect,
11
+ vi,
12
+ beforeEach,
13
+ afterEach,
14
+ afterAll,
15
+ } from 'vitest';
16
+ import { QwenLogger, TEST_ONLY } from './qwen-logger.js';
17
+ import { Config } from '../../config/config.js';
18
+ import {
19
+ StartSessionEvent,
20
+ EndSessionEvent,
21
+ IdeConnectionEvent,
22
+ KittySequenceOverflowEvent,
23
+ IdeConnectionType,
24
+ } from '../types.js';
25
+ import { RumEvent } from './event-types.js';
26
+
27
// Mock dependencies
// Installation id is stubbed so tests don't read the real machine identity.
vi.mock('../../utils/user_id.js', () => ({
  getInstallationId: vi.fn(() => 'test-installation-id'),
}));

// safeJsonStringify is replaced by a plain JSON.stringify pass-through so
// serialized snapshots are deterministic in assertions.
vi.mock('../../utils/safeJsonStringify.js', () => ({
  safeJsonStringify: vi.fn((obj) => JSON.stringify(obj)),
}));

// Mock https module so no network requests are ever made from tests.
vi.mock('https', () => ({
  request: vi.fn(),
}));
40
+
41
+ const makeFakeConfig = (overrides: Partial<Config> = {}): Config => {
42
+ const defaults = {
43
+ getUsageStatisticsEnabled: () => true,
44
+ getDebugMode: () => false,
45
+ getSessionId: () => 'test-session-id',
46
+ getCliVersion: () => '1.0.0',
47
+ getProxy: () => undefined,
48
+ getContentGeneratorConfig: () => ({ authType: 'test-auth' }),
49
+ getMcpServers: () => ({}),
50
+ getModel: () => 'test-model',
51
+ getEmbeddingModel: () => 'test-embedding',
52
+ getSandbox: () => false,
53
+ getCoreTools: () => [],
54
+ getApprovalMode: () => 'auto',
55
+ getTelemetryEnabled: () => true,
56
+ getTelemetryLogPromptsEnabled: () => false,
57
+ getFileFilteringRespectGitIgnore: () => true,
58
+ ...overrides,
59
+ };
60
+ return defaults as Config;
61
+ };
62
+
63
// Unit tests for QwenLogger. Fake timers pin wall-clock time so the
// flush-interval logic is deterministic; the singleton is reset around each
// test so every case starts from a fresh logger.
describe('QwenLogger', () => {
  let mockConfig: Config;

  beforeEach(() => {
    vi.useFakeTimers();
    vi.setSystemTime(new Date('2025-01-01T12:00:00.000Z'));
    mockConfig = makeFakeConfig();
    // Clear singleton instance so getInstance() builds a fresh logger per test
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    (QwenLogger as any).instance = undefined;
  });

  afterEach(() => {
    vi.useRealTimers();
    vi.restoreAllMocks();
  });

  afterAll(() => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    (QwenLogger as any).instance = undefined;
  });

  describe('getInstance', () => {
    it('returns undefined when usage statistics are disabled', () => {
      const config = makeFakeConfig({ getUsageStatisticsEnabled: () => false });
      const logger = QwenLogger.getInstance(config);
      expect(logger).toBeUndefined();
    });

    it('returns an instance when usage statistics are enabled', () => {
      const logger = QwenLogger.getInstance(mockConfig);
      expect(logger).toBeInstanceOf(QwenLogger);
    });

    it('is a singleton', () => {
      const logger1 = QwenLogger.getInstance(mockConfig);
      const logger2 = QwenLogger.getInstance(mockConfig);
      expect(logger1).toBe(logger2);
    });
  });

  describe('event queue management', () => {
    it('should handle event overflow gracefully', () => {
      const debugConfig = makeFakeConfig({ getDebugMode: () => true });
      const logger = QwenLogger.getInstance(debugConfig)!;
      const consoleSpy = vi
        .spyOn(console, 'debug')
        .mockImplementation(() => {});

      // Fill the queue beyond capacity
      for (let i = 0; i < TEST_ONLY.MAX_EVENTS + 10; i++) {
        logger.enqueueLogEvent({
          timestamp: Date.now(),
          event_type: 'action',
          type: 'test',
          name: `test-event-${i}`,
        });
      }

      // Should have logged debug messages about dropping events
      expect(consoleSpy).toHaveBeenCalledWith(
        expect.stringContaining(
          'QwenLogger: Dropped old event to prevent memory leak',
        ),
      );
    });

    it('should handle enqueue errors gracefully', () => {
      const debugConfig = makeFakeConfig({ getDebugMode: () => true });
      const logger = QwenLogger.getInstance(debugConfig)!;
      const consoleSpy = vi
        .spyOn(console, 'error')
        .mockImplementation(() => {});

      // Mock the events deque to throw an error when pushed to
      const originalPush = logger['events'].push;
      logger['events'].push = vi.fn(() => {
        throw new Error('Test error');
      });

      logger.enqueueLogEvent({
        timestamp: Date.now(),
        event_type: 'action',
        type: 'test',
        name: 'test-event',
      });

      expect(consoleSpy).toHaveBeenCalledWith(
        'QwenLogger: Failed to enqueue log event.',
        expect.any(Error),
      );

      // Restore original method
      logger['events'].push = originalPush;
    });
  });

  describe('concurrent flush protection', () => {
    it('should handle concurrent flush requests', () => {
      const debugConfig = makeFakeConfig({ getDebugMode: () => true });
      const logger = QwenLogger.getInstance(debugConfig)!;
      const consoleSpy = vi
        .spyOn(console, 'debug')
        .mockImplementation(() => {});

      // Manually set the flush-in-progress flag to simulate concurrent access
      logger['isFlushInProgress'] = true;

      // Try to flush while another flush is in progress
      const result = logger.flushToRum();

      // Should have logged about pending flush
      expect(consoleSpy).toHaveBeenCalledWith(
        expect.stringContaining(
          'QwenLogger: Flush already in progress, marking pending flush',
        ),
      );

      // Should return a resolved promise (no second flush started)
      expect(result).toBeInstanceOf(Promise);

      // Reset the flag
      logger['isFlushInProgress'] = false;
    });
  });

  describe('failed event retry mechanism', () => {
    it('should requeue failed events with size limits', () => {
      const debugConfig = makeFakeConfig({ getDebugMode: () => true });
      const logger = QwenLogger.getInstance(debugConfig)!;
      const consoleSpy = vi
        .spyOn(console, 'debug')
        .mockImplementation(() => {});

      // More failed events than the retry cap allows
      const failedEvents: RumEvent[] = [];
      for (let i = 0; i < TEST_ONLY.MAX_RETRY_EVENTS + 50; i++) {
        failedEvents.push({
          timestamp: Date.now(),
          event_type: 'action',
          type: 'test',
          name: `failed-event-${i}`,
        });
      }

      // Call the private method using bracket notation
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (logger as any).requeueFailedEvents(failedEvents);

      // Should have logged that (a capped number of) events were re-queued
      expect(consoleSpy).toHaveBeenCalledWith(
        expect.stringContaining('QwenLogger: Re-queued'),
      );
    });

    it('should handle empty retry queue gracefully', () => {
      const debugConfig = makeFakeConfig({ getDebugMode: () => true });
      const logger = QwenLogger.getInstance(debugConfig)!;
      const consoleSpy = vi
        .spyOn(console, 'debug')
        .mockImplementation(() => {});

      // Fill the queue to capacity first so no space remains
      for (let i = 0; i < TEST_ONLY.MAX_EVENTS; i++) {
        logger.enqueueLogEvent({
          timestamp: Date.now(),
          event_type: 'action',
          type: 'test',
          name: `event-${i}`,
        });
      }

      // Try to requeue when no space is available
      const failedEvents: RumEvent[] = [
        {
          timestamp: Date.now(),
          event_type: 'action',
          type: 'test',
          name: 'failed-event',
        },
      ];

      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (logger as any).requeueFailedEvents(failedEvents);

      expect(consoleSpy).toHaveBeenCalledWith(
        expect.stringContaining('QwenLogger: No events re-queued'),
      );
    });
  });

  describe('event handlers', () => {
    it('should log IDE connection events', () => {
      const logger = QwenLogger.getInstance(mockConfig)!;
      const enqueueSpy = vi.spyOn(logger, 'enqueueLogEvent');

      const event = new IdeConnectionEvent(IdeConnectionType.SESSION);

      logger.logIdeConnectionEvent(event);

      expect(enqueueSpy).toHaveBeenCalledWith(
        expect.objectContaining({
          event_type: 'action',
          type: 'connection',
          name: 'ide_connection',
          snapshots: JSON.stringify({
            connection_type: IdeConnectionType.SESSION,
          }),
        }),
      );
    });

    it('should log Kitty sequence overflow events', () => {
      const logger = QwenLogger.getInstance(mockConfig)!;
      const enqueueSpy = vi.spyOn(logger, 'enqueueLogEvent');

      const event = new KittySequenceOverflowEvent(1024, 'truncated...');

      logger.logKittySequenceOverflowEvent(event);

      expect(enqueueSpy).toHaveBeenCalledWith(
        expect.objectContaining({
          event_type: 'exception',
          type: 'overflow',
          name: 'kitty_sequence_overflow',
          subtype: 'kitty_sequence_overflow',
          snapshots: JSON.stringify({
            sequence_length: 1024,
            truncated_sequence: 'truncated...',
          }),
        }),
      );
    });

    it('should flush start session events immediately', async () => {
      const logger = QwenLogger.getInstance(mockConfig)!;
      const flushSpy = vi.spyOn(logger, 'flushToRum').mockResolvedValue({});

      const testConfig = makeFakeConfig({
        getModel: () => 'test-model',
        getEmbeddingModel: () => 'test-embedding',
      });
      const event = new StartSessionEvent(testConfig);

      logger.logStartSessionEvent(event);

      expect(flushSpy).toHaveBeenCalled();
    });

    it('should flush end session events immediately', async () => {
      const logger = QwenLogger.getInstance(mockConfig)!;
      const flushSpy = vi.spyOn(logger, 'flushToRum').mockResolvedValue({});

      const event = new EndSessionEvent(mockConfig);

      logger.logEndSessionEvent(event);

      expect(flushSpy).toHaveBeenCalled();
    });
  });

  describe('flush timing', () => {
    it('should not flush if interval has not passed', () => {
      const logger = QwenLogger.getInstance(mockConfig)!;
      const flushSpy = vi.spyOn(logger, 'flushToRum');

      // Add an event and try to flush immediately
      logger.enqueueLogEvent({
        timestamp: Date.now(),
        event_type: 'action',
        type: 'test',
        name: 'test-event',
      });

      logger.flushIfNeeded();

      expect(flushSpy).not.toHaveBeenCalled();
    });

    it('should flush when interval has passed', () => {
      const logger = QwenLogger.getInstance(mockConfig)!;
      const flushSpy = vi.spyOn(logger, 'flushToRum').mockResolvedValue({});

      // Add an event
      logger.enqueueLogEvent({
        timestamp: Date.now(),
        event_type: 'action',
        type: 'test',
        name: 'test-event',
      });

      // Advance time beyond flush interval
      vi.advanceTimersByTime(TEST_ONLY.FLUSH_INTERVAL_MS + 1000);

      logger.flushIfNeeded();

      expect(flushSpy).toHaveBeenCalled();
    });
  });

  describe('error handling', () => {
    it('should handle flush errors gracefully with debug mode', async () => {
      const debugConfig = makeFakeConfig({ getDebugMode: () => true });
      const logger = QwenLogger.getInstance(debugConfig)!;
      const consoleSpy = vi
        .spyOn(console, 'debug')
        .mockImplementation(() => {});

      // Add an event first
      logger.enqueueLogEvent({
        timestamp: Date.now(),
        event_type: 'action',
        type: 'test',
        name: 'test-event',
      });

      // Mock flushToRum to reject
      const originalFlush = logger.flushToRum.bind(logger);
      logger.flushToRum = vi.fn().mockRejectedValue(new Error('Network error'));

      // Advance time to trigger flush
      vi.advanceTimersByTime(TEST_ONLY.FLUSH_INTERVAL_MS + 1000);

      logger.flushIfNeeded();

      // Wait for async operations
      await vi.runAllTimersAsync();

      expect(consoleSpy).toHaveBeenCalledWith(
        'Error flushing to RUM:',
        expect.any(Error),
      );

      // Restore original method
      logger.flushToRum = originalFlush;
    });
  });

  describe('constants export', () => {
    it('should export test constants', () => {
      expect(TEST_ONLY.MAX_EVENTS).toBe(1000);
      expect(TEST_ONLY.MAX_RETRY_EVENTS).toBe(100);
      expect(TEST_ONLY.FLUSH_INTERVAL_MS).toBe(60000);
    });
  });
});
projects/ui/qwen-code/packages/core/src/telemetry/qwen-logger/qwen-logger.ts ADDED
@@ -0,0 +1,718 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { Buffer } from 'buffer';
8
+ import * as https from 'https';
9
+ import { HttpsProxyAgent } from 'https-proxy-agent';
10
+
11
+ import {
12
+ StartSessionEvent,
13
+ EndSessionEvent,
14
+ UserPromptEvent,
15
+ ToolCallEvent,
16
+ ApiRequestEvent,
17
+ ApiResponseEvent,
18
+ ApiErrorEvent,
19
+ FlashFallbackEvent,
20
+ LoopDetectedEvent,
21
+ NextSpeakerCheckEvent,
22
+ SlashCommandEvent,
23
+ MalformedJsonResponseEvent,
24
+ IdeConnectionEvent,
25
+ KittySequenceOverflowEvent,
26
+ ChatCompressionEvent,
27
+ InvalidChunkEvent,
28
+ ContentRetryEvent,
29
+ ContentRetryFailureEvent,
30
+ } from '../types.js';
31
+ import {
32
+ RumEvent,
33
+ RumViewEvent,
34
+ RumActionEvent,
35
+ RumResourceEvent,
36
+ RumExceptionEvent,
37
+ RumPayload,
38
+ } from './event-types.js';
39
+ import { Config } from '../../config/config.js';
40
+ import { safeJsonStringify } from '../../utils/safeJsonStringify.js';
41
+ import { HttpError, retryWithBackoff } from '../../utils/retry.js';
42
+ import { getInstallationId } from '../../utils/user_id.js';
43
+ import { FixedDeque } from 'mnemonist';
44
+ import { AuthType } from '../../core/contentGenerator.js';
45
+
46
// Usage statistics collection endpoint
const USAGE_STATS_HOSTNAME = 'gb4w8c3ygj-default-sea.rum.aliyuncs.com';
// Events are POSTed to the collector's root path.
const USAGE_STATS_PATH = '/';

// Application identifier registered with the RUM backend.
// NOTE(review): name reads "RUN" but presumably means "RUM" (the backend) —
// confirm; renaming would require updating every reference.
const RUN_APP_ID = 'gb4w8c3ygj@851d5d500f08f92';

/**
 * Interval in which buffered events are sent to RUM.
 */
const FLUSH_INTERVAL_MS = 1000 * 60;

/**
 * Maximum amount of events to keep in memory. Events added after this amount
 * are dropped until the next flush to RUM, which happens periodically as
 * defined by {@link FLUSH_INTERVAL_MS}.
 */
const MAX_EVENTS = 1000;

/**
 * Maximum events to retry after a failed RUM flush
 */
const MAX_RETRY_EVENTS = 100;

/** Server response after a flush; `nextRequestWaitMs` is currently never populated. */
export interface LogResponse {
  nextRequestWaitMs?: number;
}
72
+
73
// Singleton class for batch posting log events to RUM. When a new event comes in, the elapsed time
// is checked and events are flushed to RUM if at least a minute has passed since the last flush.
export class QwenLogger {
  private static instance: QwenLogger;
  private config?: Config;

  /**
   * Queue of pending events that need to be flushed to the server. New events
   * are added to this queue and then flushed on demand (via `flushToRum`)
   */
  private readonly events: FixedDeque<RumEvent>;

  /**
   * The last time that the events were successfully flushed to the server.
   */
  private lastFlushTime: number = Date.now();

  // Stable pseudo-anonymous identifier derived from the installation id.
  private userId: string;
  // Session id taken from Config at construction time; '' when unavailable.
  private sessionId: string;

  /**
   * The value is true when there is a pending flush happening. This prevents
   * concurrent flush operations.
   */
  private isFlushInProgress: boolean = false;

  /**
   * This value is true when a flush was requested during an ongoing flush.
   */
  private pendingFlush: boolean = false;

  // Guards against double shutdown (explicit call plus the 'exit' hook).
  private isShutdown: boolean = false;

  private constructor(config?: Config) {
    this.config = config;
    this.events = new FixedDeque<RumEvent>(Array, MAX_EVENTS);
    this.userId = this.generateUserId();
    // Config may be a partial mock in tests, so probe for the method first.
    this.sessionId =
      typeof this.config?.getSessionId === 'function'
        ? this.config.getSessionId()
        : '';
  }

  /** Derives the RUM user id from the machine's installation id. */
  private generateUserId(): string {
    // Use installation ID as user ID for consistency
    return `user-${getInstallationId()}`;
  }

  /**
   * Returns the singleton logger, creating it on first use, or `undefined`
   * when usage statistics are disabled. The first call also registers a
   * process 'exit' hook that triggers {@link shutdown}.
   */
  static getInstance(config?: Config): QwenLogger | undefined {
    if (config === undefined || !config?.getUsageStatisticsEnabled())
      return undefined;
    if (!QwenLogger.instance) {
      QwenLogger.instance = new QwenLogger(config);
      process.on(
        'exit',
        QwenLogger.instance.shutdown.bind(QwenLogger.instance),
      );
    }

    return QwenLogger.instance;
  }

  /**
   * Appends an event to the in-memory queue, evicting the oldest event when
   * the queue is at capacity. Never throws; failures are logged in debug mode.
   */
  enqueueLogEvent(event: RumEvent): void {
    try {
      // Manually handle overflow for FixedDeque, which throws when full.
      const wasAtCapacity = this.events.size >= MAX_EVENTS;

      if (wasAtCapacity) {
        this.events.shift(); // Evict oldest element to make space.
      }

      this.events.push(event);

      if (wasAtCapacity && this.config?.getDebugMode()) {
        console.debug(
          `QwenLogger: Dropped old event to prevent memory leak (queue size: ${this.events.size})`,
        );
      }
    } catch (error) {
      if (this.config?.getDebugMode()) {
        console.error('QwenLogger: Failed to enqueue log event.', error);
      }
    }
  }

  /**
   * Builds a generic RUM event with the current timestamp; `properties` is
   * spread last and so may override the base fields.
   */
  createRumEvent(
    eventType: 'view' | 'action' | 'exception' | 'resource',
    type: string,
    name: string,
    properties: Partial<RumEvent>,
  ): RumEvent {
    return {
      timestamp: Date.now(),
      event_type: eventType,
      type,
      name,
      ...(properties || {}),
    };
  }

  /** Convenience wrapper for a RUM 'view' event. */
  createViewEvent(
    type: string,
    name: string,
    properties: Partial<RumViewEvent>,
  ): RumEvent {
    return this.createRumEvent('view', type, name, properties);
  }

  /** Convenience wrapper for a RUM 'action' event. */
  createActionEvent(
    type: string,
    name: string,
    properties: Partial<RumActionEvent>,
  ): RumEvent {
    return this.createRumEvent('action', type, name, properties);
  }

  /** Convenience wrapper for a RUM 'resource' event. */
  createResourceEvent(
    type: string,
    name: string,
    properties: Partial<RumResourceEvent>,
  ): RumEvent {
    return this.createRumEvent('resource', type, name, properties);
  }

  /** Convenience wrapper for a RUM 'exception' event. */
  createExceptionEvent(
    type: string,
    name: string,
    properties: Partial<RumExceptionEvent>,
  ): RumEvent {
    return this.createRumEvent('exception', type, name, properties);
  }

  /**
   * Assembles the full payload envelope (app/user/session/view metadata plus
   * the currently queued events) expected by the RUM collector.
   */
  async createRumPayload(): Promise<RumPayload> {
    const authType = this.config?.getAuthType();
    const version = this.config?.getCliVersion() || 'unknown';

    return {
      app: {
        id: RUN_APP_ID,
        env: process.env['DEBUG'] ? 'dev' : 'prod',
        version: version || 'unknown',
        type: 'cli',
      },
      user: {
        id: this.userId,
      },
      session: {
        id: this.sessionId,
      },
      view: {
        id: this.sessionId,
        name: 'qwen-code-cli',
      },

      events: this.events.toArray() as RumEvent[],
      properties: {
        auth_type: authType,
        model: this.config?.getModel(),
        base_url:
          authType === AuthType.USE_OPENAI
            ? process.env['OPENAI_BASE_URL']
            : '',
      },
      _v: `qwen-code@${version}`,
    };
  }

  /** Kicks off a fire-and-forget flush if FLUSH_INTERVAL_MS has elapsed. */
  flushIfNeeded(): void {
    if (Date.now() - this.lastFlushTime < FLUSH_INTERVAL_MS) {
      return;
    }

    this.flushToRum().catch((error) => {
      if (this.config?.getDebugMode()) {
        console.debug('Error flushing to RUM:', error);
      }
    });
  }

  /**
   * Drains the queue and POSTs it to the RUM collector with retry/backoff.
   * Concurrent calls coalesce: a call during an in-flight flush only sets
   * `pendingFlush`, and the `finally` block re-triggers a flush afterwards.
   * On final failure, the drained events are re-queued (capped) for retry.
   */
  async flushToRum(): Promise<LogResponse> {
    if (this.isFlushInProgress) {
      if (this.config?.getDebugMode()) {
        console.debug(
          'QwenLogger: Flush already in progress, marking pending flush.',
        );
      }
      this.pendingFlush = true;
      return Promise.resolve({});
    }
    this.isFlushInProgress = true;

    if (this.config?.getDebugMode()) {
      console.log('Flushing log events to RUM.');
    }
    if (this.events.size === 0) {
      this.isFlushInProgress = false;
      return {};
    }

    // Snapshot and clear before the async work so new events can accumulate.
    const eventsToSend = this.events.toArray() as RumEvent[];
    this.events.clear();

    const rumPayload = await this.createRumPayload();
    // Override events with the ones we're sending
    rumPayload.events = eventsToSend;
    const flushFn = () =>
      new Promise<Buffer>((resolve, reject) => {
        const body = safeJsonStringify(rumPayload);
        const options = {
          hostname: USAGE_STATS_HOSTNAME,
          path: USAGE_STATS_PATH,
          method: 'POST',
          headers: {
            'Content-Length': Buffer.byteLength(body),
            'Content-Type': 'text/plain;charset=UTF-8',
          },
        };
        const bufs: Buffer[] = [];
        const req = https.request(
          {
            ...options,
            agent: this.getProxyAgent(),
          },
          (res) => {
            if (
              res.statusCode &&
              (res.statusCode < 200 || res.statusCode >= 300)
            ) {
              const err: HttpError = new Error(
                `Request failed with status ${res.statusCode}`,
              );
              err.status = res.statusCode;
              // Drain the response so the socket can be reused.
              res.resume();
              return reject(err);
            }
            res.on('data', (buf) => bufs.push(buf));
            res.on('end', () => resolve(Buffer.concat(bufs)));
          },
        );
        req.on('error', reject);
        req.end(body);
      });

    try {
      await retryWithBackoff(flushFn, {
        maxAttempts: 3,
        initialDelayMs: 200,
        shouldRetry: (err: unknown) => {
          if (!(err instanceof Error)) return false;
          const status = (err as HttpError).status as number | undefined;
          // If status is not available, it's likely a network error
          if (status === undefined) return true;

          // Retry on 429 (Too many Requests) and 5xx server errors.
          return status === 429 || (status >= 500 && status < 600);
        },
      });

      this.lastFlushTime = Date.now();
      return {};
    } catch (error) {
      if (this.config?.getDebugMode()) {
        console.error('RUM flush failed after multiple retries.', error);
      }

      // Re-queue failed events for retry
      this.requeueFailedEvents(eventsToSend);
      return {};
    } finally {
      this.isFlushInProgress = false;

      // If a flush was requested while we were flushing, flush again
      if (this.pendingFlush) {
        this.pendingFlush = false;
        // Fire and forget the pending flush
        this.flushToRum().catch((error) => {
          if (this.config?.getDebugMode()) {
            console.debug('Error in pending flush to RUM:', error);
          }
        });
      }
    }
  }

  /** Logs session start as a 'view' event and flushes immediately. */
  logStartSessionEvent(event: StartSessionEvent): void {
    const applicationEvent = this.createViewEvent('session', 'session_start', {
      properties: {
        model: event.model,
      },
      snapshots: JSON.stringify({
        embedding_model: event.embedding_model,
        sandbox_enabled: event.sandbox_enabled,
        core_tools_enabled: event.core_tools_enabled,
        approval_mode: event.approval_mode,
        api_key_enabled: event.api_key_enabled,
        vertex_ai_enabled: event.vertex_ai_enabled,
        debug_enabled: event.debug_enabled,
        mcp_servers: event.mcp_servers,
        telemetry_enabled: event.telemetry_enabled,
        telemetry_log_user_prompts_enabled:
          event.telemetry_log_user_prompts_enabled,
      }),
    });

    // Flush start event immediately
    this.enqueueLogEvent(applicationEvent);
    this.flushToRum().catch((error: unknown) => {
      if (this.config?.getDebugMode()) {
        console.debug('Error flushing to RUM:', error);
      }
    });
  }

  /** Logs a user prompt (length only — never the prompt text). */
  logNewPromptEvent(event: UserPromptEvent): void {
    const rumEvent = this.createActionEvent('user_prompt', 'user_prompt', {
      properties: {
        auth_type: event.auth_type,
        prompt_id: event.prompt_id,
      },
      snapshots: JSON.stringify({
        prompt_length: event.prompt_length,
      }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs a tool invocation outcome (decision, success, duration, error). */
  logToolCallEvent(event: ToolCallEvent): void {
    const rumEvent = this.createActionEvent(
      'tool_call',
      `tool_call#${event.function_name}`,
      {
        properties: {
          prompt_id: event.prompt_id,
        },
        snapshots: JSON.stringify({
          function_name: event.function_name,
          decision: event.decision,
          success: event.success,
          duration_ms: event.duration_ms,
          error: event.error,
          error_type: event.error_type,
        }),
      },
    );

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs an outgoing model API request as a 'resource' event. */
  logApiRequestEvent(event: ApiRequestEvent): void {
    const rumEvent = this.createResourceEvent('api', 'api_request', {
      properties: {
        model: event.model,
        prompt_id: event.prompt_id,
      },
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs a model API response, including token counts and latency. */
  logApiResponseEvent(event: ApiResponseEvent): void {
    const rumEvent = this.createResourceEvent('api', 'api_response', {
      status_code: event.status_code?.toString() ?? '',
      duration: event.duration_ms,
      // NOTE(review): success is hard-coded to 1 even when event.error is set;
      // errors seem to be reported via logApiErrorEvent instead — confirm.
      success: 1,
      message: event.error,
      trace_id: event.response_id,
      properties: {
        auth_type: event.auth_type,
        model: event.model,
        prompt_id: event.prompt_id,
      },
      snapshots: JSON.stringify({
        input_token_count: event.input_token_count,
        output_token_count: event.output_token_count,
        cached_content_token_count: event.cached_content_token_count,
        thoughts_token_count: event.thoughts_token_count,
        tool_token_count: event.tool_token_count,
      }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs a failed model API call as a 'resource' event with success=0. */
  logApiErrorEvent(event: ApiErrorEvent): void {
    const rumEvent = this.createResourceEvent('api', 'api_error', {
      status_code: event.status_code?.toString() ?? '',
      duration: event.duration_ms,
      success: 0,
      message: event.error,
      trace_id: event.response_id,
      properties: {
        auth_type: event.auth_type,
        model: event.model,
        prompt_id: event.prompt_id,
      },
      snapshots: JSON.stringify({
        error_type: event.error_type,
      }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs a fallback to the flash model. */
  logFlashFallbackEvent(event: FlashFallbackEvent): void {
    const rumEvent = this.createActionEvent('fallback', 'flash_fallback', {
      properties: {
        auth_type: event.auth_type,
      },
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs detection of a model response loop as an exception event. */
  logLoopDetectedEvent(event: LoopDetectedEvent): void {
    const rumEvent = this.createExceptionEvent('error', 'loop_detected', {
      subtype: 'loop_detected',
      properties: {
        prompt_id: event.prompt_id,
      },
      snapshots: JSON.stringify({
        loop_type: event.loop_type,
      }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs the outcome of a next-speaker check. */
  logNextSpeakerCheck(event: NextSpeakerCheckEvent): void {
    const rumEvent = this.createActionEvent('check', 'next_speaker_check', {
      properties: {
        prompt_id: event.prompt_id,
      },
      snapshots: JSON.stringify({
        finish_reason: event.finish_reason,
        result: event.result,
      }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs usage of a slash command (command name and subcommand only). */
  logSlashCommandEvent(event: SlashCommandEvent): void {
    const rumEvent = this.createActionEvent('command', 'slash_command', {
      snapshots: JSON.stringify({
        command: event.command,
        subcommand: event.subcommand,
      }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs a malformed JSON response from the model. */
  logMalformedJsonResponseEvent(event: MalformedJsonResponseEvent): void {
    const rumEvent = this.createExceptionEvent(
      'error',
      'malformed_json_response',
      {
        subtype: 'malformed_json_response',
        properties: {
          model: event.model,
        },
      },
    );

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs an IDE companion connection/disconnection. */
  logIdeConnectionEvent(event: IdeConnectionEvent): void {
    const rumEvent = this.createActionEvent('connection', 'ide_connection', {
      snapshots: JSON.stringify({ connection_type: event.connection_type }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs an overflow of the kitty keyboard-protocol escape sequence buffer. */
  logKittySequenceOverflowEvent(event: KittySequenceOverflowEvent): void {
    const rumEvent = this.createExceptionEvent(
      'overflow',
      'kitty_sequence_overflow',
      {
        subtype: 'kitty_sequence_overflow',
        snapshots: JSON.stringify({
          sequence_length: event.sequence_length,
          truncated_sequence: event.truncated_sequence,
        }),
      },
    );

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs a chat-history compression (token counts before/after). */
  logChatCompressionEvent(event: ChatCompressionEvent): void {
    const rumEvent = this.createActionEvent('compression', 'chat_compression', {
      snapshots: JSON.stringify({
        tokens_before: event.tokens_before,
        tokens_after: event.tokens_after,
      }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs receipt of an invalid stream chunk. */
  logInvalidChunkEvent(event: InvalidChunkEvent): void {
    const rumEvent = this.createExceptionEvent('error', 'invalid_chunk', {
      subtype: 'invalid_chunk',
      message: event.error_message,
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs a single content-retry attempt. */
  logContentRetryEvent(event: ContentRetryEvent): void {
    const rumEvent = this.createActionEvent('retry', 'content_retry', {
      snapshots: JSON.stringify({
        attempt_number: event.attempt_number,
        error_type: event.error_type,
        retry_delay_ms: event.retry_delay_ms,
      }),
    });

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs exhaustion of all content-retry attempts. */
  logContentRetryFailureEvent(event: ContentRetryFailureEvent): void {
    const rumEvent = this.createExceptionEvent(
      'error',
      'content_retry_failure',
      {
        subtype: 'content_retry_failure',
        message: `Content retry failed after ${event.total_attempts} attempts`,
        snapshots: JSON.stringify({
          total_attempts: event.total_attempts,
          final_error_type: event.final_error_type,
          total_duration_ms: event.total_duration_ms,
        }),
      },
    );

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
  }

  /** Logs session end as a 'view' event and flushes immediately. */
  logEndSessionEvent(_event: EndSessionEvent): void {
    const applicationEvent = this.createViewEvent('session', 'session_end', {});

    // Flush immediately on session end.
    this.enqueueLogEvent(applicationEvent);
    this.flushToRum().catch((error: unknown) => {
      if (this.config?.getDebugMode()) {
        console.debug('Error flushing to RUM:', error);
      }
    });
  }

  /**
   * Returns an HttpsProxyAgent for the configured proxy, `undefined` when no
   * proxy is configured, or throws for non-HTTP(S) proxy schemes.
   */
  getProxyAgent() {
    const proxyUrl = this.config?.getProxy();
    if (!proxyUrl) return undefined;
    // undici which is widely used in the repo can only support http & https proxy protocol,
    // https://github.com/nodejs/undici/issues/2224
    if (proxyUrl.startsWith('http')) {
      return new HttpsProxyAgent(proxyUrl);
    } else {
      throw new Error('Unsupported proxy type');
    }
  }

  /**
   * Logs the end-of-session event once. Registered as a process 'exit' hook.
   * NOTE(review): 'exit' handlers cannot await async work, so the final
   * network flush may not complete before the process terminates — confirm
   * whether a synchronous/beforeExit path is needed.
   */
  shutdown() {
    if (this.isShutdown) return;

    this.isShutdown = true;
    const event = new EndSessionEvent(this.config);
    this.logEndSessionEvent(event);
  }

  /**
   * Puts events from a failed flush back at the front of the queue (so they
   * are retried first), keeping at most MAX_RETRY_EVENTS of the most recent
   * failures and never exceeding the queue capacity.
   */
  private requeueFailedEvents(eventsToSend: RumEvent[]): void {
    // Add the events back to the front of the queue to be retried, but limit retry queue size
    const eventsToRetry = eventsToSend.slice(-MAX_RETRY_EVENTS); // Keep only the most recent events

    // Log a warning if we're dropping events
    if (eventsToSend.length > MAX_RETRY_EVENTS && this.config?.getDebugMode()) {
      console.warn(
        `QwenLogger: Dropping ${
          eventsToSend.length - MAX_RETRY_EVENTS
        } events due to retry queue limit. Total events: ${
          eventsToSend.length
        }, keeping: ${MAX_RETRY_EVENTS}`,
      );
    }

    // Determine how many events can be re-queued
    const availableSpace = MAX_EVENTS - this.events.size;
    const numEventsToRequeue = Math.min(eventsToRetry.length, availableSpace);

    if (numEventsToRequeue === 0) {
      if (this.config?.getDebugMode()) {
        console.debug(
          `QwenLogger: No events re-queued (queue size: ${this.events.size})`,
        );
      }
      return;
    }

    // Get the most recent events to re-queue
    const eventsToRequeue = eventsToRetry.slice(
      eventsToRetry.length - numEventsToRequeue,
    );

    // Prepend events to the front of the deque to be retried first.
    // We iterate backwards to maintain the original order of the failed events.
    for (let i = eventsToRequeue.length - 1; i >= 0; i--) {
      this.events.unshift(eventsToRequeue[i]);
    }
    // Clear any potential overflow
    while (this.events.size > MAX_EVENTS) {
      this.events.pop();
    }

    if (this.config?.getDebugMode()) {
      console.debug(
        `QwenLogger: Re-queued ${numEventsToRequeue} events for retry (queue size: ${this.events.size})`,
      );
    }
  }
}
713
+
714
/** Internal constants re-exported solely for unit tests; not part of the public API. */
export const TEST_ONLY = {
  MAX_RETRY_EVENTS,
  MAX_EVENTS,
  FLUSH_INTERVAL_MS,
};
projects/ui/qwen-code/packages/core/src/tools/__snapshots__/shell.test.ts.snap ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
2
+
3
+ exports[`ShellTool > getDescription > should return the non-windows description when not on windows 1`] = `
4
+ "
5
+ This tool executes a given shell command as \`bash -c <command>\`.
6
+
7
+ **Background vs Foreground Execution:**
8
+ You should decide whether commands should run in background or foreground based on their nature:
9
+
10
+ **Use background execution (is_background: true) for:**
11
+ - Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
12
+ - Build watchers: \`npm run watch\`, \`webpack --watch\`
13
+ - Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
14
+ - Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
15
+ - Any command expected to run indefinitely until manually stopped
16
+
17
+ **Use foreground execution (is_background: false) for:**
18
+ - One-time commands: \`ls\`, \`cat\`, \`grep\`
19
+ - Build commands: \`npm run build\`, \`make\`
20
+ - Installation commands: \`npm install\`, \`pip install\`
21
+ - Git operations: \`git commit\`, \`git push\`
22
+ - Test runs: \`npm test\`, \`pytest\`
23
+
24
+ Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`.
25
+
26
+ The following information is returned:
27
+
28
+ Command: Executed command.
29
+ Directory: Directory (relative to project root) where command was executed, or \`(root)\`.
30
+ Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
31
+ Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
32
+ Error: Error or \`(none)\` if no error was reported for the subprocess.
33
+ Exit Code: Exit code or \`(none)\` if terminated by signal.
34
+ Signal: Signal number or \`(none)\` if no signal was received.
35
+ Background PIDs: List of background processes started or \`(none)\`.
36
+ Process Group PGID: Process group started or \`(none)\`"
37
+ `;
38
+
39
+ exports[`ShellTool > getDescription > should return the windows description when on windows 1`] = `
40
+ "
41
+ This tool executes a given shell command as \`cmd.exe /c <command>\`.
42
+
43
+ **Background vs Foreground Execution:**
44
+ You should decide whether commands should run in background or foreground based on their nature:
45
+
46
+ **Use background execution (is_background: true) for:**
47
+ - Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
48
+ - Build watchers: \`npm run watch\`, \`webpack --watch\`
49
+ - Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
50
+ - Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
51
+ - Any command expected to run indefinitely until manually stopped
52
+
53
+ **Use foreground execution (is_background: false) for:**
54
+ - One-time commands: \`ls\`, \`cat\`, \`grep\`
55
+ - Build commands: \`npm run build\`, \`make\`
56
+ - Installation commands: \`npm install\`, \`pip install\`
57
+ - Git operations: \`git commit\`, \`git push\`
58
+ - Test runs: \`npm test\`, \`pytest\`
59
+
60
+
61
+
62
+ The following information is returned:
63
+
64
+ Command: Executed command.
65
+ Directory: Directory (relative to project root) where command was executed, or \`(root)\`.
66
+ Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
67
+ Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
68
+ Error: Error or \`(none)\` if no error was reported for the subprocess.
69
+ Exit Code: Exit code or \`(none)\` if terminated by signal.
70
+ Signal: Signal number or \`(none)\` if no signal was received.
71
+ Background PIDs: List of background processes started or \`(none)\`.
72
+ Process Group PGID: Process group started or \`(none)\`"
73
+ `;
projects/ui/qwen-code/packages/core/src/utils/filesearch/crawlCache.test.ts ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, vi, afterEach, beforeEach } from 'vitest';
8
+ import { getCacheKey, read, write, clear } from './crawlCache.js';
9
+
10
// Unit tests for the in-memory crawl cache: key derivation (getCacheKey) and
// TTL-based read/write/clear semantics, using vitest fake timers for eviction.
describe('CrawlCache', () => {
  describe('getCacheKey', () => {
    it('should generate a consistent hash', () => {
      const key1 = getCacheKey('/foo', 'bar');
      const key2 = getCacheKey('/foo', 'bar');
      expect(key1).toBe(key2);
    });

    it('should generate a different hash for different directories', () => {
      const key1 = getCacheKey('/foo', 'bar');
      const key2 = getCacheKey('/bar', 'bar');
      expect(key1).not.toBe(key2);
    });

    it('should generate a different hash for different ignore content', () => {
      const key1 = getCacheKey('/foo', 'bar');
      const key2 = getCacheKey('/foo', 'baz');
      expect(key1).not.toBe(key2);
    });

    it('should generate a different hash for different maxDepth values', () => {
      const key1 = getCacheKey('/foo', 'bar', 1);
      const key2 = getCacheKey('/foo', 'bar', 2);
      const key3 = getCacheKey('/foo', 'bar', undefined);
      const key4 = getCacheKey('/foo', 'bar');
      expect(key1).not.toBe(key2);
      expect(key1).not.toBe(key3);
      expect(key2).not.toBe(key3);
      // An explicit `undefined` maxDepth must hash identically to omitting it.
      expect(key3).toBe(key4);
    });
  });

  describe('in-memory cache operations', () => {
    beforeEach(() => {
      // Ensure a clean slate before each test
      clear();
    });

    afterEach(() => {
      // Restore real timers after each test that uses fake ones
      vi.useRealTimers();
    });

    it('should write and read data from the cache', () => {
      const key = 'test-key';
      const data = ['foo', 'bar'];
      write(key, data, 10000); // 10 second TTL
      const cachedData = read(key);
      expect(cachedData).toEqual(data);
    });

    it('should return undefined for a nonexistent key', () => {
      const cachedData = read('nonexistent-key');
      expect(cachedData).toBeUndefined();
    });

    it('should clear the cache', () => {
      const key = 'test-key';
      const data = ['foo', 'bar'];
      write(key, data, 10000);
      clear();
      const cachedData = read(key);
      expect(cachedData).toBeUndefined();
    });

    it('should automatically evict a cache entry after its TTL expires', async () => {
      vi.useFakeTimers();
      const key = 'ttl-key';
      const data = ['foo'];
      const ttl = 5000; // 5 seconds

      write(key, data, ttl);

      // Should exist immediately after writing
      expect(read(key)).toEqual(data);

      // Advance time just before expiration
      await vi.advanceTimersByTimeAsync(ttl - 1);
      expect(read(key)).toEqual(data);

      // Advance time past expiration
      await vi.advanceTimersByTimeAsync(1);
      expect(read(key)).toBeUndefined();
    });

    it('should reset the timer when an entry is updated', async () => {
      vi.useFakeTimers();
      const key = 'update-key';
      const initialData = ['initial'];
      const updatedData = ['updated'];
      const ttl = 5000; // 5 seconds

      // Write initial data
      write(key, initialData, ttl);

      // Advance time, but not enough to expire
      await vi.advanceTimersByTimeAsync(3000);
      expect(read(key)).toEqual(initialData);

      // Update the data, which should reset the timer
      write(key, updatedData, ttl);
      expect(read(key)).toEqual(updatedData);

      // Advance time again. If the timer wasn't reset, the total elapsed
      // time (3000 + 3000 = 6000) would cause an eviction.
      await vi.advanceTimersByTimeAsync(3000);
      expect(read(key)).toEqual(updatedData);

      // Advance past the new expiration time
      await vi.advanceTimersByTimeAsync(2001);
      expect(read(key)).toBeUndefined();
    });
  });
});
projects/ui/qwen-code/packages/core/src/utils/filesearch/crawlCache.ts ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import crypto from 'node:crypto';
8
+
9
+ const crawlCache = new Map<string, string[]>();
10
+ const cacheTimers = new Map<string, NodeJS.Timeout>();
11
+
12
+ /**
13
+ * Generates a unique cache key based on the project directory and the content
14
+ * of ignore files. This ensures that the cache is invalidated if the project
15
+ * or ignore rules change.
16
+ */
17
+ export const getCacheKey = (
18
+ directory: string,
19
+ ignoreContent: string,
20
+ maxDepth?: number,
21
+ ): string => {
22
+ const hash = crypto.createHash('sha256');
23
+ hash.update(directory);
24
+ hash.update(ignoreContent);
25
+ if (maxDepth !== undefined) {
26
+ hash.update(String(maxDepth));
27
+ }
28
+ return hash.digest('hex');
29
+ };
30
+
31
+ /**
32
+ * Reads cached data from the in-memory cache.
33
+ * Returns undefined if the key is not found.
34
+ */
35
+ export const read = (key: string): string[] | undefined => crawlCache.get(key);
36
+
37
+ /**
38
+ * Writes data to the in-memory cache and sets a timer to evict it after the TTL.
39
+ */
40
+ export const write = (key: string, results: string[], ttlMs: number): void => {
41
+ // Clear any existing timer for this key to prevent premature deletion
42
+ if (cacheTimers.has(key)) {
43
+ clearTimeout(cacheTimers.get(key)!);
44
+ }
45
+
46
+ // Store the new data
47
+ crawlCache.set(key, results);
48
+
49
+ // Set a timer to automatically delete the cache entry after the TTL
50
+ const timerId = setTimeout(() => {
51
+ crawlCache.delete(key);
52
+ cacheTimers.delete(key);
53
+ }, ttlMs);
54
+
55
+ // Store the timer handle so we can clear it if the entry is updated
56
+ cacheTimers.set(key, timerId);
57
+ };
58
+
59
+ /**
60
+ * Clears the entire cache and all active timers.
61
+ * Primarily used for testing.
62
+ */
63
+ export const clear = (): void => {
64
+ for (const timerId of cacheTimers.values()) {
65
+ clearTimeout(timerId);
66
+ }
67
+ crawlCache.clear();
68
+ cacheTimers.clear();
69
+ };
projects/ui/qwen-code/packages/core/src/utils/filesearch/crawler.test.ts ADDED
@@ -0,0 +1,573 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, afterEach, vi, beforeEach } from 'vitest';
8
+ import * as fs from 'fs/promises';
9
+ import * as path from 'path';
10
+ import * as cache from './crawlCache.js';
11
+ import { crawl } from './crawler.js';
12
+ import { createTmpDir, cleanupTmpDir } from '@qwen-code/qwen-code-test-utils';
13
+ import { Ignore, loadIgnoreRules } from './ignore.js';
14
+
15
+ describe('crawler', () => {
16
+ let tmpDir: string;
17
+ afterEach(async () => {
18
+ if (tmpDir) {
19
+ await cleanupTmpDir(tmpDir);
20
+ }
21
+ vi.restoreAllMocks();
22
+ });
23
+
24
+ it('should use .geminiignore rules', async () => {
25
+ tmpDir = await createTmpDir({
26
+ '.geminiignore': 'dist/',
27
+ dist: ['ignored.js'],
28
+ src: ['not-ignored.js'],
29
+ });
30
+
31
+ const ignore = loadIgnoreRules({
32
+ projectRoot: tmpDir,
33
+ useGitignore: false,
34
+ useGeminiignore: true,
35
+ ignoreDirs: [],
36
+ });
37
+
38
+ const results = await crawl({
39
+ crawlDirectory: tmpDir,
40
+ cwd: tmpDir,
41
+ ignore,
42
+ cache: false,
43
+ cacheTtl: 0,
44
+ });
45
+
46
+ expect(results).toEqual(
47
+ expect.arrayContaining([
48
+ '.',
49
+ 'src/',
50
+ '.geminiignore',
51
+ 'src/not-ignored.js',
52
+ ]),
53
+ );
54
+ });
55
+
56
+ it('should combine .gitignore and .geminiignore rules', async () => {
57
+ tmpDir = await createTmpDir({
58
+ '.gitignore': 'dist/',
59
+ '.geminiignore': 'build/',
60
+ dist: ['ignored-by-git.js'],
61
+ build: ['ignored-by-gemini.js'],
62
+ src: ['not-ignored.js'],
63
+ });
64
+
65
+ const ignore = loadIgnoreRules({
66
+ projectRoot: tmpDir,
67
+ useGitignore: true,
68
+ useGeminiignore: true,
69
+ ignoreDirs: [],
70
+ });
71
+
72
+ const results = await crawl({
73
+ crawlDirectory: tmpDir,
74
+ cwd: tmpDir,
75
+ ignore,
76
+ cache: false,
77
+ cacheTtl: 0,
78
+ });
79
+
80
+ expect(results).toEqual(
81
+ expect.arrayContaining([
82
+ '.',
83
+ 'src/',
84
+ '.geminiignore',
85
+ '.gitignore',
86
+ 'src/not-ignored.js',
87
+ ]),
88
+ );
89
+ });
90
+
91
+ it('should use ignoreDirs option', async () => {
92
+ tmpDir = await createTmpDir({
93
+ logs: ['some.log'],
94
+ src: ['main.js'],
95
+ });
96
+
97
+ const ignore = loadIgnoreRules({
98
+ projectRoot: tmpDir,
99
+ useGitignore: false,
100
+ useGeminiignore: false,
101
+ ignoreDirs: ['logs'],
102
+ });
103
+
104
+ const results = await crawl({
105
+ crawlDirectory: tmpDir,
106
+ cwd: tmpDir,
107
+ ignore,
108
+ cache: false,
109
+ cacheTtl: 0,
110
+ });
111
+
112
+ expect(results).toEqual(
113
+ expect.arrayContaining(['.', 'src/', 'src/main.js']),
114
+ );
115
+ });
116
+
117
+ it('should handle negated directories', async () => {
118
+ tmpDir = await createTmpDir({
119
+ '.gitignore': ['build/**', '!build/public', '!build/public/**'].join(
120
+ '\n',
121
+ ),
122
+ build: {
123
+ 'private.js': '',
124
+ public: ['index.html'],
125
+ },
126
+ src: ['main.js'],
127
+ });
128
+
129
+ const ignore = loadIgnoreRules({
130
+ projectRoot: tmpDir,
131
+ useGitignore: true,
132
+ useGeminiignore: false,
133
+ ignoreDirs: [],
134
+ });
135
+
136
+ const results = await crawl({
137
+ crawlDirectory: tmpDir,
138
+ cwd: tmpDir,
139
+ ignore,
140
+ cache: false,
141
+ cacheTtl: 0,
142
+ });
143
+
144
+ expect(results).toEqual(
145
+ expect.arrayContaining([
146
+ '.',
147
+ 'build/',
148
+ 'build/public/',
149
+ 'src/',
150
+ '.gitignore',
151
+ 'build/public/index.html',
152
+ 'src/main.js',
153
+ ]),
154
+ );
155
+ });
156
+
157
+ it('should handle root-level file negation', async () => {
158
+ tmpDir = await createTmpDir({
159
+ '.gitignore': ['*.mk', '!Foo.mk'].join('\n'),
160
+ 'bar.mk': '',
161
+ 'Foo.mk': '',
162
+ });
163
+
164
+ const ignore = loadIgnoreRules({
165
+ projectRoot: tmpDir,
166
+ useGitignore: true,
167
+ useGeminiignore: false,
168
+ ignoreDirs: [],
169
+ });
170
+
171
+ const results = await crawl({
172
+ crawlDirectory: tmpDir,
173
+ cwd: tmpDir,
174
+ ignore,
175
+ cache: false,
176
+ cacheTtl: 0,
177
+ });
178
+
179
+ expect(results).toEqual(
180
+ expect.arrayContaining(['.', '.gitignore', 'Foo.mk', 'bar.mk']),
181
+ );
182
+ });
183
+
184
+ it('should handle directory negation with glob', async () => {
185
+ tmpDir = await createTmpDir({
186
+ '.gitignore': [
187
+ 'third_party/**',
188
+ '!third_party/foo',
189
+ '!third_party/foo/bar',
190
+ '!third_party/foo/bar/baz_buffer',
191
+ ].join('\n'),
192
+ third_party: {
193
+ foo: {
194
+ bar: {
195
+ baz_buffer: '',
196
+ },
197
+ },
198
+ ignore_this: '',
199
+ },
200
+ });
201
+
202
+ const ignore = loadIgnoreRules({
203
+ projectRoot: tmpDir,
204
+ useGitignore: true,
205
+ useGeminiignore: false,
206
+ ignoreDirs: [],
207
+ });
208
+
209
+ const results = await crawl({
210
+ crawlDirectory: tmpDir,
211
+ cwd: tmpDir,
212
+ ignore,
213
+ cache: false,
214
+ cacheTtl: 0,
215
+ });
216
+
217
+ expect(results).toEqual(
218
+ expect.arrayContaining([
219
+ '.',
220
+ 'third_party/',
221
+ 'third_party/foo/',
222
+ 'third_party/foo/bar/',
223
+ '.gitignore',
224
+ 'third_party/foo/bar/baz_buffer',
225
+ ]),
226
+ );
227
+ });
228
+
229
+ it('should correctly handle negated patterns in .gitignore', async () => {
230
+ tmpDir = await createTmpDir({
231
+ '.gitignore': ['dist/**', '!dist/keep.js'].join('\n'),
232
+ dist: ['ignore.js', 'keep.js'],
233
+ src: ['main.js'],
234
+ });
235
+
236
+ const ignore = loadIgnoreRules({
237
+ projectRoot: tmpDir,
238
+ useGitignore: true,
239
+ useGeminiignore: false,
240
+ ignoreDirs: [],
241
+ });
242
+
243
+ const results = await crawl({
244
+ crawlDirectory: tmpDir,
245
+ cwd: tmpDir,
246
+ ignore,
247
+ cache: false,
248
+ cacheTtl: 0,
249
+ });
250
+
251
+ expect(results).toEqual(
252
+ expect.arrayContaining([
253
+ '.',
254
+ 'dist/',
255
+ 'src/',
256
+ '.gitignore',
257
+ 'dist/keep.js',
258
+ 'src/main.js',
259
+ ]),
260
+ );
261
+ });
262
+
263
+ it('should initialize correctly when ignore files are missing', async () => {
264
+ tmpDir = await createTmpDir({
265
+ src: ['file1.js'],
266
+ });
267
+
268
+ const ignore = loadIgnoreRules({
269
+ projectRoot: tmpDir,
270
+ useGitignore: true,
271
+ useGeminiignore: true,
272
+ ignoreDirs: [],
273
+ });
274
+
275
+ const results = await crawl({
276
+ crawlDirectory: tmpDir,
277
+ cwd: tmpDir,
278
+ ignore,
279
+ cache: false,
280
+ cacheTtl: 0,
281
+ });
282
+ expect(results).toEqual(
283
+ expect.arrayContaining(['.', 'src/', 'src/file1.js']),
284
+ );
285
+ });
286
+
287
+ it('should handle empty or commented-only ignore files', async () => {
288
+ tmpDir = await createTmpDir({
289
+ '.gitignore': '# This is a comment\n\n \n',
290
+ src: ['main.js'],
291
+ });
292
+
293
+ const ignore = loadIgnoreRules({
294
+ projectRoot: tmpDir,
295
+ useGitignore: true,
296
+ useGeminiignore: false,
297
+ ignoreDirs: [],
298
+ });
299
+
300
+ const results = await crawl({
301
+ crawlDirectory: tmpDir,
302
+ cwd: tmpDir,
303
+ ignore,
304
+ cache: false,
305
+ cacheTtl: 0,
306
+ });
307
+
308
+ expect(results).toEqual(
309
+ expect.arrayContaining(['.', 'src/', '.gitignore', 'src/main.js']),
310
+ );
311
+ });
312
+
313
+ it('should always ignore the .git directory', async () => {
314
+ tmpDir = await createTmpDir({
315
+ '.git': ['config', 'HEAD'],
316
+ src: ['main.js'],
317
+ });
318
+
319
+ const ignore = loadIgnoreRules({
320
+ projectRoot: tmpDir,
321
+ useGitignore: false,
322
+ useGeminiignore: false,
323
+ ignoreDirs: [],
324
+ });
325
+
326
+ const results = await crawl({
327
+ crawlDirectory: tmpDir,
328
+ cwd: tmpDir,
329
+ ignore,
330
+ cache: false,
331
+ cacheTtl: 0,
332
+ });
333
+
334
+ expect(results).toEqual(
335
+ expect.arrayContaining(['.', 'src/', 'src/main.js']),
336
+ );
337
+ });
338
+
339
+ describe('with in-memory cache', () => {
340
+ beforeEach(() => {
341
+ cache.clear();
342
+ vi.useFakeTimers();
343
+ });
344
+
345
+ afterEach(() => {
346
+ vi.useRealTimers();
347
+ });
348
+
349
+ it('should hit the cache for subsequent crawls', async () => {
350
+ tmpDir = await createTmpDir({ 'file1.js': '' });
351
+ const ignore = loadIgnoreRules({
352
+ projectRoot: tmpDir,
353
+ useGitignore: false,
354
+ useGeminiignore: false,
355
+ ignoreDirs: [],
356
+ });
357
+ const options = {
358
+ crawlDirectory: tmpDir,
359
+ cwd: tmpDir,
360
+ ignore,
361
+ cache: true,
362
+ cacheTtl: 10,
363
+ };
364
+
365
+ const crawlSpy = vi.spyOn(cache, 'read');
366
+
367
+ await crawl(options);
368
+ expect(crawlSpy).toHaveBeenCalledTimes(1);
369
+
370
+ await crawl(options);
371
+ expect(crawlSpy).toHaveBeenCalledTimes(2);
372
+ // fdir should not have been called a second time.
373
+ // We can't spy on it directly, but we can check the cache was hit.
374
+ const cacheKey = cache.getCacheKey(
375
+ options.crawlDirectory,
376
+ options.ignore.getFingerprint(),
377
+ undefined,
378
+ );
379
+ expect(cache.read(cacheKey)).toBeDefined();
380
+ });
381
+
382
+ it('should miss the cache when ignore rules change', async () => {
383
+ tmpDir = await createTmpDir({
384
+ '.gitignore': 'a.txt',
385
+ 'a.txt': '',
386
+ 'b.txt': '',
387
+ });
388
+ const getIgnore = () =>
389
+ loadIgnoreRules({
390
+ projectRoot: tmpDir,
391
+ useGitignore: true,
392
+ useGeminiignore: false,
393
+ ignoreDirs: [],
394
+ });
395
+ const getOptions = (ignore: Ignore) => ({
396
+ crawlDirectory: tmpDir,
397
+ cwd: tmpDir,
398
+ ignore,
399
+ cache: true,
400
+ cacheTtl: 10000,
401
+ });
402
+
403
+ // Initial crawl to populate the cache
404
+ const ignore1 = getIgnore();
405
+ const results1 = await crawl(getOptions(ignore1));
406
+ expect(results1).toEqual(
407
+ expect.arrayContaining(['.', '.gitignore', 'b.txt']),
408
+ );
409
+
410
+ // Modify the ignore file
411
+ await fs.writeFile(path.join(tmpDir, '.gitignore'), 'b.txt');
412
+
413
+ // Second crawl should miss the cache and trigger a recrawl
414
+ const ignore2 = getIgnore();
415
+ const results2 = await crawl(getOptions(ignore2));
416
+ expect(results2).toEqual(
417
+ expect.arrayContaining(['.', '.gitignore', 'a.txt']),
418
+ );
419
+ });
420
+
421
+ it('should miss the cache after TTL expires', async () => {
422
+ tmpDir = await createTmpDir({ 'file1.js': '' });
423
+ const ignore = loadIgnoreRules({
424
+ projectRoot: tmpDir,
425
+ useGitignore: false,
426
+ useGeminiignore: false,
427
+ ignoreDirs: [],
428
+ });
429
+ const options = {
430
+ crawlDirectory: tmpDir,
431
+ cwd: tmpDir,
432
+ ignore,
433
+ cache: true,
434
+ cacheTtl: 10, // 10 seconds
435
+ };
436
+
437
+ const readSpy = vi.spyOn(cache, 'read');
438
+ const writeSpy = vi.spyOn(cache, 'write');
439
+
440
+ await crawl(options);
441
+ expect(readSpy).toHaveBeenCalledTimes(1);
442
+ expect(writeSpy).toHaveBeenCalledTimes(1);
443
+
444
+ // Advance time past the TTL
445
+ await vi.advanceTimersByTimeAsync(11000);
446
+
447
+ await crawl(options);
448
+ expect(readSpy).toHaveBeenCalledTimes(2);
449
+ expect(writeSpy).toHaveBeenCalledTimes(2);
450
+ });
451
+
452
+ it('should miss the cache when maxDepth changes', async () => {
453
+ tmpDir = await createTmpDir({ 'file1.js': '' });
454
+ const ignore = loadIgnoreRules({
455
+ projectRoot: tmpDir,
456
+ useGitignore: false,
457
+ useGeminiignore: false,
458
+ ignoreDirs: [],
459
+ });
460
+ const getOptions = (maxDepth?: number) => ({
461
+ crawlDirectory: tmpDir,
462
+ cwd: tmpDir,
463
+ ignore,
464
+ cache: true,
465
+ cacheTtl: 10000,
466
+ maxDepth,
467
+ });
468
+
469
+ const readSpy = vi.spyOn(cache, 'read');
470
+ const writeSpy = vi.spyOn(cache, 'write');
471
+
472
+ // 1. First crawl with maxDepth: 1
473
+ await crawl(getOptions(1));
474
+ expect(readSpy).toHaveBeenCalledTimes(1);
475
+ expect(writeSpy).toHaveBeenCalledTimes(1);
476
+
477
+ // 2. Second crawl with maxDepth: 2, should be a cache miss
478
+ await crawl(getOptions(2));
479
+ expect(readSpy).toHaveBeenCalledTimes(2);
480
+ expect(writeSpy).toHaveBeenCalledTimes(2);
481
+
482
+ // 3. Third crawl with maxDepth: 1 again, should be a cache hit.
483
+ await crawl(getOptions(1));
484
+ expect(readSpy).toHaveBeenCalledTimes(3);
485
+ expect(writeSpy).toHaveBeenCalledTimes(2); // No new write
486
+ });
487
+ });
488
+
489
+ describe('with maxDepth', () => {
490
+ beforeEach(async () => {
491
+ tmpDir = await createTmpDir({
492
+ 'file-root.txt': '',
493
+ level1: {
494
+ 'file-level1.txt': '',
495
+ level2: {
496
+ 'file-level2.txt': '',
497
+ level3: {
498
+ 'file-level3.txt': '',
499
+ },
500
+ },
501
+ },
502
+ });
503
+ });
504
+
505
+ const getCrawlResults = (maxDepth?: number) => {
506
+ const ignore = loadIgnoreRules({
507
+ projectRoot: tmpDir,
508
+ useGitignore: false,
509
+ useGeminiignore: false,
510
+ ignoreDirs: [],
511
+ });
512
+ return crawl({
513
+ crawlDirectory: tmpDir,
514
+ cwd: tmpDir,
515
+ ignore,
516
+ cache: false,
517
+ cacheTtl: 0,
518
+ maxDepth,
519
+ });
520
+ };
521
+
522
+ it('should only crawl top-level files when maxDepth is 0', async () => {
523
+ const results = await getCrawlResults(0);
524
+ expect(results).toEqual(
525
+ expect.arrayContaining(['.', 'level1/', 'file-root.txt']),
526
+ );
527
+ });
528
+
529
+ it('should crawl one level deep when maxDepth is 1', async () => {
530
+ const results = await getCrawlResults(1);
531
+ expect(results).toEqual(
532
+ expect.arrayContaining([
533
+ '.',
534
+ 'level1/',
535
+ 'level1/level2/',
536
+ 'file-root.txt',
537
+ 'level1/file-level1.txt',
538
+ ]),
539
+ );
540
+ });
541
+
542
+ it('should crawl two levels deep when maxDepth is 2', async () => {
543
+ const results = await getCrawlResults(2);
544
+ expect(results).toEqual(
545
+ expect.arrayContaining([
546
+ '.',
547
+ 'level1/',
548
+ 'level1/level2/',
549
+ 'level1/level2/level3/',
550
+ 'file-root.txt',
551
+ 'level1/file-level1.txt',
552
+ 'level1/level2/file-level2.txt',
553
+ ]),
554
+ );
555
+ });
556
+
557
+ it('should perform a full recursive crawl when maxDepth is undefined', async () => {
558
+ const results = await getCrawlResults(undefined);
559
+ expect(results).toEqual(
560
+ expect.arrayContaining([
561
+ '.',
562
+ 'level1/',
563
+ 'level1/level2/',
564
+ 'level1/level2/level3/',
565
+ 'file-root.txt',
566
+ 'level1/file-level1.txt',
567
+ 'level1/level2/file-level2.txt',
568
+ 'level1/level2/level3/file-level3.txt',
569
+ ]),
570
+ );
571
+ });
572
+ });
573
+ });
projects/ui/qwen-code/packages/core/src/utils/filesearch/crawler.ts ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import path from 'node:path';
8
+ import { fdir } from 'fdir';
9
+ import { Ignore } from './ignore.js';
10
+ import * as cache from './crawlCache.js';
11
+
12
/** Options controlling a single filesystem crawl. */
export interface CrawlOptions {
  /** The directory to start the crawl from. */
  crawlDirectory: string;
  /** The project's root directory; crawl results are returned relative to it. */
  cwd: string;
  /** The fdir maxDepth option. When undefined, the crawl is fully recursive. */
  maxDepth?: number;
  /** A pre-configured Ignore instance used to exclude directories. */
  ignore: Ignore;
  /** Whether to read from / write to the in-memory crawl cache. */
  cache: boolean;
  /** Cache entry time-to-live, in seconds (converted to ms when stored). */
  cacheTtl: number;
}
25
+
26
+ function toPosixPath(p: string) {
27
+ return p.split(path.sep).join(path.posix.sep);
28
+ }
29
+
30
+ export async function crawl(options: CrawlOptions): Promise<string[]> {
31
+ if (options.cache) {
32
+ const cacheKey = cache.getCacheKey(
33
+ options.crawlDirectory,
34
+ options.ignore.getFingerprint(),
35
+ options.maxDepth,
36
+ );
37
+ const cachedResults = cache.read(cacheKey);
38
+
39
+ if (cachedResults) {
40
+ return cachedResults;
41
+ }
42
+ }
43
+
44
+ const posixCwd = toPosixPath(options.cwd);
45
+ const posixCrawlDirectory = toPosixPath(options.crawlDirectory);
46
+
47
+ let results: string[];
48
+ try {
49
+ const dirFilter = options.ignore.getDirectoryFilter();
50
+ const api = new fdir()
51
+ .withRelativePaths()
52
+ .withDirs()
53
+ .withPathSeparator('/') // Always use unix style paths
54
+ .exclude((_, dirPath) => {
55
+ const relativePath = path.posix.relative(posixCrawlDirectory, dirPath);
56
+ return dirFilter(`${relativePath}/`);
57
+ });
58
+
59
+ if (options.maxDepth !== undefined) {
60
+ api.withMaxDepth(options.maxDepth);
61
+ }
62
+
63
+ results = await api.crawl(options.crawlDirectory).withPromise();
64
+ } catch (_e) {
65
+ // The directory probably doesn't exist.
66
+ return [];
67
+ }
68
+
69
+ const relativeToCrawlDir = path.posix.relative(posixCwd, posixCrawlDirectory);
70
+
71
+ const relativeToCwdResults = results.map((p) =>
72
+ path.posix.join(relativeToCrawlDir, p),
73
+ );
74
+
75
+ if (options.cache) {
76
+ const cacheKey = cache.getCacheKey(
77
+ options.crawlDirectory,
78
+ options.ignore.getFingerprint(),
79
+ options.maxDepth,
80
+ );
81
+ cache.write(cacheKey, relativeToCwdResults, options.cacheTtl * 1000);
82
+ }
83
+
84
+ return relativeToCwdResults;
85
+ }
projects/ui/qwen-code/packages/core/src/utils/filesearch/fileSearch.test.ts ADDED
@@ -0,0 +1,662 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, afterEach, vi } from 'vitest';
8
+ import { FileSearchFactory, AbortError, filter } from './fileSearch.js';
9
+ import { createTmpDir, cleanupTmpDir } from '@qwen-code/qwen-code-test-utils';
10
+
11
+ describe('FileSearch', () => {
12
+ let tmpDir: string;
13
+ afterEach(async () => {
14
+ if (tmpDir) {
15
+ await cleanupTmpDir(tmpDir);
16
+ }
17
+ vi.restoreAllMocks();
18
+ });
19
+
20
+ it('should use .geminiignore rules', async () => {
21
+ tmpDir = await createTmpDir({
22
+ '.geminiignore': 'dist/',
23
+ dist: ['ignored.js'],
24
+ src: ['not-ignored.js'],
25
+ });
26
+
27
+ const fileSearch = FileSearchFactory.create({
28
+ projectRoot: tmpDir,
29
+ useGitignore: false,
30
+ useGeminiignore: true,
31
+ ignoreDirs: [],
32
+ cache: false,
33
+ cacheTtl: 0,
34
+ enableRecursiveFileSearch: true,
35
+ });
36
+
37
+ await fileSearch.initialize();
38
+ const results = await fileSearch.search('');
39
+
40
+ expect(results).toEqual(['src/', '.geminiignore', 'src/not-ignored.js']);
41
+ });
42
+
43
+ it('should combine .gitignore and .geminiignore rules', async () => {
44
+ tmpDir = await createTmpDir({
45
+ '.gitignore': 'dist/',
46
+ '.geminiignore': 'build/',
47
+ dist: ['ignored-by-git.js'],
48
+ build: ['ignored-by-gemini.js'],
49
+ src: ['not-ignored.js'],
50
+ });
51
+
52
+ const fileSearch = FileSearchFactory.create({
53
+ projectRoot: tmpDir,
54
+ useGitignore: true,
55
+ useGeminiignore: true,
56
+ ignoreDirs: [],
57
+ cache: false,
58
+ cacheTtl: 0,
59
+ enableRecursiveFileSearch: true,
60
+ });
61
+
62
+ await fileSearch.initialize();
63
+ const results = await fileSearch.search('');
64
+
65
+ expect(results).toEqual([
66
+ 'src/',
67
+ '.geminiignore',
68
+ '.gitignore',
69
+ 'src/not-ignored.js',
70
+ ]);
71
+ });
72
+
73
+ it('should use ignoreDirs option', async () => {
74
+ tmpDir = await createTmpDir({
75
+ logs: ['some.log'],
76
+ src: ['main.js'],
77
+ });
78
+
79
+ const fileSearch = FileSearchFactory.create({
80
+ projectRoot: tmpDir,
81
+ useGitignore: false,
82
+ useGeminiignore: false,
83
+ ignoreDirs: ['logs'],
84
+ cache: false,
85
+ cacheTtl: 0,
86
+ enableRecursiveFileSearch: true,
87
+ });
88
+
89
+ await fileSearch.initialize();
90
+ const results = await fileSearch.search('');
91
+
92
+ expect(results).toEqual(['src/', 'src/main.js']);
93
+ });
94
+
95
+ it('should handle negated directories', async () => {
96
+ tmpDir = await createTmpDir({
97
+ '.gitignore': ['build/**', '!build/public', '!build/public/**'].join(
98
+ '\n',
99
+ ),
100
+ build: {
101
+ 'private.js': '',
102
+ public: ['index.html'],
103
+ },
104
+ src: ['main.js'],
105
+ });
106
+
107
+ const fileSearch = FileSearchFactory.create({
108
+ projectRoot: tmpDir,
109
+ useGitignore: true,
110
+ useGeminiignore: false,
111
+ ignoreDirs: [],
112
+ cache: false,
113
+ cacheTtl: 0,
114
+ enableRecursiveFileSearch: true,
115
+ });
116
+
117
+ await fileSearch.initialize();
118
+ const results = await fileSearch.search('');
119
+
120
+ expect(results).toEqual([
121
+ 'build/',
122
+ 'build/public/',
123
+ 'src/',
124
+ '.gitignore',
125
+ 'build/public/index.html',
126
+ 'src/main.js',
127
+ ]);
128
+ });
129
+
130
+ it('should filter results with a search pattern', async () => {
131
+ tmpDir = await createTmpDir({
132
+ src: {
133
+ 'main.js': '',
134
+ 'util.ts': '',
135
+ 'style.css': '',
136
+ },
137
+ });
138
+
139
+ const fileSearch = FileSearchFactory.create({
140
+ projectRoot: tmpDir,
141
+ useGitignore: false,
142
+ useGeminiignore: false,
143
+ ignoreDirs: [],
144
+ cache: false,
145
+ cacheTtl: 0,
146
+ enableRecursiveFileSearch: true,
147
+ });
148
+
149
+ await fileSearch.initialize();
150
+ const results = await fileSearch.search('**/*.js');
151
+
152
+ expect(results).toEqual(['src/main.js']);
153
+ });
154
+
155
+ it('should handle root-level file negation', async () => {
156
+ tmpDir = await createTmpDir({
157
+ '.gitignore': ['*.mk', '!Foo.mk'].join('\n'),
158
+ 'bar.mk': '',
159
+ 'Foo.mk': '',
160
+ });
161
+
162
+ const fileSearch = FileSearchFactory.create({
163
+ projectRoot: tmpDir,
164
+ useGitignore: true,
165
+ useGeminiignore: false,
166
+ ignoreDirs: [],
167
+ cache: false,
168
+ cacheTtl: 0,
169
+ enableRecursiveFileSearch: true,
170
+ });
171
+
172
+ await fileSearch.initialize();
173
+ const results = await fileSearch.search('');
174
+
175
+ expect(results).toEqual(['.gitignore', 'Foo.mk']);
176
+ });
177
+
178
+ it('should handle directory negation with glob', async () => {
179
+ tmpDir = await createTmpDir({
180
+ '.gitignore': [
181
+ 'third_party/**',
182
+ '!third_party/foo',
183
+ '!third_party/foo/bar',
184
+ '!third_party/foo/bar/baz_buffer',
185
+ ].join('\n'),
186
+ third_party: {
187
+ foo: {
188
+ bar: {
189
+ baz_buffer: '',
190
+ },
191
+ },
192
+ ignore_this: '',
193
+ },
194
+ });
195
+
196
+ const fileSearch = FileSearchFactory.create({
197
+ projectRoot: tmpDir,
198
+ useGitignore: true,
199
+ useGeminiignore: false,
200
+ ignoreDirs: [],
201
+ cache: false,
202
+ cacheTtl: 0,
203
+ enableRecursiveFileSearch: true,
204
+ });
205
+
206
+ await fileSearch.initialize();
207
+ const results = await fileSearch.search('');
208
+
209
+ expect(results).toEqual([
210
+ 'third_party/',
211
+ 'third_party/foo/',
212
+ 'third_party/foo/bar/',
213
+ '.gitignore',
214
+ 'third_party/foo/bar/baz_buffer',
215
+ ]);
216
+ });
217
+
218
+ it('should correctly handle negated patterns in .gitignore', async () => {
219
+ tmpDir = await createTmpDir({
220
+ '.gitignore': ['dist/**', '!dist/keep.js'].join('\n'),
221
+ dist: ['ignore.js', 'keep.js'],
222
+ src: ['main.js'],
223
+ });
224
+
225
+ const fileSearch = FileSearchFactory.create({
226
+ projectRoot: tmpDir,
227
+ useGitignore: true,
228
+ useGeminiignore: false,
229
+ ignoreDirs: [],
230
+ cache: false,
231
+ cacheTtl: 0,
232
+ enableRecursiveFileSearch: true,
233
+ });
234
+
235
+ await fileSearch.initialize();
236
+ const results = await fileSearch.search('');
237
+
238
+ expect(results).toEqual([
239
+ 'dist/',
240
+ 'src/',
241
+ '.gitignore',
242
+ 'dist/keep.js',
243
+ 'src/main.js',
244
+ ]);
245
+ });
246
+
247
+ // New test cases start here
248
+
249
+ it('should initialize correctly when ignore files are missing', async () => {
250
+ tmpDir = await createTmpDir({
251
+ src: ['file1.js'],
252
+ });
253
+
254
+ const fileSearch = FileSearchFactory.create({
255
+ projectRoot: tmpDir,
256
+ useGitignore: true,
257
+ useGeminiignore: true,
258
+ ignoreDirs: [],
259
+ cache: false,
260
+ cacheTtl: 0,
261
+ enableRecursiveFileSearch: true,
262
+ });
263
+
264
+ // Expect no errors to be thrown during initialization
265
+ await expect(fileSearch.initialize()).resolves.toBeUndefined();
266
+ const results = await fileSearch.search('');
267
+ expect(results).toEqual(['src/', 'src/file1.js']);
268
+ });
269
+
270
+ it('should respect maxResults option in search', async () => {
271
+ tmpDir = await createTmpDir({
272
+ src: {
273
+ 'file1.js': '',
274
+ 'file2.js': '',
275
+ 'file3.js': '',
276
+ 'file4.js': '',
277
+ },
278
+ });
279
+
280
+ const fileSearch = FileSearchFactory.create({
281
+ projectRoot: tmpDir,
282
+ useGitignore: false,
283
+ useGeminiignore: false,
284
+ ignoreDirs: [],
285
+ cache: false,
286
+ cacheTtl: 0,
287
+ enableRecursiveFileSearch: true,
288
+ });
289
+
290
+ await fileSearch.initialize();
291
+ const results = await fileSearch.search('**/*.js', { maxResults: 2 });
292
+
293
+ expect(results).toEqual(['src/file1.js', 'src/file2.js']); // Assuming alphabetical sort
294
+ });
295
+
296
+ it('should use fzf for fuzzy matching when pattern does not contain wildcards', async () => {
297
+ tmpDir = await createTmpDir({
298
+ src: {
299
+ 'main.js': '',
300
+ 'util.ts': '',
301
+ 'style.css': '',
302
+ },
303
+ });
304
+
305
+ const fileSearch = FileSearchFactory.create({
306
+ projectRoot: tmpDir,
307
+ useGitignore: false,
308
+ useGeminiignore: false,
309
+ ignoreDirs: [],
310
+ cache: false,
311
+ cacheTtl: 0,
312
+ enableRecursiveFileSearch: true,
313
+ });
314
+
315
+ await fileSearch.initialize();
316
+ const results = await fileSearch.search('sst');
317
+
318
+ expect(results).toEqual(['src/style.css']);
319
+ });
320
+
321
+ it('should return empty array when no matches are found', async () => {
322
+ tmpDir = await createTmpDir({
323
+ src: ['file1.js'],
324
+ });
325
+
326
+ const fileSearch = FileSearchFactory.create({
327
+ projectRoot: tmpDir,
328
+ useGitignore: false,
329
+ useGeminiignore: false,
330
+ ignoreDirs: [],
331
+ cache: false,
332
+ cacheTtl: 0,
333
+ enableRecursiveFileSearch: true,
334
+ });
335
+
336
+ await fileSearch.initialize();
337
+ const results = await fileSearch.search('nonexistent-file.xyz');
338
+
339
+ expect(results).toEqual([]);
340
+ });
341
+
342
+ it('should throw AbortError when filter is aborted', async () => {
343
+ const controller = new AbortController();
344
+ const dummyPaths = Array.from({ length: 5000 }, (_, i) => `file${i}.js`); // Large array to ensure yielding
345
+
346
+ const filterPromise = filter(dummyPaths, '*.js', controller.signal);
347
+
348
+ // Abort after a short delay to ensure filter has started
349
+ setTimeout(() => controller.abort(), 1);
350
+
351
+ await expect(filterPromise).rejects.toThrow(AbortError);
352
+ });
353
+
354
+ it('should throw an error if search is called before initialization', async () => {
355
+ tmpDir = await createTmpDir({});
356
+ const fileSearch = FileSearchFactory.create({
357
+ projectRoot: tmpDir,
358
+ useGitignore: false,
359
+ useGeminiignore: false,
360
+ ignoreDirs: [],
361
+ cache: false,
362
+ cacheTtl: 0,
363
+ enableRecursiveFileSearch: true,
364
+ });
365
+
366
+ await expect(fileSearch.search('')).rejects.toThrow(
367
+ 'Engine not initialized. Call initialize() first.',
368
+ );
369
+ });
370
+
371
+ it('should handle empty or commented-only ignore files', async () => {
372
+ tmpDir = await createTmpDir({
373
+ '.gitignore': '# This is a comment\n\n \n',
374
+ src: ['main.js'],
375
+ });
376
+
377
+ const fileSearch = FileSearchFactory.create({
378
+ projectRoot: tmpDir,
379
+ useGitignore: true,
380
+ useGeminiignore: false,
381
+ ignoreDirs: [],
382
+ cache: false,
383
+ cacheTtl: 0,
384
+ enableRecursiveFileSearch: true,
385
+ });
386
+
387
+ await fileSearch.initialize();
388
+ const results = await fileSearch.search('');
389
+
390
+ expect(results).toEqual(['src/', '.gitignore', 'src/main.js']);
391
+ });
392
+
393
+ it('should always ignore the .git directory', async () => {
394
+ tmpDir = await createTmpDir({
395
+ '.git': ['config', 'HEAD'],
396
+ src: ['main.js'],
397
+ });
398
+
399
+ const fileSearch = FileSearchFactory.create({
400
+ projectRoot: tmpDir,
401
+ useGitignore: false, // Explicitly disable .gitignore to isolate this rule
402
+ useGeminiignore: false,
403
+ ignoreDirs: [],
404
+ cache: false,
405
+ cacheTtl: 0,
406
+ enableRecursiveFileSearch: true,
407
+ });
408
+
409
+ await fileSearch.initialize();
410
+ const results = await fileSearch.search('');
411
+
412
+ expect(results).toEqual(['src/', 'src/main.js']);
413
+ });
414
+
415
+ it('should be cancellable via AbortSignal', async () => {
416
+ const largeDir: Record<string, string> = {};
417
+ for (let i = 0; i < 100; i++) {
418
+ largeDir[`file${i}.js`] = '';
419
+ }
420
+ tmpDir = await createTmpDir(largeDir);
421
+
422
+ const fileSearch = FileSearchFactory.create({
423
+ projectRoot: tmpDir,
424
+ useGitignore: false,
425
+ useGeminiignore: false,
426
+ ignoreDirs: [],
427
+ cache: false,
428
+ cacheTtl: 0,
429
+ enableRecursiveFileSearch: true,
430
+ });
431
+
432
+ await fileSearch.initialize();
433
+
434
+ const controller = new AbortController();
435
+ const searchPromise = fileSearch.search('**/*.js', {
436
+ signal: controller.signal,
437
+ });
438
+
439
+ // Yield to allow the search to start before aborting.
440
+ await new Promise((resolve) => setImmediate(resolve));
441
+
442
+ controller.abort();
443
+
444
+ await expect(searchPromise).rejects.toThrow(AbortError);
445
+ });
446
+
447
  // A broad query primes the cache; a narrower query prefixed by it should
  // then be answered correctly from the cached superset (bestBaseQuery).
  it('should leverage ResultCache for bestBaseQuery optimization', async () => {
    tmpDir = await createTmpDir({
      src: {
        'foo.js': '',
        'bar.ts': '',
        nested: {
          'baz.js': '',
        },
      },
    });

    const fileSearch = FileSearchFactory.create({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: true, // Enable caching for this test
      cacheTtl: 0,
      enableRecursiveFileSearch: true,
    });

    await fileSearch.initialize();

    // Perform a broad search to prime the cache
    const broadResults = await fileSearch.search('src/**');
    expect(broadResults).toEqual([
      'src/',
      'src/nested/',
      'src/bar.ts',
      'src/foo.js',
      'src/nested/baz.js',
    ]);

    // Perform a more specific search that should leverage the broad search's cached results
    const specificResults = await fileSearch.search('src/**/*.js');
    expect(specificResults).toEqual(['src/foo.js', 'src/nested/baz.js']);

    // Although we can't directly inspect ResultCache.hits/misses from here,
    // the correctness of specificResults after a broad search implicitly
    // verifies that the caching mechanism, including bestBaseQuery, is working.
  });
488
+
489
  // Pattern matching uses picomatch's `nocase` option, so any casing of the
  // pattern must match any casing of the file names.
  it('should be case-insensitive by default', async () => {
    tmpDir = await createTmpDir({
      'File1.Js': '',
      'file2.js': '',
      'FILE3.JS': '',
      'other.txt': '',
    });

    const fileSearch = FileSearchFactory.create({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
      enableRecursiveFileSearch: true,
    });

    await fileSearch.initialize();

    // Search with a lowercase pattern
    let results = await fileSearch.search('file*.js');
    expect(results).toHaveLength(3);
    expect(results).toEqual(
      expect.arrayContaining(['File1.Js', 'file2.js', 'FILE3.JS']),
    );

    // Search with an uppercase pattern
    results = await fileSearch.search('FILE*.JS');
    expect(results).toHaveLength(3);
    expect(results).toEqual(
      expect.arrayContaining(['File1.Js', 'file2.js', 'FILE3.JS']),
    );

    // Search with a mixed-case pattern
    results = await fileSearch.search('FiLe*.Js');
    expect(results).toHaveLength(3);
    expect(results).toEqual(
      expect.arrayContaining(['File1.Js', 'file2.js', 'FILE3.JS']),
    );
  });
530
+
531
  // maxResults is applied after candidate retrieval, so it must still cap
  // the output when the ResultCache returns an exact (pre-computed) match.
  it('should respect maxResults even when the cache returns an exact match', async () => {
    tmpDir = await createTmpDir({
      'file1.js': '',
      'file2.js': '',
      'file3.js': '',
      'file4.js': '',
      'file5.js': '',
    });

    const fileSearch = FileSearchFactory.create({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: true, // Ensure caching is enabled
      cacheTtl: 10000,
      enableRecursiveFileSearch: true,
    });

    await fileSearch.initialize();

    // 1. Perform a broad search to populate the cache with an exact match.
    const initialResults = await fileSearch.search('*.js');
    expect(initialResults).toEqual([
      'file1.js',
      'file2.js',
      'file3.js',
      'file4.js',
      'file5.js',
    ]);

    // 2. Perform the same search again, but this time with a maxResults limit.
    const limitedResults = await fileSearch.search('*.js', { maxResults: 2 });

    // 3. Assert that the maxResults limit was respected, even with a cache hit.
    expect(limitedResults).toEqual(['file1.js', 'file2.js']);
  });
568
+
569
  // Tests for the non-recursive engine (enableRecursiveFileSearch: false),
  // which lists a single directory per query instead of crawling the tree.
  describe('DirectoryFileSearch', () => {
    // Globs with no directory component are resolved in the project root.
    it('should search for files in the current directory', async () => {
      tmpDir = await createTmpDir({
        'file1.js': '',
        'file2.ts': '',
        'file3.js': '',
      });

      const fileSearch = FileSearchFactory.create({
        projectRoot: tmpDir,
        useGitignore: false,
        useGeminiignore: false,
        ignoreDirs: [],
        cache: false,
        cacheTtl: 0,
        enableRecursiveFileSearch: false,
      });

      await fileSearch.initialize();
      const results = await fileSearch.search('*.js');
      expect(results).toEqual(['file1.js', 'file3.js']);
    });

    // A pattern with a directory prefix lists that subdirectory only.
    it('should search for files in a subdirectory', async () => {
      tmpDir = await createTmpDir({
        'file1.js': '',
        src: {
          'file2.js': '',
          'file3.ts': '',
        },
      });

      const fileSearch = FileSearchFactory.create({
        projectRoot: tmpDir,
        useGitignore: false,
        useGeminiignore: false,
        ignoreDirs: [],
        cache: false,
        cacheTtl: 0,
        enableRecursiveFileSearch: false,
      });

      await fileSearch.initialize();
      const results = await fileSearch.search('src/*.js');
      expect(results).toEqual(['src/file2.js']);
    });

    // A trailing slash means "list everything in this directory".
    it('should list all files in a directory', async () => {
      tmpDir = await createTmpDir({
        'file1.js': '',
        src: {
          'file2.js': '',
          'file3.ts': '',
        },
      });

      const fileSearch = FileSearchFactory.create({
        projectRoot: tmpDir,
        useGitignore: false,
        useGeminiignore: false,
        ignoreDirs: [],
        cache: false,
        cacheTtl: 0,
        enableRecursiveFileSearch: false,
      });

      await fileSearch.initialize();
      const results = await fileSearch.search('src/');
      expect(results).toEqual(['src/file2.js', 'src/file3.ts']);
    });

    // Ignore rules apply to per-directory listings as well.
    it('should respect ignore rules', async () => {
      tmpDir = await createTmpDir({
        '.gitignore': '*.js',
        'file1.js': '',
        'file2.ts': '',
      });

      const fileSearch = FileSearchFactory.create({
        projectRoot: tmpDir,
        useGitignore: true,
        useGeminiignore: false,
        ignoreDirs: [],
        cache: false,
        cacheTtl: 0,
        enableRecursiveFileSearch: false,
      });

      await fileSearch.initialize();
      const results = await fileSearch.search('*');
      expect(results).toEqual(['.gitignore', 'file2.ts']);
    });
  });
662
+ });
projects/ui/qwen-code/packages/core/src/utils/filesearch/fileSearch.ts ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import path from 'node:path';
8
+ import picomatch from 'picomatch';
9
+ import { Ignore, loadIgnoreRules } from './ignore.js';
10
+ import { ResultCache } from './result-cache.js';
11
+ import { crawl } from './crawler.js';
12
+ import { AsyncFzf, FzfResultItem } from 'fzf';
13
+
14
/** Configuration for constructing a {@link FileSearch} engine. */
export interface FileSearchOptions {
  // Absolute path of the project root; all results are reported relative to it.
  projectRoot: string;
  // Extra directory patterns to ignore, in addition to any ignore files.
  ignoreDirs: string[];
  // Whether to honor .gitignore rules found in the project root.
  useGitignore: boolean;
  // Whether to honor .geminiignore rules found in the project root.
  useGeminiignore: boolean;
  // Whether to cache crawl results between searches.
  cache: boolean;
  // Time-to-live for cached crawl results (presumably milliseconds — confirm
  // against the crawler implementation).
  cacheTtl: number;
  // When true, the whole tree is crawled once up front (RecursiveFileSearch);
  // otherwise each search lists a single directory (DirectoryFileSearch).
  enableRecursiveFileSearch: boolean;
  // Optional crawl depth limit; omitted means no explicit limit here.
  maxDepth?: number;
}
24
+
25
+ export class AbortError extends Error {
26
+ constructor(message = 'Search aborted') {
27
+ super(message);
28
+ this.name = 'AbortError';
29
+ }
30
+ }
31
+
32
+ /**
33
+ * Filters a list of paths based on a given pattern.
34
+ * @param allPaths The list of all paths to filter.
35
+ * @param pattern The picomatch pattern to filter by.
36
+ * @param signal An AbortSignal to cancel the operation.
37
+ * @returns A promise that resolves to the filtered and sorted list of paths.
38
+ */
39
+ export async function filter(
40
+ allPaths: string[],
41
+ pattern: string,
42
+ signal: AbortSignal | undefined,
43
+ ): Promise<string[]> {
44
+ const patternFilter = picomatch(pattern, {
45
+ dot: true,
46
+ contains: true,
47
+ nocase: true,
48
+ });
49
+
50
+ const results: string[] = [];
51
+ for (const [i, p] of allPaths.entries()) {
52
+ // Yield control to the event loop periodically to prevent blocking.
53
+ if (i % 1000 === 0) {
54
+ await new Promise((resolve) => setImmediate(resolve));
55
+ if (signal?.aborted) {
56
+ throw new AbortError();
57
+ }
58
+ }
59
+
60
+ if (patternFilter(p)) {
61
+ results.push(p);
62
+ }
63
+ }
64
+
65
+ results.sort((a, b) => {
66
+ const aIsDir = a.endsWith('/');
67
+ const bIsDir = b.endsWith('/');
68
+
69
+ if (aIsDir && !bIsDir) return -1;
70
+ if (!aIsDir && bIsDir) return 1;
71
+
72
+ // This is 40% faster than localeCompare and the only thing we would really
73
+ // gain from localeCompare is case-sensitive sort
74
+ return a < b ? -1 : a > b ? 1 : 0;
75
+ });
76
+
77
+ return results;
78
+ }
79
+
80
/** Per-call options for {@link FileSearch.search}. */
export interface SearchOptions {
  // Signal used to cancel an in-flight search (raises AbortError).
  signal?: AbortSignal;
  // Upper bound on the number of results returned; omitted means unlimited.
  maxResults?: number;
}
84
+
85
/** Common contract implemented by the recursive and per-directory engines. */
export interface FileSearch {
  // Prepares the engine (loads ignore rules; crawls if the engine is
  // recursive). Must resolve before search() is called.
  initialize(): Promise<void>;
  // Searches for project-relative paths matching `pattern`. An empty
  // pattern matches everything.
  search(pattern: string, options?: SearchOptions): Promise<string[]>;
}
89
+
90
/**
 * A FileSearch engine that crawls the entire project tree once during
 * initialize() and then answers every query from the in-memory file list,
 * combining a per-pattern ResultCache with fzf fuzzy matching.
 */
class RecursiveFileSearch implements FileSearch {
  private ignore: Ignore | undefined;
  private resultCache: ResultCache | undefined;
  private allFiles: string[] = [];
  private fzf: AsyncFzf<string[]> | undefined;

  constructor(private readonly options: FileSearchOptions) {}

  /**
   * Loads ignore rules, crawls the project tree, and builds the result
   * cache and the fuzzy matcher. Must be called before search().
   */
  async initialize(): Promise<void> {
    this.ignore = loadIgnoreRules(this.options);
    this.allFiles = await crawl({
      crawlDirectory: this.options.projectRoot,
      cwd: this.options.projectRoot,
      ignore: this.ignore,
      cache: this.options.cache,
      cacheTtl: this.options.cacheTtl,
      maxDepth: this.options.maxDepth,
    });
    this.buildResultCache();
  }

  /**
   * Searches the crawled file list for paths matching `pattern`.
   *
   * Patterns containing '*' are matched with picomatch via filter();
   * all other patterns go through fzf fuzzy matching. Per-pattern results
   * are cached, and file-level ignore rules plus the maxResults cap are
   * applied on the way out.
   *
   * @param pattern The query; empty means "match everything".
   * @param options Optional AbortSignal and result cap.
   * @returns Matching project-relative paths.
   * @throws {AbortError} If options.signal aborts mid-search.
   */
  async search(
    pattern: string,
    options: SearchOptions = {},
  ): Promise<string[]> {
    if (!this.resultCache || !this.fzf || !this.ignore) {
      throw new Error('Engine not initialized. Call initialize() first.');
    }

    // An empty pattern means "match everything".
    pattern = pattern || '*';

    let filteredCandidates;
    // The cache returns either a previously computed exact result, or the
    // best available superset of candidates to narrow down further.
    const { files: candidates, isExactMatch } =
      await this.resultCache!.get(pattern);

    if (isExactMatch) {
      // Use the cached result.
      filteredCandidates = candidates;
    } else {
      let shouldCache = true;
      if (pattern.includes('*')) {
        // Glob-style pattern: picomatch-based filtering of the candidates.
        filteredCandidates = await filter(candidates, pattern, options.signal);
      } else {
        // Fuzzy matching. NOTE: this.fzf was built over this.allFiles in
        // buildResultCache(), so it searches the full list rather than the
        // narrowed `candidates`.
        filteredCandidates = await this.fzf
          .find(pattern)
          .then((results: Array<FzfResultItem<string>>) =>
            results.map((entry: FzfResultItem<string>) => entry.item),
          )
          .catch(() => {
            // fzf's find can reject; avoid caching the empty fallback so a
            // later retry recomputes instead of reusing a bogus result.
            shouldCache = false;
            return [];
          });
      }

      if (shouldCache) {
        this.resultCache!.set(pattern, filteredCandidates);
      }
    }

    // Apply file-level ignore rules and maxResults, yielding to the event
    // loop periodically so large result sets remain cancellable.
    const fileFilter = this.ignore.getFileFilter();
    const results: string[] = [];
    for (const [i, candidate] of filteredCandidates.entries()) {
      if (i % 1000 === 0) {
        await new Promise((resolve) => setImmediate(resolve));
        if (options.signal?.aborted) {
          throw new AbortError();
        }
      }

      if (results.length >= (options.maxResults ?? Infinity)) {
        break;
      }
      // '.' denotes the crawl root itself; never report it.
      if (candidate === '.') {
        continue;
      }
      if (!fileFilter(candidate)) {
        results.push(candidate);
      }
    }
    return results;
  }

  // Builds the per-pattern result cache and the fzf matcher over the
  // crawled file list.
  private buildResultCache(): void {
    this.resultCache = new ResultCache(this.allFiles);
    // The v1 algorithm is much faster since it only looks at the first
    // occurence of the pattern. We use it for search spaces that have >20k
    // files, because the v2 algorithm is just too slow in those cases.
    this.fzf = new AsyncFzf(this.allFiles, {
      fuzzy: this.allFiles.length > 20000 ? 'v1' : 'v2',
    });
  }
}
182
+
183
+ class DirectoryFileSearch implements FileSearch {
184
+ private ignore: Ignore | undefined;
185
+
186
+ constructor(private readonly options: FileSearchOptions) {}
187
+
188
+ async initialize(): Promise<void> {
189
+ this.ignore = loadIgnoreRules(this.options);
190
+ }
191
+
192
+ async search(
193
+ pattern: string,
194
+ options: SearchOptions = {},
195
+ ): Promise<string[]> {
196
+ if (!this.ignore) {
197
+ throw new Error('Engine not initialized. Call initialize() first.');
198
+ }
199
+ pattern = pattern || '*';
200
+
201
+ const dir = pattern.endsWith('/') ? pattern : path.dirname(pattern);
202
+ const results = await crawl({
203
+ crawlDirectory: path.join(this.options.projectRoot, dir),
204
+ cwd: this.options.projectRoot,
205
+ maxDepth: 0,
206
+ ignore: this.ignore,
207
+ cache: this.options.cache,
208
+ cacheTtl: this.options.cacheTtl,
209
+ });
210
+
211
+ const filteredResults = await filter(results, pattern, options.signal);
212
+
213
+ const fileFilter = this.ignore.getFileFilter();
214
+ const finalResults: string[] = [];
215
+ for (const candidate of filteredResults) {
216
+ if (finalResults.length >= (options.maxResults ?? Infinity)) {
217
+ break;
218
+ }
219
+ if (candidate === '.') {
220
+ continue;
221
+ }
222
+ if (!fileFilter(candidate)) {
223
+ finalResults.push(candidate);
224
+ }
225
+ }
226
+ return finalResults;
227
+ }
228
+ }
229
+
230
+ export class FileSearchFactory {
231
+ static create(options: FileSearchOptions): FileSearch {
232
+ if (options.enableRecursiveFileSearch) {
233
+ return new RecursiveFileSearch(options);
234
+ }
235
+ return new DirectoryFileSearch(options);
236
+ }
237
+ }
projects/ui/qwen-code/packages/core/src/utils/filesearch/ignore.test.ts ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { describe, it, expect, afterEach } from 'vitest';
8
+ import { Ignore, loadIgnoreRules } from './ignore.js';
9
+ import { createTmpDir, cleanupTmpDir } from '@qwen-code/qwen-code-test-utils';
10
+
11
// Unit tests for the Ignore pattern container: the split between
// directory-only and file-level predicates, accumulation, and fingerprints.
describe('Ignore', () => {
  describe('getDirectoryFilter', () => {
    it('should ignore directories matching directory patterns', () => {
      const ig = new Ignore().add(['foo/', 'bar/']);
      const dirFilter = ig.getDirectoryFilter();
      expect(dirFilter('foo/')).toBe(true);
      expect(dirFilter('bar/')).toBe(true);
      expect(dirFilter('baz/')).toBe(false);
    });

    it('should not ignore directories with file patterns', () => {
      // Patterns with a '.' in the last segment are treated as file-like.
      const ig = new Ignore().add(['foo.js', '*.log']);
      const dirFilter = ig.getDirectoryFilter();
      expect(dirFilter('foo.js')).toBe(false);
      expect(dirFilter('foo.log')).toBe(false);
    });
  });

  describe('getFileFilter', () => {
    it('should not ignore files with directory patterns', () => {
      // Trailing-slash patterns only affect the directory filter.
      const ig = new Ignore().add(['foo/', 'bar/']);
      const fileFilter = ig.getFileFilter();
      expect(fileFilter('foo')).toBe(false);
      expect(fileFilter('foo/file.txt')).toBe(false);
    });

    it('should ignore files matching file patterns', () => {
      const ig = new Ignore().add(['*.log', 'foo.js']);
      const fileFilter = ig.getFileFilter();
      expect(fileFilter('foo.log')).toBe(true);
      expect(fileFilter('foo.js')).toBe(true);
      expect(fileFilter('bar.txt')).toBe(false);
    });
  });

  it('should accumulate patterns across multiple add() calls', () => {
    const ig = new Ignore().add('foo.js');
    ig.add('bar.js');
    const fileFilter = ig.getFileFilter();
    expect(fileFilter('foo.js')).toBe(true);
    expect(fileFilter('bar.js')).toBe(true);
    expect(fileFilter('baz.js')).toBe(false);
  });

  it('should return a stable and consistent fingerprint', () => {
    // Array form and newline-separated string form must be equivalent.
    const ig1 = new Ignore().add(['foo', '!bar']);
    const ig2 = new Ignore().add('foo\n!bar');

    // Fingerprints should be identical for the same rules.
    expect(ig1.getFingerprint()).toBe(ig2.getFingerprint());

    // Adding a new rule should change the fingerprint.
    ig2.add('baz');
    expect(ig1.getFingerprint()).not.toBe(ig2.getFingerprint());
  });
});
67
+
68
// Tests for loading ignore rules from on-disk .gitignore/.geminiignore
// files plus explicit directory patterns, each in a fresh temp directory.
describe('loadIgnoreRules', () => {
  let tmpDir: string;

  afterEach(async () => {
    if (tmpDir) {
      await cleanupTmpDir(tmpDir);
    }
  });

  it('should load rules from .gitignore', async () => {
    tmpDir = await createTmpDir({
      '.gitignore': '*.log',
    });
    const ignore = loadIgnoreRules({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: false,
      ignoreDirs: [],
    });
    const fileFilter = ignore.getFileFilter();
    expect(fileFilter('test.log')).toBe(true);
    expect(fileFilter('test.txt')).toBe(false);
  });

  it('should load rules from .geminiignore', async () => {
    tmpDir = await createTmpDir({
      '.geminiignore': '*.log',
    });
    const ignore = loadIgnoreRules({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: true,
      ignoreDirs: [],
    });
    const fileFilter = ignore.getFileFilter();
    expect(fileFilter('test.log')).toBe(true);
    expect(fileFilter('test.txt')).toBe(false);
  });

  it('should combine rules from .gitignore and .geminiignore', async () => {
    tmpDir = await createTmpDir({
      '.gitignore': '*.log',
      '.geminiignore': '*.txt',
    });
    const ignore = loadIgnoreRules({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: true,
      ignoreDirs: [],
    });
    const fileFilter = ignore.getFileFilter();
    expect(fileFilter('test.log')).toBe(true);
    expect(fileFilter('test.txt')).toBe(true);
    expect(fileFilter('test.md')).toBe(false);
  });

  it('should add ignoreDirs', async () => {
    tmpDir = await createTmpDir({});
    const ignore = loadIgnoreRules({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: ['logs/'],
    });
    const dirFilter = ignore.getDirectoryFilter();
    expect(dirFilter('logs/')).toBe(true);
    expect(dirFilter('src/')).toBe(false);
  });

  it('should handle missing ignore files gracefully', async () => {
    // No ignore files on disk: loading must succeed and ignore nothing.
    tmpDir = await createTmpDir({});
    const ignore = loadIgnoreRules({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: true,
      ignoreDirs: [],
    });
    const fileFilter = ignore.getFileFilter();
    expect(fileFilter('anyfile.txt')).toBe(false);
  });

  it('should always add .git to the ignore list', async () => {
    tmpDir = await createTmpDir({});
    const ignore = loadIgnoreRules({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
    });
    const dirFilter = ignore.getDirectoryFilter();
    expect(dirFilter('.git/')).toBe(true);
  });
});
projects/ui/qwen-code/packages/core/src/utils/filesearch/ignore.ts ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import fs from 'node:fs';
8
+ import path from 'node:path';
9
+ import ignore from 'ignore';
10
+ import picomatch from 'picomatch';
11
+
12
// Heuristic matcher: true when the path's final segment contains a '.' or a
// '*' (the `[*.]` character class), i.e. it "looks like" a file name or a
// file glob rather than a bare directory name.
const hasFileExtension = picomatch('**/*[*.]*');
13
+
14
/** Options controlling which ignore sources {@link loadIgnoreRules} reads. */
export interface LoadIgnoreRulesOptions {
  // Directory containing the ignore files.
  projectRoot: string;
  // Whether to read .gitignore from the project root.
  useGitignore: boolean;
  // Whether to read .geminiignore from the project root.
  useGeminiignore: boolean;
  // Additional directory patterns to ignore (with or without trailing '/').
  ignoreDirs: string[];
}
20
+
21
+ export function loadIgnoreRules(options: LoadIgnoreRulesOptions): Ignore {
22
+ const ignorer = new Ignore();
23
+ if (options.useGitignore) {
24
+ const gitignorePath = path.join(options.projectRoot, '.gitignore');
25
+ if (fs.existsSync(gitignorePath)) {
26
+ ignorer.add(fs.readFileSync(gitignorePath, 'utf8'));
27
+ }
28
+ }
29
+
30
+ if (options.useGeminiignore) {
31
+ const geminiignorePath = path.join(options.projectRoot, '.geminiignore');
32
+ if (fs.existsSync(geminiignorePath)) {
33
+ ignorer.add(fs.readFileSync(geminiignorePath, 'utf8'));
34
+ }
35
+ }
36
+
37
+ const ignoreDirs = ['.git', ...options.ignoreDirs];
38
+ ignorer.add(
39
+ ignoreDirs.map((dir) => {
40
+ if (dir.endsWith('/')) {
41
+ return dir;
42
+ }
43
+ return `${dir}/`;
44
+ }),
45
+ );
46
+
47
+ return ignorer;
48
+ }
49
+
50
+ export class Ignore {
51
+ private readonly allPatterns: string[] = [];
52
+ private dirIgnorer = ignore();
53
+ private fileIgnorer = ignore();
54
+
55
+ /**
56
+ * Adds one or more ignore patterns.
57
+ * @param patterns A single pattern string or an array of pattern strings.
58
+ * Each pattern can be a glob-like string similar to .gitignore rules.
59
+ * @returns The `Ignore` instance for chaining.
60
+ */
61
+ add(patterns: string | string[]): this {
62
+ if (typeof patterns === 'string') {
63
+ patterns = patterns.split(/\r?\n/);
64
+ }
65
+
66
+ for (const p of patterns) {
67
+ const pattern = p.trim();
68
+
69
+ if (pattern === '' || pattern.startsWith('#')) {
70
+ continue;
71
+ }
72
+
73
+ this.allPatterns.push(pattern);
74
+
75
+ const isPositiveDirPattern =
76
+ pattern.endsWith('/') && !pattern.startsWith('!');
77
+
78
+ if (isPositiveDirPattern) {
79
+ this.dirIgnorer.add(pattern);
80
+ } else {
81
+ // An ambiguous pattern (e.g., "build") could match a file or a
82
+ // directory. To optimize the file system crawl, we use a heuristic:
83
+ // patterns without a dot in the last segment are included in the
84
+ // directory exclusion check.
85
+ //
86
+ // This heuristic can fail. For example, an ignore pattern of "my.assets"
87
+ // intended to exclude a directory will not be treated as a directory
88
+ // pattern because it contains a ".". This results in crawling a
89
+ // directory that should have been excluded, reducing efficiency.
90
+ // Correctness is still maintained. The incorrectly crawled directory
91
+ // will be filtered out by the final ignore check.
92
+ //
93
+ // For maximum crawl efficiency, users should explicitly mark directory
94
+ // patterns with a trailing slash (e.g., "my.assets/").
95
+ this.fileIgnorer.add(pattern);
96
+ if (!hasFileExtension(pattern)) {
97
+ this.dirIgnorer.add(pattern);
98
+ }
99
+ }
100
+ }
101
+
102
+ return this;
103
+ }
104
+
105
+ /**
106
+ * Returns a predicate that matches explicit directory ignore patterns (patterns ending with '/').
107
+ * @returns {(dirPath: string) => boolean}
108
+ */
109
+ getDirectoryFilter(): (dirPath: string) => boolean {
110
+ return (dirPath: string) => this.dirIgnorer.ignores(dirPath);
111
+ }
112
+
113
+ /**
114
+ * Returns a predicate that matches file ignore patterns (all patterns not ending with '/').
115
+ * Note: This may also match directories if a file pattern matches a directory name, but all explicit directory patterns are handled by getDirectoryFilter.
116
+ * @returns {(filePath: string) => boolean}
117
+ */
118
+ getFileFilter(): (filePath: string) => boolean {
119
+ return (filePath: string) => this.fileIgnorer.ignores(filePath);
120
+ }
121
+
122
+ /**
123
+ * Returns a string representing the current set of ignore patterns.
124
+ * This can be used to generate a unique identifier for the ignore configuration,
125
+ * useful for caching purposes.
126
+ * @returns A string fingerprint of the ignore patterns.
127
+ */
128
+ getFingerprint(): string {
129
+ return this.allPatterns.join('\n');
130
+ }
131
+ }
projects/ui/qwen-code/packages/core/src/utils/filesearch/result-cache.test.ts ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ * SPDX-License-Identifier: Apache-2.0
5
+ */
6
+
7
+ import { test, expect } from 'vitest';
8
+ import { ResultCache } from './result-cache.js';
9
+
10
// With an empty cache, get() returns the full file list and flags it as a
// non-exact match (the caller is expected to filter it).
test('ResultCache basic usage', async () => {
  const files = [
    'foo.txt',
    'bar.js',
    'baz.md',
    'subdir/file.txt',
    'subdir/other.js',
    'subdir/nested/file.md',
  ];
  const cache = new ResultCache(files);
  const { files: resultFiles, isExactMatch } = await cache.get('*.js');
  expect(resultFiles).toEqual(files);
  expect(isExactMatch).toBe(false);
});
24
+
25
// set() stores a filtered result; a subsequent get() for the same pattern
// must return it verbatim as an exact match.
test('ResultCache cache hit/miss', async () => {
  const files = ['foo.txt', 'bar.js', 'baz.md'];
  const cache = new ResultCache(files);
  // First call: miss
  const { files: result1Files, isExactMatch: isExactMatch1 } =
    await cache.get('*.js');
  expect(result1Files).toEqual(files);
  expect(isExactMatch1).toBe(false);

  // Simulate FileSearch applying the filter and setting the result
  cache.set('*.js', ['bar.js']);

  // Second call: hit
  const { files: result2Files, isExactMatch: isExactMatch2 } =
    await cache.get('*.js');
  expect(result2Files).toEqual(['bar.js']);
  expect(isExactMatch2).toBe(true);
});
43
+
44
// A cached broader query ('foo') should serve as the candidate superset for
// a more specific query ('foobar') — returned as a non-exact match.
test('ResultCache best base query', async () => {
  const files = ['foo.txt', 'foobar.js', 'baz.md'];
  const cache = new ResultCache(files);

  // Cache a broader query
  cache.set('foo', ['foo.txt', 'foobar.js']);

  // Search for a more specific query that starts with the broader one
  const { files: resultFiles, isExactMatch } = await cache.get('foobar');
  expect(resultFiles).toEqual(['foo.txt', 'foobar.js']);
  expect(isExactMatch).toBe(false);
});