Cuong2004 committed on
Commit
faf5d21
·
1 Parent(s): 5c04a26

Add detailed logging for prompts and input data in agent executor to improve debugging and traceability. This includes user text, guidelines count, and conversation context before sending requests to the LLM.

Browse files
Files changed (1) hide show
  1. src/agent/agent-executor.ts +43 -0
src/agent/agent-executor.ts CHANGED
@@ -167,6 +167,23 @@ Tạo response JSON (CHỈ JSON thuần, không có markdown):
167
  }
168
  }`;
169
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170
  const generations = await this.llm._generate([prompt]);
171
  const response = generations.generations[0][0].text;
172
 
@@ -452,6 +469,32 @@ Hãy tạo response JSON với format sau (CHỈ JSON thuần, không có markdo
452
  }
453
  }`;
454
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
455
  const generations = await this.llm._generate([prompt]);
456
  const response = generations.generations[0][0].text;
457
 
 
167
  }
168
  }`;
169
 
170
+ // Log prompt and input data before sending to LLM
171
+ logger.info('='.repeat(80));
172
+ logger.info('[AGENT] PROMPT SENT TO LLM (Disease Info Query):');
173
+ logger.info(prompt);
174
+ logger.info('='.repeat(80));
175
+ logger.info('[AGENT] INPUT DATA SUMMARY (Disease Info Query):');
176
+ logger.info(`- User text: "${userText}"`);
177
+ logger.info(`- Guidelines count: ${guidelines.length}`);
178
+ if (guidelines.length > 0) {
179
+ guidelines.forEach((g, i) => {
180
+ const content = typeof g === 'string' ? g : (g.content || g.snippet || JSON.stringify(g));
181
+ logger.info(` ${i + 1}. Preview: ${content.substring(0, 200)}...`);
182
+ });
183
+ }
184
+ logger.info(`- Conversation context: ${conversationContext ? 'Yes' : 'No'}`);
185
+ logger.info('='.repeat(80));
186
+
187
  const generations = await this.llm._generate([prompt]);
188
  const response = generations.generations[0][0].text;
189
 
 
469
  }
470
  }`;
471
 
472
+ // Log prompt and input data before sending to LLM
473
+ logger.info('='.repeat(80));
474
+ logger.info('[AGENT] PROMPT SENT TO LLM:');
475
+ logger.info(prompt);
476
+ logger.info('='.repeat(80));
477
+ logger.info('[AGENT] INPUT DATA SUMMARY:');
478
+ logger.info(`- User text: "${userText}"`);
479
+ logger.info(`- CV results count: ${cvResult.top_conditions.length}`);
480
+ if (cvResult.top_conditions.length > 0) {
481
+ cvResult.top_conditions.forEach((c: any, i: number) => {
482
+ logger.info(` ${i + 1}. ${c.name}: ${(c.prob * 100).toFixed(1)}%`);
483
+ });
484
+ }
485
+ logger.info(`- Triage level: ${triageResult.triage}`);
486
+ logger.info(`- Triage reasoning: ${triageResult.reasoning || 'N/A'}`);
487
+ logger.info(`- Red flags: ${triageResult.red_flags?.join(', ') || 'None'}`);
488
+ logger.info(`- Guidelines count: ${guidelines.length}`);
489
+ if (guidelines.length > 0) {
490
+ guidelines.forEach((g, i) => {
491
+ const content = typeof g === 'string' ? g : (g.content || g.snippet || JSON.stringify(g));
492
+ logger.info(` ${i + 1}. Preview: ${content.substring(0, 200)}...`);
493
+ });
494
+ }
495
+ logger.info(`- Conversation context: ${conversationContext ? 'Yes' : 'No'}`);
496
+ logger.info('='.repeat(80));
497
+
498
  const generations = await this.llm._generate([prompt]);
499
  const response = generations.generations[0][0].text;
500