| | import { PromptTemplate } from '@langchain/core/prompts'; |
| | import { BaseMessage, getBufferString } from '@langchain/core/messages'; |
| | import type { GraphEdge } from '@librechat/agents'; |
| |
|
/**
 * Fallback prompt used when callers of `createSequentialChainEdges` do not
 * supply their own template. The `{convo}` placeholder is substituted with
 * the buffered conversation/analysis produced by the preceding agents.
 */
const DEFAULT_PROMPT_TEMPLATE = `Based on the following conversation and analysis from previous agents, please provide your insights:\n\n{convo}\n\nPlease add your specific expertise and perspective to this discussion.`;
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | export async function createSequentialChainEdges( |
| | agentIds: string[], |
| | promptTemplate = DEFAULT_PROMPT_TEMPLATE, |
| | ): Promise<GraphEdge[]> { |
| | const edges: GraphEdge[] = []; |
| |
|
| | for (let i = 0; i < agentIds.length - 1; i++) { |
| | const fromAgent = agentIds[i]; |
| | const toAgent = agentIds[i + 1]; |
| |
|
| | edges.push({ |
| | from: fromAgent, |
| | to: toAgent, |
| | edgeType: 'direct', |
| | |
| | prompt: async (messages: BaseMessage[], startIndex: number) => { |
| | |
| | const runMessages = messages.slice(startIndex); |
| | const bufferString = getBufferString(runMessages); |
| | const template = PromptTemplate.fromTemplate(promptTemplate); |
| | const result = await template.invoke({ |
| | convo: bufferString, |
| | }); |
| | return result.value; |
| | }, |
| | |
| | excludeResults: true, |
| | description: `Sequential chain from ${fromAgent} to ${toAgent}`, |
| | }); |
| | } |
| |
|
| | return edges; |
| | } |
| |
|