index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/event_stream.ts
import { BaseTracer, type Run } from "./base.js";
import {
  BaseCallbackHandler,
  BaseCallbackHandlerInput,
} from "../callbacks/base.js";
import { IterableReadableStream } from "../utils/stream.js";
import { AIMessageChunk } from "../messages/ai.js";
import { ChatGeneration, Generation, GenerationChunk } from "../outputs.js";
import { BaseMessage } from "../messages/base.js";

/**
 * Data associated with a StreamEvent.
 */
export type StreamEventData = {
  /**
   * The input passed to the runnable that generated the event.
   * Inputs will sometimes be available at the *START* of the runnable, and
   * sometimes at the *END* of the runnable.
   * If a runnable is able to stream its inputs, then its input by definition
   * won't be known until the *END* of the runnable when it has finished streaming
   * its inputs.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  input?: any;

  /**
   * The output of the runnable that generated the event.
   * Outputs will only be available at the *END* of the runnable.
   * For most runnables, this field can be inferred from the `chunk` field,
   * though there might be some exceptions for special cased runnables (e.g., like
   * chat models), which may return more information.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  output?: any;

  /**
   * A streaming chunk from the output that generated the event.
   * chunks support addition in general, and adding them up should result
   * in the output of the runnable that generated the event.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  chunk?: any;
};

/**
 * A streaming event.
 *
 * Schema of a streaming event which is produced from the streamEvents method.
 */
export type StreamEvent = {
  /**
   * Event names are of the format: on_[runnable_type]_(start|stream|end).
   *
   * Runnable types are one of:
   * - llm - used by non chat models
   * - chat_model - used by chat models
   * - prompt -- e.g., ChatPromptTemplate
   * - tool -- LangChain tools
   * - chain - most Runnables are of this type
   *
   * Further, the events are categorized as one of:
   * - start - when the runnable starts
   * - stream - when the runnable is streaming
   * - end - when the runnable ends
   *
   * start, stream and end are associated with slightly different `data` payload.
   *
   * Please see the documentation for `EventData` for more details.
   */
  event: string;

  /** The name of the runnable that generated the event. */
  name: string;

  /**
   * A randomly generated ID to keep track of the execution of the given runnable.
   *
   * Each child runnable that gets invoked as part of the execution of a parent runnable
   * is assigned its own unique ID.
   */
  run_id: string;

  /**
   * Tags associated with the runnable that generated this event.
   * Tags are always inherited from parent runnables.
   */
  tags?: string[];

  /** Metadata associated with the runnable that generated this event. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  metadata: Record<string, any>;

  /**
   * Event data.
   *
   * The contents of the event data depend on the event type.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  data: StreamEventData;
};

// Internal bookkeeping for an in-flight run, keyed by run ID in the handler's
// run map. `inputs` is only set once known (it may arrive late for streams).
type RunInfo = {
  name: string;
  tags: string[];
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  metadata: Record<string, any>;
  runType: string;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  inputs?: Record<string, any>;
};

// Constructor options for EventStreamCallbackHandler: `autoClose` controls
// stream closing behavior, and the include/exclude lists filter which runs
// produce events (by name, run type, or tag).
export interface EventStreamCallbackHandlerInput
  extends BaseCallbackHandlerInput {
  autoClose?: boolean;
  includeNames?: string[];
  includeTypes?: string[];
  includeTags?: string[];
  excludeNames?: string[];
  excludeTypes?: string[];
  excludeTags?: string[];
}

// Resolves a display name for a run: an explicit `name` wins, then the
// serialized representation's `name`, then the last segment of its `id` path,
// falling back to "Unnamed".
function assignName({
  name,
  serialized,
}: {
  name?: string;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  serialized?: Record<string, any>;
}): string {
  if (name !== undefined) {
    return name;
  }
  if (serialized?.name !== undefined) {
    return serialized.name;
  } else if (serialized?.id !== undefined && Array.isArray(serialized?.id)) {
    return serialized.id[serialized.id.length - 1];
  }
  return "Unnamed";
}

// Type guard: identifies an EventStreamCallbackHandler among generic callback
// handlers by its well-known `name` sentinel.
export const isStreamEventsHandler = (
  handler: BaseCallbackHandler
): handler is EventStreamCallbackHandler =>
  handler.name === "event_stream_tracer";

/**
 * Class that extends the `BaseTracer` class from the
 * `langchain.callbacks.tracers.base` module. It represents a callback
 * handler that logs the execution of runs and emits `RunLog` instances to a
 * `RunLogStream`.
*/ export class EventStreamCallbackHandler extends BaseTracer { protected autoClose = true; protected includeNames?: string[]; protected includeTypes?: string[]; protected includeTags?: string[]; protected excludeNames?: string[]; protected excludeTypes?: string[]; protected excludeTags?: string[]; private runInfoMap: Map<string, RunInfo> = new Map(); private tappedPromises: Map<string, Promise<void>> = new Map(); protected transformStream: TransformStream; public writer: WritableStreamDefaultWriter; public receiveStream: IterableReadableStream<StreamEvent>; name = "event_stream_tracer"; constructor(fields?: EventStreamCallbackHandlerInput) { super({ _awaitHandler: true, ...fields }); this.autoClose = fields?.autoClose ?? true; this.includeNames = fields?.includeNames; this.includeTypes = fields?.includeTypes; this.includeTags = fields?.includeTags; this.excludeNames = fields?.excludeNames; this.excludeTypes = fields?.excludeTypes; this.excludeTags = fields?.excludeTags; this.transformStream = new TransformStream(); this.writer = this.transformStream.writable.getWriter(); this.receiveStream = IterableReadableStream.fromReadableStream( this.transformStream.readable ); } [Symbol.asyncIterator]() { return this.receiveStream; } protected async persistRun(_run: Run): Promise<void> { // This is a legacy method only called once for an entire run tree // and is therefore not useful here } _includeRun(run: RunInfo): boolean { const runTags = run.tags ?? 
[]; let include = this.includeNames === undefined && this.includeTags === undefined && this.includeTypes === undefined; if (this.includeNames !== undefined) { include = include || this.includeNames.includes(run.name); } if (this.includeTypes !== undefined) { include = include || this.includeTypes.includes(run.runType); } if (this.includeTags !== undefined) { include = include || runTags.find((tag) => this.includeTags?.includes(tag)) !== undefined; } if (this.excludeNames !== undefined) { include = include && !this.excludeNames.includes(run.name); } if (this.excludeTypes !== undefined) { include = include && !this.excludeTypes.includes(run.runType); } if (this.excludeTags !== undefined) { include = include && runTags.every((tag) => !this.excludeTags?.includes(tag)); } return include; } async *tapOutputIterable<T>( runId: string, outputStream: AsyncGenerator<T> ): AsyncGenerator<T> { const firstChunk = await outputStream.next(); if (firstChunk.done) { return; } const runInfo = this.runInfoMap.get(runId); // Run has finished, don't issue any stream events. // An example of this is for runnables that use the default // implementation of .stream(), which delegates to .invoke() // and calls .onChainEnd() before passing it to the iterator. 
if (runInfo === undefined) { yield firstChunk.value; return; } // Match format from handlers below function _formatOutputChunk(eventType: string, data: unknown) { if (eventType === "llm" && typeof data === "string") { return new GenerationChunk({ text: data }); } return data; } let tappedPromise = this.tappedPromises.get(runId); // if we are the first to tap, issue stream events if (tappedPromise === undefined) { let tappedPromiseResolver; tappedPromise = new Promise((resolve) => { tappedPromiseResolver = resolve; }); this.tappedPromises.set(runId, tappedPromise); try { const event: StreamEvent = { event: `on_${runInfo.runType}_stream`, run_id: runId, name: runInfo.name, tags: runInfo.tags, metadata: runInfo.metadata, data: {}, }; await this.send( { ...event, data: { chunk: _formatOutputChunk(runInfo.runType, firstChunk.value), }, }, runInfo ); yield firstChunk.value; for await (const chunk of outputStream) { // Don't yield tool and retriever stream events if (runInfo.runType !== "tool" && runInfo.runType !== "retriever") { await this.send( { ...event, data: { chunk: _formatOutputChunk(runInfo.runType, chunk), }, }, runInfo ); } yield chunk; } } finally { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion tappedPromiseResolver!(); // Don't delete from the promises map to keep track of which runs have been tapped. } } else { // otherwise just pass through yield firstChunk.value; for await (const chunk of outputStream) { yield chunk; } } } async send(payload: StreamEvent, run: RunInfo) { if (this._includeRun(run)) { await this.writer.write(payload); } } async sendEndEvent(payload: StreamEvent, run: RunInfo) { const tappedPromise = this.tappedPromises.get(payload.run_id); if (tappedPromise !== undefined) { void tappedPromise.then(() => { void this.send(payload, run); }); } else { await this.send(payload, run); } } async onLLMStart(run: Run): Promise<void> { const runName = assignName(run); const runType = run.inputs.messages !== undefined ? 
"chat_model" : "llm"; const runInfo = { tags: run.tags ?? [], metadata: run.extra?.metadata ?? {}, name: runName, runType, inputs: run.inputs, }; this.runInfoMap.set(run.id, runInfo); const eventName = `on_${runType}_start`; await this.send( { event: eventName, data: { input: run.inputs, }, name: runName, tags: run.tags ?? [], run_id: run.id, metadata: run.extra?.metadata ?? {}, }, runInfo ); } async onLLMNewToken( run: Run, token: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any kwargs?: { chunk: any } ): Promise<void> { const runInfo = this.runInfoMap.get(run.id); let chunk; let eventName; if (runInfo === undefined) { throw new Error(`onLLMNewToken: Run ID ${run.id} not found in run map.`); } // Top-level streaming events are covered by tapOutputIterable if (this.runInfoMap.size === 1) { return; } if (runInfo.runType === "chat_model") { eventName = "on_chat_model_stream"; if (kwargs?.chunk === undefined) { chunk = new AIMessageChunk({ content: token, id: `run-${run.id}` }); } else { chunk = kwargs.chunk.message; } } else if (runInfo.runType === "llm") { eventName = "on_llm_stream"; if (kwargs?.chunk === undefined) { chunk = new GenerationChunk({ text: token }); } else { chunk = kwargs.chunk; } } else { throw new Error(`Unexpected run type ${runInfo.runType}`); } await this.send( { event: eventName, data: { chunk, }, run_id: run.id, name: runInfo.name, tags: runInfo.tags, metadata: runInfo.metadata, }, runInfo ); } async onLLMEnd(run: Run): Promise<void> { const runInfo = this.runInfoMap.get(run.id); this.runInfoMap.delete(run.id); let eventName: string; if (runInfo === undefined) { throw new Error(`onLLMEnd: Run ID ${run.id} not found in run map.`); } const generations: ChatGeneration[][] | Generation[][] | undefined = run.outputs?.generations; // eslint-disable-next-line @typescript-eslint/no-explicit-any let output: BaseMessage | Record<string, any> | undefined; if (runInfo.runType === "chat_model") { for (const generation of generations 
?? []) { if (output !== undefined) { break; } output = (generation[0] as ChatGeneration | undefined)?.message; } eventName = "on_chat_model_end"; } else if (runInfo.runType === "llm") { output = { generations: generations?.map((generation) => { return generation.map((chunk) => { return { text: chunk.text, generationInfo: chunk.generationInfo, }; }); }), llmOutput: run.outputs?.llmOutput ?? {}, }; eventName = "on_llm_end"; } else { throw new Error(`onLLMEnd: Unexpected run type: ${runInfo.runType}`); } await this.sendEndEvent( { event: eventName, data: { output, input: runInfo.inputs, }, run_id: run.id, name: runInfo.name, tags: runInfo.tags, metadata: runInfo.metadata, }, runInfo ); } async onChainStart(run: Run): Promise<void> { const runName = assignName(run); const runType = run.run_type ?? "chain"; const runInfo: RunInfo = { tags: run.tags ?? [], metadata: run.extra?.metadata ?? {}, name: runName, runType: run.run_type, }; let eventData: StreamEventData = {}; // Workaround Runnable core code not sending input when transform streaming. if (run.inputs.input === "" && Object.keys(run.inputs).length === 1) { eventData = {}; runInfo.inputs = {}; } else if (run.inputs.input !== undefined) { eventData.input = run.inputs.input; runInfo.inputs = run.inputs.input; } else { eventData.input = run.inputs; runInfo.inputs = run.inputs; } this.runInfoMap.set(run.id, runInfo); await this.send( { event: `on_${runType}_start`, data: eventData, name: runName, tags: run.tags ?? [], run_id: run.id, metadata: run.extra?.metadata ?? {}, }, runInfo ); } async onChainEnd(run: Run): Promise<void> { const runInfo = this.runInfoMap.get(run.id); this.runInfoMap.delete(run.id); if (runInfo === undefined) { throw new Error(`onChainEnd: Run ID ${run.id} not found in run map.`); } const eventName = `on_${run.run_type}_end`; const inputs = run.inputs ?? runInfo.inputs ?? {}; const outputs = run.outputs?.output ?? 
run.outputs; const data: StreamEventData = { output: outputs, input: inputs, }; if (inputs.input && Object.keys(inputs).length === 1) { data.input = inputs.input; runInfo.inputs = inputs.input; } await this.sendEndEvent( { event: eventName, data, run_id: run.id, name: runInfo.name, tags: runInfo.tags, metadata: runInfo.metadata ?? {}, }, runInfo ); } async onToolStart(run: Run): Promise<void> { const runName = assignName(run); const runInfo = { tags: run.tags ?? [], metadata: run.extra?.metadata ?? {}, name: runName, runType: "tool", inputs: run.inputs ?? {}, }; this.runInfoMap.set(run.id, runInfo); await this.send( { event: "on_tool_start", data: { input: run.inputs ?? {}, }, name: runName, run_id: run.id, tags: run.tags ?? [], metadata: run.extra?.metadata ?? {}, }, runInfo ); } async onToolEnd(run: Run): Promise<void> { const runInfo = this.runInfoMap.get(run.id); this.runInfoMap.delete(run.id); if (runInfo === undefined) { throw new Error(`onToolEnd: Run ID ${run.id} not found in run map.`); } if (runInfo.inputs === undefined) { throw new Error( `onToolEnd: Run ID ${run.id} is a tool call, and is expected to have traced inputs.` ); } const output = run.outputs?.output === undefined ? run.outputs : run.outputs.output; await this.sendEndEvent( { event: "on_tool_end", data: { output, input: runInfo.inputs, }, run_id: run.id, name: runInfo.name, tags: runInfo.tags, metadata: runInfo.metadata, }, runInfo ); } async onRetrieverStart(run: Run): Promise<void> { const runName = assignName(run); const runType = "retriever"; const runInfo = { tags: run.tags ?? [], metadata: run.extra?.metadata ?? {}, name: runName, runType, inputs: { query: run.inputs.query, }, }; this.runInfoMap.set(run.id, runInfo); await this.send( { event: "on_retriever_start", data: { input: { query: run.inputs.query, }, }, name: runName, tags: run.tags ?? [], run_id: run.id, metadata: run.extra?.metadata ?? 
{}, }, runInfo ); } async onRetrieverEnd(run: Run): Promise<void> { const runInfo = this.runInfoMap.get(run.id); this.runInfoMap.delete(run.id); if (runInfo === undefined) { throw new Error(`onRetrieverEnd: Run ID ${run.id} not found in run map.`); } await this.sendEndEvent( { event: "on_retriever_end", data: { output: run.outputs?.documents ?? run.outputs, input: runInfo.inputs, }, run_id: run.id, name: runInfo.name, tags: runInfo.tags, metadata: runInfo.metadata, }, runInfo ); } // eslint-disable-next-line @typescript-eslint/no-explicit-any async handleCustomEvent(eventName: string, data: any, runId: string) { const runInfo = this.runInfoMap.get(runId); if (runInfo === undefined) { throw new Error( `handleCustomEvent: Run ID ${runId} not found in run map.` ); } await this.send( { event: "on_custom_event", run_id: runId, name: eventName, tags: runInfo.tags, metadata: runInfo.metadata, data, }, runInfo ); } async finish() { const pendingPromises = [...this.tappedPromises.values()]; void Promise.all(pendingPromises).finally(() => { void this.writer.close(); }); } }
0
lc_public_repos/langchainjs/langchain-core/src/tracers
lc_public_repos/langchainjs/langchain-core/src/tracers/tests/langsmith_interop.test.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ /* eslint-disable no-process-env */ /* eslint-disable no-promise-executor-return */ import { jest, test, beforeEach, afterEach, afterAll, expect, } from "@jest/globals"; import { traceable } from "langsmith/traceable"; import { Client } from "langsmith"; import { RunnableLambda } from "../../runnables/base.js"; import { BaseMessage, HumanMessage } from "../../messages/index.js"; import { setDefaultLangChainClientSingleton } from "../../singletons/tracer.js"; let fetchMock: any; const originalTracingEnvValue = process.env.LANGCHAIN_TRACING_V2; const client = new Client({ autoBatchTracing: false, }); beforeEach(() => { fetchMock = jest.spyOn(global, "fetch").mockImplementation(() => Promise.resolve({ ok: true, text: () => "", json: () => { return {}; }, } as any) ); setDefaultLangChainClientSingleton(client); process.env.LANGCHAIN_TRACING_V2 = "true"; }); afterEach(() => { jest.restoreAllMocks(); }); afterAll(() => { process.env.LANGCHAIN_TRACING_V2 = originalTracingEnvValue; }); test.each(["true", "false"])( "traceables nested within runnables with background callbacks %s", async (value) => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = value; const aiGreet = traceable( async (msg: BaseMessage, name = "world") => { await new Promise((resolve) => setTimeout(resolve, 300)); return msg.content + name; }, { name: "aiGreet", tracingEnabled: true, client } ); const root = RunnableLambda.from(async (messages: BaseMessage[]) => { const lastMsg = messages.at(-1) as HumanMessage; const greetOne = await aiGreet(lastMsg, "David"); return [greetOne]; }); await root.invoke([new HumanMessage({ content: "Hello!" 
})]); const relevantCalls = fetchMock.mock.calls.filter((call: any) => { return call[0].startsWith("https://api.smith.langchain.com/runs"); }); expect(relevantCalls.length).toEqual(4); const firstCallParams = JSON.parse((relevantCalls[0][1] as any).body); const secondCallParams = JSON.parse((relevantCalls[1][1] as any).body); const thirdCallParams = JSON.parse((relevantCalls[2][1] as any).body); const fourthCallParams = JSON.parse((relevantCalls[3][1] as any).body); expect(firstCallParams).toMatchObject({ id: firstCallParams.id, name: "RunnableLambda", start_time: expect.any(Number), serialized: { lc: 1, type: "not_implemented", id: ["langchain_core", "runnables", "RunnableLambda"], }, events: [{ name: "start", time: expect.any(String) }], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, execution_order: 1, child_execution_order: 1, run_type: "chain", extra: expect.any(Object), tags: [], trace_id: firstCallParams.id, dotted_order: expect.any(String), }); expect(secondCallParams).toMatchObject({ id: expect.any(String), name: "aiGreet", start_time: expect.any(Number), run_type: "chain", extra: expect.any(Object), serialized: {}, inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "David", ], }, child_runs: [], parent_run_id: firstCallParams.id, trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), tags: [], }); expect(thirdCallParams).toMatchObject({ end_time: expect.any(Number), inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "David", ], }, outputs: { outputs: "Hello!David" }, parent_run_id: firstCallParams.id, extra: 
expect.any(Object), dotted_order: secondCallParams.dotted_order, trace_id: firstCallParams.id, tags: [], }); expect(fourthCallParams).toMatchObject({ end_time: expect.any(Number), outputs: { output: ["Hello!David"] }, events: [ { name: "start", time: expect.any(String) }, { name: "end", time: expect.any(String) }, ], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, trace_id: firstCallParams.id, dotted_order: firstCallParams.dotted_order, }); } ); test.each(["true", "false"])( "traceables nested within runnables with a context var set and with background callbacks %s", async (value) => { const { setContextVariable, getContextVariable } = await import( "../../context.js" ); process.env.LANGCHAIN_CALLBACKS_BACKGROUND = value; setContextVariable("foo", "bar"); const aiGreet = traceable( async (msg: BaseMessage, name = "world") => { await new Promise((resolve) => setTimeout(resolve, 300)); expect(getContextVariable("foo")).toEqual("baz"); return msg.content + name; }, { name: "aiGreet", tracingEnabled: true, client } ); const root = RunnableLambda.from(async (messages: BaseMessage[]) => { const lastMsg = messages.at(-1) as HumanMessage; expect(getContextVariable("foo")).toEqual("bar"); setContextVariable("foo", "baz"); const greetOne = await aiGreet(lastMsg, "David"); return [greetOne]; }); await root.invoke([new HumanMessage({ content: "Hello!" 
})]); const relevantCalls = fetchMock.mock.calls.filter((call: any) => { return call[0].startsWith("https://api.smith.langchain.com/runs"); }); expect(relevantCalls.length).toEqual(4); const firstCallParams = JSON.parse((relevantCalls[0][1] as any).body); const secondCallParams = JSON.parse((relevantCalls[1][1] as any).body); const thirdCallParams = JSON.parse((relevantCalls[2][1] as any).body); const fourthCallParams = JSON.parse((relevantCalls[3][1] as any).body); expect(firstCallParams).toMatchObject({ id: firstCallParams.id, name: "RunnableLambda", start_time: expect.any(Number), serialized: { lc: 1, type: "not_implemented", id: ["langchain_core", "runnables", "RunnableLambda"], }, events: [{ name: "start", time: expect.any(String) }], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, execution_order: 1, child_execution_order: 1, run_type: "chain", extra: expect.any(Object), tags: [], trace_id: firstCallParams.id, dotted_order: expect.any(String), }); expect(secondCallParams).toMatchObject({ id: expect.any(String), name: "aiGreet", start_time: expect.any(Number), run_type: "chain", extra: expect.any(Object), serialized: {}, inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "David", ], }, child_runs: [], parent_run_id: firstCallParams.id, trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), tags: [], }); expect(thirdCallParams).toMatchObject({ end_time: expect.any(Number), inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "David", ], }, outputs: { outputs: "Hello!David" }, parent_run_id: firstCallParams.id, extra: 
expect.any(Object), dotted_order: secondCallParams.dotted_order, trace_id: firstCallParams.id, tags: [], }); expect(fourthCallParams).toMatchObject({ end_time: expect.any(Number), outputs: { output: ["Hello!David"] }, events: [ { name: "start", time: expect.any(String) }, { name: "end", time: expect.any(String) }, ], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, trace_id: firstCallParams.id, dotted_order: firstCallParams.dotted_order, }); } ); test.each(["true", "false"])( "streaming traceables nested within runnables with background callbacks %s", async (value) => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = value; const aiGreet = traceable( async function* (msg: BaseMessage, name = "world") { const res = msg.content + name; await new Promise((resolve) => setTimeout(resolve, 300)); for (const letter of res.split("")) { yield letter; } }, { name: "aiGreet" } ); const root = RunnableLambda.from(async function* (messages: BaseMessage[]) { const lastMsg = messages.at(-1) as HumanMessage; yield* aiGreet(lastMsg, "David"); }); const stream = await root.stream([new HumanMessage({ content: "Hello!" 
})]); for await (const _ of stream) { // Just consume iterator } const relevantCalls = fetchMock.mock.calls.filter((call: any) => { return call[0].startsWith("https://api.smith.langchain.com/runs"); }); expect(relevantCalls.length).toEqual(4); const firstCallParams = JSON.parse((relevantCalls[0][1] as any).body); const secondCallParams = JSON.parse((relevantCalls[1][1] as any).body); const thirdCallParams = JSON.parse((relevantCalls[2][1] as any).body); const fourthCallParams = JSON.parse((relevantCalls[3][1] as any).body); expect(firstCallParams).toMatchObject({ id: firstCallParams.id, name: "RunnableLambda", start_time: expect.any(Number), serialized: { lc: 1, type: "not_implemented", id: ["langchain_core", "runnables", "RunnableLambda"], }, events: [{ name: "start", time: expect.any(String) }], inputs: { input: "", }, execution_order: 1, child_execution_order: 1, run_type: "chain", extra: expect.any(Object), tags: [], trace_id: firstCallParams.id, dotted_order: expect.any(String), }); expect(secondCallParams).toMatchObject({ id: expect.any(String), name: "aiGreet", start_time: expect.any(Number), run_type: "chain", extra: expect.any(Object), serialized: {}, inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "David", ], }, child_runs: [], parent_run_id: firstCallParams.id, trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), tags: [], }); expect(thirdCallParams).toMatchObject({ end_time: expect.any(Number), inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "David", ], }, outputs: { outputs: ["H", "e", "l", "l", "o", "!", "D", "a", "v", "i", "d"], }, parent_run_id: firstCallParams.id, extra: expect.any(Object), dotted_order: secondCallParams.dotted_order, 
trace_id: firstCallParams.id, tags: [], }); expect(fourthCallParams).toMatchObject({ end_time: expect.any(Number), outputs: { output: "Hello!David" }, events: [ { name: "start", time: expect.any(String) }, { name: "end", time: expect.any(String) }, ], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, trace_id: firstCallParams.id, dotted_order: firstCallParams.dotted_order, }); } ); test.each(["true", "false"])( "runnables nested within traceables with background callbacks %s", async (value) => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = value; const nested = RunnableLambda.from(async (messages: BaseMessage[]) => { const lastMsg = messages.at(-1) as HumanMessage; await new Promise((resolve) => setTimeout(resolve, 300)); return [lastMsg.content]; }); const aiGreet = traceable( async (msg: BaseMessage, name = "world") => { const contents = await nested.invoke([msg]); return contents[0] + name; }, { name: "aiGreet", tracingEnabled: true, client } ); await aiGreet(new HumanMessage({ content: "Hello!" 
}), "mitochondria"); const relevantCalls = fetchMock.mock.calls.filter((call: any) => { return call[0].startsWith("https://api.smith.langchain.com/runs"); }); expect(relevantCalls.length).toEqual(4); const firstCallParams = JSON.parse((relevantCalls[0][1] as any).body); const secondCallParams = JSON.parse((relevantCalls[1][1] as any).body); const thirdCallParams = JSON.parse((relevantCalls[2][1] as any).body); const fourthCallParams = JSON.parse((relevantCalls[3][1] as any).body); expect(firstCallParams).toMatchObject({ id: firstCallParams.id, name: "aiGreet", start_time: expect.any(Number), run_type: "chain", extra: expect.any(Object), serialized: {}, inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "mitochondria", ], }, child_runs: [], trace_id: firstCallParams.id, dotted_order: firstCallParams.dotted_order, tags: [], }); expect(secondCallParams).toMatchObject({ id: secondCallParams.id, name: "RunnableLambda", parent_run_id: firstCallParams.id, start_time: expect.any(Number), serialized: { lc: 1, type: "not_implemented", id: ["langchain_core", "runnables", "RunnableLambda"], }, events: [{ name: "start", time: expect.any(String) }], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, execution_order: 2, child_execution_order: 2, run_type: "chain", extra: expect.any(Object), tags: [], trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), }); expect(thirdCallParams).toMatchObject({ end_time: expect.any(Number), outputs: { output: ["Hello!"] }, events: [ { name: "start", time: expect.any(String) }, { name: "end", time: expect.any(String) }, ], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { 
content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), parent_run_id: firstCallParams.id, }); expect(fourthCallParams).toMatchObject({ end_time: expect.any(Number), inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "mitochondria", ], }, outputs: { outputs: "Hello!mitochondria" }, extra: expect.any(Object), dotted_order: firstCallParams.dotted_order, trace_id: firstCallParams.id, tags: [], }); } ); test.each(["true", "false"])( "runnables nested within traceables and a context var set with background callbacks %s", async (value) => { const { setContextVariable, getContextVariable } = await import( "../../context.js" ); process.env.LANGCHAIN_CALLBACKS_BACKGROUND = value; setContextVariable("foo", "bar"); const nested = RunnableLambda.from(async (messages: BaseMessage[]) => { const lastMsg = messages.at(-1) as HumanMessage; await new Promise((resolve) => setTimeout(resolve, 300)); expect(getContextVariable("foo")).toEqual("bar"); return [lastMsg.content]; }); const aiGreet = traceable( async (msg: BaseMessage, name = "world") => { const contents = await nested.invoke([msg]); expect(getContextVariable("foo")).toEqual("bar"); return contents[0] + name; }, { name: "aiGreet", tracingEnabled: true, client } ); await aiGreet(new HumanMessage({ content: "Hello!" 
}), "mitochondria"); const relevantCalls = fetchMock.mock.calls.filter((call: any) => { return call[0].startsWith("https://api.smith.langchain.com/runs"); }); expect(relevantCalls.length).toEqual(4); const firstCallParams = JSON.parse((relevantCalls[0][1] as any).body); const secondCallParams = JSON.parse((relevantCalls[1][1] as any).body); const thirdCallParams = JSON.parse((relevantCalls[2][1] as any).body); const fourthCallParams = JSON.parse((relevantCalls[3][1] as any).body); expect(firstCallParams).toMatchObject({ id: firstCallParams.id, name: "aiGreet", start_time: expect.any(Number), run_type: "chain", extra: expect.any(Object), serialized: {}, inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "mitochondria", ], }, child_runs: [], trace_id: firstCallParams.id, dotted_order: firstCallParams.dotted_order, tags: [], }); expect(secondCallParams).toMatchObject({ id: secondCallParams.id, name: "RunnableLambda", parent_run_id: firstCallParams.id, start_time: expect.any(Number), serialized: { lc: 1, type: "not_implemented", id: ["langchain_core", "runnables", "RunnableLambda"], }, events: [{ name: "start", time: expect.any(String) }], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, execution_order: 2, child_execution_order: 2, run_type: "chain", extra: expect.any(Object), tags: [], trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), }); expect(thirdCallParams).toMatchObject({ end_time: expect.any(Number), outputs: { output: ["Hello!"] }, events: [ { name: "start", time: expect.any(String) }, { name: "end", time: expect.any(String) }, ], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { 
content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), parent_run_id: firstCallParams.id, }); expect(fourthCallParams).toMatchObject({ end_time: expect.any(Number), inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "mitochondria", ], }, outputs: { outputs: "Hello!mitochondria" }, extra: expect.any(Object), dotted_order: firstCallParams.dotted_order, trace_id: firstCallParams.id, tags: [], }); } ); test.each(["true", "false"])( "streaming runnables nested within traceables with background callbacks %s", async (value) => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = value; const nested = RunnableLambda.from(async function* ( messages: BaseMessage[] ) { const lastMsg = messages.at(-1) as HumanMessage; await new Promise((resolve) => setTimeout(resolve, 300)); for (const letter of (lastMsg.content as string).split("")) { yield letter; } }); const aiGreet = traceable( async function* (msg: BaseMessage, name = "world") { for await (const chunk of await nested.stream([msg])) { yield chunk; } for (const letter of name.split("")) { yield letter; } }, { name: "aiGreet", tracingEnabled: true, client } ); for await (const _ of aiGreet( new HumanMessage({ content: "Hello!" 
}), "mitochondria" )) { // Just consume iterator } const relevantCalls = fetchMock.mock.calls.filter((call: any) => { return call[0].startsWith("https://api.smith.langchain.com/runs"); }); expect(relevantCalls.length).toEqual(4); const firstCallParams = JSON.parse((relevantCalls[0][1] as any).body); const secondCallParams = JSON.parse((relevantCalls[1][1] as any).body); const thirdCallParams = JSON.parse((relevantCalls[2][1] as any).body); const fourthCallParams = JSON.parse((relevantCalls[3][1] as any).body); expect(firstCallParams).toMatchObject({ id: firstCallParams.id, name: "aiGreet", start_time: expect.any(Number), run_type: "chain", extra: expect.any(Object), serialized: {}, inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "mitochondria", ], }, child_runs: [], trace_id: firstCallParams.id, dotted_order: firstCallParams.dotted_order, tags: [], }); expect(secondCallParams).toMatchObject({ id: secondCallParams.id, name: "RunnableLambda", parent_run_id: firstCallParams.id, start_time: expect.any(Number), serialized: { lc: 1, type: "not_implemented", id: ["langchain_core", "runnables", "RunnableLambda"], }, events: [{ name: "start", time: expect.any(String) }], inputs: { input: "", }, execution_order: 2, child_execution_order: 2, run_type: "chain", extra: expect.any(Object), tags: [], trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), }); expect(thirdCallParams).toMatchObject({ end_time: expect.any(Number), outputs: { output: "Hello!" 
}, events: [ { name: "start", time: expect.any(String) }, { name: "end", time: expect.any(String) }, ], inputs: { input: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, ], }, trace_id: firstCallParams.id, dotted_order: expect.stringContaining(`${firstCallParams.dotted_order}.`), parent_run_id: firstCallParams.id, }); expect(fourthCallParams).toMatchObject({ end_time: expect.any(Number), inputs: { args: [ { lc: 1, type: "constructor", id: ["langchain_core", "messages", "HumanMessage"], kwargs: { content: "Hello!", additional_kwargs: {}, response_metadata: {}, }, }, "mitochondria", ], }, outputs: { outputs: [ "H", "e", "l", "l", "o", "!", "m", "i", "t", "o", "c", "h", "o", "n", "d", "r", "i", "a", ], }, extra: expect.any(Object), dotted_order: firstCallParams.dotted_order, trace_id: firstCallParams.id, tags: [], }); } );
0
lc_public_repos/langchainjs/langchain-core/src/tracers
lc_public_repos/langchainjs/langchain-core/src/tracers/tests/tracer.test.ts
import { test, expect, jest } from "@jest/globals"; import * as uuid from "uuid"; import { Serialized } from "../../load/serializable.js"; import { Document } from "../../documents/document.js"; import { Run } from "../base.js"; import { HumanMessage } from "../../messages/index.js"; import { FakeTracer } from "../../utils/testing/index.js"; const _DATE = 1620000000000; Date.now = jest.fn(() => _DATE); const serialized: Serialized = { lc: 1, type: "constructor", id: ["test"], kwargs: {}, }; test("Test LLMRun", async () => { const tracer = new FakeTracer(); const runId = uuid.v4(); await tracer.handleLLMStart(serialized, ["test"], runId); await tracer.handleLLMEnd({ generations: [] }, runId); expect(tracer.runs.length).toBe(1); const run = tracer.runs[0]; const compareRun: Run = { id: runId, name: "test", start_time: _DATE, end_time: _DATE, execution_order: 1, child_execution_order: 1, serialized, events: [ { name: "start", time: "2021-05-03T00:00:00.000Z", }, { name: "end", time: "2021-05-03T00:00:00.000Z", }, ], inputs: { prompts: ["test"] }, run_type: "llm", outputs: { generations: [] }, child_runs: [], extra: {}, tags: [], dotted_order: `20210503T000000000001Z${runId}`, trace_id: runId, }; expect(run).toEqual(compareRun); }); test("Test Chat Model Run", async () => { const tracer = new FakeTracer(); const runId = uuid.v4(); const messages = [[new HumanMessage("Avast")]]; await tracer.handleChatModelStart(serialized, messages, runId); await tracer.handleLLMEnd({ generations: [] }, runId); expect(tracer.runs.length).toBe(1); const run = tracer.runs[0]; expect(run).toMatchInlineSnapshot( { id: expect.any(String), }, ` { "child_execution_order": 1, "child_runs": [], "dotted_order": "20210503T000000000001Z${runId}", "end_time": 1620000000000, "events": [ { "name": "start", "time": "2021-05-03T00:00:00.000Z", }, { "name": "end", "time": "2021-05-03T00:00:00.000Z", }, ], "execution_order": 1, "extra": {}, "id": Any<String>, "inputs": { "messages": [ [ { "id": [ 
"langchain_core", "messages", "HumanMessage", ], "kwargs": { "additional_kwargs": {}, "content": "Avast", "response_metadata": {}, }, "lc": 1, "type": "constructor", }, ], ], }, "name": "test", "outputs": { "generations": [], }, "parent_run_id": undefined, "run_type": "llm", "serialized": { "id": [ "test", ], "kwargs": {}, "lc": 1, "type": "constructor", }, "start_time": 1620000000000, "tags": [], "trace_id": "${runId}", } ` ); }); test("Test LLM Run no start", async () => { const tracer = new FakeTracer(); const runId = uuid.v4(); await expect(tracer.handleLLMEnd({ generations: [] }, runId)).rejects.toThrow( "No LLM run to end" ); }); test("Test Chain Run", async () => { const tracer = new FakeTracer(); const runId = uuid.v4(); const compareRun: Run = { id: runId, name: "test", start_time: _DATE, end_time: _DATE, execution_order: 1, child_execution_order: 1, serialized, events: [ { name: "start", time: "2021-05-03T00:00:00.000Z", }, { name: "end", time: "2021-05-03T00:00:00.000Z", }, ], inputs: { foo: "bar" }, outputs: { foo: "bar" }, run_type: "chain", child_runs: [], extra: {}, tags: [], dotted_order: `20210503T000000000001Z${runId}`, trace_id: runId, }; await tracer.handleChainStart(serialized, { foo: "bar" }, runId); await tracer.handleChainEnd({ foo: "bar" }, runId); expect(tracer.runs.length).toBe(1); const run = tracer.runs[0]; expect(run).toEqual(compareRun); }); test("Test Tool Run", async () => { const tracer = new FakeTracer(); const runId = uuid.v4(); const compareRun: Run = { id: runId, name: "test", start_time: _DATE, end_time: _DATE, execution_order: 1, child_execution_order: 1, serialized, events: [ { name: "start", time: "2021-05-03T00:00:00.000Z", }, { name: "end", time: "2021-05-03T00:00:00.000Z", }, ], inputs: { input: "test" }, outputs: { output: "output" }, run_type: "tool", child_runs: [], extra: {}, tags: [], dotted_order: `20210503T000000000001Z${runId}`, trace_id: runId, }; await tracer.handleToolStart(serialized, "test", runId); await 
tracer.handleToolEnd("output", runId); expect(tracer.runs.length).toBe(1); const run = tracer.runs[0]; expect(run).toEqual(compareRun); }); test("Test Retriever Run", async () => { const tracer = new FakeTracer(); const runId = uuid.v4(); const document = new Document({ pageContent: "test", metadata: { test: "test" }, }); const compareRun: Run = { id: runId, name: "test", start_time: _DATE, end_time: _DATE, execution_order: 1, child_execution_order: 1, serialized, events: [ { name: "start", time: "2021-05-03T00:00:00.000Z", }, { name: "end", time: "2021-05-03T00:00:00.000Z", }, ], inputs: { query: "bar" }, outputs: { documents: [document] }, run_type: "retriever", child_runs: [], extra: {}, tags: [], dotted_order: `20210503T000000000001Z${runId}`, trace_id: runId, }; await tracer.handleRetrieverStart(serialized, "bar", runId); await tracer.handleRetrieverEnd([document], runId); expect(tracer.runs.length).toBe(1); const run = tracer.runs[0]; expect(run).toEqual(compareRun); }); test("Test nested runs", async () => { const tracer = new FakeTracer(); const chainRunId = uuid.v4(); const toolRunId = uuid.v4(); const llmRunId = uuid.v4(); await tracer.handleChainStart(serialized, { foo: "bar" }, chainRunId); await tracer.handleToolStart( { ...serialized, id: ["test_tool"] }, "test", toolRunId, chainRunId ); await tracer.handleLLMStart( { ...serialized, id: ["test_llm_child_run"] }, ["test"], llmRunId, toolRunId ); await tracer.handleLLMEnd({ generations: [[]] }, llmRunId); await tracer.handleToolEnd("output", toolRunId); const llmRunId2 = uuid.v4(); await tracer.handleLLMStart( { ...serialized, id: ["test_llm2"] }, ["test"], llmRunId2, chainRunId ); await tracer.handleLLMEnd({ generations: [[]] }, llmRunId2); await tracer.handleChainEnd({ foo: "bar" }, chainRunId); const compareRun: Run = { child_runs: [ { id: toolRunId, name: "test_tool", parent_run_id: chainRunId, child_runs: [ { id: llmRunId, name: "test_llm_child_run", parent_run_id: toolRunId, end_time: 
1620000000000, execution_order: 3, child_execution_order: 3, inputs: { prompts: ["test"] }, outputs: { generations: [[]], }, serialized: { ...serialized, id: ["test_llm_child_run"] }, events: [ { name: "start", time: "2021-05-03T00:00:00.000Z", }, { name: "end", time: "2021-05-03T00:00:00.000Z", }, ], start_time: 1620000000000, run_type: "llm", child_runs: [], extra: {}, tags: [], dotted_order: `20210503T000000000001Z${chainRunId}.20210503T000000000002Z${toolRunId}.20210503T000000000003Z${llmRunId}`, trace_id: chainRunId, }, ], end_time: 1620000000000, execution_order: 2, child_execution_order: 3, outputs: { output: "output" }, serialized: { ...serialized, id: ["test_tool"] }, events: [ { name: "start", time: "2021-05-03T00:00:00.000Z", }, { name: "end", time: "2021-05-03T00:00:00.000Z", }, ], start_time: 1620000000000, inputs: { input: "test" }, run_type: "tool", extra: {}, tags: [], dotted_order: `20210503T000000000001Z${chainRunId}.20210503T000000000002Z${toolRunId}`, trace_id: chainRunId, }, { id: llmRunId2, name: "test_llm2", parent_run_id: chainRunId, end_time: 1620000000000, execution_order: 4, child_execution_order: 4, inputs: { prompts: ["test"] }, outputs: { generations: [[]], }, serialized: { ...serialized, id: ["test_llm2"] }, events: [ { name: "start", time: "2021-05-03T00:00:00.000Z", }, { name: "end", time: "2021-05-03T00:00:00.000Z", }, ], start_time: 1620000000000, run_type: "llm", child_runs: [], extra: {}, tags: [], dotted_order: `20210503T000000000001Z${chainRunId}.20210503T000000000004Z${llmRunId2}`, trace_id: chainRunId, }, ], id: chainRunId, end_time: 1620000000000, execution_order: 1, child_execution_order: 4, inputs: { foo: "bar", }, outputs: { foo: "bar", }, events: [ { name: "start", time: "2021-05-03T00:00:00.000Z", }, { name: "end", time: "2021-05-03T00:00:00.000Z", }, ], name: "test", serialized, start_time: 1620000000000, run_type: "chain", extra: {}, tags: [], parent_run_id: undefined, dotted_order: 
`20210503T000000000001Z${chainRunId}`, trace_id: chainRunId, }; expect(tracer.runs.length).toBe(1); expect(tracer.runs[0]).toEqual(compareRun); const llmRunId3 = uuid.v4(); await tracer.handleLLMStart(serialized, ["test"], llmRunId3); await tracer.handleLLMEnd({ generations: [[]] }, llmRunId3); expect(tracer.runs.length).toBe(2); });
0
lc_public_repos/langchainjs/langchain-core/src/tracers
lc_public_repos/langchainjs/langchain-core/src/tracers/tests/langchain_tracer.int.test.ts
/* eslint-disable no-process-env */
import * as uuid from "uuid";
import { test, expect } from "@jest/globals";
import { LangChainTracer, Run } from "../tracer_langchain.js";
import { Serialized } from "../../load/serializable.js";
import { HumanMessage } from "../../messages/index.js";

// Minimal serialized representation used as a stand-in for a real
// serialized runnable in all the tracer handler calls below.
const serialized: Serialized = {
  lc: 1,
  type: "constructor",
  id: ["test"],
  kwargs: {},
};

// Integration smoke test against the live LangSmith API: drives a full
// nested run lifecycle (chain -> tool -> LLM, plus a chat-model run and
// top-level LLM runs) and only checks that no tracer callback throws.
test("LangChain V2 tracer does not throw errors for its methods", async () => {
  const tracer = new LangChainTracer({
    projectName: `JS Int Test - ${uuid.v4()}`,
  });
  const chainRunId = uuid.v4();
  const toolRunId = uuid.v4();
  const llmRunId = uuid.v4();
  const chatRunId = uuid.v4();
  await tracer.handleChainStart(serialized, { foo: "bar" }, chainRunId);
  await tracer.handleToolStart(serialized, "test", toolRunId, chainRunId);
  await tracer.handleLLMStart(serialized, ["test"], llmRunId, toolRunId);
  await tracer.handleLLMEnd({ generations: [[]] }, llmRunId);
  await tracer.handleChatModelStart(
    serialized,
    [[new HumanMessage("I'm a human.")]],
    chatRunId
  );
  await tracer.handleLLMEnd({ generations: [[]] }, chatRunId);
  await tracer.handleToolEnd("output", toolRunId);
  const llmRunId2 = uuid.v4();
  await tracer.handleLLMStart(serialized, ["test"], llmRunId2, chainRunId);
  await tracer.handleLLMEnd({ generations: [[]] }, llmRunId2);
  await tracer.handleChainEnd({ foo: "bar" }, chainRunId);
  const llmRunId3 = uuid.v4();
  await tracer.handleLLMStart(serialized, ["test"], llmRunId3);
  await tracer.handleLLMEnd({ generations: [[]] }, llmRunId3);
});

// Test double that records the create/update payloads keyed by run id
// instead of sending them to the LangSmith API, so the test can inspect
// what would have been persisted.
class FakeTracer extends LangChainTracer {
  createOperations: { [id: string]: Run } = {};

  updateOperations: { [id: string]: Run } = {};

  async onRunCreate(run: Run): Promise<void> {
    this.createOperations[run.id] = run;
  }

  async onRunUpdate(run: Run): Promise<void> {
    this.updateOperations[run.id] = run;
  }
}

test("LangChain V2 tracer creates and updates runs with trace_id and dotted_order", async () => {
  const tracer = new FakeTracer({
    projectName: `JS Int Test - ${uuid.v4()}`,
  });
  const chainRunId = uuid.v4();
  const llmRunId = uuid.v4();
  await tracer.handleChainStart(serialized, { foo: "bar" }, chainRunId);
  await tracer.handleLLMStart(serialized, ["test"], llmRunId, chainRunId);
  await tracer.handleLLMEnd({ generations: [[]] }, llmRunId);
  await tracer.handleChainEnd({ foo: "bar" }, chainRunId);
  // The root run is its own trace: trace_id should equal the chain run id.
  expect(tracer.createOperations[chainRunId].trace_id).toBeDefined();
  expect(tracer.createOperations[chainRunId].trace_id).toEqual(chainRunId);
  expect(tracer.createOperations[chainRunId].dotted_order).toBeDefined();
  // Child runs inherit the root's trace_id; dotted_order encodes the hierarchy.
  expect(tracer.updateOperations[llmRunId].trace_id).toBeDefined();
  expect(tracer.updateOperations[llmRunId].trace_id).toEqual(chainRunId);
  expect(tracer.updateOperations[llmRunId].dotted_order).toBeDefined();
});
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/structured_query/index.ts
// Barrel file for the structured-query module: re-exports the translator
// base classes, the functional (in-memory) translator, the intermediate
// representation (IR) types, and shared value-handling utilities.
export * from "./base.js";
export * from "./functional.js";
export * from "./ir.js";
export * from "./utils.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/structured_query/base.ts
import { VectorStore } from "../vectorstores.js";
import {
  Comparator,
  Comparators,
  Comparison,
  Operation,
  Operator,
  Operators,
  StructuredQuery,
  Visitor,
  VisitorComparisonResult,
  VisitorOperationResult,
  VisitorResult,
  VisitorStructuredQueryResult,
} from "./ir.js";
import { isFilterEmpty, castValue } from "./utils.js";

/**
 * Options object for the BasicTranslator class. Specifies the allowed
 * operators and comparators.
 */
export type TranslatorOpts = {
  allowedOperators: Operator[];
  allowedComparators: Comparator[];
};

/**
 * Abstract class that provides a blueprint for creating vector-store-specific
 * translator classes. Defines two abstract methods: formatFunction and
 * mergeFilters.
 */
export abstract class BaseTranslator<
  T extends VectorStore = VectorStore
> extends Visitor<T> {
  /**
   * Formats a given function (either an operator or a comparator) into a
   * string.
   * @param func The function to format.
   * @returns Formatted string representation of the function.
   */
  abstract formatFunction(func: Operator | Comparator): string;

  /**
   * Merges two filters into one, using a specified merge type.
   * @param defaultFilter The default filter.
   * @param generatedFilter The generated filter.
   * @param mergeType The type of merge to perform. Can be 'and', 'or', or 'replace'.
   * @param forceDefaultFilter If true, the default filter will be used even if the generated filter is not empty.
   * @returns The merged filter, or undefined if both filters are empty.
   */
  abstract mergeFilters(
    defaultFilter: this["VisitStructuredQueryOutput"]["filter"] | undefined,
    generatedFilter: this["VisitStructuredQueryOutput"]["filter"] | undefined,
    mergeType?: "and" | "or" | "replace",
    forceDefaultFilter?: boolean
  ): this["VisitStructuredQueryOutput"]["filter"] | undefined;
}

/**
 * Class that extends the BaseTranslator class and provides concrete
 * implementations for the abstract methods, targeting stores that use a
 * Mongo-style `$operator` filter syntax. Also declares three types:
 * VisitOperationOutput, VisitComparisonOutput, and
 * VisitStructuredQueryOutput, which are used as the return types for the
 * visitOperation, visitComparison, and visitStructuredQuery methods
 * respectively.
 */
export class BasicTranslator<
  T extends VectorStore = VectorStore
> extends BaseTranslator<T> {
  declare VisitOperationOutput: VisitorOperationResult;

  declare VisitComparisonOutput: VisitorComparisonResult;

  declare VisitStructuredQueryOutput: VisitorStructuredQueryResult;

  allowedOperators: Operator[];

  allowedComparators: Comparator[];

  constructor(opts?: TranslatorOpts) {
    super();
    // Default allow-lists cover the common boolean operators and the six
    // standard comparators; callers can narrow them via `opts`.
    this.allowedOperators = opts?.allowedOperators ?? [
      Operators.and,
      Operators.or,
    ];
    this.allowedComparators = opts?.allowedComparators ?? [
      Comparators.eq,
      Comparators.ne,
      Comparators.gt,
      Comparators.gte,
      Comparators.lt,
      Comparators.lte,
    ];
  }

  /**
   * Validates `func` against the relevant allow-list and renders it in
   * Mongo-style `$func` form.
   * @param func The operator or comparator to format.
   * @returns The `$`-prefixed function name.
   * @throws If `func` is unknown or not in the corresponding allow-list.
   */
  formatFunction(func: Operator | Comparator): string {
    if (func in Comparators) {
      if (
        this.allowedComparators.length > 0 &&
        !this.allowedComparators.includes(func as Comparator)
      ) {
        // Fixed message: this branch rejects comparators, so list the
        // allowed *comparators* (previous text said "operators").
        throw new Error(
          `Comparator ${func} not allowed. Allowed comparators: ${this.allowedComparators.join(
            ", "
          )}`
        );
      }
    } else if (func in Operators) {
      if (
        this.allowedOperators.length > 0 &&
        !this.allowedOperators.includes(func as Operator)
      ) {
        throw new Error(
          `Operator ${func} not allowed. Allowed operators: ${this.allowedOperators.join(
            ", "
          )}`
        );
      }
    } else {
      throw new Error("Unknown comparator or operator");
    }
    return `$${func}`;
  }

  /**
   * Visits an operation and returns a result.
   * @param operation The operation to visit.
   * @returns The result of visiting the operation.
   */
  visitOperation(operation: Operation): this["VisitOperationOutput"] {
    // Recursively translate each argument; args may themselves be
    // operations or comparisons.
    const args = operation.args?.map((arg) =>
      arg.accept(this)
    ) as VisitorResult[];
    return {
      [this.formatFunction(operation.operator)]: args,
    };
  }

  /**
   * Visits a comparison and returns a result.
   * @param comparison The comparison to visit.
   * @returns The result of visiting the comparison.
   */
  visitComparison(comparison: Comparison): this["VisitComparisonOutput"] {
    return {
      [comparison.attribute]: {
        // castValue normalizes numeric strings back to real numbers for
        // stores that cannot compare numbers given as strings.
        [this.formatFunction(comparison.comparator)]: castValue(
          comparison.value
        ),
      },
    };
  }

  /**
   * Visits a structured query and returns a result.
   * @param query The structured query to visit.
   * @returns The result of visiting the structured query.
   */
  visitStructuredQuery(
    query: StructuredQuery
  ): this["VisitStructuredQueryOutput"] {
    let nextArg = {};
    if (query.filter) {
      nextArg = {
        filter: query.filter.accept(this),
      };
    }
    return nextArg;
  }

  /**
   * Merges a caller-supplied default filter with the LLM-generated filter.
   * @param defaultFilter The default filter.
   * @param generatedFilter The generated filter.
   * @param mergeType 'and', 'or', or 'replace' (default 'and').
   * @param forceDefaultFilter If true, fall back to the default filter even
   *   when the generated filter is empty and mergeType is 'and'.
   * @returns The merged filter, or undefined if nothing applies.
   */
  mergeFilters(
    defaultFilter: VisitorStructuredQueryResult["filter"] | undefined,
    generatedFilter: VisitorStructuredQueryResult["filter"] | undefined,
    mergeType = "and",
    forceDefaultFilter = false
  ): VisitorStructuredQueryResult["filter"] | undefined {
    if (isFilterEmpty(defaultFilter) && isFilterEmpty(generatedFilter)) {
      return undefined;
    }
    if (isFilterEmpty(defaultFilter) || mergeType === "replace") {
      if (isFilterEmpty(generatedFilter)) {
        return undefined;
      }
      return generatedFilter;
    }
    if (isFilterEmpty(generatedFilter)) {
      if (forceDefaultFilter) {
        return defaultFilter;
      }
      // With an 'and' merge, an empty generated side yields no filter at
      // all (an AND with nothing is deliberately treated as no match).
      if (mergeType === "and") {
        return undefined;
      }
      return defaultFilter;
    }
    if (mergeType === "and") {
      return {
        $and: [defaultFilter, generatedFilter],
      };
    } else if (mergeType === "or") {
      return {
        $or: [defaultFilter, generatedFilter],
      };
    } else {
      throw new Error("Unknown merge type");
    }
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/structured_query/ir.ts
import { VectorStore } from "../vectorstores.js";

/** Logical AND operator. */
export type AND = "and";

/** Logical OR operator. */
export type OR = "or";

/** Logical NOT operator. */
export type NOT = "not";

/** Any logical operator: AND, OR, or NOT. */
export type Operator = AND | OR | NOT;

/** Equality comparison operator. */
export type EQ = "eq";

/** Inequality comparison operator. */
export type NE = "ne";

/** Less-than comparison operator. */
export type LT = "lt";

/** Greater-than comparison operator. */
export type GT = "gt";

/** Less-than-or-equal comparison operator. */
export type LTE = "lte";

/** Greater-than-or-equal comparison operator. */
export type GTE = "gte";

/** Any comparison operator: EQ, NE, LT, GT, LTE, or GTE. */
export type Comparator = EQ | NE | LT | GT | LTE | GTE;

/** Runtime lookup table mapping operator names to their literal values. */
export const Operators: { [key: string]: Operator } = {
  and: "and",
  or: "or",
  not: "not",
};

/** Runtime lookup table mapping comparator names to their literal values. */
export const Comparators: { [key: string]: Comparator } = {
  eq: "eq",
  ne: "ne",
  lt: "lt",
  gt: "gt",
  lte: "lte",
  gte: "gte",
};

/** Result of visiting either an operation or a comparison expression. */
export type VisitorResult = VisitorOperationResult | VisitorComparisonResult;

/** Result of visiting an operation expression. */
export type VisitorOperationResult = {
  [operator: string]: VisitorResult[];
};

/** Result of visiting a comparison expression. */
export type VisitorComparisonResult = {
  [attr: string]: {
    [comparator: string]: string | number;
  };
};

/** Result of visiting a structured query expression. */
export type VisitorStructuredQueryResult = {
  filter?: VisitorComparisonResult | VisitorOperationResult;
};

/**
 * Abstract visitor over IR expressions. Subclasses must implement
 * visitOperation, visitComparison, and visitStructuredQuery, and declare
 * which operators/comparators they accept.
 */
export abstract class Visitor<T extends VectorStore = VectorStore> {
  declare VisitOperationOutput: object;

  declare VisitComparisonOutput: object;

  declare VisitStructuredQueryOutput: { filter?: T["FilterType"] };

  abstract allowedOperators: Operator[];

  abstract allowedComparators: Comparator[];

  abstract visitOperation(operation: Operation): this["VisitOperationOutput"];

  abstract visitComparison(
    comparison: Comparison
  ): this["VisitComparisonOutput"];

  abstract visitStructuredQuery(
    structuredQuery: StructuredQuery
  ): this["VisitStructuredQueryOutput"];
}

/**
 * Base class for every IR node. Each concrete node names itself via
 * `exprName`, and `accept` dispatches to the matching visitor method.
 */
export abstract class Expression {
  abstract exprName: "Operation" | "Comparison" | "StructuredQuery";

  /** Double-dispatch entry point: routes this node to the visitor method
   * corresponding to its `exprName` tag. */
  accept(visitor: Visitor) {
    switch (this.exprName) {
      case "Operation":
        return visitor.visitOperation(this as unknown as Operation);
      case "Comparison":
        return visitor.visitComparison(this as unknown as Comparison);
      case "StructuredQuery":
        return visitor.visitStructuredQuery(this as unknown as StructuredQuery);
      default:
        throw new Error("Unknown Expression type");
    }
  }
}

/**
 * Marker base class for nodes that can appear inside a filter
 * (comparisons and boolean operations over them).
 */
export abstract class FilterDirective extends Expression {}

/**
 * IR node comparing a single attribute against a value with the given
 * comparator (e.g. `rating gte 8.5`).
 */
export class Comparison extends FilterDirective {
  exprName = "Comparison" as const;

  constructor(
    public comparator: Comparator,
    public attribute: string,
    public value: string | number
  ) {
    super();
  }
}

/**
 * IR node combining child filter directives with a logical operator
 * (and/or/not).
 */
export class Operation extends FilterDirective {
  exprName = "Operation" as const;

  constructor(public operator: Operator, public args?: FilterDirective[]) {
    super();
  }
}

/**
 * IR node for a full structured query: a free-text query string plus an
 * optional filter tree.
 */
export class StructuredQuery extends Expression {
  exprName = "StructuredQuery" as const;

  constructor(public query: string, public filter?: FilterDirective) {
    super();
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/structured_query/utils.ts
/** * Checks if the provided argument is an object and not an array. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export function isObject(obj: any): obj is object { return obj && typeof obj === "object" && !Array.isArray(obj); } /** * Checks if a provided filter is empty. The filter can be a function, an * object, a string, or undefined. */ export function isFilterEmpty( // eslint-disable-next-line @typescript-eslint/no-explicit-any filter: ((q: any) => any) | object | string | undefined ): filter is undefined { if (!filter) return true; // for Milvus if (typeof filter === "string" && filter.length > 0) { return false; } if (typeof filter === "function") { return false; } return isObject(filter) && Object.keys(filter).length === 0; } /** * Checks if the provided value is an integer. */ export function isInt(value: unknown): boolean { if (typeof value === "number") { return value % 1 === 0; } else if (typeof value === "string") { const numberValue = parseInt(value, 10); return ( !Number.isNaN(numberValue) && numberValue % 1 === 0 && numberValue.toString() === value ); } return false; } /** * Checks if the provided value is a floating-point number. */ export function isFloat(value: unknown): boolean { if (typeof value === "number") { return value % 1 !== 0; } else if (typeof value === "string") { const numberValue = parseFloat(value); return ( !Number.isNaN(numberValue) && numberValue % 1 !== 0 && numberValue.toString() === value ); } return false; } /** * Checks if the provided value is a string that cannot be parsed into a * number. */ export function isString(value: unknown): boolean { return ( typeof value === "string" && (Number.isNaN(parseFloat(value)) || parseFloat(value).toString() !== value) ); } /** * Casts a value that might be string or number to actual string or number. 
* Since LLM might return back an integer/float as a string, we need to cast * it back to a number, as many vector databases can't handle number as string * values as a comparator. */ export function castValue(input: unknown): string | number { let value; if (isString(input)) { value = input as string; } else if (isInt(input)) { value = parseInt(input as string, 10); } else if (isFloat(input)) { value = parseFloat(input as string); } else { throw new Error("Unsupported value type"); } return value; }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/structured_query/functional.ts
import { Document } from "../documents/document.js";
import {
  Comparator,
  Comparators,
  Comparison,
  Operation,
  Operator,
  Operators,
  StructuredQuery,
} from "./ir.js";
import { BaseTranslator } from "./base.js";
import { castValue, isFilterEmpty } from "./utils.js";

/**
 * A type alias for an object that maps comparison operators to string or
 * number values. This is used in the comparison functions to determine
 * the result of a comparison operation.
 */
type ValueType = {
  eq: string | number;
  ne: string | number;
  lt: string | number;
  lte: string | number;
  gt: string | number;
  gte: string | number;
};

/**
 * A type alias for a function that takes a `Document` as an argument and
 * returns a boolean. This function is used as a filter for documents.
 */
export type FunctionFilter = (document: Document) => boolean;

/**
 * A class that extends `BaseTranslator` to translate structured queries
 * into functional filters.
 * @example
 * ```typescript
 * const functionalTranslator = new FunctionalTranslator();
 * const relevantDocuments = await functionalTranslator.getRelevantDocuments(
 *   "Which movies are rated higher than 8.5?",
 * );
 * ```
 */
export class FunctionalTranslator extends BaseTranslator {
  declare VisitOperationOutput: FunctionFilter;

  declare VisitComparisonOutput: FunctionFilter;

  declare VisitStructuredQueryOutput:
    | { filter: FunctionFilter }
    | { [k: string]: never };

  allowedOperators: Operator[] = [Operators.and, Operators.or];

  allowedComparators: Comparator[] = [
    Comparators.eq,
    Comparators.ne,
    Comparators.gt,
    Comparators.gte,
    Comparators.lt,
    Comparators.lte,
  ];

  formatFunction(): string {
    throw new Error("Not implemented");
  }

  /**
   * Returns a function that performs a comparison based on the provided
   * comparator.
   * @param comparator The comparator to base the comparison function on.
   * @returns A function that takes two arguments and returns a boolean based on the comparison.
   * @throws Error for an unrecognized comparator.
   */
  getComparatorFunction<C extends Comparator>(
    comparator: Comparator
  ): (a: string | number, b: ValueType[C]) => boolean {
    switch (comparator) {
      case Comparators.eq: {
        return (a: string | number, b: ValueType[C]) => a === b;
      }
      case Comparators.ne: {
        return (a: string | number, b: ValueType[C]) => a !== b;
      }
      case Comparators.gt: {
        return (a: string | number, b: ValueType[C]) => a > b;
      }
      case Comparators.gte: {
        return (a: string | number, b: ValueType[C]) => a >= b;
      }
      case Comparators.lt: {
        return (a: string | number, b: ValueType[C]) => a < b;
      }
      case Comparators.lte: {
        return (a: string | number, b: ValueType[C]) => a <= b;
      }
      default: {
        throw new Error("Unknown comparator");
      }
    }
  }

  /**
   * Returns a function that performs an operation based on the provided
   * operator.
   * @param operator The operator to base the operation function on.
   * @returns A function that takes two boolean arguments and returns a boolean based on the operation.
   * @throws Error for an unrecognized operator.
   */
  getOperatorFunction(operator: Operator): (a: boolean, b: boolean) => boolean {
    switch (operator) {
      case Operators.and: {
        return (a, b) => a && b;
      }
      case Operators.or: {
        return (a, b) => a || b;
      }
      default: {
        throw new Error("Unknown operator");
      }
    }
  }

  /**
   * Visits the operation part of a structured query and translates it into
   * a functional filter.
   * @param operation The operation part of a structured query.
   * @returns A function that takes a `Document` as an argument and returns a boolean based on the operation.
   * @throws Error when the operator is not allowed or a sub-clause does not
   *   translate to a filter function.
   */
  visitOperation(operation: Operation): this["VisitOperationOutput"] {
    const { operator, args } = operation;
    if (this.allowedOperators.includes(operator)) {
      const operatorFunction = this.getOperatorFunction(operator);
      return (document: Document) => {
        // No sub-clauses: vacuously true (preserves prior behavior for both
        // missing and empty argument lists).
        if (!args || args.length === 0) {
          return true;
        }
        const subFilters = args.map((arg) => {
          const filter = arg.accept(this);
          if (typeof filter !== "function") {
            throw new Error("Filter is not a function");
          }
          return filter as FunctionFilter;
        });
        // BUG FIX: the reduction was previously seeded with `true` for every
        // operator, which made `or` operations trivially true
        // (true || x === true). Seed with the operator's identity element
        // instead: `true` for `and`, `false` for `or`.
        const seed = operator === Operators.or ? false : true;
        return subFilters.reduce(
          (acc, filter) => operatorFunction(acc, filter(document)),
          seed
        );
      };
    } else {
      throw new Error("Operator not allowed");
    }
  }

  /**
   * Visits the comparison part of a structured query and translates it into
   * a functional filter.
   * @param comparison The comparison part of a structured query.
   * @returns A function that takes a `Document` as an argument and returns a boolean based on the comparison.
   * @throws Error when the comparator is not allowed.
   */
  visitComparison(comparison: Comparison): this["VisitComparisonOutput"] {
    const { comparator, attribute, value } = comparison;
    // Comparators for which a missing metadata attribute counts as a match.
    const undefinedTrue = [Comparators.ne];
    if (this.allowedComparators.includes(comparator)) {
      const comparatorFunction = this.getComparatorFunction(comparator);
      return (document: Document) => {
        const documentValue = document.metadata[attribute];
        if (documentValue === undefined) {
          if (undefinedTrue.includes(comparator)) {
            return true;
          }
          return false;
        }
        // castValue normalizes numeric strings produced by the LLM.
        return comparatorFunction(documentValue, castValue(value));
      };
    } else {
      throw new Error("Comparator not allowed");
    }
  }

  /**
   * Visits a structured query and translates it into a functional filter.
   * @param query The structured query to translate.
   * @returns An object containing a `filter` property, which is a function that takes a `Document` as an argument and returns a boolean based on the structured query.
   * @throws Error when the translated filter is not a function.
   */
  visitStructuredQuery(
    query: StructuredQuery
  ): this["VisitStructuredQueryOutput"] {
    if (!query.filter) {
      return {};
    }
    const filterFunction = query.filter?.accept(this);
    if (typeof filterFunction !== "function") {
      throw new Error("Structured query filter is not a function");
    }
    return { filter: filterFunction as FunctionFilter };
  }

  /**
   * Merges two filters into one, based on the specified merge type.
   * @param defaultFilter The default filter function.
   * @param generatedFilter The generated filter function.
   * @param mergeType The type of merge to perform. Can be 'and', 'or', or 'replace'. Default is 'and'.
   * @returns A function that takes a `Document` as an argument and returns a boolean based on the merged filters, or `undefined` if both filters are empty.
   * @throws Error for an unknown merge type.
   */
  mergeFilters(
    defaultFilter: FunctionFilter,
    generatedFilter: FunctionFilter,
    mergeType = "and"
  ): FunctionFilter | undefined {
    if (isFilterEmpty(defaultFilter) && isFilterEmpty(generatedFilter)) {
      return undefined;
    }
    if (isFilterEmpty(defaultFilter) || mergeType === "replace") {
      if (isFilterEmpty(generatedFilter)) {
        return undefined;
      }
      return generatedFilter;
    }
    if (isFilterEmpty(generatedFilter)) {
      if (mergeType === "and") {
        return undefined;
      }
      return defaultFilter;
    }
    if (mergeType === "and") {
      return (document: Document) =>
        defaultFilter(document) && generatedFilter(document);
    } else if (mergeType === "or") {
      return (document: Document) =>
        defaultFilter(document) || generatedFilter(document);
    } else {
      throw new Error("Unknown merge type");
    }
  }
}
0
lc_public_repos/langchainjs/langchain-core/src/structured_query
lc_public_repos/langchainjs/langchain-core/src/structured_query/tests/utils.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";

import { castValue, isFloat, isInt, isString } from "../utils.js";

// Exercises the castValue helper across the five input families it must
// distinguish: true strings, integer-like strings with junk, float-like
// strings with junk, real integers, and real floats.
test("Casting values correctly", () => {
  // Values that must remain plain strings after casting.
  const stringString = ["string", "test", "this is a string", " ", "\n\n\n\n\n\n", `asdf zxcv`, ];
  // Mixed alphanumeric values: parseInt would partially succeed, but the
  // round-trip check in isInt rejects them, so they stay strings.
  const intString = ["1a", "2b", "3c", "a4", `123 asdf`, ];
  // Float-prefixed junk values stay strings for the same reason.
  const floatString = ["1.1a", "2.2b", "3.3c", "c4.4"];
  // Integer inputs (numeric strings or numbers) should cast to integers.
  const intInt = ["1", 2, 3];
  // Float inputs (numeric strings or numbers) should cast to floats.
  const floatFloat = ["1.1", 2.2, 3.3];
  stringString.map(castValue).forEach((value) => {
    expect(typeof value).toBe("string");
    expect(isString(value)).toBe(true);
  });
  intString.map(castValue).forEach((value) => {
    expect(typeof value).toBe("string");
    expect(isString(value)).toBe(true);
    expect(isInt(value)).toBe(false);
  });
  floatString.map(castValue).forEach((value) => {
    expect(typeof value).toBe("string");
    expect(isString(value)).toBe(true);
    expect(isFloat(value)).toBe(false);
  });
  intInt.map(castValue).forEach((value) => {
    expect(typeof value).toBe("number");
    expect(isInt(value)).toBe(true);
  });
  floatFloat.map(castValue).forEach((value) => {
    expect(typeof value).toBe("number");
    expect(isFloat(value)).toBe(true);
  });
});
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/errors/index.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable no-param-reassign */

/**
 * Error codes that have a dedicated page in the LangChain.js
 * troubleshooting documentation.
 */
export type LangChainErrorCodes =
  | "INVALID_PROMPT_INPUT"
  | "INVALID_TOOL_RESULTS"
  | "MESSAGE_COERCION_FAILURE"
  | "MODEL_AUTHENTICATION"
  | "MODEL_NOT_FOUND"
  | "MODEL_RATE_LIMIT"
  | "OUTPUT_PARSING_FAILURE";

/**
 * Tags an error object with a LangChain error code and appends the matching
 * troubleshooting URL to its message.
 *
 * Mutates the given error in place and returns the same object, so it can
 * be used inline in a `throw` expression.
 *
 * @param error The error to annotate (any error-like object with a `message`).
 * @param lc_error_code The LangChain error code to attach.
 * @returns The same error object, annotated.
 */
export function addLangChainErrorFields(
  error: any,
  lc_error_code: LangChainErrorCodes
) {
  const troubleshootingUrl = `https://js.langchain.com/docs/troubleshooting/errors/${lc_error_code}/`;
  error.lc_error_code = lc_error_code;
  error.message = `${error.message}\n\nTroubleshooting URL: ${troubleshootingUrl}\n`;
  return error;
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/example_selectors/conditional.ts
import type { BaseChatModel } from "../language_models/chat_models.js";
import type { BasePromptTemplate } from "../prompts/base.js";
import type { BaseLanguageModelInterface } from "../language_models/base.js";
import type { BaseLLM } from "../language_models/llms.js";
import type { PartialValues } from "../utils/types/index.js";

/** Options accepted by {@link BasePromptSelector.getPromptAsync}. */
export type BaseGetPromptAsyncOptions = {
  partialVariables?: PartialValues;
};

/**
 * Abstract class that defines the interface for selecting a prompt for a
 * given language model.
 */
export abstract class BasePromptSelector {
  /**
   * Abstract method that must be implemented by any class that extends
   * `BasePromptSelector`. It takes a language model as an argument and
   * returns a prompt template.
   * @param llm The language model for which to get a prompt.
   * @returns A prompt template.
   */
  abstract getPrompt(llm: BaseLanguageModelInterface): BasePromptTemplate;

  /**
   * Asynchronous version of `getPrompt` that also accepts an options object
   * for partial variables.
   * @param llm The language model for which to get a prompt.
   * @param options Optional object for partial variables.
   * @returns A Promise that resolves to a prompt template.
   */
  async getPromptAsync(
    llm: BaseLanguageModelInterface,
    options?: BaseGetPromptAsyncOptions
  ): Promise<BasePromptTemplate> {
    const prompt = this.getPrompt(llm);
    // Partial with an empty object is a no-op, so options are truly optional.
    return prompt.partial(options?.partialVariables ?? {});
  }
}

/**
 * Concrete implementation of `BasePromptSelector` that selects a prompt
 * based on a set of conditions. It has a default prompt that it returns
 * if none of the conditions are met.
 */
export class ConditionalPromptSelector extends BasePromptSelector {
  // Prompt returned when no conditional matches.
  defaultPrompt: BasePromptTemplate;

  // Ordered list of [predicate, prompt] pairs; checked in order.
  conditionals: Array<
    [
      condition: (llm: BaseLanguageModelInterface) => boolean,
      prompt: BasePromptTemplate
    ]
  >;

  constructor(
    default_prompt: BasePromptTemplate,
    conditionals: Array<
      [
        condition: (llm: BaseLanguageModelInterface) => boolean,
        prompt: BasePromptTemplate
      ]
    > = []
  ) {
    super();
    this.defaultPrompt = default_prompt;
    this.conditionals = conditionals;
  }

  /**
   * Method that selects a prompt based on a set of conditions. If none of
   * the conditions are met, it returns the default prompt.
   * @param llm The language model for which to get a prompt.
   * @returns A prompt template.
   */
  getPrompt(llm: BaseLanguageModelInterface): BasePromptTemplate {
    // First matching condition wins.
    for (const [condition, prompt] of this.conditionals) {
      if (condition(llm)) {
        return prompt;
      }
    }
    return this.defaultPrompt;
  }
}

/**
 * Type guard function that checks if a given language model is of type
 * `BaseLLM`.
 */
export function isLLM(llm: BaseLanguageModelInterface): llm is BaseLLM {
  return llm._modelType() === "base_llm";
}

/**
 * Type guard function that checks if a given language model is of type
 * `BaseChatModel`.
 */
export function isChatModel(
  llm: BaseLanguageModelInterface
): llm is BaseChatModel {
  return llm._modelType() === "base_chat_model";
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/example_selectors/length_based.ts
import { Example } from "../prompts/base.js";
import { BaseExampleSelector } from "./base.js";
import { PromptTemplate } from "../prompts/prompt.js";

/**
 * Calculates the length of a text based on the number of words and lines.
 */
function getLengthBased(text: string): number {
  // Splits on newlines and single spaces; "length" is the token count.
  return text.split(/\n| /).length;
}

/**
 * Interface for the input parameters of the LengthBasedExampleSelector
 * class.
 */
export interface LengthBasedExampleSelectorInput {
  // Template used to render each example to text for measuring.
  examplePrompt: PromptTemplate;
  // Total length budget for selected examples (default 2048).
  maxLength?: number;
  // Custom length measure; defaults to word/line counting.
  getTextLength?: (text: string) => number;
}

/**
 * A specialized example selector that selects examples based on their
 * length, ensuring that the total length of the selected examples does
 * not exceed a specified maximum length.
 * @example
 * ```typescript
 * const exampleSelector = new LengthBasedExampleSelector(
 *   [
 *     { input: "happy", output: "sad" },
 *     { input: "tall", output: "short" },
 *     { input: "energetic", output: "lethargic" },
 *     { input: "sunny", output: "gloomy" },
 *     { input: "windy", output: "calm" },
 *   ],
 *   {
 *     examplePrompt: new PromptTemplate({
 *       inputVariables: ["input", "output"],
 *       template: "Input: {input}\nOutput: {output}",
 *     }),
 *     maxLength: 25,
 *   },
 * );
 * const dynamicPrompt = new FewShotPromptTemplate({
 *   exampleSelector,
 *   examplePrompt: new PromptTemplate({
 *     inputVariables: ["input", "output"],
 *     template: "Input: {input}\nOutput: {output}",
 *   }),
 *   prefix: "Give the antonym of every input",
 *   suffix: "Input: {adjective}\nOutput:",
 *   inputVariables: ["adjective"],
 * });
 * console.log(dynamicPrompt.format({ adjective: "big" }));
 * console.log(
 *   dynamicPrompt.format({
 *     adjective:
 *       "big and huge and massive and large and gigantic and tall and much much much much much bigger than everything else",
 *   }),
 * );
 * ```
 */
export class LengthBasedExampleSelector extends BaseExampleSelector {
  protected examples: Example[] = [];

  examplePrompt!: PromptTemplate;

  getTextLength: (text: string) => number = getLengthBased;

  maxLength = 2048;

  // Cached length of each example, parallel to `examples`.
  exampleTextLengths: number[] = [];

  constructor(data: LengthBasedExampleSelectorInput) {
    super(data);
    this.examplePrompt = data.examplePrompt;
    this.maxLength = data.maxLength ?? 2048;
    this.getTextLength = data.getTextLength ?? getLengthBased;
  }

  /**
   * Adds an example to the list of examples and calculates its length.
   * @param example The example to be added.
   * @returns Promise that resolves when the example has been added and its length calculated.
   */
  async addExample(example: Example): Promise<void> {
    this.examples.push(example);
    // Render the example once so its length is computed eagerly and cached.
    const stringExample = await this.examplePrompt.format(example);
    this.exampleTextLengths.push(this.getTextLength(stringExample));
  }

  /**
   * Calculates the lengths of the examples.
   * @param v Array of lengths of the examples.
   * @param values Instance of LengthBasedExampleSelector.
   * @returns Promise that resolves with an array of lengths of the examples.
   */
  async calculateExampleTextLengths(
    v: number[],
    values: LengthBasedExampleSelector
  ): Promise<number[]> {
    // Lengths already computed: reuse them.
    if (v.length > 0) {
      return v;
    }
    const { examples, examplePrompt } = values;
    const stringExamples = await Promise.all(
      examples.map((eg: Example) => examplePrompt.format(eg))
    );
    return stringExamples.map((eg: string) => this.getTextLength(eg));
  }

  /**
   * Selects examples until the total length of the selected examples
   * reaches the maxLength.
   * @param inputVariables The input variables for the examples.
   * @returns Promise that resolves with an array of selected examples.
   */
  async selectExamples(inputVariables: Example): Promise<Example[]> {
    // The input itself consumes part of the budget.
    const inputs = Object.values(inputVariables).join(" ");
    let remainingLength = this.maxLength - this.getTextLength(inputs);
    let i = 0;
    const examples: Example[] = [];

    // Greedily take examples in insertion order until the budget runs out.
    while (remainingLength > 0 && i < this.examples.length) {
      const newLength = remainingLength - this.exampleTextLengths[i];
      if (newLength < 0) {
        break;
      } else {
        examples.push(this.examples[i]);
        remainingLength = newLength;
      }
      i += 1;
    }

    return examples;
  }

  /**
   * Creates a new instance of LengthBasedExampleSelector and adds a list of
   * examples to it.
   * @param examples Array of examples to be added.
   * @param args Input parameters for the LengthBasedExampleSelector.
   * @returns Promise that resolves with a new instance of LengthBasedExampleSelector with the examples added.
   */
  static async fromExamples(
    examples: Example[],
    args: LengthBasedExampleSelectorInput
  ) {
    const selector = new LengthBasedExampleSelector(args);
    await Promise.all(examples.map((eg) => selector.addExample(eg)));
    return selector;
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/example_selectors/index.ts
export * from "./base.js"; export * from "./conditional.js"; export * from "./length_based.js"; export * from "./semantic_similarity.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/example_selectors/base.ts
import { Serializable } from "../load/serializable.js";
import type { Example } from "../prompts/base.js";

/**
 * Base class for example selectors.
 *
 * Subclasses decide which few-shot examples to include in a prompt by
 * implementing `addExample` and `selectExamples`.
 */
export abstract class BaseExampleSelector extends Serializable {
  // Namespace used for LangChain serialization of subclasses.
  lc_namespace = ["langchain_core", "example_selectors", "base"];

  /**
   * Adds an example to the example selector.
   * @param example The example to add to the example selector.
   * @returns A Promise that resolves to void or a string.
   */
  abstract addExample(example: Example): Promise<void | string>;

  /**
   * Selects examples from the example selector given the input variables.
   * @param input_variables The input variables to select examples with.
   * @returns A Promise that resolves to an array of selected examples.
   */
  abstract selectExamples(input_variables: Example): Promise<Example[]>;
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/example_selectors/semantic_similarity.ts
import type { Embeddings } from "../embeddings.js";
import type {
  VectorStoreInterface,
  VectorStoreRetrieverInterface,
  VectorStore,
} from "../vectorstores.js";
import type { Example } from "../prompts/base.js";
import { Document } from "../documents/document.js";
import { BaseExampleSelector } from "./base.js";

// Returns the values of a record ordered by sorted key, so that the text
// serialization of an example is deterministic regardless of key order.
function sortedValues<T>(values: Record<string, T>): T[] {
  return Object.keys(values)
    .sort()
    .map((key) => values[key]);
}

/**
 * Interface for the input data of the SemanticSimilarityExampleSelector
 * class.
 *
 * Exactly one of `vectorStore` (plus optional `k`/`filter`) or a
 * pre-built `vectorStoreRetriever` must be supplied; the union makes the
 * two configurations mutually exclusive at the type level.
 */
export type SemanticSimilarityExampleSelectorInput<
  V extends VectorStoreInterface = VectorStoreInterface
> =
  | {
      vectorStore: V;
      k?: number;
      filter?: V["FilterType"];
      exampleKeys?: string[];
      inputKeys?: string[];
      vectorStoreRetriever?: never;
    }
  | {
      vectorStoreRetriever: VectorStoreRetrieverInterface<V>;
      exampleKeys?: string[];
      inputKeys?: string[];
      vectorStore?: never;
      k?: never;
      filter?: never;
    };

/**
 * Class that selects examples based on semantic similarity. It extends
 * the BaseExampleSelector class.
 * @example
 * ```typescript
 * const exampleSelector = await SemanticSimilarityExampleSelector.fromExamples(
 *   [
 *     { input: "happy", output: "sad" },
 *     { input: "tall", output: "short" },
 *     { input: "energetic", output: "lethargic" },
 *     { input: "sunny", output: "gloomy" },
 *     { input: "windy", output: "calm" },
 *   ],
 *   new OpenAIEmbeddings(),
 *   HNSWLib,
 *   { k: 1 },
 * );
 * const dynamicPrompt = new FewShotPromptTemplate({
 *   exampleSelector,
 *   examplePrompt: PromptTemplate.fromTemplate(
 *     "Input: {input}\nOutput: {output}",
 *   ),
 *   prefix: "Give the antonym of every input",
 *   suffix: "Input: {adjective}\nOutput:",
 *   inputVariables: ["adjective"],
 * });
 * console.log(await dynamicPrompt.format({ adjective: "rainy" }));
 * ```
 */
export class SemanticSimilarityExampleSelector<
  V extends VectorStoreInterface = VectorStoreInterface
> extends BaseExampleSelector {
  vectorStoreRetriever: VectorStoreRetrieverInterface<V>;

  // If set, only these keys of each stored example are returned.
  exampleKeys?: string[];

  // If set, only these keys participate in the similarity text.
  inputKeys?: string[];

  constructor(data: SemanticSimilarityExampleSelectorInput<V>) {
    super(data);
    this.exampleKeys = data.exampleKeys;
    this.inputKeys = data.inputKeys;
    if (data.vectorStore !== undefined) {
      // Wrap the raw store in a retriever with the requested k and filter.
      this.vectorStoreRetriever = data.vectorStore.asRetriever({
        k: data.k ?? 4,
        filter: data.filter,
      });
    } else if (data.vectorStoreRetriever) {
      this.vectorStoreRetriever = data.vectorStoreRetriever;
    } else {
      throw new Error(
        `You must specify one of "vectorStore" and "vectorStoreRetriever".`
      );
    }
  }

  /**
   * Method that adds a new example to the vectorStore. The example is
   * converted to a string and added to the vectorStore as a document.
   * @param example The example to be added to the vectorStore.
   * @returns Promise that resolves when the example has been added to the vectorStore.
   */
  async addExample(example: Example): Promise<void> {
    const inputKeys = this.inputKeys ?? Object.keys(example);
    // Serialize the (filtered) example values, key-sorted, into one string
    // that is embedded; the full example is kept in the metadata.
    const stringExample = sortedValues(
      inputKeys.reduce(
        (acc, key) => ({ ...acc, [key]: example[key] }),
        {} as Example
      )
    ).join(" ");

    await this.vectorStoreRetriever.addDocuments([
      new Document({
        pageContent: stringExample,
        metadata: example,
      }),
    ]);
  }

  /**
   * Method that selects which examples to use based on semantic similarity.
   * It performs a similarity search in the vectorStore using the input
   * variables and returns the examples with the highest similarity.
   * @param inputVariables The input variables used for the similarity search.
   * @returns Promise that resolves with an array of the selected examples.
   */
  async selectExamples<T>(
    inputVariables: Record<string, T>
  ): Promise<Example[]> {
    const inputKeys = this.inputKeys ?? Object.keys(inputVariables);
    // Build the query string the same way examples were serialized on add.
    const query = sortedValues(
      inputKeys.reduce(
        (acc, key) => ({ ...acc, [key]: inputVariables[key] }),
        {} as Record<string, T>
      )
    ).join(" ");

    const exampleDocs = await this.vectorStoreRetriever.invoke(query);
    // The original example object was stored whole in the metadata.
    const examples = exampleDocs.map((doc) => doc.metadata);
    if (this.exampleKeys) {
      // If example keys are provided, filter examples to those keys.
      return examples.map((example) =>
        (this.exampleKeys as string[]).reduce(
          (acc, key) => ({ ...acc, [key]: example[key] }),
          {}
        )
      );
    }
    return examples;
  }

  /**
   * Static method that creates a new instance of
   * SemanticSimilarityExampleSelector. It takes a list of examples, an
   * instance of Embeddings, a VectorStore class, and an options object as
   * parameters. It converts the examples to strings, creates a VectorStore
   * from the strings and the embeddings, and returns a new
   * SemanticSimilarityExampleSelector with the created VectorStore and the
   * options provided.
   * @param examples The list of examples to be used.
   * @param embeddings The instance of Embeddings to be used.
   * @param vectorStoreCls The VectorStore class to be used.
   * @param options The options object for the SemanticSimilarityExampleSelector.
   * @returns Promise that resolves with a new instance of SemanticSimilarityExampleSelector.
   */
  static async fromExamples<C extends typeof VectorStore>(
    examples: Record<string, string>[],
    embeddings: Embeddings,
    vectorStoreCls: C,
    options: {
      k?: number;
      inputKeys?: string[];
    } & Parameters<C["fromTexts"]>[3] = {}
  ): Promise<SemanticSimilarityExampleSelector> {
    const inputKeys = options.inputKeys ?? null;
    // Serialize each example to the same key-sorted string format used by
    // addExample so later queries are comparable.
    const stringExamples = examples.map((example) =>
      sortedValues(
        inputKeys
          ? inputKeys.reduce(
              (acc, key) => ({ ...acc, [key]: example[key] }),
              {} as Record<string, string>
            )
          : example
      ).join(" ")
    );

    const vectorStore = await vectorStoreCls.fromTexts(
      stringExamples,
      examples, // metadatas
      embeddings,
      options
    );

    return new SemanticSimilarityExampleSelector({
      vectorStore,
      k: options.k ?? 4,
      exampleKeys: options.exampleKeys,
      inputKeys: options.inputKeys,
    });
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/wrappers.ts
import { IterableReadableStream } from "../utils/stream.js";

/**
 * Wraps an async generator as a byte stream of server-sent events (SSE).
 *
 * Each yielded chunk is JSON-serialized and framed as an SSE `data` event;
 * a final `end` event is emitted before the stream closes.
 *
 * @param stream The async generator whose chunks should be forwarded.
 * @returns An IterableReadableStream of UTF-8 encoded SSE frames.
 */
export function convertToHttpEventStream(stream: AsyncGenerator) {
  const encoder = new TextEncoder();
  const finalStream = new ReadableStream<Uint8Array>({
    async start(controller) {
      for await (const chunk of stream) {
        // SSE framing: event name line, data line, blank-line terminator.
        controller.enqueue(
          encoder.encode(`event: data\ndata: ${JSON.stringify(chunk)}\n\n`)
        );
      }
      // Signal completion to the client before closing the stream.
      controller.enqueue(encoder.encode("event: end\n\n"));
      controller.close();
    },
  });
  return IterableReadableStream.fromReadableStream(finalStream);
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/branch.ts
import { Runnable, RunnableLike, _coerceToDict, _coerceToRunnable, } from "./base.js"; import { RunnableConfig, getCallbackManagerForConfig, patchConfig, } from "./config.js"; import { CallbackManagerForChainRun } from "../callbacks/manager.js"; import { concat } from "../utils/stream.js"; /** * Type for a branch in the RunnableBranch. It consists of a condition * runnable and a branch runnable. The condition runnable is used to * determine whether the branch should be executed, and the branch runnable * is executed if the condition is true. */ export type Branch<RunInput, RunOutput> = [ Runnable<RunInput, boolean>, Runnable<RunInput, RunOutput> ]; export type BranchLike<RunInput, RunOutput> = [ RunnableLike<RunInput, boolean>, RunnableLike<RunInput, RunOutput> ]; /** * Class that represents a runnable branch. The RunnableBranch is * initialized with an array of branches and a default branch. When invoked, * it evaluates the condition of each branch in order and executes the * corresponding branch if the condition is true. If none of the conditions * are true, it executes the default branch. 
* @example * ```typescript * const branch = RunnableBranch.from([ * [ * (x: { topic: string; question: string }) => * x.topic.toLowerCase().includes("anthropic"), * anthropicChain, * ], * [ * (x: { topic: string; question: string }) => * x.topic.toLowerCase().includes("langchain"), * langChainChain, * ], * generalChain, * ]); * * const fullChain = RunnableSequence.from([ * { * topic: classificationChain, * question: (input: { question: string }) => input.question, * }, * branch, * ]); * * const result = await fullChain.invoke({ * question: "how do I use LangChain?", * }); * ``` */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export class RunnableBranch<RunInput = any, RunOutput = any> extends Runnable< RunInput, RunOutput > { static lc_name() { return "RunnableBranch"; } lc_namespace = ["langchain_core", "runnables"]; lc_serializable = true; default: Runnable<RunInput, RunOutput>; branches: Branch<RunInput, RunOutput>[]; constructor(fields: { branches: Branch<RunInput, RunOutput>[]; default: Runnable<RunInput, RunOutput>; }) { super(fields); this.branches = fields.branches; this.default = fields.default; } /** * Convenience method for instantiating a RunnableBranch from * RunnableLikes (objects, functions, or Runnables). * * Each item in the input except for the last one should be a * tuple with two items. The first is a "condition" RunnableLike that * returns "true" if the second RunnableLike in the tuple should run. * * The final item in the input should be a RunnableLike that acts as a * default branch if no other branches match. * * @example * ```ts * import { RunnableBranch } from "@langchain/core/runnables"; * * const branch = RunnableBranch.from([ * [(x: number) => x > 0, (x: number) => x + 1], * [(x: number) => x < 0, (x: number) => x - 1], * (x: number) => x * ]); * ``` * @param branches An array where the every item except the last is a tuple of [condition, runnable] * pairs. 
The last item is a default runnable which is invoked if no other condition matches. * @returns A new RunnableBranch. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any static from<RunInput = any, RunOutput = any>( branches: [ ...BranchLike<RunInput, RunOutput>[], RunnableLike<RunInput, RunOutput> ] ) { if (branches.length < 1) { throw new Error("RunnableBranch requires at least one branch"); } const branchLikes = branches.slice(0, -1) as BranchLike< RunInput, RunOutput >[]; const coercedBranches: Branch<RunInput, RunOutput>[] = branchLikes.map( ([condition, runnable]) => [ _coerceToRunnable(condition), _coerceToRunnable(runnable), ] ); const defaultBranch = _coerceToRunnable( branches[branches.length - 1] as RunnableLike<RunInput, RunOutput> ); return new this({ branches: coercedBranches, default: defaultBranch, }); } async _invoke( input: RunInput, config?: Partial<RunnableConfig>, runManager?: CallbackManagerForChainRun ): Promise<RunOutput> { let result; for (let i = 0; i < this.branches.length; i += 1) { const [condition, branchRunnable] = this.branches[i]; const conditionValue = await condition.invoke( input, patchConfig(config, { callbacks: runManager?.getChild(`condition:${i + 1}`), }) ); if (conditionValue) { result = await branchRunnable.invoke( input, patchConfig(config, { callbacks: runManager?.getChild(`branch:${i + 1}`), }) ); break; } } if (!result) { result = await this.default.invoke( input, patchConfig(config, { callbacks: runManager?.getChild("branch:default"), }) ); } return result; } async invoke( input: RunInput, config: RunnableConfig = {} ): Promise<RunOutput> { return this._callWithConfig(this._invoke, input, config); } async *_streamIterator(input: RunInput, config?: Partial<RunnableConfig>) { const callbackManager_ = await getCallbackManagerForConfig(config); const runManager = await callbackManager_?.handleChainStart( this.toJSON(), _coerceToDict(input, "input"), config?.runId, undefined, undefined, undefined, 
config?.runName ); let finalOutput; let finalOutputSupported = true; let stream; try { for (let i = 0; i < this.branches.length; i += 1) { const [condition, branchRunnable] = this.branches[i]; const conditionValue = await condition.invoke( input, patchConfig(config, { callbacks: runManager?.getChild(`condition:${i + 1}`), }) ); if (conditionValue) { stream = await branchRunnable.stream( input, patchConfig(config, { callbacks: runManager?.getChild(`branch:${i + 1}`), }) ); for await (const chunk of stream) { yield chunk; if (finalOutputSupported) { if (finalOutput === undefined) { finalOutput = chunk; } else { try { finalOutput = concat(finalOutput, chunk); } catch (e) { finalOutput = undefined; finalOutputSupported = false; } } } } break; } } if (stream === undefined) { stream = await this.default.stream( input, patchConfig(config, { callbacks: runManager?.getChild("branch:default"), }) ); for await (const chunk of stream) { yield chunk; if (finalOutputSupported) { if (finalOutput === undefined) { finalOutput = chunk; } else { try { finalOutput = concat(finalOutput, chunk as RunOutput); } catch (e) { finalOutput = undefined; finalOutputSupported = false; } } } } } } catch (e) { await runManager?.handleChainError(e); throw e; } await runManager?.handleChainEnd(finalOutput ?? {}); } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/history.ts
import { BaseChatMessageHistory, BaseListChatMessageHistory, } from "../chat_history.js"; import { AIMessage, BaseMessage, HumanMessage, isBaseMessage, } from "../messages/index.js"; import { Run } from "../tracers/base.js"; import { Runnable, RunnableBinding, type RunnableBindingArgs, RunnableLambda, } from "./base.js"; import { RunnableConfig } from "./config.js"; import { RunnablePassthrough } from "./passthrough.js"; type GetSessionHistoryCallable = ( // eslint-disable-next-line @typescript-eslint/no-explicit-any ...args: Array<any> ) => | Promise<BaseChatMessageHistory | BaseListChatMessageHistory> | BaseChatMessageHistory | BaseListChatMessageHistory; export interface RunnableWithMessageHistoryInputs<RunInput, RunOutput> extends Omit<RunnableBindingArgs<RunInput, RunOutput>, "bound" | "config"> { runnable: Runnable<RunInput, RunOutput>; getMessageHistory: GetSessionHistoryCallable; inputMessagesKey?: string; outputMessagesKey?: string; historyMessagesKey?: string; config?: RunnableConfig; } /** * Wraps a LCEL chain and manages history. It appends input messages * and chain outputs as history, and adds the current history messages to * the chain input. 
* @example * ```typescript * // yarn add @langchain/anthropic @langchain/community @upstash/redis * * import { * ChatPromptTemplate, * MessagesPlaceholder, * } from "@langchain/core/prompts"; * import { ChatAnthropic } from "@langchain/anthropic"; * import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis"; * // For demos, you can also use an in-memory store: * // import { ChatMessageHistory } from "langchain/stores/message/in_memory"; * * const prompt = ChatPromptTemplate.fromMessages([ * ["system", "You're an assistant who's good at {ability}"], * new MessagesPlaceholder("history"), * ["human", "{question}"], * ]); * * const chain = prompt.pipe(new ChatAnthropic({})); * * const chainWithHistory = new RunnableWithMessageHistory({ * runnable: chain, * getMessageHistory: (sessionId) => * new UpstashRedisChatMessageHistory({ * sessionId, * config: { * url: process.env.UPSTASH_REDIS_REST_URL!, * token: process.env.UPSTASH_REDIS_REST_TOKEN!, * }, * }), * inputMessagesKey: "question", * historyMessagesKey: "history", * }); * * const result = await chainWithHistory.invoke( * { * ability: "math", * question: "What does cosine mean?", * }, * { * configurable: { * sessionId: "some_string_identifying_a_user", * }, * } * ); * * const result2 = await chainWithHistory.invoke( * { * ability: "math", * question: "What's its inverse?", * }, * { * configurable: { * sessionId: "some_string_identifying_a_user", * }, * } * ); * ``` */ export class RunnableWithMessageHistory< RunInput, RunOutput > extends RunnableBinding<RunInput, RunOutput> { runnable: Runnable<RunInput, RunOutput>; inputMessagesKey?: string; outputMessagesKey?: string; historyMessagesKey?: string; getMessageHistory: GetSessionHistoryCallable; constructor(fields: RunnableWithMessageHistoryInputs<RunInput, RunOutput>) { let historyChain: Runnable = RunnableLambda.from((input, options) => this._enterHistory(input, options ?? 
{}) ).withConfig({ runName: "loadHistory" }); const messagesKey = fields.historyMessagesKey ?? fields.inputMessagesKey; if (messagesKey) { historyChain = RunnablePassthrough.assign({ [messagesKey]: historyChain, }).withConfig({ runName: "insertHistory" }); } const bound = historyChain .pipe( fields.runnable.withListeners({ onEnd: (run, config) => this._exitHistory(run, config ?? {}), }) ) .withConfig({ runName: "RunnableWithMessageHistory" }); const config = fields.config ?? {}; super({ ...fields, config, bound, }); this.runnable = fields.runnable; this.getMessageHistory = fields.getMessageHistory; this.inputMessagesKey = fields.inputMessagesKey; this.outputMessagesKey = fields.outputMessagesKey; this.historyMessagesKey = fields.historyMessagesKey; } _getInputMessages( // eslint-disable-next-line @typescript-eslint/no-explicit-any inputValue: string | BaseMessage | Array<BaseMessage> | Record<string, any> ): Array<BaseMessage> { let parsedInputValue; if ( typeof inputValue === "object" && !Array.isArray(inputValue) && !isBaseMessage(inputValue) ) { let key; if (this.inputMessagesKey) { key = this.inputMessagesKey; } else if (Object.keys(inputValue).length === 1) { key = Object.keys(inputValue)[0]; } else { key = "input"; } if (Array.isArray(inputValue[key]) && Array.isArray(inputValue[key][0])) { parsedInputValue = inputValue[key][0]; } else { parsedInputValue = inputValue[key]; } } else { parsedInputValue = inputValue; } if (typeof parsedInputValue === "string") { return [new HumanMessage(parsedInputValue)]; } else if (Array.isArray(parsedInputValue)) { return parsedInputValue; } else if (isBaseMessage(parsedInputValue)) { return [parsedInputValue]; } else { throw new Error( `Expected a string, BaseMessage, or array of BaseMessages.\nGot ${JSON.stringify( parsedInputValue, null, 2 )}` ); } } _getOutputMessages( // eslint-disable-next-line @typescript-eslint/no-explicit-any outputValue: string | BaseMessage | Array<BaseMessage> | Record<string, any> ): 
Array<BaseMessage> { let parsedOutputValue; if ( !Array.isArray(outputValue) && !isBaseMessage(outputValue) && typeof outputValue !== "string" ) { let key; if (this.outputMessagesKey !== undefined) { key = this.outputMessagesKey; } else if (Object.keys(outputValue).length === 1) { key = Object.keys(outputValue)[0]; } else { key = "output"; } // If you are wrapping a chat model directly // The output is actually this weird generations object if (outputValue.generations !== undefined) { parsedOutputValue = outputValue.generations[0][0].message; } else { parsedOutputValue = outputValue[key]; } } else { parsedOutputValue = outputValue; } if (typeof parsedOutputValue === "string") { return [new AIMessage(parsedOutputValue)]; } else if (Array.isArray(parsedOutputValue)) { return parsedOutputValue; } else if (isBaseMessage(parsedOutputValue)) { return [parsedOutputValue]; } else { throw new Error( `Expected a string, BaseMessage, or array of BaseMessages. Received: ${JSON.stringify( parsedOutputValue, null, 2 )}` ); } } async _enterHistory( // eslint-disable-next-line @typescript-eslint/no-explicit-any input: any, kwargs?: RunnableConfig ): Promise<BaseMessage[]> { const history = kwargs?.configurable?.messageHistory; const messages = await history.getMessages(); if (this.historyMessagesKey === undefined) { return messages.concat(this._getInputMessages(input)); } return messages; } async _exitHistory(run: Run, config: RunnableConfig): Promise<void> { const history = config.configurable?.messageHistory; // Get input messages let inputs; // Chat model inputs are nested arrays if (Array.isArray(run.inputs) && Array.isArray(run.inputs[0])) { inputs = run.inputs[0]; } else { inputs = run.inputs; } let inputMessages = this._getInputMessages(inputs); // If historic messages were prepended to the input messages, remove them to // avoid adding duplicate messages to history. 
if (this.historyMessagesKey === undefined) { const existingMessages = await history.getMessages(); inputMessages = inputMessages.slice(existingMessages.length); } // Get output messages const outputValue = run.outputs; if (!outputValue) { throw new Error( `Output values from 'Run' undefined. Run: ${JSON.stringify( run, null, 2 )}` ); } const outputMessages = this._getOutputMessages(outputValue); await history.addMessages([...inputMessages, ...outputMessages]); } async _mergeConfig(...configs: Array<RunnableConfig | undefined>) { const config = await super._mergeConfig(...configs); // Extract sessionId if (!config.configurable || !config.configurable.sessionId) { const exampleInput = { [this.inputMessagesKey ?? "input"]: "foo", }; const exampleConfig = { configurable: { sessionId: "123" } }; throw new Error( `sessionId is required. Pass it in as part of the config argument to .invoke() or .stream()\n` + `eg. chain.invoke(${JSON.stringify(exampleInput)}, ${JSON.stringify( exampleConfig )})` ); } // attach messageHistory const { sessionId } = config.configurable; config.configurable.messageHistory = await this.getMessageHistory( sessionId ); return config; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/types.ts
import type { z } from "zod"; import type { SerializableInterface } from "../load/serializable.js"; import type { BaseCallbackConfig } from "../callbacks/manager.js"; import type { IterableReadableStreamInterface } from "../types/stream.js"; export type RunnableBatchOptions = { /** @deprecated Pass in via the standard runnable config object instead */ maxConcurrency?: number; returnExceptions?: boolean; }; export type RunnableIOSchema = { name?: string; schema: z.ZodType; }; /** * Base interface implemented by all runnables. * Used for cross-compatibility between different versions of LangChain core. * * Should not change on patch releases. */ export interface RunnableInterface< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig > extends SerializableInterface { lc_serializable: boolean; invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>; batch( inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & { returnExceptions?: false } ): Promise<RunOutput[]>; batch( inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & { returnExceptions: true } ): Promise<(RunOutput | Error)[]>; batch( inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions ): Promise<(RunOutput | Error)[]>; stream( input: RunInput, options?: Partial<CallOptions> ): Promise<IterableReadableStreamInterface<RunOutput>>; transform( generator: AsyncGenerator<RunInput>, options: Partial<CallOptions> ): AsyncGenerator<RunOutput>; getName(suffix?: string): string; } export interface Edge { source: string; target: string; data?: string; conditional?: boolean; } export interface Node { id: string; name: string; data: RunnableIOSchema | RunnableInterface; // 
eslint-disable-next-line @typescript-eslint/no-explicit-any metadata?: Record<string, any>; } export interface RunnableConfig< // eslint-disable-next-line @typescript-eslint/no-explicit-any ConfigurableFieldType extends Record<string, any> = Record<string, any> > extends BaseCallbackConfig { /** * Runtime values for attributes previously made configurable on this Runnable, * or sub-Runnables. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any configurable?: ConfigurableFieldType; /** * Maximum number of times a call can recurse. If not provided, defaults to 25. */ recursionLimit?: number; /** Maximum number of parallel calls to make. */ maxConcurrency?: number; /** * Timeout for this call in milliseconds. */ timeout?: number; /** * Abort signal for this call. * If provided, the call will be aborted when the signal is aborted. * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal */ signal?: AbortSignal; }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/iter.ts
import type { RunnableConfig } from "../runnables/types.js"; import { AsyncLocalStorageProviderSingleton } from "../singletons/index.js"; import { pickRunnableConfigKeys } from "./config.js"; export function isIterableIterator( thing: unknown ): thing is IterableIterator<unknown> { return ( typeof thing === "object" && thing !== null && typeof (thing as Generator)[Symbol.iterator] === "function" && // avoid detecting array/set as iterator typeof (thing as Generator).next === "function" ); } export const isIterator = (x: unknown): x is Iterator<unknown> => x != null && typeof x === "object" && "next" in x && typeof x.next === "function"; export function isAsyncIterable( thing: unknown ): thing is AsyncIterable<unknown> { return ( typeof thing === "object" && thing !== null && typeof (thing as AsyncIterable<unknown>)[Symbol.asyncIterator] === "function" ); } export function* consumeIteratorInContext<T>( context: Partial<RunnableConfig> | undefined, iter: IterableIterator<T> ): IterableIterator<T> { while (true) { const { value, done } = AsyncLocalStorageProviderSingleton.runWithConfig( pickRunnableConfigKeys(context), iter.next.bind(iter), true ); if (done) { break; } else { yield value; } } } export async function* consumeAsyncIterableInContext<T>( context: Partial<RunnableConfig> | undefined, iter: AsyncIterable<T> ): AsyncIterableIterator<T> { const iterator = iter[Symbol.asyncIterator](); while (true) { const { value, done } = await AsyncLocalStorageProviderSingleton.runWithConfig( pickRunnableConfigKeys(context), iterator.next.bind(iter), true ); if (done) { break; } else { yield value; } } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/router.ts
import { Runnable, type RunnableBatchOptions } from "./base.js"; import { IterableReadableStream } from "../utils/stream.js"; import { ensureConfig, type RunnableConfig } from "./config.js"; export type RouterInput = { key: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any input: any; }; /** * A runnable that routes to a set of runnables based on Input['key']. * Returns the output of the selected runnable. * @example * ```typescript * import { RouterRunnable, RunnableLambda } from "@langchain/core/runnables"; * * const router = new RouterRunnable({ * runnables: { * toUpperCase: RunnableLambda.from((text: string) => text.toUpperCase()), * reverseText: RunnableLambda.from((text: string) => * text.split("").reverse().join("") * ), * }, * }); * * // Invoke the 'reverseText' runnable * const result1 = router.invoke({ key: "reverseText", input: "Hello World" }); * * // "dlroW olleH" * * // Invoke the 'toUpperCase' runnable * const result2 = router.invoke({ key: "toUpperCase", input: "Hello World" }); * * // "HELLO WORLD" * ``` */ export class RouterRunnable< RunInput extends RouterInput, RunnableInput, RunOutput > extends Runnable<RunInput, RunOutput> { static lc_name() { return "RouterRunnable"; } lc_namespace = ["langchain_core", "runnables"]; lc_serializable = true; runnables: Record<string, Runnable<RunnableInput, RunOutput>>; constructor(fields: { runnables: Record<string, Runnable<RunnableInput, RunOutput>>; }) { super(fields); this.runnables = fields.runnables; } async invoke( input: RunInput, options?: Partial<RunnableConfig> ): Promise<RunOutput> { const { key, input: actualInput } = input; const runnable = this.runnables[key]; if (runnable === undefined) { throw new Error(`No runnable associated with key "${key}".`); } return runnable.invoke(actualInput, ensureConfig(options)); } async batch( inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions & { returnExceptions?: false } 
): Promise<RunOutput[]>; async batch( inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions & { returnExceptions: true } ): Promise<(RunOutput | Error)[]>; async batch( inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions ): Promise<(RunOutput | Error)[]>; async batch( inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions ): Promise<(RunOutput | Error)[]> { const keys = inputs.map((input) => input.key); const actualInputs = inputs.map((input) => input.input); const missingKey = keys.find((key) => this.runnables[key] === undefined); if (missingKey !== undefined) { throw new Error(`One or more keys do not have a corresponding runnable.`); } const runnables = keys.map((key) => this.runnables[key]); const optionsList = this._getOptionsList(options ?? {}, inputs.length); const maxConcurrency = optionsList[0]?.maxConcurrency ?? batchOptions?.maxConcurrency; const batchSize = maxConcurrency && maxConcurrency > 0 ? maxConcurrency : inputs.length; const batchResults = []; for (let i = 0; i < actualInputs.length; i += batchSize) { const batchPromises = actualInputs .slice(i, i + batchSize) .map((actualInput, i) => runnables[i].invoke(actualInput, optionsList[i]) ); const batchResult = await Promise.all(batchPromises); batchResults.push(batchResult); } return batchResults.flat(); } async stream( input: RunInput, options?: Partial<RunnableConfig> ): Promise<IterableReadableStream<RunOutput>> { const { key, input: actualInput } = input; const runnable = this.runnables[key]; if (runnable === undefined) { throw new Error(`No runnable associated with key "${key}".`); } return runnable.stream(actualInput, options); } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/config.ts
import { CallbackManager, ensureHandler } from "../callbacks/manager.js"; import { AsyncLocalStorageProviderSingleton } from "../singletons/index.js"; import { RunnableConfig } from "./types.js"; export const DEFAULT_RECURSION_LIMIT = 25; export { type RunnableConfig }; export async function getCallbackManagerForConfig(config?: RunnableConfig) { return CallbackManager._configureSync( config?.callbacks, undefined, config?.tags, undefined, config?.metadata ); } export function mergeConfigs<CallOptions extends RunnableConfig>( ...configs: (CallOptions | RunnableConfig | undefined | null)[] ): Partial<CallOptions> { // We do not want to call ensureConfig on the empty state here as this may cause // double loading of callbacks if async local storage is being used. const copy: Partial<CallOptions> = {}; for (const options of configs.filter((c): c is CallOptions => !!c)) { for (const key of Object.keys(options)) { if (key === "metadata") { copy[key] = { ...copy[key], ...options[key] }; } else if (key === "tags") { const baseKeys: string[] = copy[key] ?? []; copy[key] = [...new Set(baseKeys.concat(options[key] ?? 
[]))]; } else if (key === "configurable") { copy[key] = { ...copy[key], ...options[key] }; } else if (key === "timeout") { if (copy.timeout === undefined) { copy.timeout = options.timeout; } else if (options.timeout !== undefined) { copy.timeout = Math.min(copy.timeout, options.timeout); } } else if (key === "signal") { if (copy.signal === undefined) { copy.signal = options.signal; } else if (options.signal !== undefined) { if ("any" in AbortSignal) { // eslint-disable-next-line @typescript-eslint/no-explicit-any copy.signal = (AbortSignal as any).any([ copy.signal, options.signal, ]); } else { copy.signal = options.signal; } } } else if (key === "callbacks") { const baseCallbacks = copy.callbacks; const providedCallbacks = options.callbacks; // callbacks can be either undefined, Array<handler> or manager // so merging two callbacks values has 6 cases if (Array.isArray(providedCallbacks)) { if (!baseCallbacks) { copy.callbacks = providedCallbacks; } else if (Array.isArray(baseCallbacks)) { copy.callbacks = baseCallbacks.concat(providedCallbacks); } else { // baseCallbacks is a manager const manager = baseCallbacks.copy(); for (const callback of providedCallbacks) { manager.addHandler(ensureHandler(callback), true); } copy.callbacks = manager; } } else if (providedCallbacks) { // providedCallbacks is a manager if (!baseCallbacks) { copy.callbacks = providedCallbacks; } else if (Array.isArray(baseCallbacks)) { const manager = providedCallbacks.copy(); for (const callback of baseCallbacks) { manager.addHandler(ensureHandler(callback), true); } copy.callbacks = manager; } else { // baseCallbacks is also a manager copy.callbacks = new CallbackManager( providedCallbacks._parentRunId, { handlers: baseCallbacks.handlers.concat( providedCallbacks.handlers ), inheritableHandlers: baseCallbacks.inheritableHandlers.concat( providedCallbacks.inheritableHandlers ), tags: Array.from( new Set(baseCallbacks.tags.concat(providedCallbacks.tags)) ), inheritableTags: Array.from( new 
Set( baseCallbacks.inheritableTags.concat( providedCallbacks.inheritableTags ) ) ), metadata: { ...baseCallbacks.metadata, ...providedCallbacks.metadata, }, } ); } } } else { const typedKey = key as keyof CallOptions; copy[typedKey] = options[typedKey] ?? copy[typedKey]; } } } return copy as Partial<CallOptions>; } const PRIMITIVES = new Set(["string", "number", "boolean"]); /** * Ensure that a passed config is an object with all required keys present. */ export function ensureConfig<CallOptions extends RunnableConfig>( config?: CallOptions ): CallOptions { const implicitConfig = AsyncLocalStorageProviderSingleton.getRunnableConfig(); let empty: RunnableConfig = { tags: [], metadata: {}, recursionLimit: 25, runId: undefined, }; if (implicitConfig) { // Don't allow runId and runName to be loaded implicitly, as this can cause // child runs to improperly inherit their parents' run ids. // eslint-disable-next-line @typescript-eslint/no-unused-vars const { runId, runName, ...rest } = implicitConfig; empty = Object.entries(rest).reduce( // eslint-disable-next-line @typescript-eslint/no-explicit-any (currentConfig: Record<string, any>, [key, value]) => { if (value !== undefined) { // eslint-disable-next-line no-param-reassign currentConfig[key] = value; } return currentConfig; }, empty ); } if (config) { empty = Object.entries(config).reduce( // eslint-disable-next-line @typescript-eslint/no-explicit-any (currentConfig: Record<string, any>, [key, value]) => { if (value !== undefined) { // eslint-disable-next-line no-param-reassign currentConfig[key] = value; } return currentConfig; }, empty ); } if (empty?.configurable) { for (const key of Object.keys(empty.configurable)) { if ( PRIMITIVES.has(typeof empty.configurable[key]) && !empty.metadata?.[key] ) { if (!empty.metadata) { empty.metadata = {}; } empty.metadata[key] = empty.configurable[key]; } } } if (empty.timeout !== undefined) { if (empty.timeout <= 0) { throw new Error("Timeout must be a positive number"); } const 
timeoutSignal = AbortSignal.timeout(empty.timeout); if (empty.signal !== undefined) { if ("any" in AbortSignal) { // eslint-disable-next-line @typescript-eslint/no-explicit-any empty.signal = (AbortSignal as any).any([empty.signal, timeoutSignal]); } } else { empty.signal = timeoutSignal; } delete empty.timeout; } return empty as CallOptions; } /** * Helper function that patches runnable configs with updated properties. */ export function patchConfig<CallOptions extends RunnableConfig>( config: Partial<CallOptions> = {}, { callbacks, maxConcurrency, recursionLimit, runName, configurable, runId, }: RunnableConfig = {} ): Partial<CallOptions> { const newConfig = ensureConfig(config); if (callbacks !== undefined) { /** * If we're replacing callbacks we need to unset runName * since that should apply only to the same run as the original callbacks */ delete newConfig.runName; newConfig.callbacks = callbacks; } if (recursionLimit !== undefined) { newConfig.recursionLimit = recursionLimit; } if (maxConcurrency !== undefined) { newConfig.maxConcurrency = maxConcurrency; } if (runName !== undefined) { newConfig.runName = runName; } if (configurable !== undefined) { newConfig.configurable = { ...newConfig.configurable, ...configurable }; } if (runId !== undefined) { delete newConfig.runId; } return newConfig; } // eslint-disable-next-line @typescript-eslint/no-explicit-any export function pickRunnableConfigKeys<CallOptions extends Record<string, any>>( config?: CallOptions ): Partial<RunnableConfig> | undefined { return config ? { configurable: config.configurable, recursionLimit: config.recursionLimit, callbacks: config.callbacks, tags: config.tags, metadata: config.metadata, maxConcurrency: config.maxConcurrency, timeout: config.timeout, signal: config.signal, } : undefined; }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/index.ts
// Public entrypoint for the runnables module: re-exports the core Runnable
// classes and helpers, config utilities, and the passthrough/router/branch/
// history runnables.
export {
  type RunnableFunc,
  type RunnableLike,
  type RunnableRetryFailedAttemptHandler,
  Runnable,
  type RunnableBindingArgs,
  RunnableBinding,
  RunnableEach,
  RunnableRetry,
  RunnableSequence,
  RunnableMap,
  RunnableParallel,
  RunnableLambda,
  RunnableWithFallbacks,
  RunnableAssign,
  RunnablePick,
  _coerceToRunnable,
  RunnableToolLike,
  type RunnableToolLikeArgs,
} from "./base.js";
export {
  type RunnableBatchOptions,
  type RunnableInterface,
  type RunnableIOSchema,
} from "./types.js";
export {
  type RunnableConfig,
  getCallbackManagerForConfig,
  patchConfig,
  ensureConfig,
  mergeConfigs,
  pickRunnableConfigKeys,
} from "./config.js";
export { RunnablePassthrough } from "./passthrough.js";
export { type RouterInput, RouterRunnable } from "./router.js";
export { RunnableBranch, type Branch, type BranchLike } from "./branch.js";
export {
  type RunnableWithMessageHistoryInputs,
  RunnableWithMessageHistory,
} from "./history.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/graph_mermaid.ts
import { Edge, Node } from "./types.js"; function _escapeNodeLabel(nodeLabel: string): string { // Escapes the node label for Mermaid syntax. return nodeLabel.replace(/[^a-zA-Z-_0-9]/g, "_"); } const MARKDOWN_SPECIAL_CHARS = ["*", "_", "`"]; function _generateMermaidGraphStyles( nodeColors: Record<string, string> ): string { let styles = ""; for (const [className, color] of Object.entries(nodeColors)) { styles += `\tclassDef ${className} ${color};\n`; } return styles; } /** * Draws a Mermaid graph using the provided graph data */ export function drawMermaid( nodes: Record<string, Node>, edges: Edge[], config?: { firstNode?: string; lastNode?: string; curveStyle?: string; withStyles?: boolean; nodeColors?: Record<string, string>; wrapLabelNWords?: number; } ): string { const { firstNode, lastNode, nodeColors, withStyles = true, curveStyle = "linear", wrapLabelNWords = 9, } = config ?? {}; // Initialize Mermaid graph configuration let mermaidGraph = withStyles ? `%%{init: {'flowchart': {'curve': '${curveStyle}'}}}%%\ngraph TD;\n` : "graph TD;\n"; if (withStyles) { // Node formatting templates const defaultClassLabel = "default"; const formatDict: Record<string, string> = { [defaultClassLabel]: "{0}({1})", }; if (firstNode !== undefined) { formatDict[firstNode] = "{0}([{1}]):::first"; } if (lastNode !== undefined) { formatDict[lastNode] = "{0}([{1}]):::last"; } // Add nodes to the graph for (const [key, node] of Object.entries(nodes)) { const nodeName = node.name.split(":").pop() ?? ""; const label = MARKDOWN_SPECIAL_CHARS.some( (char) => nodeName.startsWith(char) && nodeName.endsWith(char) ) ? `<p>${nodeName}</p>` : nodeName; let finalLabel = label; if (Object.keys(node.metadata ?? {}).length) { finalLabel += `<hr/><small><em>${Object.entries(node.metadata ?? {}) .map(([k, v]) => `${k} = ${v}`) .join("\n")}</em></small>`; } const nodeLabel = (formatDict[key] ?? 
formatDict[defaultClassLabel]) .replace("{0}", _escapeNodeLabel(key)) .replace("{1}", finalLabel); mermaidGraph += `\t${nodeLabel}\n`; } } // Group edges by their common prefixes const edgeGroups: Record<string, Edge[]> = {}; for (const edge of edges) { const srcParts = edge.source.split(":"); const tgtParts = edge.target.split(":"); const commonPrefix = srcParts .filter((src, i) => src === tgtParts[i]) .join(":"); if (!edgeGroups[commonPrefix]) { edgeGroups[commonPrefix] = []; } edgeGroups[commonPrefix].push(edge); } const seenSubgraphs = new Set<string>(); function addSubgraph(edges: Edge[], prefix: string): void { const selfLoop = edges.length === 1 && edges[0].source === edges[0].target; if (prefix && !selfLoop) { const subgraph = prefix.split(":").pop()!; if (seenSubgraphs.has(subgraph)) { throw new Error( `Found duplicate subgraph '${subgraph}' -- this likely means that ` + "you're reusing a subgraph node with the same name. " + "Please adjust your graph to have subgraph nodes with unique names." ); } seenSubgraphs.add(subgraph); mermaidGraph += `\tsubgraph ${subgraph}\n`; } for (const edge of edges) { const { source, target, data, conditional } = edge; let edgeLabel = ""; if (data !== undefined) { let edgeData = data; const words = edgeData.split(" "); if (words.length > wrapLabelNWords) { edgeData = Array.from( { length: Math.ceil(words.length / wrapLabelNWords) }, (_, i) => words .slice(i * wrapLabelNWords, (i + 1) * wrapLabelNWords) .join(" ") ).join("&nbsp;<br>&nbsp;"); } edgeLabel = conditional ? ` -. &nbsp;${edgeData}&nbsp; .-> ` : ` -- &nbsp;${edgeData}&nbsp; --> `; } else { edgeLabel = conditional ? 
" -.-> " : " --> "; } mermaidGraph += `\t${_escapeNodeLabel( source )}${edgeLabel}${_escapeNodeLabel(target)};\n`; } // Recursively add nested subgraphs for (const nestedPrefix in edgeGroups) { if (nestedPrefix.startsWith(`${prefix}:`) && nestedPrefix !== prefix) { addSubgraph(edgeGroups[nestedPrefix], nestedPrefix); } } if (prefix && !selfLoop) { mermaidGraph += "\tend\n"; } } // Start with the top-level edges (no common prefix) addSubgraph(edgeGroups[""] ?? [], ""); // Add remaining subgraphs for (const prefix in edgeGroups) { if (!prefix.includes(":") && prefix !== "") { addSubgraph(edgeGroups[prefix], prefix); } } // Add custom styles for nodes if (withStyles) { mermaidGraph += _generateMermaidGraphStyles(nodeColors ?? {}); } return mermaidGraph; } /** * Renders Mermaid graph using the Mermaid.INK API. */ export async function drawMermaidPng( mermaidSyntax: string, config?: { backgroundColor?: string; } ) { let { backgroundColor = "white" } = config ?? {}; // Use btoa for compatibility, assume ASCII const mermaidSyntaxEncoded = btoa(mermaidSyntax); // Check if the background color is a hexadecimal color code using regex if (backgroundColor !== undefined) { const hexColorPattern = /^#(?:[0-9a-fA-F]{3}){1,2}$/; if (!hexColorPattern.test(backgroundColor)) { backgroundColor = `!${backgroundColor}`; } } const imageUrl = `https://mermaid.ink/img/${mermaidSyntaxEncoded}?bgColor=${backgroundColor}`; const res = await fetch(imageUrl); if (!res.ok) { throw new Error( [ `Failed to render the graph using the Mermaid.INK API.`, `Status code: ${res.status}`, `Status text: ${res.statusText}`, ].join("\n") ); } const content = await res.blob(); return content; }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/base.ts
import { z } from "zod"; import pRetry from "p-retry"; import { v4 as uuidv4 } from "uuid"; import { type TraceableFunction, isTraceableFunction, } from "langsmith/singletons/traceable"; import type { RunnableInterface, RunnableBatchOptions, RunnableConfig, } from "./types.js"; import { CallbackManagerForChainRun } from "../callbacks/manager.js"; import { LogStreamCallbackHandler, LogStreamCallbackHandlerInput, RunLog, RunLogPatch, isLogStreamHandler, } from "../tracers/log_stream.js"; import { EventStreamCallbackHandler, EventStreamCallbackHandlerInput, StreamEvent, StreamEventData, isStreamEventsHandler, } from "../tracers/event_stream.js"; import { Serializable } from "../load/serializable.js"; import { IterableReadableStream, concat, atee, pipeGeneratorWithSetup, AsyncGeneratorWithSetup, } from "../utils/stream.js"; import { raceWithSignal } from "../utils/signal.js"; import { DEFAULT_RECURSION_LIMIT, ensureConfig, getCallbackManagerForConfig, mergeConfigs, patchConfig, pickRunnableConfigKeys, } from "./config.js"; import { AsyncCaller } from "../utils/async_caller.js"; import { Run } from "../tracers/base.js"; import { RootListenersTracer } from "../tracers/root_listener.js"; import { _RootEventFilter, isRunnableInterface } from "./utils.js"; import { AsyncLocalStorageProviderSingleton } from "../singletons/index.js"; import { Graph } from "./graph.js"; import { convertToHttpEventStream } from "./wrappers.js"; import { consumeAsyncIterableInContext, consumeIteratorInContext, isAsyncIterable, isIterableIterator, isIterator, } from "./iter.js"; import { _isToolCall, ToolInputParsingException } from "../tools/utils.js"; import { ToolCall } from "../messages/tool.js"; export { type RunnableInterface, RunnableBatchOptions }; export type RunnableFunc< RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig > = ( input: RunInput, options: | CallOptions // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any> // 
eslint-disable-next-line @typescript-eslint/no-explicit-any | (Record<string, any> & CallOptions) ) => RunOutput | Promise<RunOutput>; export type RunnableMapLike<RunInput, RunOutput> = { [K in keyof RunOutput]: RunnableLike<RunInput, RunOutput[K]>; }; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type RunnableLike< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig > = | RunnableInterface<RunInput, RunOutput, CallOptions> | RunnableFunc<RunInput, RunOutput, CallOptions> | RunnableMapLike<RunInput, RunOutput>; export type RunnableRetryFailedAttemptHandler = ( // eslint-disable-next-line @typescript-eslint/no-explicit-any error: any, // eslint-disable-next-line @typescript-eslint/no-explicit-any input: any // eslint-disable-next-line @typescript-eslint/no-explicit-any ) => any; // eslint-disable-next-line @typescript-eslint/no-explicit-any export function _coerceToDict(value: any, defaultKey: string) { return value && !Array.isArray(value) && // eslint-disable-next-line no-instanceof/no-instanceof !(value instanceof Date) && typeof value === "object" ? value : { [defaultKey]: value }; } /** * A Runnable is a generic unit of work that can be invoked, batched, streamed, and/or * transformed. */ export abstract class Runnable< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig > extends Serializable implements RunnableInterface<RunInput, RunOutput, CallOptions> { protected lc_runnable = true; name?: string; getName(suffix?: string): string { const name = // eslint-disable-next-line @typescript-eslint/no-explicit-any this.name ?? (this.constructor as any).lc_name() ?? this.constructor.name; return suffix ? 
`${name}${suffix}` : name; } abstract invoke( input: RunInput, options?: Partial<CallOptions> ): Promise<RunOutput>; /** * Bind arguments to a Runnable, returning a new Runnable. * @param kwargs * @returns A new RunnableBinding that, when invoked, will apply the bound args. */ bind( kwargs: Partial<CallOptions> ): Runnable<RunInput, RunOutput, CallOptions> { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new RunnableBinding({ bound: this, kwargs, config: {} }); } /** * Return a new Runnable that maps a list of inputs to a list of outputs, * by calling invoke() with each input. */ map(): Runnable<RunInput[], RunOutput[], CallOptions> { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new RunnableEach({ bound: this }); } /** * Add retry logic to an existing runnable. * @param kwargs * @returns A new RunnableRetry that, when invoked, will retry according to the parameters. */ withRetry(fields?: { stopAfterAttempt?: number; onFailedAttempt?: RunnableRetryFailedAttemptHandler; }): RunnableRetry<RunInput, RunOutput, CallOptions> { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new RunnableRetry({ bound: this, kwargs: {}, config: {}, maxAttemptNumber: fields?.stopAfterAttempt, ...fields, }); } /** * Bind config to a Runnable, returning a new Runnable. * @param config New configuration parameters to attach to the new runnable. * @returns A new RunnableBinding with a config matching what's passed. */ withConfig( config: RunnableConfig ): Runnable<RunInput, RunOutput, CallOptions> { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new RunnableBinding({ bound: this, config, kwargs: {}, }); } /** * Create a new runnable from the current one that will try invoking * other passed fallback runnables if the initial invocation fails. * @param fields.fallbacks Other runnables to call if the runnable errors. * @returns A new RunnableWithFallbacks. 
*/ withFallbacks( fields: | { fallbacks: Runnable<RunInput, RunOutput>[]; } | Runnable<RunInput, RunOutput>[] ): RunnableWithFallbacks<RunInput, RunOutput> { const fallbacks = Array.isArray(fields) ? fields : fields.fallbacks; // eslint-disable-next-line @typescript-eslint/no-use-before-define return new RunnableWithFallbacks<RunInput, RunOutput>({ runnable: this, fallbacks, }); } protected _getOptionsList<O extends CallOptions & { runType?: string }>( options: Partial<O> | Partial<O>[], length = 0 ): Partial<O>[] { if (Array.isArray(options) && options.length !== length) { throw new Error( `Passed "options" must be an array with the same length as the inputs, but got ${options.length} options for ${length} inputs` ); } if (Array.isArray(options)) { return options.map(ensureConfig); } if (length > 1 && !Array.isArray(options) && options.runId) { console.warn( "Provided runId will be used only for the first element of the batch." ); const subsequent = Object.fromEntries( Object.entries(options).filter(([key]) => key !== "runId") ); return Array.from({ length }, (_, i) => ensureConfig(i === 0 ? options : subsequent) ) as Partial<O>[]; } return Array.from({ length }, () => ensureConfig(options)); } /** * Default implementation of batch, which calls invoke N times. * Subclasses should override this method if they can batch more efficiently. * @param inputs Array of inputs to each batch call. * @param options Either a single call options object to apply to each batch call or an array for each call. 
* @param batchOptions.returnExceptions Whether to return errors rather than throwing on the first one * @returns An array of RunOutputs, or mixed RunOutputs and errors if batchOptions.returnExceptions is set */ async batch( inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & { returnExceptions?: false } ): Promise<RunOutput[]>; async batch( inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & { returnExceptions: true } ): Promise<(RunOutput | Error)[]>; async batch( inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions ): Promise<(RunOutput | Error)[]>; async batch( inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions ): Promise<(RunOutput | Error)[]> { const configList = this._getOptionsList(options ?? {}, inputs.length); const maxConcurrency = configList[0]?.maxConcurrency ?? batchOptions?.maxConcurrency; const caller = new AsyncCaller({ maxConcurrency, onFailedAttempt: (e) => { throw e; }, }); const batchCalls = inputs.map((input, i) => caller.call(async () => { try { const result = await this.invoke(input, configList[i]); return result; } catch (e) { if (batchOptions?.returnExceptions) { return e as Error; } throw e; } }) ); return Promise.all(batchCalls); } /** * Default streaming implementation. * Subclasses should override this method if they support streaming output. * @param input * @param options */ async *_streamIterator( input: RunInput, options?: Partial<CallOptions> ): AsyncGenerator<RunOutput> { yield this.invoke(input, options); } /** * Stream output in chunks. * @param input * @param options * @returns A readable stream that is also an iterable. 
*/ async stream( input: RunInput, options?: Partial<CallOptions> ): Promise<IterableReadableStream<RunOutput>> { // Buffer the first streamed chunk to allow for initial errors // to surface immediately. const config = ensureConfig(options); const wrappedGenerator = new AsyncGeneratorWithSetup({ generator: this._streamIterator(input, config), config, }); await wrappedGenerator.setup; return IterableReadableStream.fromAsyncGenerator(wrappedGenerator); } protected _separateRunnableConfigFromCallOptions( options?: Partial<CallOptions> ): [RunnableConfig, Omit<Partial<CallOptions>, keyof RunnableConfig>] { let runnableConfig; if (options === undefined) { runnableConfig = ensureConfig(options); } else { runnableConfig = ensureConfig({ callbacks: options.callbacks, tags: options.tags, metadata: options.metadata, runName: options.runName, configurable: options.configurable, recursionLimit: options.recursionLimit, maxConcurrency: options.maxConcurrency, runId: options.runId, timeout: options.timeout, signal: options.signal, }); } const callOptions = { ...(options as Partial<CallOptions>) }; delete callOptions.callbacks; delete callOptions.tags; delete callOptions.metadata; delete callOptions.runName; delete callOptions.configurable; delete callOptions.recursionLimit; delete callOptions.maxConcurrency; delete callOptions.runId; delete callOptions.timeout; delete callOptions.signal; return [runnableConfig, callOptions]; } protected async _callWithConfig<T extends RunInput>( func: | ((input: T) => Promise<RunOutput>) | (( input: T, config?: Partial<CallOptions>, runManager?: CallbackManagerForChainRun ) => Promise<RunOutput>), input: T, options?: Partial<CallOptions> & { runType?: string } ) { const config = ensureConfig(options); const callbackManager_ = await getCallbackManagerForConfig(config); const runManager = await callbackManager_?.handleChainStart( this.toJSON(), _coerceToDict(input, "input"), config.runId, config?.runType, undefined, undefined, config?.runName ?? 
this.getName() ); delete config.runId; let output; try { const promise = func.call(this, input, config, runManager); output = await raceWithSignal(promise, options?.signal); } catch (e) { await runManager?.handleChainError(e); throw e; } await runManager?.handleChainEnd(_coerceToDict(output, "output")); return output; } /** * Internal method that handles batching and configuration for a runnable * It takes a function, input values, and optional configuration, and * returns a promise that resolves to the output values. * @param func The function to be executed for each input value. * @param input The input values to be processed. * @param config Optional configuration for the function execution. * @returns A promise that resolves to the output values. */ async _batchWithConfig<T extends RunInput>( func: ( inputs: T[], options?: Partial<CallOptions>[], runManagers?: (CallbackManagerForChainRun | undefined)[], batchOptions?: RunnableBatchOptions ) => Promise<(RunOutput | Error)[]>, inputs: T[], options?: | Partial<CallOptions & { runType?: string }> | Partial<CallOptions & { runType?: string }>[], batchOptions?: RunnableBatchOptions ): Promise<(RunOutput | Error)[]> { const optionsList = this._getOptionsList(options ?? {}, inputs.length); const callbackManagers = await Promise.all( optionsList.map(getCallbackManagerForConfig) ); const runManagers = await Promise.all( callbackManagers.map(async (callbackManager, i) => { const handleStartRes = await callbackManager?.handleChainStart( this.toJSON(), _coerceToDict(inputs[i], "input"), optionsList[i].runId, optionsList[i].runType, undefined, undefined, optionsList[i].runName ?? 
this.getName() ); delete optionsList[i].runId; return handleStartRes; }) ); let outputs: (RunOutput | Error)[]; try { const promise = func.call( this, inputs, optionsList, runManagers, batchOptions ); outputs = await raceWithSignal(promise, optionsList?.[0]?.signal); } catch (e) { await Promise.all( runManagers.map((runManager) => runManager?.handleChainError(e)) ); throw e; } await Promise.all( runManagers.map((runManager) => runManager?.handleChainEnd(_coerceToDict(outputs, "output")) ) ); return outputs; } /** * Helper method to transform an Iterator of Input values into an Iterator of * Output values, with callbacks. * Use this to implement `stream()` or `transform()` in Runnable subclasses. */ protected async *_transformStreamWithConfig< I extends RunInput, O extends RunOutput >( inputGenerator: AsyncGenerator<I>, transformer: ( generator: AsyncGenerator<I>, runManager?: CallbackManagerForChainRun, options?: Partial<CallOptions> ) => AsyncGenerator<O>, options?: Partial<CallOptions> & { runType?: string } ): AsyncGenerator<O> { let finalInput: I | undefined; let finalInputSupported = true; let finalOutput: O | undefined; let finalOutputSupported = true; const config = ensureConfig(options); const callbackManager_ = await getCallbackManagerForConfig(config); async function* wrapInputForTracing() { for await (const chunk of inputGenerator) { if (finalInputSupported) { if (finalInput === undefined) { finalInput = chunk; } else { try { // eslint-disable-next-line @typescript-eslint/no-explicit-any finalInput = concat(finalInput, chunk as any); } catch { finalInput = undefined; finalInputSupported = false; } } } yield chunk; } } let runManager: CallbackManagerForChainRun | undefined; try { const pipe = await pipeGeneratorWithSetup( transformer.bind(this), wrapInputForTracing(), async () => callbackManager_?.handleChainStart( this.toJSON(), { input: "" }, config.runId, config.runType, undefined, undefined, config.runName ?? 
this.getName() ), options?.signal, config ); delete config.runId; runManager = pipe.setup; const streamEventsHandler = runManager?.handlers.find( isStreamEventsHandler ); let iterator = pipe.output; if (streamEventsHandler !== undefined && runManager !== undefined) { iterator = streamEventsHandler.tapOutputIterable( runManager.runId, iterator ); } const streamLogHandler = runManager?.handlers.find(isLogStreamHandler); if (streamLogHandler !== undefined && runManager !== undefined) { iterator = streamLogHandler.tapOutputIterable( runManager.runId, iterator ); } for await (const chunk of iterator) { yield chunk; if (finalOutputSupported) { if (finalOutput === undefined) { finalOutput = chunk; } else { try { // eslint-disable-next-line @typescript-eslint/no-explicit-any finalOutput = concat(finalOutput, chunk as any); } catch { finalOutput = undefined; finalOutputSupported = false; } } } } } catch (e) { await runManager?.handleChainError(e, undefined, undefined, undefined, { inputs: _coerceToDict(finalInput, "input"), }); throw e; } await runManager?.handleChainEnd( finalOutput ?? {}, undefined, undefined, undefined, { inputs: _coerceToDict(finalInput, "input") } ); } getGraph(_?: RunnableConfig): Graph { const graph = new Graph(); // TODO: Add input schema for runnables const inputNode = graph.addNode({ name: `${this.getName()}Input`, schema: z.any(), }); const runnableNode = graph.addNode(this); // TODO: Add output schemas for runnables const outputNode = graph.addNode({ name: `${this.getName()}Output`, schema: z.any(), }); graph.addEdge(inputNode, runnableNode); graph.addEdge(runnableNode, outputNode); return graph; } /** * Create a new runnable sequence that runs each individual runnable in series, * piping the output of one runnable into another runnable or runnable-like. * @param coerceable A runnable, function, or object whose values are functions or runnables. * @returns A new runnable sequence. 
*/ pipe<NewRunOutput>( coerceable: RunnableLike<RunOutput, NewRunOutput> ): Runnable<RunInput, Exclude<NewRunOutput, Error>> { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new RunnableSequence({ first: this, last: _coerceToRunnable(coerceable), }); } /** * Pick keys from the dict output of this runnable. Returns a new runnable. */ pick(keys: string | string[]): Runnable { // eslint-disable-next-line @typescript-eslint/no-use-before-define return this.pipe(new RunnablePick(keys) as Runnable); } /** * Assigns new fields to the dict output of this runnable. Returns a new runnable. */ assign( mapping: RunnableMapLike<Record<string, unknown>, Record<string, unknown>> ): Runnable { return this.pipe( // eslint-disable-next-line @typescript-eslint/no-use-before-define new RunnableAssign( // eslint-disable-next-line @typescript-eslint/no-use-before-define new RunnableMap<Record<string, unknown>>({ steps: mapping }) ) as Runnable ); } /** * Default implementation of transform, which buffers input and then calls stream. * Subclasses should override this method if they can start producing output while * input is still being generated. * @param generator * @param options */ async *transform( generator: AsyncGenerator<RunInput>, options: Partial<CallOptions> ): AsyncGenerator<RunOutput> { let finalChunk; for await (const chunk of generator) { if (finalChunk === undefined) { finalChunk = chunk; } else { // Make a best effort to gather, for any type that supports concat. // This method should throw an error if gathering fails. // eslint-disable-next-line @typescript-eslint/no-explicit-any finalChunk = concat(finalChunk, chunk as any); } } yield* this._streamIterator(finalChunk, ensureConfig(options)); } /** * Stream all output from a runnable, as reported to the callback system. * This includes all inner runs of LLMs, Retrievers, Tools, etc. 
* Output is streamed as Log objects, which include a list of * jsonpatch ops that describe how the state of the run has changed in each * step, and the final state of the run. * The jsonpatch ops can be applied in order to construct state. * @param input * @param options * @param streamOptions */ async *streamLog( input: RunInput, options?: Partial<CallOptions>, streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose"> ): AsyncGenerator<RunLogPatch> { const logStreamCallbackHandler = new LogStreamCallbackHandler({ ...streamOptions, autoClose: false, _schemaFormat: "original", }); const config = ensureConfig(options); yield* this._streamLog(input, logStreamCallbackHandler, config); } protected async *_streamLog( input: RunInput, logStreamCallbackHandler: LogStreamCallbackHandler, config: Partial<CallOptions> ): AsyncGenerator<RunLogPatch> { const { callbacks } = config; if (callbacks === undefined) { // eslint-disable-next-line no-param-reassign config.callbacks = [logStreamCallbackHandler]; } else if (Array.isArray(callbacks)) { // eslint-disable-next-line no-param-reassign config.callbacks = callbacks.concat([logStreamCallbackHandler]); } else { const copiedCallbacks = callbacks.copy(); copiedCallbacks.addHandler(logStreamCallbackHandler, true); // eslint-disable-next-line no-param-reassign config.callbacks = copiedCallbacks; } const runnableStreamPromise = this.stream(input, config); async function consumeRunnableStream() { try { const runnableStream = await runnableStreamPromise; for await (const chunk of runnableStream) { const patch = new RunLogPatch({ ops: [ { op: "add", path: "/streamed_output/-", value: chunk, }, ], }); await logStreamCallbackHandler.writer.write(patch); } } finally { await logStreamCallbackHandler.writer.close(); } } const runnableStreamConsumePromise = consumeRunnableStream(); try { for await (const log of logStreamCallbackHandler) { yield log; } } finally { await runnableStreamConsumePromise; } } /** * Generate a stream of events 
emitted by the internal steps of the runnable. * * Use to create an iterator over StreamEvents that provide real-time information * about the progress of the runnable, including StreamEvents from intermediate * results. * * A StreamEvent is a dictionary with the following schema: * * - `event`: string - Event names are of the format: on_[runnable_type]_(start|stream|end). * - `name`: string - The name of the runnable that generated the event. * - `run_id`: string - Randomly generated ID associated with the given execution of * the runnable that emitted the event. A child runnable that gets invoked as part of the execution of a * parent runnable is assigned its own unique ID. * - `tags`: string[] - The tags of the runnable that generated the event. * - `metadata`: Record<string, any> - The metadata of the runnable that generated the event. * - `data`: Record<string, any> * * Below is a table that illustrates some events that might be emitted by various * chains. Metadata fields have been omitted from the table for brevity. * Chain definitions have been included after the table. * * **ATTENTION** This reference table is for the V2 version of the schema. 
* * ```md * +----------------------+-----------------------------+------------------------------------------+ * | event | input | output/chunk | * +======================+=============================+==========================================+ * | on_chat_model_start | {"messages": BaseMessage[]} | | * +----------------------+-----------------------------+------------------------------------------+ * | on_chat_model_stream | | AIMessageChunk("hello") | * +----------------------+-----------------------------+------------------------------------------+ * | on_chat_model_end | {"messages": BaseMessage[]} | AIMessageChunk("hello world") | * +----------------------+-----------------------------+------------------------------------------+ * | on_llm_start | {'input': 'hello'} | | * +----------------------+-----------------------------+------------------------------------------+ * | on_llm_stream | | 'Hello' | * +----------------------+-----------------------------+------------------------------------------+ * | on_llm_end | 'Hello human!' | | * +----------------------+-----------------------------+------------------------------------------+ * | on_chain_start | | | * +----------------------+-----------------------------+------------------------------------------+ * | on_chain_stream | | "hello world!" | * +----------------------+-----------------------------+------------------------------------------+ * | on_chain_end | [Document(...)] | "hello world!, goodbye world!" 
|
   * +----------------------+-----------------------------+------------------------------------------+
   * | on_tool_start        | {"x": 1, "y": "2"}          |                                          |
   * +----------------------+-----------------------------+------------------------------------------+
   * | on_tool_end          |                             | {"x": 1, "y": "2"}                       |
   * +----------------------+-----------------------------+------------------------------------------+
   * | on_retriever_start   | {"query": "hello"}          |                                          |
   * +----------------------+-----------------------------+------------------------------------------+
   * | on_retriever_end     | {"query": "hello"}          | [Document(...), ..]                      |
   * +----------------------+-----------------------------+------------------------------------------+
   * | on_prompt_start      | {"question": "hello"}       |                                          |
   * +----------------------+-----------------------------+------------------------------------------+
   * | on_prompt_end        | {"question": "hello"}       | ChatPromptValue(messages: BaseMessage[]) |
   * +----------------------+-----------------------------+------------------------------------------+
   * ```
   *
   * The "on_chain_*" events are the default for Runnables that don't fit one of the above categories.
   *
   * In addition to the standard events above, users can also dispatch custom events.
   *
   * Custom events will only be surfaced in the `v2` version of the API!
   *
   * A custom event has the following format:
   *
   * ```md
   * +-----------+------+------------------------------------------------------------+
   * | Attribute | Type | Description                                                |
   * +===========+======+============================================================+
   * | name      | str  | A user defined name for the event.                         |
   * +-----------+------+------------------------------------------------------------+
   * | data      | Any  | The data associated with the event. This can be anything. 
| * +-----------+------+------------------------------------------------------------+ * ``` * * Here's an example: * * ```ts * import { RunnableLambda } from "@langchain/core/runnables"; * import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch"; * // Use this import for web environments that don't support "async_hooks" * // and manually pass config to child runs. * // import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch/web"; * * const slowThing = RunnableLambda.from(async (someInput: string) => { * // Placeholder for some slow operation * await new Promise((resolve) => setTimeout(resolve, 100)); * await dispatchCustomEvent("progress_event", { * message: "Finished step 1 of 2", * }); * await new Promise((resolve) => setTimeout(resolve, 100)); * return "Done"; * }); * * const eventStream = await slowThing.streamEvents("hello world", { * version: "v2", * }); * * for await (const event of eventStream) { * if (event.event === "on_custom_event") { * console.log(event); * } * } * ``` */ streamEvents( input: RunInput, options: Partial<CallOptions> & { version: "v1" | "v2" }, streamOptions?: Omit<EventStreamCallbackHandlerInput, "autoClose"> ): IterableReadableStream<StreamEvent>; streamEvents( input: RunInput, options: Partial<CallOptions> & { version: "v1" | "v2"; encoding: "text/event-stream"; }, streamOptions?: Omit<EventStreamCallbackHandlerInput, "autoClose"> ): IterableReadableStream<Uint8Array>; streamEvents( input: RunInput, options: Partial<CallOptions> & { version: "v1" | "v2"; encoding?: "text/event-stream" | undefined; }, streamOptions?: Omit<EventStreamCallbackHandlerInput, "autoClose"> ): IterableReadableStream<StreamEvent | Uint8Array> { let stream; if (options.version === "v1") { stream = this._streamEventsV1(input, options, streamOptions); } else if (options.version === "v2") { stream = this._streamEventsV2(input, options, streamOptions); } else { throw new Error( `Only versions "v1" and "v2" of the schema are currently 
supported.` ); } if (options.encoding === "text/event-stream") { return convertToHttpEventStream(stream); } else { return IterableReadableStream.fromAsyncGenerator(stream); } } private async *_streamEventsV2( input: RunInput, options: Partial<CallOptions> & { version: "v1" | "v2" }, streamOptions?: Omit<EventStreamCallbackHandlerInput, "autoClose"> ): AsyncGenerator<StreamEvent> { const eventStreamer = new EventStreamCallbackHandler({ ...streamOptions, autoClose: false, }); const config = ensureConfig(options); const runId = config.runId ?? uuidv4(); config.runId = runId; const callbacks = config.callbacks; if (callbacks === undefined) { config.callbacks = [eventStreamer]; } else if (Array.isArray(callbacks)) { config.callbacks = callbacks.concat(eventStreamer); } else { const copiedCallbacks = callbacks.copy(); copiedCallbacks.addHandler(eventStreamer, true); // eslint-disable-next-line no-param-reassign config.callbacks = copiedCallbacks; } // Call the runnable in streaming mode, // add each chunk to the output stream const outerThis = this; async function consumeRunnableStream() { try { const runnableStream = await outerThis.stream(input, config); const tappedStream = eventStreamer.tapOutputIterable( runId, runnableStream ); // eslint-disable-next-line @typescript-eslint/no-unused-vars for await (const _ of tappedStream) { // Just iterate so that the callback handler picks up events } } finally { await eventStreamer.finish(); } } const runnableStreamConsumePromise = consumeRunnableStream(); let firstEventSent = false; let firstEventRunId; try { for await (const event of eventStreamer) { // This is a work-around an issue where the inputs into the // chain are not available until the entire input is consumed. // As a temporary solution, we'll modify the input to be the input // that was passed into the chain. 
        // First event for the root runnable: attach the raw input and
        // remember the run id so the matching "_end" event can be
        // deduplicated below.
        if (!firstEventSent) {
          event.data.input = input;
          firstEventSent = true;
          firstEventRunId = event.run_id;
          yield event;
          continue;
        }
        if (event.run_id === firstEventRunId && event.event.endsWith("_end")) {
          // If it's the end event corresponding to the root runnable
          // we dont include the input in the event since it's guaranteed
          // to be included in the first event.
          if (event.data?.input) {
            delete event.data.input;
          }
        }
        yield event;
      }
    } finally {
      // Always drain the underlying stream so tracer callbacks complete
      // even if the consumer stopped iterating early.
      await runnableStreamConsumePromise;
    }
  }

  /**
   * Implementation of streamEvents for the legacy "v1" schema, built on top
   * of `_streamLog`. Accumulates run-log patches into a `RunLog` and converts
   * each log entry into `on_[type]_(start|stream|end)` events.
   *
   * @param input The input to the runnable.
   * @param options Config plus the requested event schema version.
   * @param streamOptions Include/exclude filters applied to emitted events.
   */
  private async *_streamEventsV1(
    input: RunInput,
    options: Partial<CallOptions> & { version: "v1" | "v2" },
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">
  ): AsyncGenerator<StreamEvent> {
    let runLog;
    let hasEncounteredStartEvent = false;

    const config = ensureConfig(options);
    const rootTags = config.tags ?? [];
    const rootMetadata = config.metadata ?? {};
    const rootName = config.runName ?? this.getName();

    const logStreamCallbackHandler = new LogStreamCallbackHandler({
      ...streamOptions,
      autoClose: false,
      _schemaFormat: "streaming_events",
    });

    const rootEventFilter = new _RootEventFilter({
      ...streamOptions,
    });

    const logStream = this._streamLog(input, logStreamCallbackHandler, config);

    for await (const log of logStream) {
      // Fold each patch into the cumulative run log.
      if (!runLog) {
        runLog = RunLog.fromRunLogPatch(log);
      } else {
        runLog = runLog.concat(log);
      }

      if (runLog.state === undefined) {
        throw new Error(
          `Internal error: "streamEvents" state is missing. Please open a bug report.`
        );
      }

      // Yield the start event for the root runnable if it hasn't been seen.
      // The root run is never filtered out
      if (!hasEncounteredStartEvent) {
        hasEncounteredStartEvent = true;
        const state = { ...runLog.state };

        const event: StreamEvent = {
          run_id: state.id,
          event: `on_${state.type}_start`,
          name: rootName,
          tags: rootTags,
          metadata: rootMetadata,
          data: {
            input,
          },
        };

        if (rootEventFilter.includeEvent(event, state.type)) {
          yield event;
        }
      }

      // Child runs appear as "/logs/<name>/..." ops; extract the log names.
      const paths = log.ops
        .filter((op) => op.path.startsWith("/logs/"))
        .map((op) => op.path.split("/")[2]);

      const dedupedPaths = [...new Set(paths)];

      for (const path of dedupedPaths) {
        let eventType;
        let data: StreamEventData = {};

        const logEntry = runLog.state.logs[path];

        // Classify the entry: ended -> "end"; otherwise "stream" if it has
        // pending streamed output, else "start".
        if (logEntry.end_time === undefined) {
          if (logEntry.streamed_output.length > 0) {
            eventType = "stream";
          } else {
            eventType = "start";
          }
        } else {
          eventType = "end";
        }

        if (eventType === "start") {
          // Include the inputs with the start event if they are available.
          // Usually they will NOT be available for components that operate
          // on streams, since those components stream the input and
          // don't know its final value until the end of the stream.
          if (logEntry.inputs !== undefined) {
            data.input = logEntry.inputs;
          }
        } else if (eventType === "end") {
          if (logEntry.inputs !== undefined) {
            data.input = logEntry.inputs;
          }
          data.output = logEntry.final_output;
        } else if (eventType === "stream") {
          const chunkCount = logEntry.streamed_output.length;
          if (chunkCount !== 1) {
            throw new Error(
              `Expected exactly one chunk of streamed output, got ${chunkCount} instead. Encountered in: "${logEntry.name}"`
            );
          }

          data = { chunk: logEntry.streamed_output[0] };
          // Clean up the stream, we don't need it anymore.
          // And this avoids duplicates as well!
          logEntry.streamed_output = [];
        }

        yield {
          event: `on_${logEntry.type}_${eventType}`,
          name: logEntry.name,
          run_id: logEntry.id,
          tags: logEntry.tags,
          metadata: logEntry.metadata,
          data,
        };
      }

      // Finally, we take care of the streaming output from the root chain
      // if there is any.
      const { state } = runLog;
      if (state.streamed_output.length > 0) {
        const chunkCount = state.streamed_output.length;
        if (chunkCount !== 1) {
          throw new Error(
            `Expected exactly one chunk of streamed output, got ${chunkCount} instead. Encountered in: "${state.name}"`
          );
        }

        const data = { chunk: state.streamed_output[0] };
        // Clean up the stream, we don't need it anymore.
        state.streamed_output = [];

        const event = {
          event: `on_${state.type}_stream`,
          run_id: state.id,
          tags: rootTags,
          metadata: rootMetadata,
          name: rootName,
          data,
        };

        if (rootEventFilter.includeEvent(event, state.type)) {
          yield event;
        }
      }
    }

    const state = runLog?.state;
    if (state !== undefined) {
      // Finally, yield the end event for the root runnable.
      const event = {
        event: `on_${state.type}_end`,
        name: rootName,
        run_id: state.id,
        tags: rootTags,
        metadata: rootMetadata,
        data: {
          output: state.final_output,
        },
      };
      if (rootEventFilter.includeEvent(event, state.type)) yield event;
    }
  }

  // Type guard: true when `thing` implements the Runnable interface.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static isRunnable(thing: any): thing is Runnable {
    return isRunnableInterface(thing);
  }

  /**
   * Bind lifecycle listeners to a Runnable, returning a new Runnable.
   * The Run object contains information about the run, including its id,
   * type, input, output, error, startTime, endTime, and any tags or metadata
   * added to the run.
   *
   * @param {Object} params - The object containing the callback functions.
   * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object.
   * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object.
   * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object.
   */
  withListeners({
    onStart,
    onEnd,
    onError,
  }: {
    onStart?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    onEnd?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    onError?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
  }): Runnable<RunInput, RunOutput, CallOptions> {
    // Wrap "this" in a binding whose config factory injects a tracer that
    // dispatches the lifecycle callbacks for every run.
    // eslint-disable-next-line @typescript-eslint/no-use-before-define
    return new RunnableBinding<RunInput, RunOutput, CallOptions>({
      bound: this,
      config: {},
      configFactories: [
        (config) => ({
          callbacks: [
            new RootListenersTracer({
              config,
              onStart,
              onEnd,
              onError,
            }),
          ],
        }),
      ],
    });
  }

  /**
   * Convert a runnable to a tool. Return a new instance of `RunnableToolLike`
   * which contains the runnable, name, description and schema.
   *
   * @template {T extends RunInput = RunInput} RunInput - The input type of the runnable. Should be the same as the `RunInput` type of the runnable.
   *
   * @param fields
   * @param {string | undefined} [fields.name] The name of the tool. If not provided, it will default to the name of the runnable.
   * @param {string | undefined} [fields.description] The description of the tool. Falls back to the description on the Zod schema if not provided, or undefined if neither are provided.
   * @param {z.ZodType<T>} [fields.schema] The Zod schema for the input of the tool. Infers the Zod type from the input type of the runnable.
   * @returns {RunnableToolLike<z.ZodType<T>, RunOutput>} An instance of `RunnableToolLike` which is a runnable that can be used as a tool.
   */
  asTool<T extends RunInput = RunInput>(fields: {
    name?: string;
    description?: string;
    schema: z.ZodType<T>;
  }): RunnableToolLike<z.ZodType<T | ToolCall>, RunOutput> {
    return convertRunnableToTool<T, RunOutput>(this, fields);
  }
}

// Constructor arguments for RunnableBinding and its subclasses.
export type RunnableBindingArgs<
  RunInput,
  RunOutput,
  CallOptions extends RunnableConfig = RunnableConfig
> = {
  // The underlying runnable that calls are delegated to.
  bound: Runnable<RunInput, RunOutput, CallOptions>;
  // Partial call options merged into every invocation.
  kwargs?: Partial<CallOptions>;
  // Base config merged (lowest precedence) into every invocation.
  config: RunnableConfig;
  // Factories that receive the merged config and contribute more config.
  configFactories?: Array<(config: RunnableConfig) => RunnableConfig>;
};

/**
 * A runnable that delegates calls to another runnable with a set of kwargs.
 * @example
 * ```typescript
 * import {
 *   type RunnableConfig,
 *   RunnableLambda,
 * } from "@langchain/core/runnables";
 *
 * const enhanceProfile = (
 *   profile: Record<string, any>,
 *   config?: RunnableConfig
 * ) => {
 *   if (config?.configurable?.role) {
 *     return { ...profile, role: config.configurable.role };
 *   }
 *   return profile;
 * };
 *
 * const runnable = RunnableLambda.from(enhanceProfile);
 *
 * // Bind configuration to the runnable to set the user's role dynamically
 * const adminRunnable = runnable.bind({ configurable: { role: "Admin" } });
 * const userRunnable = runnable.bind({ configurable: { role: "User" } });
 *
 * const result1 = await adminRunnable.invoke({
 *   name: "Alice",
 *   email: "alice@example.com"
 * });
 *
 * // { name: "Alice", email: "alice@example.com", role: "Admin" }
 *
 * const result2 = await userRunnable.invoke({
 *   name: "Bob",
 *   email: "bob@example.com"
 * });
 *
 * // { name: "Bob", email: "bob@example.com", role: "User" }
 * ```
 */
export class RunnableBinding<
  RunInput,
  RunOutput,
  CallOptions extends RunnableConfig = RunnableConfig
> extends Runnable<RunInput, RunOutput, CallOptions> {
  static lc_name() {
    return "RunnableBinding";
  }

  lc_namespace = ["langchain_core", "runnables"];

  lc_serializable = true;

  bound: Runnable<RunInput, RunOutput, CallOptions>;

  config: RunnableConfig;

  kwargs?: Partial<CallOptions>;

  configFactories?: Array<
    (config: RunnableConfig) =>
      RunnableConfig | Promise<RunnableConfig>
  >;

  constructor(fields: RunnableBindingArgs<RunInput, RunOutput, CallOptions>) {
    super(fields);
    this.bound = fields.bound;
    this.kwargs = fields.kwargs;
    this.config = fields.config;
    this.configFactories = fields.configFactories;
  }

  // The binding reports the name of the runnable it wraps.
  getName(suffix?: string | undefined): string {
    return this.bound.getName(suffix);
  }

  // Merge, in increasing precedence: the binding's stored config, then the
  // caller-supplied options, then any config produced by configFactories
  // (factories observe the already-merged config).
  async _mergeConfig(
    ...options: (Partial<CallOptions> | RunnableConfig | undefined)[]
  ): Promise<Partial<CallOptions>> {
    const config = mergeConfigs(this.config, ...options);
    return mergeConfigs(
      config,
      ...(this.configFactories
        ? await Promise.all(
            this.configFactories.map(
              async (configFactory) => await configFactory(config)
            )
          )
        : [])
    );
  }

  // Layer additional kwargs on top of the existing ones; keeps the same
  // concrete subclass via this.constructor.
  bind(
    kwargs: Partial<CallOptions>
  ): RunnableBinding<RunInput, RunOutput, CallOptions> {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return new (this.constructor as any)({
      bound: this.bound,
      kwargs: { ...this.kwargs, ...kwargs },
      config: this.config,
    });
  }

  // Layer additional config on top of the existing one.
  withConfig(
    config: RunnableConfig
  ): Runnable<RunInput, RunOutput, CallOptions> {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return new (this.constructor as any)({
      bound: this.bound,
      kwargs: this.kwargs,
      config: { ...this.config, ...config },
    });
  }

  // Retry is applied to the bound runnable; the binding wrapper is rebuilt
  // around the retrying inner runnable.
  withRetry(fields?: {
    stopAfterAttempt?: number;
    onFailedAttempt?: RunnableRetryFailedAttemptHandler;
  }): RunnableRetry<RunInput, RunOutput, CallOptions> {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return new (this.constructor as any)({
      bound: this.bound.withRetry(fields),
      kwargs: this.kwargs,
      config: this.config,
    });
  }

  async invoke(
    input: RunInput,
    options?: Partial<CallOptions>
  ): Promise<RunOutput> {
    return this.bound.invoke(
      input,
      await this._mergeConfig(ensureConfig(options), this.kwargs)
    );
  }

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions?: false }
  ): Promise<RunOutput[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions: true }
  ): Promise<(RunOutput | Error)[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]> {
    // A per-input options array is merged element-wise; a single options
    // object is merged once and shared across all inputs.
    const mergedOptions = Array.isArray(options)
      ? await Promise.all(
          options.map(async (individualOption) =>
            this._mergeConfig(ensureConfig(individualOption), this.kwargs)
          )
        )
      : await this._mergeConfig(ensureConfig(options), this.kwargs);
    return this.bound.batch(inputs, mergedOptions, batchOptions);
  }

  async *_streamIterator(
    input: RunInput,
    options?: Partial<CallOptions> | undefined
  ) {
    yield* this.bound._streamIterator(
      input,
      await this._mergeConfig(ensureConfig(options), this.kwargs)
    );
  }

  async stream(
    input: RunInput,
    options?: Partial<CallOptions> | undefined
  ): Promise<IterableReadableStream<RunOutput>> {
    return this.bound.stream(
      input,
      await this._mergeConfig(ensureConfig(options), this.kwargs)
    );
  }

  async *transform(
    generator: AsyncGenerator<RunInput>,
    options?: Partial<CallOptions>
  ): AsyncGenerator<RunOutput> {
    yield* this.bound.transform(
      generator,
      await this._mergeConfig(ensureConfig(options), this.kwargs)
    );
  }

  streamEvents(
    input: RunInput,
    options: Partial<CallOptions> & { version: "v1" | "v2" },
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">
  ): IterableReadableStream<StreamEvent>;

  streamEvents(
    input: RunInput,
    options: Partial<CallOptions> & {
      version: "v1" | "v2";
      encoding: "text/event-stream";
    },
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">
  ): IterableReadableStream<Uint8Array>;

  streamEvents(
    input: RunInput,
    options: Partial<CallOptions> & {
      version: "v1" | "v2";
      encoding?: "text/event-stream" | undefined;
    },
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">
  ): IterableReadableStream<StreamEvent | Uint8Array> {
    // Capture "this" for use inside the generator function, where "this"
    // would otherwise be rebound.
    // eslint-disable-next-line @typescript-eslint/no-this-alias
    const outerThis = this;
    const generator = async function* () {
      yield* outerThis.bound.streamEvents(
        input,
        {
          ...(await outerThis._mergeConfig(
            ensureConfig(options),
            outerThis.kwargs
          )),
          // version must survive the merge; restate it explicitly.
          version: options.version,
        },
        streamOptions
      );
    };
    return IterableReadableStream.fromAsyncGenerator(generator());
  }

  // Duck-typed guard: anything with a runnable "bound" property counts.
  static isRunnableBinding(
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    thing: any
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
  ): thing is RunnableBinding<any, any, any> {
    return thing.bound && Runnable.isRunnable(thing.bound);
  }

  /**
   * Bind lifecycle listeners to a Runnable, returning a new Runnable.
   * The Run object contains information about the run, including its id,
   * type, input, output, error, startTime, endTime, and any tags or metadata
   * added to the run.
   *
   * @param {Object} params - The object containing the callback functions.
   * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object.
   * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object.
   * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object.
   */
  withListeners({
    onStart,
    onEnd,
    onError,
  }: {
    onStart?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    onEnd?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    onError?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
  }): Runnable<RunInput, RunOutput, CallOptions> {
    // Preserve the existing binding (bound/kwargs/config) and add a config
    // factory that injects the listener tracer on every call.
    return new RunnableBinding<RunInput, RunOutput, CallOptions>({
      bound: this.bound,
      kwargs: this.kwargs,
      config: this.config,
      configFactories: [
        (config) => ({
          callbacks: [
            new RootListenersTracer({
              config,
              onStart,
              onEnd,
              onError,
            }),
          ],
        }),
      ],
    });
  }
}

/**
 * A runnable that delegates calls to another runnable
 * with each element of the input sequence.
 * @example
 * ```typescript
 * import { RunnableEach, RunnableLambda } from "@langchain/core/runnables";
 *
 * const toUpperCase = (input: string): string => input.toUpperCase();
 * const addGreeting = (input: string): string => `Hello, ${input}!`;
 *
 * const upperCaseLambda = RunnableLambda.from(toUpperCase);
 * const greetingLambda = RunnableLambda.from(addGreeting);
 *
 * const chain = new RunnableEach({
 *   bound: upperCaseLambda.pipe(greetingLambda),
 * });
 *
 * const result = await chain.invoke(["alice", "bob", "carol"])
 *
 * // ["Hello, ALICE!", "Hello, BOB!", "Hello, CAROL!"]
 * ```
 */
export class RunnableEach<
  RunInputItem,
  RunOutputItem,
  CallOptions extends RunnableConfig
> extends Runnable<RunInputItem[], RunOutputItem[], CallOptions> {
  static lc_name() {
    return "RunnableEach";
  }

  lc_serializable = true;

  lc_namespace = ["langchain_core", "runnables"];

  // The runnable applied to each element of the input array.
  bound: Runnable<RunInputItem, RunOutputItem, CallOptions>;

  constructor(fields: {
    bound: Runnable<RunInputItem, RunOutputItem, CallOptions>;
  }) {
    super(fields);
    this.bound = fields.bound;
  }

  /**
   * Binds the runnable with the specified arguments.
   * @param kwargs The arguments to bind the runnable with.
   * @returns A new instance of the `RunnableEach` class that is bound with the specified arguments.
   */
  bind(kwargs: Partial<CallOptions>) {
    return new RunnableEach({
      bound: this.bound.bind(kwargs),
    });
  }

  /**
   * Invokes the runnable with the specified input and configuration.
   * @param input The input to invoke the runnable with.
   * @param config The configuration to invoke the runnable with.
   * @returns A promise that resolves to the output of the runnable.
   */
  async invoke(
    inputs: RunInputItem[],
    config?: Partial<CallOptions>
  ): Promise<RunOutputItem[]> {
    return this._callWithConfig(this._invoke.bind(this), inputs, config);
  }

  /**
   * A helper method that is used to invoke the runnable with the specified input and configuration.
   * @param input The input to invoke the runnable with.
   * @param config The configuration to invoke the runnable with.
   * @returns A promise that resolves to the output of the runnable.
   */
  protected async _invoke(
    inputs: RunInputItem[],
    config?: Partial<CallOptions>,
    runManager?: CallbackManagerForChainRun
  ): Promise<RunOutputItem[]> {
    // Each element is handled by the bound runnable via batch, with
    // callbacks scoped as children of this run.
    return this.bound.batch(
      inputs,
      patchConfig(config, { callbacks: runManager?.getChild() })
    );
  }

  /**
   * Bind lifecycle listeners to a Runnable, returning a new Runnable.
   * The Run object contains information about the run, including its id,
   * type, input, output, error, startTime, endTime, and any tags or metadata
   * added to the run.
   *
   * @param {Object} params - The object containing the callback functions.
   * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object.
   * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object.
   * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object.
   */
  withListeners({
    onStart,
    onEnd,
    onError,
  }: {
    onStart?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    onEnd?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    onError?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
  }): Runnable<any, any, CallOptions> {
    // Listeners are attached to the per-element bound runnable, so they
    // fire once per element, not once per list invocation.
    return new RunnableEach<RunInputItem, RunOutputItem, CallOptions>({
      bound: this.bound.withListeners({ onStart, onEnd, onError }),
    });
  }
}

/**
 * Base class for runnables that can be retried a
 * specified number of times.
 * @example
 * ```typescript
 * import {
 *   RunnableLambda,
 *   RunnableRetry,
 * } from "@langchain/core/runnables";
 *
 * // Simulate an API call that fails
 * const simulateApiCall = (input: string): string => {
 *   console.log(`Attempting API call with input: ${input}`);
 *   throw new Error("API call failed due to network issue");
 * };
 *
 * const apiCallLambda = RunnableLambda.from(simulateApiCall);
 *
 * // Apply retry logic using the .withRetry() method
 * const apiCallWithRetry = apiCallLambda.withRetry({ stopAfterAttempt: 3 });
 *
 * // Alternatively, create a RunnableRetry instance manually
 * const manualRetry = new RunnableRetry({
 *   bound: apiCallLambda,
 *   maxAttemptNumber: 3,
 *   config: {},
 * });
 *
 * // Example invocation using the .withRetry() method
 * const res = await apiCallWithRetry
 *   .invoke("Request 1")
 *   .catch((error) => {
 *     console.error("Failed after multiple retries:", error.message);
 *   });
 *
 * // Example invocation using the manual retry instance
 * const res2 = await manualRetry
 *   .invoke("Request 2")
 *   .catch((error) => {
 *     console.error("Failed after multiple retries:", error.message);
 *   });
 * ```
 */
export class RunnableRetry<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunInput = any,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunOutput = any,
  CallOptions extends RunnableConfig = RunnableConfig
> extends RunnableBinding<RunInput, RunOutput, CallOptions> {
  static lc_name() {
    return "RunnableRetry";
  }

  lc_namespace = ["langchain_core", "runnables"];

  // Total attempt budget (first try + retries).
  protected maxAttemptNumber = 3;

  // Invoked after each failed attempt; no-op by default.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  onFailedAttempt: RunnableRetryFailedAttemptHandler = () => {};

  constructor(
    fields: RunnableBindingArgs<RunInput, RunOutput, CallOptions> & {
      maxAttemptNumber?: number;
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      onFailedAttempt?: RunnableRetryFailedAttemptHandler;
    }
  ) {
    super(fields);
    this.maxAttemptNumber = fields.maxAttemptNumber ?? this.maxAttemptNumber;
    this.onFailedAttempt = fields.onFailedAttempt ?? this.onFailedAttempt;
  }

  // Tag retry attempts (after the first) so traces can distinguish them.
  _patchConfigForRetry(
    attempt: number,
    config?: Partial<CallOptions>,
    runManager?: CallbackManagerForChainRun
  ): Partial<CallOptions> {
    const tag = attempt > 1 ? `retry:attempt:${attempt}` : undefined;
    return patchConfig(config, { callbacks: runManager?.getChild(tag) });
  }

  protected async _invoke(
    input: RunInput,
    config?: CallOptions,
    runManager?: CallbackManagerForChainRun
  ): Promise<RunOutput> {
    // pRetry drives the retry loop; retries = attempts minus the first try.
    return pRetry(
      (attemptNumber: number) =>
        super.invoke(
          input,
          this._patchConfigForRetry(attemptNumber, config, runManager)
        ),
      {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        onFailedAttempt: (error: any) => this.onFailedAttempt(error, input),
        retries: Math.max(this.maxAttemptNumber - 1, 0),
        randomize: true,
      }
    );
  }

  /**
   * Method that invokes the runnable with the specified input, run manager,
   * and config. It handles the retry logic by catching any errors and
   * recursively invoking itself with the updated config for the next retry
   * attempt.
   * @param input The input for the runnable.
   * @param runManager The run manager for the runnable.
   * @param config The config for the runnable.
   * @returns A promise that resolves to the output of the runnable.
   */
  async invoke(input: RunInput, config?: CallOptions): Promise<RunOutput> {
    return this._callWithConfig(this._invoke.bind(this), input, config);
  }

  // Batch with retries: only inputs that failed (or never ran) are retried
  // on subsequent attempts; successful results are kept from earlier rounds.
  async _batch<ReturnExceptions extends boolean = false>(
    inputs: RunInput[],
    configs?: RunnableConfig[],
    runManagers?: (CallbackManagerForChainRun | undefined)[],
    batchOptions?: RunnableBatchOptions
  ) {
    // Results keyed by input index; Error entries mark inputs to retry.
    const resultsMap: Record<string, RunOutput | Error> = {};
    try {
      await pRetry(
        async (attemptNumber: number) => {
          // Determine which inputs still need a (re)run.
          const remainingIndexes = inputs
            .map((_, i) => i)
            .filter(
              (i) =>
                resultsMap[i.toString()] === undefined ||
                // eslint-disable-next-line no-instanceof/no-instanceof
                resultsMap[i.toString()] instanceof Error
            );
          const remainingInputs = remainingIndexes.map((i) => inputs[i]);
          const patchedConfigs = remainingIndexes.map((i) =>
            this._patchConfigForRetry(
              attemptNumber,
              configs?.[i] as CallOptions,
              runManagers?.[i]
            )
          );
          // Always collect exceptions here so one failure doesn't abort the
          // whole round; failures are rethrown below to trigger a retry.
          const results = await super.batch(remainingInputs, patchedConfigs, {
            ...batchOptions,
            returnExceptions: true,
          });
          let firstException;
          for (let i = 0; i < results.length; i += 1) {
            const result = results[i];
            const resultMapIndex = remainingIndexes[i];
            // eslint-disable-next-line no-instanceof/no-instanceof
            if (result instanceof Error) {
              if (firstException === undefined) {
                firstException = result;
                // Stash the failing input so onFailedAttempt can report it.
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                (firstException as any).input = remainingInputs[i];
              }
            }
            resultsMap[resultMapIndex.toString()] = result;
          }
          if (firstException) {
            throw firstException;
          }
          return results;
        },
        {
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          onFailedAttempt: (error: any) =>
            this.onFailedAttempt(error, error.input),
          retries: Math.max(this.maxAttemptNumber - 1, 0),
          randomize: true,
        }
      );
    } catch (e) {
      // After exhausting retries: rethrow unless the caller asked for
      // exceptions to be returned in-band.
      if (batchOptions?.returnExceptions !== true) {
        throw e;
      }
    }
    // Return results in the original input order.
    return Object.keys(resultsMap)
      .sort((a, b) => parseInt(a, 10) - parseInt(b, 10))
      .map(
        (key) => resultsMap[parseInt(key, 10)]
      ) as ReturnExceptions extends false ? RunOutput[] : (RunOutput | Error)[];
  }

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions?: false }
  ): Promise<RunOutput[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions: true }
  ): Promise<(RunOutput | Error)[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]> {
    return this._batchWithConfig(
      this._batch.bind(this),
      inputs,
      options,
      batchOptions
    );
  }
}

// Constructor arguments for RunnableSequence.
export type RunnableSequenceFields<RunInput, RunOutput> = {
  first: Runnable<RunInput>;
  middle?: Runnable[];
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  last: Runnable<any, RunOutput>;
  name?: string;
  // When true, child runs are not tagged with `seq:step:N`.
  omitSequenceTags?: boolean;
};

/**
 * A sequence of runnables, where the output of each is the input of the next.
 * @example
 * ```typescript
 * const promptTemplate = PromptTemplate.fromTemplate(
 *   "Tell me a joke about {topic}",
 * );
 * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({})]);
 * const result = await chain.invoke({ topic: "bears" });
 * ```
 */
export class RunnableSequence<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunInput = any,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunOutput = any
> extends Runnable<RunInput, RunOutput> {
  static lc_name() {
    return "RunnableSequence";
  }

  protected first: Runnable<RunInput>;

  protected middle: Runnable[] = [];

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  protected last: Runnable<any, RunOutput>;

  omitSequenceTags = false;

  lc_serializable = true;

  lc_namespace = ["langchain_core", "runnables"];

  constructor(fields: RunnableSequenceFields<RunInput, RunOutput>) {
    super(fields);
    this.first = fields.first;
    this.middle = fields.middle ?? this.middle;
    this.last = fields.last;
    this.name = fields.name;
    this.omitSequenceTags = fields.omitSequenceTags ?? this.omitSequenceTags;
  }

  // All steps in execution order: first, middle..., last.
  get steps() {
    return [this.first, ...this.middle, this.last];
  }

  async invoke(input: RunInput, options?: RunnableConfig): Promise<RunOutput> {
    const config = ensureConfig(options);
    const callbackManager_ = await getCallbackManagerForConfig(config);
    const runManager = await callbackManager_?.handleChainStart(
      this.toJSON(),
      _coerceToDict(input, "input"),
      config.runId,
      undefined,
      undefined,
      undefined,
      config?.runName
    );
    // runId applies only to this root run; child steps get their own ids.
    delete config.runId;
    let nextStepInput = input;
    let finalOutput: RunOutput;
    try {
      const initialSteps = [this.first, ...this.middle];
      for (let i = 0; i < initialSteps.length; i += 1) {
        const step = initialSteps[i];
        const promise = step.invoke(
          nextStepInput,
          patchConfig(config, {
            callbacks: runManager?.getChild(
              this.omitSequenceTags ? undefined : `seq:step:${i + 1}`
            ),
          })
        );
        // Allow cancellation between steps via the abort signal.
        nextStepInput = await raceWithSignal(promise, options?.signal);
      }
      // TypeScript can't detect that the last output of the sequence returns RunOutput, so call it out of the loop here
      if (options?.signal?.aborted) {
        throw new Error("Aborted");
      }
      finalOutput = await this.last.invoke(
        nextStepInput,
        patchConfig(config, {
          callbacks: runManager?.getChild(
            this.omitSequenceTags ? undefined : `seq:step:${this.steps.length}`
          ),
        })
      );
    } catch (e) {
      await runManager?.handleChainError(e);
      throw e;
    }
    await runManager?.handleChainEnd(_coerceToDict(finalOutput, "output"));
    return finalOutput;
  }

  async batch(
    inputs: RunInput[],
    options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions?: false }
  ): Promise<RunOutput[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions: true }
  ): Promise<(RunOutput | Error)[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]> {
    const configList = this._getOptionsList(options ??
      {}, inputs.length);
    const callbackManagers = await Promise.all(
      configList.map(getCallbackManagerForConfig)
    );
    const runManagers = await Promise.all(
      callbackManagers.map(async (callbackManager, i) => {
        const handleStartRes = await callbackManager?.handleChainStart(
          this.toJSON(),
          _coerceToDict(inputs[i], "input"),
          configList[i].runId,
          undefined,
          undefined,
          undefined,
          configList[i].runName
        );
        // runId applies only to the root run of each input.
        delete configList[i].runId;
        return handleStartRes;
      })
    );
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let nextStepInputs: any = inputs;
    try {
      // Run each step over the whole batch before moving to the next step.
      for (let i = 0; i < this.steps.length; i += 1) {
        const step = this.steps[i];
        const promise = step.batch(
          nextStepInputs,
          runManagers.map((runManager, j) => {
            const childRunManager = runManager?.getChild(
              this.omitSequenceTags ? undefined : `seq:step:${i + 1}`
            );
            return patchConfig(configList[j], { callbacks: childRunManager });
          }),
          batchOptions
        );
        // NOTE(review): only the first config's signal is raced here — the
        // whole batch is cancelled via configList[0]?.signal.
        nextStepInputs = await raceWithSignal(promise, configList[0]?.signal);
      }
    } catch (e) {
      await Promise.all(
        runManagers.map((runManager) => runManager?.handleChainError(e))
      );
      throw e;
    }
    await Promise.all(
      runManagers.map((runManager) =>
        runManager?.handleChainEnd(_coerceToDict(nextStepInputs, "output"))
      )
    );
    return nextStepInputs;
  }

  async *_streamIterator(
    input: RunInput,
    options?: RunnableConfig
  ): AsyncGenerator<RunOutput> {
    const callbackManager_ = await getCallbackManagerForConfig(options);
    const { runId, ...otherOptions } = options ?? {};
    const runManager = await callbackManager_?.handleChainStart(
      this.toJSON(),
      _coerceToDict(input, "input"),
      runId,
      undefined,
      undefined,
      undefined,
      otherOptions?.runName
    );
    const steps = [this.first, ...this.middle, this.last];
    // Track a concatenated final output for the end callback; disabled as
    // soon as chunks turn out not to support concat.
    let concatSupported = true;
    let finalOutput;
    async function* inputGenerator() {
      yield input;
    }
    try {
      // Chain each step's transform onto the previous step's output stream.
      let finalGenerator = steps[0].transform(
        inputGenerator(),
        patchConfig(otherOptions, {
          callbacks: runManager?.getChild(
            this.omitSequenceTags ? undefined : `seq:step:1`
          ),
        })
      );
      for (let i = 1; i < steps.length; i += 1) {
        const step = steps[i];
        finalGenerator = await step.transform(
          finalGenerator,
          patchConfig(otherOptions, {
            callbacks: runManager?.getChild(
              this.omitSequenceTags ? undefined : `seq:step:${i + 1}`
            ),
          })
        );
      }
      for await (const chunk of finalGenerator) {
        options?.signal?.throwIfAborted();
        yield chunk;
        if (concatSupported) {
          if (finalOutput === undefined) {
            finalOutput = chunk;
          } else {
            try {
              // eslint-disable-next-line @typescript-eslint/no-explicit-any
              finalOutput = concat(finalOutput, chunk as any);
            } catch (e) {
              finalOutput = undefined;
              concatSupported = false;
            }
          }
        }
      }
    } catch (e) {
      await runManager?.handleChainError(e);
      throw e;
    }
    await runManager?.handleChainEnd(_coerceToDict(finalOutput, "output"));
  }

  getGraph(config?: RunnableConfig): Graph {
    const graph = new Graph();
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let currentLastNode: any = null;
    this.steps.forEach((step, index) => {
      const stepGraph = step.getGraph(config);
      // Trim boundary nodes so consecutive step graphs join cleanly.
      if (index !== 0) {
        stepGraph.trimFirstNode();
      }
      if (index !== this.steps.length - 1) {
        stepGraph.trimLastNode();
      }
      graph.extend(stepGraph);
      const stepFirstNode = stepGraph.firstNode();
      if (!stepFirstNode) {
        throw new Error(`Runnable ${step} has no first node`);
      }
      if (currentLastNode) {
        graph.addEdge(currentLastNode, stepFirstNode);
      }
      currentLastNode = stepGraph.lastNode();
    });
    return graph;
  }

  pipe<NewRunOutput>(
    coerceable: RunnableLike<RunOutput, NewRunOutput>
  ): RunnableSequence<RunInput, Exclude<NewRunOutput, Error>> {
    // Piping one sequence into another flattens the steps rather than
    // nesting sequences.
    if (RunnableSequence.isRunnableSequence(coerceable)) {
      return new RunnableSequence({
        first: this.first,
        middle: this.middle.concat([
          this.last,
          coerceable.first,
          ...coerceable.middle,
        ]),
        last: coerceable.last,
        name: this.name ?? coerceable.name,
      });
    } else {
      return new RunnableSequence({
        first: this.first,
        middle: [...this.middle, this.last],
        last: _coerceToRunnable(coerceable),
        name: this.name,
      });
    }
  }

  // Duck-typed guard: a runnable with an array "middle" property.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static isRunnableSequence(thing: any): thing is RunnableSequence {
    return Array.isArray(thing.middle) && Runnable.isRunnable(thing);
  }

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static from<RunInput = any, RunOutput = any>(
    [first, ...runnables]: [
      RunnableLike<RunInput>,
      ...RunnableLike[],
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      RunnableLike<any, RunOutput>
    ],
    nameOrFields?:
      | string
      | Omit<
          RunnableSequenceFields<RunInput, RunOutput>,
          "first" | "middle" | "last"
        >
  ) {
    // A bare string is shorthand for { name }.
    let extra: Record<string, unknown> = {};
    if (typeof nameOrFields === "string") {
      extra.name = nameOrFields;
    } else if (nameOrFields !== undefined) {
      extra = nameOrFields;
    }
    return new RunnableSequence<RunInput, Exclude<RunOutput, Error>>({
      ...extra,
      first: _coerceToRunnable(first),
      middle: runnables.slice(0, -1).map(_coerceToRunnable),
      last: _coerceToRunnable(runnables[runnables.length - 1]),
    });
  }
}

/**
 * A runnable that runs a mapping of runnables in parallel,
 * and returns a mapping of their outputs.
* @example * ```typescript * const mapChain = RunnableMap.from({ * joke: PromptTemplate.fromTemplate("Tell me a joke about {topic}").pipe( * new ChatAnthropic({}), * ), * poem: PromptTemplate.fromTemplate("write a 2-line poem about {topic}").pipe( * new ChatAnthropic({}), * ), * }); * const result = await mapChain.invoke({ topic: "bear" }); * ``` */ export class RunnableMap< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> > extends Runnable<RunInput, RunOutput> { static lc_name() { return "RunnableMap"; } lc_namespace = ["langchain_core", "runnables"]; lc_serializable = true; protected steps: Record<string, Runnable<RunInput>>; public getStepsKeys(): string[] { return Object.keys(this.steps); } constructor(fields: { steps: RunnableMapLike<RunInput, RunOutput> }) { super(fields); this.steps = {}; for (const [key, value] of Object.entries(fields.steps)) { this.steps[key] = _coerceToRunnable(value); } } static from< RunInput, // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( steps: RunnableMapLike<RunInput, RunOutput> ): RunnableMap<RunInput, RunOutput> { return new RunnableMap<RunInput, RunOutput>({ steps }); } async invoke( input: RunInput, options?: Partial<RunnableConfig> ): Promise<RunOutput> { const config = ensureConfig(options); const callbackManager_ = await getCallbackManagerForConfig(config); const runManager = await callbackManager_?.handleChainStart( this.toJSON(), { input, }, config.runId, undefined, undefined, undefined, config?.runName ); delete config.runId; // eslint-disable-next-line @typescript-eslint/no-explicit-any const output: Record<string, any> = {}; try { const promises = Object.entries(this.steps).map( async ([key, runnable]) => { output[key] = await runnable.invoke( input, patchConfig(config, { callbacks: 
runManager?.getChild(`map:key:${key}`), }) ); } ); await raceWithSignal(Promise.all(promises), options?.signal); } catch (e) { await runManager?.handleChainError(e); throw e; } await runManager?.handleChainEnd(output); return output as RunOutput; } async *_transform( generator: AsyncGenerator<RunInput>, runManager?: CallbackManagerForChainRun, options?: Partial<RunnableConfig> ): AsyncGenerator<RunOutput> { // shallow copy steps to ignore changes while iterating const steps = { ...this.steps }; // each step gets a copy of the input iterator const inputCopies = atee(generator, Object.keys(steps).length); // start the first iteration of each output iterator const tasks = new Map( Object.entries(steps).map(([key, runnable], i) => { const gen = runnable.transform( inputCopies[i], patchConfig(options, { callbacks: runManager?.getChild(`map:key:${key}`), }) ); return [key, gen.next().then((result) => ({ key, gen, result }))]; }) ); // yield chunks as they become available, // starting new iterations as needed, // until all iterators are done while (tasks.size) { const promise = Promise.race(tasks.values()); const { key, result, gen } = await raceWithSignal( promise, options?.signal ); tasks.delete(key); if (!result.done) { yield { [key]: result.value } as unknown as RunOutput; tasks.set( key, gen.next().then((result) => ({ key, gen, result })) ); } } } transform( generator: AsyncGenerator<RunInput>, options?: Partial<RunnableConfig> ): AsyncGenerator<RunOutput> { return this._transformStreamWithConfig( generator, this._transform.bind(this), options ); } async stream( input: RunInput, options?: Partial<RunnableConfig> ): Promise<IterableReadableStream<RunOutput>> { async function* generator() { yield input; } const config = ensureConfig(options); const wrappedGenerator = new AsyncGeneratorWithSetup({ generator: this.transform(generator(), config), config, }); await wrappedGenerator.setup; return IterableReadableStream.fromAsyncGenerator(wrappedGenerator); } } // 
eslint-disable-next-line @typescript-eslint/no-explicit-any
type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>;

/**
 * A runnable that wraps a traced LangSmith function.
 */
export class RunnableTraceable<RunInput, RunOutput> extends Runnable<
  RunInput,
  RunOutput
> {
  lc_serializable = false;

  lc_namespace = ["langchain_core", "runnables"];

  // The traceable()-wrapped function this runnable delegates to.
  protected func: AnyTraceableFunction;

  constructor(fields: { func: AnyTraceableFunction }) {
    super(fields);

    if (!isTraceableFunction(fields.func)) {
      throw new Error(
        "RunnableTraceable requires a function that is wrapped in traceable higher-order function"
      );
    }

    this.func = fields.func;
  }

  /**
   * Runs the wrapped function with the merged config's callbacks; aborts
   * early if the config's signal fires.
   */
  async invoke(input: RunInput, options?: Partial<RunnableConfig>) {
    const [config] = this._getOptionsList(options ?? {}, 1);
    const callbacks = await getCallbackManagerForConfig(config);
    const promise = this.func(
      patchConfig(config, { callbacks }),
      input
    ) as Promise<RunOutput>;

    return raceWithSignal(promise, config?.signal);
  }

  /**
   * Streams the invoke() result: items of an (async) iterable/iterator are
   * yielded one by one; any other result is yielded as a single value.
   */
  async *_streamIterator(
    input: RunInput,
    options?: Partial<RunnableConfig>
  ): AsyncGenerator<RunOutput> {
    const [config] = this._getOptionsList(options ?? {}, 1);
    const result = await this.invoke(input, options);

    if (isAsyncIterable(result)) {
      for await (const item of result) {
        config?.signal?.throwIfAborted();
        yield item as RunOutput;
      }
      return;
    }

    if (isIterator(result)) {
      while (true) {
        config?.signal?.throwIfAborted();
        const state: IteratorResult<unknown> = result.next();
        if (state.done) break;
        yield state.value as RunOutput;
      }
      return;
    }

    yield result;
  }

  static from(func: AnyTraceableFunction) {
    return new RunnableTraceable({ func });
  }
}

/**
 * Throws if `func` is traceable()-wrapped; otherwise narrows it to a plain
 * RunnableFunc for the caller.
 */
function assertNonTraceableFunction<
  RunInput,
  RunOutput,
  CallOptions extends RunnableConfig = RunnableConfig
>(
  func:
    | RunnableFunc<
        RunInput,
        RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
        CallOptions
      >
    | TraceableFunction<
        RunnableFunc<
          RunInput,
          RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
          CallOptions
        >
      >
): asserts func is RunnableFunc<
  RunInput,
  RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
  CallOptions
> {
  if (isTraceableFunction(func)) {
    throw new Error(
      "RunnableLambda requires a function that is not wrapped in traceable higher-order function. This shouldn't happen."
    );
  }
}

/**
 * A runnable that wraps an arbitrary function that takes a single argument.
 * @example
 * ```typescript
 * import { RunnableLambda } from "@langchain/core/runnables";
 *
 * const add = (input: { x: number; y: number }) => input.x + input.y;
 *
 * const multiply = (input: { value: number; multiplier: number }) =>
 *   input.value * input.multiplier;
 *
 * // Create runnables for the functions
 * const addLambda = RunnableLambda.from(add);
 * const multiplyLambda = RunnableLambda.from(multiply);
 *
 * // Chain the lambdas for a mathematical operation
 * const chainedLambda = addLambda.pipe((result) =>
 *   multiplyLambda.invoke({ value: result, multiplier: 2 })
 * );
 *
 * // Example invocation of the chainedLambda
 * const result = await chainedLambda.invoke({ x: 2, y: 3 });
 *
 * // Will log "10" (since (2 + 3) * 2 = 10)
 * ```
 */
export class RunnableLambda<
  RunInput,
  RunOutput,
  CallOptions extends RunnableConfig = RunnableConfig
> extends Runnable<RunInput, RunOutput, CallOptions> {
  static lc_name() {
    return "RunnableLambda";
  }

  lc_namespace = ["langchain_core", "runnables"];

  // The user-supplied function; may return a plain value, a Runnable, or
  // an (async) iterator of chunks (handled specially in _invoke/_transform).
  protected func: RunnableFunc<
    RunInput,
    RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
    CallOptions
  >;

  constructor(fields: {
    func:
      | RunnableFunc<
          RunInput,
          RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
          CallOptions
        >
      | TraceableFunction<
          RunnableFunc<
            RunInput,
            RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
            CallOptions
          >
        >;
  }) {
    if (isTraceableFunction(fields.func)) {
      // traceable()-wrapped functions are handled by RunnableTraceable;
      // returning from the constructor substitutes that instance.
      // eslint-disable-next-line no-constructor-return
      return RunnableTraceable.from(fields.func) as unknown as RunnableLambda<
        RunInput,
        RunOutput,
        CallOptions
      >;
    }

    super(fields);

    assertNonTraceableFunction(fields.func);
    this.func = fields.func;
  }

  static from<
    RunInput,
    RunOutput,
    CallOptions extends RunnableConfig = RunnableConfig
  >(
    func: RunnableFunc<
      RunInput,
      RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
      CallOptions
    >
  ): RunnableLambda<RunInput, RunOutput, CallOptions>;

  static from<
    RunInput,
    RunOutput,
    CallOptions extends RunnableConfig = RunnableConfig
  >(
    func: TraceableFunction<
      RunnableFunc<
        RunInput,
        RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
        CallOptions
      >
    >
  ): RunnableLambda<RunInput, RunOutput, CallOptions>;

  static from<
    RunInput,
    RunOutput,
    CallOptions extends RunnableConfig = RunnableConfig
  >(
    func:
      | RunnableFunc<
          RunInput,
          RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
          CallOptions
        >
      | TraceableFunction<
          RunnableFunc<
            RunInput,
            RunOutput | Runnable<RunInput, RunOutput, CallOptions>,
            CallOptions
          >
        >
  ): RunnableLambda<RunInput, RunOutput, CallOptions> {
    return new RunnableLambda({
      func,
    });
  }

  /**
   * Runs `func` inside the async-local config context. If it returns a
   * Runnable, that runnable is invoked (with the recursion limit
   * decremented); if it returns an (async) iterator, the chunks are folded
   * into a single output via `concat` where possible.
   */
  async _invoke(
    input: RunInput,
    config?: Partial<CallOptions>,
    runManager?: CallbackManagerForChainRun
  ) {
    return new Promise<RunOutput>((resolve, reject) => {
      const childConfig = patchConfig(config, {
        callbacks: runManager?.getChild(),
        recursionLimit: (config?.recursionLimit ?? DEFAULT_RECURSION_LIMIT) - 1,
      });
      void AsyncLocalStorageProviderSingleton.runWithConfig(
        pickRunnableConfigKeys(childConfig),
        async () => {
          try {
            let output = await this.func(input, {
              ...childConfig,
            });
            if (output && Runnable.isRunnable(output)) {
              if (config?.recursionLimit === 0) {
                throw new Error("Recursion limit reached.");
              }
              output = await output.invoke(input, {
                ...childConfig,
                recursionLimit:
                  (childConfig.recursionLimit ?? DEFAULT_RECURSION_LIMIT) - 1,
              });
            } else if (isAsyncIterable(output)) {
              let finalOutput: RunOutput | undefined;
              for await (const chunk of consumeAsyncIterableInContext(
                childConfig,
                output
              )) {
                config?.signal?.throwIfAborted();
                if (finalOutput === undefined) {
                  finalOutput = chunk as RunOutput;
                } else {
                  // Make a best effort to gather, for any type that supports concat.
                  try {
                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
                    finalOutput = concat(finalOutput, chunk as any);
                  } catch (e) {
                    // Chunk type doesn't support concat: keep the last chunk.
                    finalOutput = chunk as RunOutput;
                  }
                }
              }
              output = finalOutput as typeof output;
            } else if (isIterableIterator(output)) {
              let finalOutput: RunOutput | undefined;
              for (const chunk of consumeIteratorInContext(
                childConfig,
                output
              )) {
                config?.signal?.throwIfAborted();
                if (finalOutput === undefined) {
                  finalOutput = chunk as RunOutput;
                } else {
                  // Make a best effort to gather, for any type that supports concat.
                  try {
                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
                    finalOutput = concat(finalOutput, chunk as any);
                  } catch (e) {
                    // Chunk type doesn't support concat: keep the last chunk.
                    finalOutput = chunk as RunOutput;
                  }
                }
              }
              output = finalOutput as typeof output;
            }
            resolve(output);
          } catch (e) {
            reject(e);
          }
        }
      );
    });
  }

  async invoke(
    input: RunInput,
    options?: Partial<CallOptions>
  ): Promise<RunOutput> {
    return this._callWithConfig(this._invoke.bind(this), input, options);
  }

  /**
   * Streaming transform: folds the input stream into a single value
   * (concat-ing chunks where possible), runs `func` once, then streams the
   * result — chunk by chunk when `func` returned a Runnable or an (async)
   * iterator, otherwise as one value.
   */
  async *_transform(
    generator: AsyncGenerator<RunInput>,
    runManager?: CallbackManagerForChainRun,
    config?: Partial<CallOptions>
  ): AsyncGenerator<RunOutput> {
    let finalChunk: RunInput | undefined;
    for await (const chunk of generator) {
      if (finalChunk === undefined) {
        finalChunk = chunk;
      } else {
        // Make a best effort to gather, for any type that supports concat.
        try {
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          finalChunk = concat(finalChunk, chunk as any);
        } catch (e) {
          finalChunk = chunk;
        }
      }
    }

    const childConfig = patchConfig(config, {
      callbacks: runManager?.getChild(),
      recursionLimit: (config?.recursionLimit ?? DEFAULT_RECURSION_LIMIT) - 1,
    });
    const output = await new Promise<RunOutput | Runnable>(
      (resolve, reject) => {
        void AsyncLocalStorageProviderSingleton.runWithConfig(
          pickRunnableConfigKeys(childConfig),
          async () => {
            try {
              const res = await this.func(finalChunk as RunInput, {
                ...childConfig,
                config: childConfig,
              });
              resolve(res);
            } catch (e) {
              reject(e);
            }
          }
        );
      }
    );

    if (output && Runnable.isRunnable(output)) {
      if (config?.recursionLimit === 0) {
        throw new Error("Recursion limit reached.");
      }
      const stream = await output.stream(finalChunk as RunInput, childConfig);
      for await (const chunk of stream) {
        yield chunk;
      }
    } else if (isAsyncIterable(output)) {
      for await (const chunk of consumeAsyncIterableInContext(
        childConfig,
        output
      )) {
        config?.signal?.throwIfAborted();
        yield chunk as RunOutput;
      }
    } else if (isIterableIterator(output)) {
      for (const chunk of consumeIteratorInContext(childConfig, output)) {
        config?.signal?.throwIfAborted();
        yield chunk as RunOutput;
      }
    } else {
      yield output;
    }
  }

  transform(
    generator: AsyncGenerator<RunInput>,
    options?: Partial<CallOptions>
  ): AsyncGenerator<RunOutput> {
    return this._transformStreamWithConfig(
      generator,
      this._transform.bind(this),
      options
    );
  }

  async stream(
    input: RunInput,
    options?: Partial<CallOptions>
  ): Promise<IterableReadableStream<RunOutput>> {
    // Wrap the single input value as a one-element async generator.
    async function* generator() {
      yield input;
    }
    const config = ensureConfig(options);
    const wrappedGenerator = new AsyncGeneratorWithSetup({
      generator: this.transform(generator(), config),
      config,
    });
    await wrappedGenerator.setup;
    return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);
  }
}

/**
 * A runnable that runs a mapping of runnables in parallel,
 * and returns a mapping of their outputs.
 * @example
 * ```typescript
 * import {
 *   RunnableLambda,
 *   RunnableParallel,
 * } from "@langchain/core/runnables";
 *
 * const addYears = (age: number): number => age + 5;
 * const yearsToFifty = (age: number): number => 50 - age;
 * const yearsToHundred = (age: number): number => 100 - age;
 *
 * const addYearsLambda = RunnableLambda.from(addYears);
 * const milestoneFiftyLambda = RunnableLambda.from(yearsToFifty);
 * const milestoneHundredLambda = RunnableLambda.from(yearsToHundred);
 *
 * // Pipe will coerce objects into RunnableParallel by default, but we
 * // explicitly instantiate one here to demonstrate
 * const sequence = addYearsLambda.pipe(
 *   RunnableParallel.from({
 *     years_to_fifty: milestoneFiftyLambda,
 *     years_to_hundred: milestoneHundredLambda,
 *   })
 * );
 *
 * // Invoke the sequence with a single age input
 * const res = sequence.invoke(25);
 *
 * // { years_to_fifty: 25, years_to_hundred: 75 }
 * ```
 */
// Pure alias of RunnableMap: runs each entry in parallel over the same input.
export class RunnableParallel<RunInput> extends RunnableMap<RunInput> {}

/**
 * A Runnable that can fallback to other Runnables if it fails.
 * External APIs (e.g., APIs for a language model) may at times experience
 * degraded performance or even downtime.
 *
 * In these cases, it can be useful to have a fallback Runnable that can be
 * used in place of the original Runnable (e.g., fallback to another LLM provider).
 *
 * Fallbacks can be defined at the level of a single Runnable, or at the level
 * of a chain of Runnables. Fallbacks are tried in order until one succeeds or
 * all fail.
 *
 * While you can instantiate a `RunnableWithFallbacks` directly, it is usually
 * more convenient to use the `withFallbacks` method on an existing Runnable.
 *
 * When streaming, fallbacks will only be called on failures during the initial
 * stream creation. Errors that occur after a stream starts will not fallback
 * to the next Runnable.
 *
 * @example
 * ```typescript
 * import {
 *   RunnableLambda,
 *   RunnableWithFallbacks,
 * } from "@langchain/core/runnables";
 *
 * const primaryOperation = (input: string): string => {
 *   if (input !== "safe") {
 *     throw new Error("Primary operation failed due to unsafe input");
 *   }
 *   return `Processed: ${input}`;
 * };
 *
 * // Define a fallback operation that processes the input differently
 * const fallbackOperation = (input: string): string =>
 *   `Fallback processed: ${input}`;
 *
 * const primaryRunnable = RunnableLambda.from(primaryOperation);
 * const fallbackRunnable = RunnableLambda.from(fallbackOperation);
 *
 * // Apply the fallback logic using the .withFallbacks() method
 * const runnableWithFallback = primaryRunnable.withFallbacks([fallbackRunnable]);
 *
 * // Alternatively, create a RunnableWithFallbacks instance manually
 * const manualFallbackChain = new RunnableWithFallbacks({
 *   runnable: primaryRunnable,
 *   fallbacks: [fallbackRunnable],
 * });
 *
 * // Example invocation using .withFallbacks()
 * const res = await runnableWithFallback
 *   .invoke("unsafe input")
 *   .catch((error) => {
 *     console.error("Failed after all attempts:", error.message);
 *   });
 *
 * // "Fallback processed: unsafe input"
 *
 * // Example invocation using manual instantiation
 * const res = await manualFallbackChain
 *   .invoke("safe")
 *   .catch((error) => {
 *     console.error("Failed after all attempts:", error.message);
 *   });
 *
 * // "Processed: safe"
 * ```
 */
export class RunnableWithFallbacks<RunInput, RunOutput> extends Runnable<
  RunInput,
  RunOutput
> {
  static lc_name() {
    return "RunnableWithFallbacks";
  }

  lc_namespace = ["langchain_core", "runnables"];

  lc_serializable = true;

  // Primary runnable; always tried first.
  runnable: Runnable<RunInput, RunOutput>;

  // Tried in order when the primary (or an earlier fallback) throws.
  fallbacks: Runnable<RunInput, RunOutput>[];

  constructor(fields: {
    runnable: Runnable<RunInput, RunOutput>;
    fallbacks: Runnable<RunInput, RunOutput>[];
  }) {
    super(fields);
    this.runnable = fields.runnable;
    this.fallbacks = fields.fallbacks;
  }

  /** Yields the primary runnable followed by each fallback, in order. */
  *runnables() {
    yield this.runnable;
    for (const fallback of this.fallbacks) {
      yield fallback;
    }
  }

  /**
   * Invokes each runnable in order and returns the first success; if all
   * fail, rethrows the FIRST error encountered.
   */
  async invoke(
    input: RunInput,
    options?: Partial<RunnableConfig>
  ): Promise<RunOutput> {
    const config = ensureConfig(options);
    const callbackManager_ = await getCallbackManagerForConfig(options);
    const { runId, ...otherConfigFields } = config;
    const runManager = await callbackManager_?.handleChainStart(
      this.toJSON(),
      _coerceToDict(input, "input"),
      runId,
      undefined,
      undefined,
      undefined,
      otherConfigFields?.runName
    );
    let firstError;
    for (const runnable of this.runnables()) {
      config?.signal?.throwIfAborted();
      try {
        const output = await runnable.invoke(
          input,
          patchConfig(otherConfigFields, { callbacks: runManager?.getChild() })
        );
        await runManager?.handleChainEnd(_coerceToDict(output, "output"));
        return output;
      } catch (e) {
        // Remember only the first failure; later ones are discarded.
        if (firstError === undefined) {
          firstError = e;
        }
      }
    }
    if (firstError === undefined) {
      throw new Error("No error stored at end of fallback.");
    }
    await runManager?.handleChainError(firstError);
    throw firstError;
  }

  /**
   * Tries each runnable until one successfully STARTS streaming; errors
   * thrown after the first chunk are not retried against fallbacks.
   */
  async *_streamIterator(
    input: RunInput,
    options?: Partial<RunnableConfig> | undefined
  ): AsyncGenerator<RunOutput> {
    const config = ensureConfig(options);
    const callbackManager_ = await getCallbackManagerForConfig(options);
    const { runId, ...otherConfigFields } = config;
    const runManager = await callbackManager_?.handleChainStart(
      this.toJSON(),
      _coerceToDict(input, "input"),
      runId,
      undefined,
      undefined,
      undefined,
      otherConfigFields?.runName
    );
    let firstError;
    let stream;
    for (const runnable of this.runnables()) {
      config?.signal?.throwIfAborted();
      const childConfig = patchConfig(otherConfigFields, {
        callbacks: runManager?.getChild(),
      });
      try {
        stream = await runnable.stream(input, childConfig);
        break;
      } catch (e) {
        if (firstError === undefined) {
          firstError = e;
        }
      }
    }
    if (stream === undefined) {
      const error = firstError ?? 
new Error("No error stored at end of fallback.");
      await runManager?.handleChainError(error);
      throw error;
    }
    let output;
    try {
      for await (const chunk of stream) {
        yield chunk;
        try {
          // Accumulate the final output for the end-of-run callback.
          // BUGFIX: seed the accumulator with the first chunk. Previously
          // this read `output === undefined ? output : concat(...)`, which
          // left `output` undefined forever, so handleChainEnd always
          // reported an undefined output.
          output = output === undefined ? chunk : concat(output, chunk);
        } catch (e) {
          // Chunk type doesn't support concat: give up on accumulation.
          output = undefined;
        }
      }
    } catch (e) {
      await runManager?.handleChainError(e);
      throw e;
    }
    await runManager?.handleChainEnd(_coerceToDict(output, "output"));
  }

  async batch(
    inputs: RunInput[],
    options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions?: false }
  ): Promise<RunOutput[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions: true }
  ): Promise<(RunOutput | Error)[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]>;

  /**
   * Batches against each runnable in order and returns the first fully
   * successful batch. `returnExceptions` is not supported here.
   */
  async batch(
    inputs: RunInput[],
    options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]> {
    if (batchOptions?.returnExceptions) {
      throw new Error("Not implemented.");
    }
    const configList = this._getOptionsList(options ?? 
{}, inputs.length);
    const callbackManagers = await Promise.all(
      configList.map((config) => getCallbackManagerForConfig(config))
    );
    const runManagers = await Promise.all(
      callbackManagers.map(async (callbackManager, i) => {
        const handleStartRes = await callbackManager?.handleChainStart(
          this.toJSON(),
          _coerceToDict(inputs[i], "input"),
          configList[i].runId,
          undefined,
          undefined,
          undefined,
          configList[i].runName
        );
        // runId must not be reused by the child runs.
        delete configList[i].runId;
        return handleStartRes;
      })
    );
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let firstError: any;
    for (const runnable of this.runnables()) {
      configList[0].signal?.throwIfAborted();
      try {
        const outputs = await runnable.batch(
          inputs,
          runManagers.map((runManager, j) =>
            patchConfig(configList[j], {
              callbacks: runManager?.getChild(),
            })
          ),
          batchOptions
        );
        await Promise.all(
          runManagers.map((runManager, i) =>
            runManager?.handleChainEnd(_coerceToDict(outputs[i], "output"))
          )
        );
        return outputs;
      } catch (e) {
        // Remember only the first failure; later ones are discarded.
        if (firstError === undefined) {
          firstError = e;
        }
      }
    }
    if (!firstError) {
      throw new Error("No error stored at end of fallbacks.");
    }
    await Promise.all(
      runManagers.map((runManager) => runManager?.handleChainError(firstError))
    );
    throw firstError;
  }
}

// TODO: Figure out why the compiler needs help eliminating Error as a RunOutput type
/**
 * Coerces a RunnableLike into a Runnable: functions become RunnableLambda,
 * plain objects become RunnableMap, Runnables pass through unchanged.
 * Anything else throws.
 */
export function _coerceToRunnable<
  RunInput,
  RunOutput,
  CallOptions extends RunnableConfig = RunnableConfig
>(
  coerceable: RunnableLike<RunInput, RunOutput, CallOptions>
): Runnable<RunInput, Exclude<RunOutput, Error>, CallOptions> {
  if (typeof coerceable === "function") {
    return new RunnableLambda({ func: coerceable }) as Runnable<
      RunInput,
      Exclude<RunOutput, Error>,
      CallOptions
    >;
  } else if (Runnable.isRunnable(coerceable)) {
    return coerceable as Runnable<
      RunInput,
      Exclude<RunOutput, Error>,
      CallOptions
    >;
  } else if (!Array.isArray(coerceable) && typeof coerceable === "object") {
    const runnables: Record<string, Runnable<RunInput>> = {};
    // Recursively coerce each entry of the object mapping.
    for (const [key, value] of Object.entries(coerceable)) {
      runnables[key] = _coerceToRunnable(value as RunnableLike);
    }
    return new RunnableMap({
      steps: runnables,
    }) as unknown as Runnable<RunInput, Exclude<RunOutput, Error>, CallOptions>;
  } else {
    throw new Error(
      `Expected a Runnable, function or object.\nInstead got an unsupported type.`
    );
  }
}

export interface RunnableAssignFields<RunInput> {
  mapper: RunnableMap<RunInput>;
}

/**
 * A runnable that assigns key-value pairs to inputs of type `Record<string, unknown>`.
 * @example
 * ```typescript
 * import {
 *   RunnableAssign,
 *   RunnableLambda,
 *   RunnableParallel,
 * } from "@langchain/core/runnables";
 *
 * const calculateAge = (x: { birthYear: number }): { age: number } => {
 *   const currentYear = new Date().getFullYear();
 *   return { age: currentYear - x.birthYear };
 * };
 *
 * const createGreeting = (x: { name: string }): { greeting: string } => {
 *   return { greeting: `Hello, ${x.name}!` };
 * };
 *
 * const mapper = RunnableParallel.from({
 *   age_step: RunnableLambda.from(calculateAge),
 *   greeting_step: RunnableLambda.from(createGreeting),
 * });
 *
 * const runnableAssign = new RunnableAssign({ mapper });
 *
 * const res = await runnableAssign.invoke({ name: "Alice", birthYear: 1990 });
 *
 * // { name: "Alice", birthYear: 1990, age_step: { age: 34 }, greeting_step: { greeting: "Hello, Alice!" 
} }
 * ```
 */
export class RunnableAssign<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunInput extends Record<string, any> = Record<string, any>,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>,
    CallOptions extends RunnableConfig = RunnableConfig
  >
  extends Runnable<RunInput, RunOutput>
  implements RunnableAssignFields<RunInput>
{
  static lc_name() {
    return "RunnableAssign";
  }

  lc_namespace = ["langchain_core", "runnables"];

  lc_serializable = true;

  // The map whose outputs get merged over the passthrough input.
  mapper: RunnableMap<RunInput>;

  constructor(fields: RunnableMap<RunInput> | RunnableAssignFields<RunInput>) {
    // Accept a bare RunnableMap for convenience.
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (fields instanceof RunnableMap) {
      // eslint-disable-next-line no-param-reassign
      fields = { mapper: fields };
    }
    super(fields);
    this.mapper = fields.mapper;
  }

  /** Runs the mapper and spreads its results over the original input. */
  async invoke(
    input: RunInput,
    options?: Partial<CallOptions>
  ): Promise<RunOutput> {
    const mapperResult = await this.mapper.invoke(input, options);

    return {
      ...input,
      ...mapperResult,
    } as RunOutput;
  }

  /**
   * Streams the passthrough input (with keys the mapper will produce
   * filtered out), then streams the mapper's own output chunks.
   */
  async *_transform(
    generator: AsyncGenerator<RunInput>,
    runManager?: CallbackManagerForChainRun,
    options?: Partial<RunnableConfig>
  ): AsyncGenerator<RunOutput> {
    // collect mapper keys
    const mapperKeys = this.mapper.getStepsKeys();
    // create two input gens, one for the mapper, one for the input
    const [forPassthrough, forMapper] = atee(generator);
    // create mapper output gen
    const mapperOutput = this.mapper.transform(
      forMapper,
      patchConfig(options, { callbacks: runManager?.getChild() })
    );
    // start the mapper
    const firstMapperChunkPromise = mapperOutput.next();
    // yield the passthrough
    for await (const chunk of forPassthrough) {
      // BUGFIX: `typeof null === "object"`, so a null chunk previously
      // slipped past this guard and crashed in Object.entries() below with
      // an opaque TypeError. Reject it explicitly with the intended error.
      if (chunk === null || typeof chunk !== "object" || Array.isArray(chunk)) {
        throw new Error(
          `RunnableAssign can only be used with objects as input, got ${typeof chunk}`
        );
      }
      const filtered = Object.fromEntries(
        Object.entries(chunk).filter(([key]) => !mapperKeys.includes(key))
      );
      if 
(Object.keys(filtered).length > 0) {
        yield filtered as unknown as RunOutput;
      }
    }
    // yield the mapper output
    // NOTE(review): if the mapper produced no chunks, `.value` here is
    // undefined and gets yielded as-is — confirm this is intended.
    yield (await firstMapperChunkPromise).value;
    for await (const chunk of mapperOutput) {
      yield chunk as unknown as RunOutput;
    }
  }

  transform(
    generator: AsyncGenerator<RunInput>,
    options?: Partial<RunnableConfig>
  ): AsyncGenerator<RunOutput> {
    return this._transformStreamWithConfig(
      generator,
      this._transform.bind(this),
      options
    );
  }

  async stream(
    input: RunInput,
    options?: Partial<RunnableConfig>
  ): Promise<IterableReadableStream<RunOutput>> {
    // Wrap the single input value as a one-element async generator.
    async function* generator() {
      yield input;
    }
    const config = ensureConfig(options);
    const wrappedGenerator = new AsyncGeneratorWithSetup({
      generator: this.transform(generator(), config),
      config,
    });
    await wrappedGenerator.setup;
    return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);
  }
}

export interface RunnablePickFields {
  keys: string | string[];
}

/**
 * A runnable that assigns key-value pairs to inputs of type `Record<string, unknown>`.
 * Useful for streaming, can be automatically created and chained by calling `runnable.pick();`.
 * @example
 * ```typescript
 * import { RunnablePick } from "@langchain/core/runnables";
 *
 * const inputData = {
 *   name: "John",
 *   age: 30,
 *   city: "New York",
 *   country: "USA",
 *   email: "john.doe@example.com",
 *   phone: "+1234567890",
 * };
 *
 * const basicInfoRunnable = new RunnablePick(["name", "city"]);
 *
 * // Example invocation
 * const res = await basicInfoRunnable.invoke(inputData);
 *
 * // { name: 'John', city: 'New York' }
 * ```
 */
export class RunnablePick<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunInput extends Record<string, any> = Record<string, any>,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> | any = Record<string, any> | any,
    CallOptions extends RunnableConfig = RunnableConfig
  >
  extends Runnable<RunInput, RunOutput>
  implements RunnablePickFields
{
  static lc_name() {
    return "RunnablePick";
  }

  lc_namespace = ["langchain_core", "runnables"];

  lc_serializable = true;

  // Single key (returns the bare value) or list of keys (returns an object).
  keys: string | string[];

  constructor(fields: string | string[] | RunnablePickFields) {
    // Accept a bare key or key list for convenience.
    if (typeof fields === "string" || Array.isArray(fields)) {
      // eslint-disable-next-line no-param-reassign
      fields = { keys: fields };
    }
    super(fields);
    this.keys = fields.keys;
  }

  /**
   * Picks `this.keys` out of `input`: a single string key returns that
   * value directly; an array returns an object of the keys that are
   * present, or undefined when none are.
   */
  async _pick(input: RunInput): Promise<RunOutput> {
    if (typeof this.keys === "string") {
      return input[this.keys];
    } else {
      const picked = this.keys
        .map((key) => [key, input[key]])
        .filter((v) => v[1] !== undefined);
      return picked.length === 0 ? undefined : Object.fromEntries(picked);
    }
  }

  async invoke(
    input: RunInput,
    options?: Partial<CallOptions>
  ): Promise<RunOutput> {
    return this._callWithConfig(this._pick.bind(this), input, options);
  }

  async *_transform(
    generator: AsyncGenerator<RunInput>
  ): AsyncGenerator<RunOutput> {
    for await (const chunk of generator) {
      const picked = await this._pick(chunk);
      // Chunks containing none of the keys are skipped entirely.
      if (picked !== undefined) {
        yield picked;
      }
    }
  }

  transform(
    generator: AsyncGenerator<RunInput>,
    options?: Partial<RunnableConfig>
  ): AsyncGenerator<RunOutput> {
    return this._transformStreamWithConfig(
      generator,
      this._transform.bind(this),
      options
    );
  }

  async stream(
    input: RunInput,
    options?: Partial<RunnableConfig>
  ): Promise<IterableReadableStream<RunOutput>> {
    // Wrap the single input value as a one-element async generator.
    async function* generator() {
      yield input;
    }
    const config = ensureConfig(options);
    const wrappedGenerator = new AsyncGeneratorWithSetup({
      generator: this.transform(generator(), config),
      config,
    });
    await wrappedGenerator.setup;
    return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);
  }
}

export interface RunnableToolLikeArgs<
  RunInput extends z.ZodType = z.ZodType,
  RunOutput = unknown
> extends Omit<RunnableBindingArgs<z.infer<RunInput>, RunOutput>, "config"> {
  name: string;

  description?: string;

  schema: RunInput;

  config?: RunnableConfig;
}

export class RunnableToolLike<
  RunInput extends z.ZodType = z.ZodType,
  RunOutput = unknown
> extends RunnableBinding<z.infer<RunInput>, RunOutput> {
  name: string;

  description?: string;

  schema: RunInput;

  constructor(fields: RunnableToolLikeArgs<RunInput, RunOutput>) {
    // Prepend a step that validates tool input (or a ToolCall's args)
    // against the Zod schema before running the bound runnable.
    const sequence = RunnableSequence.from([
      RunnableLambda.from(async (input) => {
        let toolInput: z.TypeOf<RunInput>;

        if (_isToolCall(input)) {
          try {
            toolInput = await this.schema.parseAsync(input.args);
          } catch (e) {
            throw new ToolInputParsingException(
              `Received tool input did not match expected schema`,
              JSON.stringify(input.args)
            );
          }
        } else {
          toolInput = input;
        }
        return toolInput;
      }).withConfig({ runName: `${fields.name}:parse_input` }),
fields.bound,
    ]).withConfig({ runName: fields.name });
    super({
      bound: sequence,
      config: fields.config ?? {},
    });

    this.name = fields.name;
    this.description = fields.description;
    this.schema = fields.schema;
  }

  static lc_name() {
    return "RunnableToolLike";
  }
}

/**
 * Given a runnable and a Zod schema, convert the runnable to a tool.
 *
 * @template RunInput The input type for the runnable.
 * @template RunOutput The output type for the runnable.
 *
 * @param {Runnable<RunInput, RunOutput>} runnable The runnable to convert to a tool.
 * @param fields
 * @param {string | undefined} [fields.name] The name of the tool. If not provided, it will default to the name of the runnable.
 * @param {string | undefined} [fields.description] The description of the tool. Falls back to the description on the Zod schema if not provided, or undefined if neither are provided.
 * @param {z.ZodType<RunInput>} [fields.schema] The Zod schema for the input of the tool. Infers the Zod type from the input type of the runnable.
 * @returns {RunnableToolLike<z.ZodType<RunInput>, RunOutput>} An instance of `RunnableToolLike` which is a runnable that can be used as a tool.
 */
export function convertRunnableToTool<RunInput, RunOutput>(
  runnable: Runnable<RunInput, RunOutput>,
  fields: {
    name?: string;
    description?: string;
    schema: z.ZodType<RunInput>;
  }
): RunnableToolLike<z.ZodType<RunInput | ToolCall>, RunOutput> {
  const name = fields.name ?? runnable.getName();
  const description = fields.description ?? fields.schema?.description;

  // Bare string schemas are wrapped as `{ input: string }` and unwrapped
  // again by the transform, so the tool always exposes an object schema.
  if (fields.schema.constructor === z.ZodString) {
    return new RunnableToolLike<z.ZodType<RunInput | ToolCall>, RunOutput>({
      name,
      description,
      schema: z
        .object({
          input: z.string(),
        })
        .transform((input) => input.input) as z.ZodType,
      bound: runnable,
    });
  }

  return new RunnableToolLike<z.ZodType<RunInput | ToolCall>, RunOutput>({
    name,
    description,
    schema: fields.schema,
    bound: runnable,
  });
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/graph.ts
import { zodToJsonSchema } from "zod-to-json-schema";
import { v4 as uuidv4, validate as isUuid } from "uuid";
import type {
  RunnableInterface,
  RunnableIOSchema,
  Node,
  Edge,
} from "./types.js";
import { isRunnableInterface } from "./utils.js";
import { drawMermaid, drawMermaidPng } from "./graph_mermaid.js";

export { Node, Edge };

/**
 * Compute a human-readable label for a node.
 *
 * Precedence:
 * 1. a caller-supplied id that is not a UUID (explicit ids are kept as-is);
 * 2. the runnable's name, with any leading "Runnable" prefix stripped;
 * 3. the schema's name, falling back to "UnknownSchema".
 */
function nodeDataStr(
  id: string | undefined,
  data: RunnableInterface | RunnableIOSchema
): string {
  if (id !== undefined && !isUuid(id)) {
    return id;
  } else if (isRunnableInterface(data)) {
    try {
      let dataStr = data.getName();
      dataStr = dataStr.startsWith("Runnable")
        ? dataStr.slice("Runnable".length)
        : dataStr;
      return dataStr;
    } catch (error) {
      // NOTE(review): this fallback re-invokes the same getName() call that
      // just threw, so a second failure would propagate — presumably a
      // best-effort retry; confirm intent upstream.
      return data.getName();
    }
  } else {
    return data.name ?? "UnknownSchema";
  }
}

/**
 * Serialize one node's payload for Graph.toJSON(): runnables are reduced to
 * their lc_id/name, schemas are expanded via zodToJsonSchema.
 */
function nodeDataJson(node: Node) {
  // if node.data implements Runnable
  if (isRunnableInterface(node.data)) {
    return {
      type: "runnable",
      data: {
        id: node.data.lc_id,
        name: node.data.getName(),
      },
    };
  } else {
    return {
      type: "schema",
      data: { ...zodToJsonSchema(node.data.schema), title: node.data.name },
    };
  }
}

/**
 * A directed graph of runnables/schemas: nodes keyed by id plus a flat edge
 * list. Used to represent and visualize the structure of a composed runnable.
 */
export class Graph {
  nodes: Record<string, Node> = {};

  edges: Edge[] = [];

  constructor(params?: { nodes: Record<string, Node>; edges: Edge[] }) {
    this.nodes = params?.nodes ?? this.nodes;
    this.edges = params?.edges ?? this.edges;
  }

  // Convert the graph to a JSON-serializable format.
  // UUID node ids are replaced by their insertion index so output stays
  // stable across runs; explicit (non-UUID) ids are preserved.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  toJSON(): Record<string, any> {
    const stableNodeIds: Record<string, string | number> = {};
    Object.values(this.nodes).forEach((node, i) => {
      stableNodeIds[node.id] = isUuid(node.id) ? i : node.id;
    });
    return {
      nodes: Object.values(this.nodes).map((node) => ({
        id: stableNodeIds[node.id],
        ...nodeDataJson(node),
      })),
      edges: this.edges.map((edge) => {
        const item: Record<string, unknown> = {
          source: stableNodeIds[edge.source],
          target: stableNodeIds[edge.target],
        };
        // Omit optional fields entirely rather than emitting undefined.
        if (typeof edge.data !== "undefined") {
          item.data = edge.data;
        }
        if (typeof edge.conditional !== "undefined") {
          item.conditional = edge.conditional;
        }
        return item;
      }),
    };
  }

  /**
   * Add a node for the given runnable or schema.
   *
   * @param data - Payload the node represents.
   * @param id - Optional explicit id; a UUID is generated when omitted.
   * @param metadata - Arbitrary extra information attached to the node.
   * @returns The newly created node.
   * @throws Error if a node with the given id already exists.
   */
  addNode(
    data: RunnableInterface | RunnableIOSchema,
    id?: string,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    metadata?: Record<string, any>
  ): Node {
    if (id !== undefined && this.nodes[id] !== undefined) {
      throw new Error(`Node with id ${id} already exists`);
    }
    const nodeId = id ?? uuidv4();
    const node: Node = {
      id: nodeId,
      data,
      name: nodeDataStr(id, data),
      metadata,
    };
    this.nodes[nodeId] = node;
    return node;
  }

  /** Remove a node and every edge touching it. */
  removeNode(node: Node): void {
    // Remove the node from the nodes map
    delete this.nodes[node.id];

    // Filter out edges connected to the node
    this.edges = this.edges.filter(
      (edge) => edge.source !== node.id && edge.target !== node.id
    );
  }

  /**
   * Connect two existing nodes with an optional label and conditional flag.
   *
   * @throws Error if either endpoint is not already in the graph.
   */
  addEdge(
    source: Node,
    target: Node,
    data?: string,
    conditional?: boolean
  ): Edge {
    if (this.nodes[source.id] === undefined) {
      throw new Error(`Source node ${source.id} not in graph`);
    }
    if (this.nodes[target.id] === undefined) {
      throw new Error(`Target node ${target.id} not in graph`);
    }
    const edge: Edge = {
      source: source.id,
      target: target.id,
      data,
      conditional,
    };
    this.edges.push(edge);
    return edge;
  }

  /** The unique entry node (no incoming edges), or undefined. */
  firstNode(): Node | undefined {
    return _firstNode(this);
  }

  /** The unique exit node (no outgoing edges), or undefined. */
  lastNode(): Node | undefined {
    return _lastNode(this);
  }

  /**
   * Add all nodes and edges from another graph.
   * Note this doesn't check for duplicates, nor does it connect the graphs.
   *
   * Node ids from the other graph are prefixed with `prefix:` unless every
   * id is a UUID (already collision-free).
   *
   * @returns A two-element array with the (re-prefixed) entry and exit of
   *   the merged-in graph, each possibly undefined.
   */
  extend(graph: Graph, prefix: string = "") {
    let finalPrefix = prefix;
    const nodeIds = Object.values(graph.nodes).map((node) => node.id);
    // Pure-UUID graphs need no prefixing to stay unique.
    if (nodeIds.every(isUuid)) {
      finalPrefix = "";
    }

    const prefixed = (id: string) => {
      return finalPrefix ? `${finalPrefix}:${id}` : id;
    };

    Object.entries(graph.nodes).forEach(([key, value]) => {
      this.nodes[prefixed(key)] = { ...value, id: prefixed(key) };
    });

    const newEdges = graph.edges.map((edge) => {
      return {
        ...edge,
        source: prefixed(edge.source),
        target: prefixed(edge.target),
      };
    });

    // Add all edges from the other graph
    this.edges = [...this.edges, ...newEdges];

    const first = graph.firstNode();
    const last = graph.lastNode();
    return [
      first ? { id: prefixed(first.id), data: first.data } : undefined,
      last ? { id: prefixed(last.id), data: last.data } : undefined,
    ];
  }

  /** Drop the entry node if the remaining graph still has a unique entry. */
  trimFirstNode(): void {
    const firstNode = this.firstNode();
    if (firstNode && _firstNode(this, [firstNode.id])) {
      this.removeNode(firstNode);
    }
  }

  /** Drop the exit node if the remaining graph still has a unique exit. */
  trimLastNode(): void {
    const lastNode = this.lastNode();
    if (lastNode && _lastNode(this, [lastNode.id])) {
      this.removeNode(lastNode);
    }
  }

  /**
   * Return a new graph with all nodes re-identified,
   * using their unique, readable names where possible.
   */
  reid(): Graph {
    const nodeLabels: Record<string, string> = Object.fromEntries(
      Object.values(this.nodes).map((node) => [node.id, node.name])
    );
    const nodeLabelCounts = new Map<string, number>();
    Object.values(nodeLabels).forEach((label) => {
      nodeLabelCounts.set(label, (nodeLabelCounts.get(label) || 0) + 1);
    });
    // Only swap a UUID id for its label when that label is unambiguous.
    const getNodeId = (nodeId: string): string => {
      const label = nodeLabels[nodeId];
      if (isUuid(nodeId) && nodeLabelCounts.get(label) === 1) {
        return label;
      } else {
        return nodeId;
      }
    };
    return new Graph({
      nodes: Object.fromEntries(
        Object.entries(this.nodes).map(([id, node]) => [
          getNodeId(id),
          { ...node, id: getNodeId(id) },
        ])
      ),
      edges: this.edges.map((edge) => ({
        ...edge,
        source: getNodeId(edge.source),
        target: getNodeId(edge.target),
      })),
    });
  }

  /**
   * Render the graph as Mermaid flowchart syntax.
   * Node ids are first made readable via reid().
   */
  drawMermaid(params?: {
    withStyles?: boolean;
    curveStyle?: string;
    nodeColors?: Record<string, string>;
    wrapLabelNWords?: number;
  }): string {
    const {
      withStyles,
      curveStyle,
      nodeColors = {
        default: "fill:#f2f0ff,line-height:1.2",
        first: "fill-opacity:0",
        last: "fill:#bfb6fc",
      },
      wrapLabelNWords,
    } = params ?? {};
    const graph = this.reid();
    const firstNode = graph.firstNode();
    const lastNode = graph.lastNode();
    return drawMermaid(graph.nodes, graph.edges, {
      firstNode: firstNode?.id,
      lastNode: lastNode?.id,
      withStyles,
      curveStyle,
      nodeColors,
      wrapLabelNWords,
    });
  }

  /** Render the graph to a PNG image (delegates to drawMermaidPng). */
  async drawMermaidPng(params?: {
    withStyles?: boolean;
    curveStyle?: string;
    nodeColors?: Record<string, string>;
    wrapLabelNWords?: number;
    backgroundColor?: string;
  }): Promise<Blob> {
    const mermaidSyntax = this.drawMermaid(params);
    return drawMermaidPng(mermaidSyntax, {
      backgroundColor: params?.backgroundColor,
    });
  }
}

/**
 * Find the single node that is not a target of any edge.
 * Exclude nodes/sources with ids in the exclude list.
 * If there is no such node, or there are multiple, return undefined.
 * When drawing the graph, this node would be the origin.
 */
function _firstNode(graph: Graph, exclude: string[] = []): Node | undefined {
  const targets = new Set(
    graph.edges
      .filter((edge) => !exclude.includes(edge.source))
      .map((edge) => edge.target)
  );
  const found: Node[] = [];
  for (const node of Object.values(graph.nodes)) {
    if (!exclude.includes(node.id) && !targets.has(node.id)) {
      found.push(node);
    }
  }
  // Exactly one candidate means an unambiguous origin.
  return found.length === 1 ? found[0] : undefined;
}

/**
 * Find the single node that is not a source of any edge.
 * Exclude nodes/targets with ids in the exclude list.
 * If there is no such node, or there are multiple, return undefined.
 * When drawing the graph, this node would be the destination.
 */
function _lastNode(graph: Graph, exclude: string[] = []): Node | undefined {
  const sources = new Set(
    graph.edges
      .filter((edge) => !exclude.includes(edge.target))
      .map((edge) => edge.source)
  );
  const found: Node[] = [];
  for (const node of Object.values(graph.nodes)) {
    if (!exclude.includes(node.id) && !sources.has(node.id)) {
      found.push(node);
    }
  }
  // Exactly one candidate means an unambiguous destination.
  return found.length === 1 ? found[0] : undefined;
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/utils.ts
import { StreamEvent } from "../tracers/event_stream.js"; import type { RunnableInterface } from "./types.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export function isRunnableInterface(thing: any): thing is RunnableInterface { return thing ? thing.lc_runnable : false; } /** * Utility to filter the root event in the streamEvents implementation. * This is simply binding the arguments to the namespace to make save on * a bit of typing in the streamEvents implementation. * * TODO: Refactor and remove. */ export class _RootEventFilter { includeNames?: string[]; includeTypes?: string[]; includeTags?: string[]; excludeNames?: string[]; excludeTypes?: string[]; excludeTags?: string[]; constructor(fields: { includeNames?: string[]; includeTypes?: string[]; includeTags?: string[]; excludeNames?: string[]; excludeTypes?: string[]; excludeTags?: string[]; }) { this.includeNames = fields.includeNames; this.includeTypes = fields.includeTypes; this.includeTags = fields.includeTags; this.excludeNames = fields.excludeNames; this.excludeTypes = fields.excludeTypes; this.excludeTags = fields.excludeTags; } includeEvent(event: StreamEvent, rootType: string): boolean { let include = this.includeNames === undefined && this.includeTypes === undefined && this.includeTags === undefined; const eventTags = event.tags ?? 
[]; if (this.includeNames !== undefined) { include = include || this.includeNames.includes(event.name); } if (this.includeTypes !== undefined) { include = include || this.includeTypes.includes(rootType); } if (this.includeTags !== undefined) { include = include || eventTags.some((tag) => this.includeTags?.includes(tag)); } if (this.excludeNames !== undefined) { include = include && !this.excludeNames.includes(event.name); } if (this.excludeTypes !== undefined) { include = include && !this.excludeTypes.includes(rootType); } if (this.excludeTags !== undefined) { include = include && eventTags.every((tag) => !this.excludeTags?.includes(tag)); } return include; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/remote.ts
import { Runnable, RunnableBatchOptions, _coerceToDict } from "./base.js";
import { getCallbackManagerForConfig, type RunnableConfig } from "./config.js";
import { Document } from "../documents/index.js";
import { CallbackManagerForChainRun } from "../callbacks/manager.js";
import { ChatPromptValue, StringPromptValue } from "../prompt_values.js";
import {
  RunLogPatch,
  type LogStreamCallbackHandlerInput,
  type StreamEvent,
  RunLog,
} from "../tracers/log_stream.js";
import {
  AIMessage,
  AIMessageChunk,
  ChatMessage,
  ChatMessageChunk,
  FunctionMessage,
  FunctionMessageChunk,
  HumanMessage,
  HumanMessageChunk,
  SystemMessage,
  SystemMessageChunk,
  ToolMessage,
  ToolMessageChunk,
  isBaseMessage,
} from "../messages/index.js";
import { GenerationChunk, ChatGenerationChunk, RUN_KEY } from "../outputs.js";
import { convertEventStreamToIterableReadableDataStream } from "../utils/event_source_parse.js";
import { IterableReadableStream, concat } from "../utils/stream.js";

// Client-side options for a RemoteRunnable: request timeout (ms) and extra
// HTTP headers sent with every request.
type RemoteRunnableOptions = {
  timeout?: number;
  headers?: Record<string, unknown>;
};

/** True when every element of `subset` is also present in `set`. */
function isSuperset(set: Set<string>, subset: Set<string>) {
  for (const elem of subset) {
    if (!set.has(elem)) {
      return false;
    }
  }
  return true;
}

/**
 * Recursively reconstruct rich LangChain objects (documents, messages,
 * generations, agent actions/finishes, prompt values, ...) from the plain
 * JSON shapes a LangServe server returns. Objects are matched structurally
 * by key set — the listed keys only need to be a subset of the object's
 * keys — and then discriminated by their `type` field where present.
 * Anything unrecognized is revived property-by-property and returned as-is.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function revive(obj: any): any {
  if (Array.isArray(obj)) return obj.map(revive);
  if (typeof obj === "object") {
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (!obj || obj instanceof Date) {
      return obj;
    }
    const keysArr = Object.keys(obj);
    const keys = new Set(keysArr);
    if (isSuperset(keys, new Set(["page_content", "metadata"]))) {
      return new Document({
        pageContent: obj.page_content,
        metadata: obj.metadata,
      });
    }
    if (isSuperset(keys, new Set(["content", "type", "additional_kwargs"]))) {
      if (obj.type === "HumanMessage" || obj.type === "human") {
        return new HumanMessage({
          content: obj.content,
        });
      }
      if (obj.type === "SystemMessage" || obj.type === "system") {
        return new SystemMessage({
          content: obj.content,
        });
      }
      if (obj.type === "ChatMessage" || obj.type === "generic") {
        return new ChatMessage({
          content: obj.content,
          role: obj.role,
        });
      }
      if (obj.type === "FunctionMessage" || obj.type === "function") {
        return new FunctionMessage({
          content: obj.content,
          name: obj.name,
        });
      }
      if (obj.type === "ToolMessage" || obj.type === "tool") {
        return new ToolMessage({
          content: obj.content,
          tool_call_id: obj.tool_call_id,
          status: obj.status,
        });
      }
      if (obj.type === "AIMessage" || obj.type === "ai") {
        return new AIMessage({
          content: obj.content,
        });
      }
      if (obj.type === "HumanMessageChunk") {
        return new HumanMessageChunk({
          content: obj.content,
        });
      }
      if (obj.type === "SystemMessageChunk") {
        return new SystemMessageChunk({
          content: obj.content,
        });
      }
      if (obj.type === "ChatMessageChunk") {
        return new ChatMessageChunk({
          content: obj.content,
          role: obj.role,
        });
      }
      if (obj.type === "FunctionMessageChunk") {
        return new FunctionMessageChunk({
          content: obj.content,
          name: obj.name,
        });
      }
      if (obj.type === "ToolMessageChunk") {
        return new ToolMessageChunk({
          content: obj.content,
          tool_call_id: obj.tool_call_id,
          status: obj.status,
        });
      }
      if (obj.type === "AIMessageChunk") {
        return new AIMessageChunk({
          content: obj.content,
        });
      }
    }
    if (isSuperset(keys, new Set(["text", "generation_info", "type"]))) {
      // Chunk variants get real class instances; plain Generation /
      // ChatGeneration are returned as plain camelCased objects.
      if (obj.type === "ChatGenerationChunk") {
        return new ChatGenerationChunk({
          message: revive(obj.message),
          text: obj.text,
          generationInfo: obj.generation_info,
        });
      } else if (obj.type === "ChatGeneration") {
        return {
          message: revive(obj.message),
          text: obj.text,
          generationInfo: obj.generation_info,
        };
      } else if (obj.type === "GenerationChunk") {
        return new GenerationChunk({
          text: obj.text,
          generationInfo: obj.generation_info,
        });
      } else if (obj.type === "Generation") {
        return {
          text: obj.text,
          generationInfo: obj.generation_info,
        };
      }
    }
    if (isSuperset(keys, new Set(["tool", "tool_input", "log", "type"]))) {
      if (obj.type === "AgentAction") {
        return {
          tool: obj.tool,
          toolInput: obj.tool_input,
          log: obj.log,
        };
      }
    }
    if (isSuperset(keys, new Set(["return_values", "log", "type"]))) {
      if (obj.type === "AgentFinish") {
        return {
          returnValues: obj.return_values,
          log: obj.log,
        };
      }
    }
    if (isSuperset(keys, new Set(["generations", "run", "type"]))) {
      if (obj.type === "LLMResult") {
        return {
          generations: revive(obj.generations),
          llmOutput: obj.llm_output,
          [RUN_KEY]: obj.run,
        };
      }
    }
    if (isSuperset(keys, new Set(["messages"]))) {
      // TODO: Start checking for type: ChatPromptValue and ChatPromptValueConcrete
      // when LangServe bug is fixed
      return new ChatPromptValue({
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        messages: obj.messages.map((msg: any) => revive(msg)),
      });
    }
    if (isSuperset(keys, new Set(["text"]))) {
      // TODO: Start checking for type: StringPromptValue
      // when LangServe bug is fixed
      return new StringPromptValue(obj.text);
    }
    // No structural match: revive every property recursively.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const innerRevive: (key: string) => [string, any] = (key: string) => [
      key,
      revive(obj[key]),
    ];
    const rtn = Object.fromEntries(keysArr.map(innerRevive));
    return rtn;
  }
  return obj;
}

/** Parse a JSON payload from the server and revive it into rich objects. */
function deserialize<RunOutput>(str: string): RunOutput {
  const obj = JSON.parse(str);
  return revive(obj);
}

/**
 * Shallow-copy a config without its `callbacks` and `signal` fields —
 * neither can be serialized over the wire.
 */
function removeCallbacksAndSignal(
  options?: RunnableConfig
): Omit<RunnableConfig, "callbacks" | "signal"> {
  const rest = { ...options };
  delete rest.callbacks;
  delete rest.signal;

  return rest;
}

/**
 * Recursively convert an input into the plain JSON shape LangServe expects;
 * messages are flattened to { content, type, additional_kwargs, ... } with
 * tool_call_id / role added for the message types that carry them.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function serialize<RunInput>(input: RunInput): any {
  if (Array.isArray(input)) return input.map(serialize);
  if (isBaseMessage(input)) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const serializedMessage: Record<string, any> = {
      content: input.content,
      type: input._getType(),
      additional_kwargs: input.additional_kwargs,
      name: input.name,
      example: false,
    };
    if (ToolMessage.isInstance(input)) {
      serializedMessage.tool_call_id = input.tool_call_id;
    } else if (ChatMessage.isInstance(input)) {
      serializedMessage.role = input.role;
    }
    return serializedMessage;
  }
  if (typeof input === "object") {
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (!input || input instanceof Date) {
      return input;
    }
    const keysArr = Object.keys(input);
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const innerSerialize: (key: string) => [string, any] = (key: string) => [
      key,
      serialize((input as Record<string, unknown>)[key]),
    ];
    const rtn = Object.fromEntries(keysArr.map(innerSerialize));
    return rtn;
  }
  return input;
}

/**
 * Client for interacting with LangChain runnables
 * that are hosted as LangServe endpoints.
 *
 * Allows you to interact with hosted runnables using the standard
 * `.invoke()`, `.stream()`, `.streamEvents()`, etc. methods that
 * other runnables support.
 *
 * @param url - The base URL of the LangServe endpoint.
 * @param options - Optional configuration for the remote runnable, including timeout and headers.
 * @param fetch - Optional custom fetch implementation.
 * @param fetchRequestOptions - Optional additional options for fetch requests.
 */
export class RemoteRunnable<
  RunInput,
  RunOutput,
  CallOptions extends RunnableConfig
> extends Runnable<RunInput, RunOutput, CallOptions> {
  private url: string;

  private options?: RemoteRunnableOptions;

  // Wrap the default fetch call due to issues with illegal invocations
  // from the browser:
  // https://stackoverflow.com/questions/69876859/why-does-bind-fix-failed-to-execute-fetch-on-window-illegal-invocation-err
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  fetchImplementation: (...args: any[]) => any = (...args: any[]) =>
    // @ts-expect-error Broad typing to support a range of fetch implementations
    fetch(...args);

  // Extra options merged into every fetch request (may carry headers).
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  fetchRequestOptions?: Record<string, any>;

  lc_namespace = ["langchain", "schema", "runnable", "remote"];

  constructor(fields: {
    url: string;
    options?: RemoteRunnableOptions;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    fetch?: (...args: any[]) => any;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    fetchRequestOptions?: Record<string, any>;
  }) {
    super(fields);
    const {
      url,
      options,
      fetch: fetchImplementation,
      fetchRequestOptions,
    } = fields;
    this.url = url.replace(/\/$/, ""); // remove trailing slash
    this.options = options;
    this.fetchImplementation = fetchImplementation ?? this.fetchImplementation;
    this.fetchRequestOptions = fetchRequestOptions;
  }

  /**
   * POST a JSON-serialized body to `${this.url}${path}`.
   * Falls back to a timeout-based AbortSignal (default 60s) when the caller
   * passes none.
   */
  private async post<Body>(path: string, body: Body, signal?: AbortSignal) {
    return this.fetchImplementation(`${this.url}${path}`, {
      method: "POST",
      body: JSON.stringify(serialize(body)),
      signal: signal ?? AbortSignal.timeout(this.options?.timeout ?? 60000),
      ...this.fetchRequestOptions,
      headers: {
        "Content-Type": "application/json",
        ...this.fetchRequestOptions?.headers,
        ...this.options?.headers,
      },
    });
  }

  /** Core /invoke call; wrapped with callback handling by invoke(). */
  async _invoke(
    input: RunInput,
    options?: Partial<CallOptions>,
    _?: CallbackManagerForChainRun
  ) {
    const [config, kwargs] =
      this._separateRunnableConfigFromCallOptions(options);
    const response = await this.post<{
      input: RunInput;
      config?: RunnableConfig;
      kwargs?: Omit<Partial<CallOptions>, keyof RunnableConfig>;
    }>(
      "/invoke",
      {
        input,
        config: removeCallbacksAndSignal(config),
        kwargs: kwargs ?? {},
      },
      config.signal
    );
    if (!response.ok) {
      throw new Error(`${response.status} Error: ${await response.text()}`);
    }
    return revive((await response.json()).output) as RunOutput;
  }

  /**
   * Invoke the hosted runnable with a single input.
   * @throws Error with the HTTP status and body text on non-2xx responses.
   */
  async invoke(
    input: RunInput,
    options?: Partial<CallOptions>
  ): Promise<RunOutput> {
    return this._callWithConfig(this._invoke, input, options);
  }

  /** Core /batch call; wrapped with callback handling by batch(). */
  async _batch(
    inputs: RunInput[],
    options?: Partial<CallOptions>[],
    _?: (CallbackManagerForChainRun | undefined)[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]> {
    if (batchOptions?.returnExceptions) {
      throw new Error("returnExceptions is not supported for remote clients");
    }
    // Split each per-input option object into its config and extra kwargs,
    // then unzip into two parallel arrays.
    const configsAndKwargsArray = options?.map((opts) =>
      this._separateRunnableConfigFromCallOptions(opts)
    );
    const [configs, kwargs] = configsAndKwargsArray?.reduce(
      ([pc, pk], [c, k]) =>
        [
          [...pc, c],
          [...pk, k],
        ] as [
          RunnableConfig[],
          Omit<Partial<CallOptions>, keyof RunnableConfig>[]
        ],
      [[], []] as [
        RunnableConfig[],
        Omit<Partial<CallOptions>, keyof RunnableConfig>[]
      ]
    ) ?? [undefined, undefined];
    const response = await this.post<{
      inputs: RunInput[];
      config?: (RunnableConfig & RunnableBatchOptions)[];
      kwargs?: Omit<Partial<CallOptions>, keyof RunnableConfig>[];
    }>(
      "/batch",
      {
        inputs,
        config: (configs ?? [])
          .map(removeCallbacksAndSignal)
          .map((config) => ({ ...config, ...batchOptions })),
        kwargs,
      },
      options?.[0]?.signal
    );
    if (!response.ok) {
      throw new Error(`${response.status} Error: ${await response.text()}`);
    }
    const body = await response.json();
    if (!body.output) throw new Error("Invalid response from remote runnable");
    return revive(body.output);
  }

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions?: false }
  ): Promise<RunOutput[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions & { returnExceptions: true }
  ): Promise<(RunOutput | Error)[]>;

  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]>;

  /**
   * Invoke the hosted runnable on multiple inputs in one request.
   * `returnExceptions` is rejected up front — the server cannot report
   * per-input failures back to this client.
   */
  async batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptions
  ): Promise<(RunOutput | Error)[]> {
    if (batchOptions?.returnExceptions) {
      throw Error("returnExceptions is not supported for remote clients");
    }
    return this._batchWithConfig(
      this._batch.bind(this),
      inputs,
      options,
      batchOptions
    );
  }

  /**
   * Stream chunks from /stream, yielding each revived chunk and
   * accumulating a best-effort final output for the chain-end callback.
   */
  async *_streamIterator(
    input: RunInput,
    options?: Partial<CallOptions>
  ): AsyncGenerator<RunOutput> {
    const [config, kwargs] =
      this._separateRunnableConfigFromCallOptions(options);
    const callbackManager_ = await getCallbackManagerForConfig(options);
    const runManager = await callbackManager_?.handleChainStart(
      this.toJSON(),
      _coerceToDict(input, "input"),
      config.runId,
      undefined,
      undefined,
      undefined,
      config.runName
    );
    delete config.runId;
    let finalOutput: RunOutput | undefined;
    let finalOutputSupported = true;
    try {
      const response = await this.post<{
        input: RunInput;
        config?: RunnableConfig;
        kwargs?: Omit<Partial<CallOptions>, keyof RunnableConfig>;
      }>(
        "/stream",
        {
          input,
          config: removeCallbacksAndSignal(config),
          kwargs,
        },
        config.signal
      );
      if (!response.ok) {
        const json = await response.json();
        const error = new Error(
          `RemoteRunnable call failed with status code ${response.status}: ${json.message}`
        );
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).response = response;
        throw error;
      }
      const { body } = response;
      if (!body) {
        throw new Error(
          "Could not begin remote stream. Please check the given URL and try again."
        );
      }
      const runnableStream =
        convertEventStreamToIterableReadableDataStream(body);
      for await (const chunk of runnableStream) {
        const deserializedChunk = deserialize(chunk) as RunOutput;
        yield deserializedChunk;
        if (finalOutputSupported) {
          if (finalOutput === undefined) {
            finalOutput = deserializedChunk;
          } else {
            try {
              // eslint-disable-next-line @typescript-eslint/no-explicit-any
              finalOutput = concat(finalOutput, deserializedChunk as any);
            } catch {
              // Chunks are not concatenatable — stop tracking a final output.
              finalOutput = undefined;
              finalOutputSupported = false;
            }
          }
        }
      }
    } catch (err) {
      await runManager?.handleChainError(err);
      throw err;
    }
    await runManager?.handleChainEnd(finalOutput ?? {});
  }

  /**
   * Stream RunLogPatch ops from /stream_log, accumulating them into a
   * RunLog so the final state can be reported to callbacks.
   */
  async *streamLog(
    input: RunInput,
    options?: Partial<CallOptions>,
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">
  ): AsyncGenerator<RunLogPatch> {
    const [config, kwargs] =
      this._separateRunnableConfigFromCallOptions(options);
    const callbackManager_ = await getCallbackManagerForConfig(options);
    const runManager = await callbackManager_?.handleChainStart(
      this.toJSON(),
      _coerceToDict(input, "input"),
      config.runId,
      undefined,
      undefined,
      undefined,
      config.runName
    );
    delete config.runId;
    // The type is in camelCase but the API only accepts snake_case.
    const camelCaseStreamOptions = {
      include_names: streamOptions?.includeNames,
      include_types: streamOptions?.includeTypes,
      include_tags: streamOptions?.includeTags,
      exclude_names: streamOptions?.excludeNames,
      exclude_types: streamOptions?.excludeTypes,
      exclude_tags: streamOptions?.excludeTags,
    };
    let runLog;
    try {
      const response = await this.post<{
        input: RunInput;
        config?: RunnableConfig;
        kwargs?: Omit<Partial<CallOptions>, keyof RunnableConfig>;
        diff: false;
      }>(
        "/stream_log",
        {
          input,
          config: removeCallbacksAndSignal(config),
          kwargs,
          ...camelCaseStreamOptions,
          diff: false,
        },
        config.signal
      );
      const { body, ok } = response;
      if (!ok) {
        throw new Error(`${response.status} Error: ${await response.text()}`);
      }
      if (!body) {
        throw new Error(
          "Could not begin remote stream log. Please check the given URL and try again."
        );
      }
      const runnableStream =
        convertEventStreamToIterableReadableDataStream(body);
      for await (const log of runnableStream) {
        const chunk = revive(JSON.parse(log));
        const logPatch = new RunLogPatch({ ops: chunk.ops });
        yield logPatch;
        if (runLog === undefined) {
          runLog = RunLog.fromRunLogPatch(logPatch);
        } else {
          runLog = runLog.concat(logPatch);
        }
      }
    } catch (err) {
      await runManager?.handleChainError(err);
      throw err;
    }
    await runManager?.handleChainEnd(runLog?.state.final_output);
  }

  /**
   * Core /stream_events generator. Uses an inner async generator closed
   * over `outerThis` so this method itself can return synchronously.
   */
  _streamEvents(
    input: RunInput,
    options: Partial<CallOptions> & { version: "v1" | "v2" },
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose"> | undefined
  ): AsyncGenerator<StreamEvent> {
    // eslint-disable-next-line @typescript-eslint/no-this-alias
    const outerThis = this;
    const generator = async function* () {
      const [config, kwargs] =
        outerThis._separateRunnableConfigFromCallOptions(options);
      const callbackManager_ = await getCallbackManagerForConfig(options);
      const runManager = await callbackManager_?.handleChainStart(
        outerThis.toJSON(),
        _coerceToDict(input, "input"),
        config.runId,
        undefined,
        undefined,
        undefined,
        config.runName
      );
      delete config.runId;
      // The type is in camelCase but the API only accepts snake_case.
      const camelCaseStreamOptions = {
        include_names: streamOptions?.includeNames,
        include_types: streamOptions?.includeTypes,
        include_tags: streamOptions?.includeTags,
        exclude_names: streamOptions?.excludeNames,
        exclude_types: streamOptions?.excludeTypes,
        exclude_tags: streamOptions?.excludeTags,
      };
      const events = [];
      try {
        const response = await outerThis.post<{
          input: RunInput;
          config?: RunnableConfig;
          kwargs?: Omit<Partial<CallOptions>, keyof RunnableConfig>;
          diff: false;
        }>(
          "/stream_events",
          {
            input,
            config: removeCallbacksAndSignal(config),
            kwargs,
            ...camelCaseStreamOptions,
            diff: false,
          },
          config.signal
        );
        const { body, ok } = response;
        if (!ok) {
          throw new Error(`${response.status} Error: ${await response.text()}`);
        }
        if (!body) {
          throw new Error(
            "Could not begin remote stream events. Please check the given URL and try again."
          );
        }
        const runnableStream =
          convertEventStreamToIterableReadableDataStream(body);
        for await (const log of runnableStream) {
          const chunk = revive(JSON.parse(log));
          const event = {
            event: chunk.event,
            name: chunk.name,
            run_id: chunk.run_id,
            tags: chunk.tags,
            metadata: chunk.metadata,
            data: chunk.data,
          };
          yield event;
          events.push(event);
        }
      } catch (err) {
        await runManager?.handleChainError(err);
        throw err;
      }
      await runManager?.handleChainEnd(events);
    };
    return generator();
  }

  streamEvents(
    input: RunInput,
    options: Partial<CallOptions> & { version: "v1" | "v2" },
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">
  ): IterableReadableStream<StreamEvent>;

  streamEvents(
    input: RunInput,
    options: Partial<CallOptions> & {
      version: "v1" | "v2";
      encoding: "text/event-stream";
    },
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">
  ): IterableReadableStream<Uint8Array>;

  /**
   * Stream events from /stream_events as an IterableReadableStream.
   * Only schema versions "v1"/"v2" are accepted, and no special encodings
   * are supported by the remote client.
   */
  streamEvents(
    input: RunInput,
    options: Partial<CallOptions> & {
      version: "v1" | "v2";
      encoding?: "text/event-stream" | undefined;
    },
    streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">
  ): IterableReadableStream<StreamEvent | Uint8Array> {
    if (options.version !== "v1" && options.version !== "v2") {
      throw new Error(
        `Only versions "v1" and "v2" of the events schema is currently supported.`
      );
    }
    if (options.encoding !== undefined) {
      throw new Error("Special encodings are not supported for this runnable.");
    }
    const eventStream = this._streamEvents(input, options, streamOptions);
    return IterableReadableStream.fromAsyncGenerator(eventStream);
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/runnables/passthrough.ts
import { concat } from "../utils/stream.js"; import { Runnable, RunnableAssign, RunnableMap, RunnableMapLike, } from "./base.js"; import { ensureConfig, type RunnableConfig } from "./config.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any type RunnablePassthroughFunc<RunInput = any> = | ((input: RunInput) => void) | ((input: RunInput, config?: RunnableConfig) => void) | ((input: RunInput) => Promise<void>) | ((input: RunInput, config?: RunnableConfig) => Promise<void>); /** * A runnable to passthrough inputs unchanged or with additional keys. * * This runnable behaves almost like the identity function, except that it * can be configured to add additional keys to the output, if the input is * an object. * * The example below demonstrates how to use `RunnablePassthrough to * passthrough the input from the `.invoke()` * * @example * ```typescript * const chain = RunnableSequence.from([ * { * question: new RunnablePassthrough(), * context: async () => loadContextFromStore(), * }, * prompt, * llm, * outputParser, * ]); * const response = await chain.invoke( * "I can pass a single string instead of an object since I'm using `RunnablePassthrough`." 
* ); * ``` */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export class RunnablePassthrough<RunInput = any> extends Runnable< RunInput, RunInput > { static lc_name() { return "RunnablePassthrough"; } lc_namespace = ["langchain_core", "runnables"]; lc_serializable = true; func?: RunnablePassthroughFunc<RunInput>; constructor(fields?: { func?: RunnablePassthroughFunc<RunInput> }) { super(fields); if (fields) { this.func = fields.func; } } async invoke( input: RunInput, options?: Partial<RunnableConfig> ): Promise<RunInput> { const config = ensureConfig(options); if (this.func) { await this.func(input, config); } return this._callWithConfig( (input: RunInput) => Promise.resolve(input), input, config ); } async *transform( generator: AsyncGenerator<RunInput>, options: Partial<RunnableConfig> ): AsyncGenerator<RunInput> { const config = ensureConfig(options); let finalOutput: RunInput | undefined; let finalOutputSupported = true; for await (const chunk of this._transformStreamWithConfig( generator, (input: AsyncGenerator<RunInput>) => input, config )) { yield chunk; if (finalOutputSupported) { if (finalOutput === undefined) { finalOutput = chunk; } else { try { // eslint-disable-next-line @typescript-eslint/no-explicit-any finalOutput = concat(finalOutput, chunk as any); } catch { finalOutput = undefined; finalOutputSupported = false; } } } } if (this.func && finalOutput !== undefined) { await this.func(finalOutput, config); } } /** * A runnable that assigns key-value pairs to the input. * * The example below shows how you could use it with an inline function. * * @example * ```typescript * const prompt = * PromptTemplate.fromTemplate(`Write a SQL query to answer the question using the following schema: {schema} * Question: {question} * SQL Query:`); * * // The `RunnablePassthrough.assign()` is used here to passthrough the input from the `.invoke()` * // call (in this example it's the question), along with any inputs passed to the `.assign()` method. 
* // In this case, we're passing the schema. * const sqlQueryGeneratorChain = RunnableSequence.from([ * RunnablePassthrough.assign({ * schema: async () => db.getTableInfo(), * }), * prompt, * new ChatOpenAI({}).bind({ stop: ["\nSQLResult:"] }), * new StringOutputParser(), * ]); * const result = await sqlQueryGeneratorChain.invoke({ * question: "How many employees are there?", * }); * ``` */ static assign< RunInput extends Record<string, unknown> = Record<string, unknown>, RunOutput extends Record<string, unknown> = Record<string, unknown> >( mapping: RunnableMapLike<RunInput, RunOutput> ): RunnableAssign<RunInput, RunInput & RunOutput> { return new RunnableAssign(new RunnableMap({ steps: mapping })); } }
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_passthrough.test.ts
import { test, expect } from "@jest/globals";
import { PromptTemplate } from "../../prompts/prompt.js";
import { FakeChatModel } from "../../utils/testing/index.js";
import { RunnablePassthrough } from "../passthrough.js";
import { JsonOutputParser } from "../../output_parsers/json.js";
import { RunnableSequence } from "../base.js";
import { RunnableConfig } from "../config.js";

test("RunnablePassthrough can call .assign and pass prev result through", async () => {
  const promptTemplate = PromptTemplate.fromTemplate("{input}");
  const llm = new FakeChatModel({});
  const parser = new JsonOutputParser();
  // Markdown-fenced JSON that JsonOutputParser should extract.
  const text = `\`\`\`
{"outputValue": "testing"}
\`\`\``;
  const chain = promptTemplate.pipe(llm).pipe(parser);
  const chainWithAssign = chain.pipe(
    RunnablePassthrough.assign({
      outputValue: (i) => i.outputValue,
    })
  );
  const result = await chainWithAssign.invoke({ input: text });
  console.log(result);
  expect(result).toEqual({ outputValue: "testing" });
});

test("RunnablePassthrough can call .assign as the first step with proper typing", async () => {
  const promptTemplate = PromptTemplate.fromTemplate("{input}");
  const llm = new FakeChatModel({});
  const parser = new JsonOutputParser();
  const text = `\`\`\`
{"outputValue": "testing2"}
\`\`\``;
  const chain = RunnableSequence.from([
    RunnablePassthrough.assign({
      input: (input) => input.otherProp,
    }),
    promptTemplate,
    llm,
    parser,
  ]);
  const result = await chain.invoke({ otherProp: text });
  console.log(result);
  expect(result).toEqual({ outputValue: "testing2" });
});

test("RunnablePassthrough can invoke a function without modifying passthrough value", async () => {
  let wasCalled = false;
  const addOne = (input: number) => {
    wasCalled = true;
    return input + 1;
  };
  const passthrough = new RunnablePassthrough<number>({
    func: addOne,
  });
  const result = await passthrough.invoke(1);
  // The hook's return value must be discarded: output equals input.
  expect(result).toEqual(1);
  expect(wasCalled).toEqual(true);
});

test("RunnablePassthrough can transform a function as constructor args", async () => {
  let wasCalled = false;
  const addOne = (input: number) => {
    wasCalled = true;
    return input + 1;
  };
  const passthrough = new RunnablePassthrough<number>({
    func: addOne,
  });
  async function* generateNumbers() {
    yield 1;
  }
  const transformedGenerator = passthrough.transform(generateNumbers(), {});
  const results = [];
  for await (const value of transformedGenerator) {
    results.push(value);
  }
  // Streaming also passes chunks through unchanged, and the hook still runs.
  expect(results).toEqual([1]);
  expect(wasCalled).toEqual(true);
});

test("RunnablePassthrough can invoke a function and pass through config", async () => {
  let wasCalled = false;
  let addOneResult: number = 0;
  const addOne = (input: number, config?: RunnableConfig) => {
    wasCalled = true;
    // FIX: the original guard was
    //   `!config?.configurable?.number ?? Number.isNaN(config?.configurable?.number)`.
    // The left operand of `??` is a boolean and can never be nullish, so the
    // NaN check was dead code and the guard degenerated to a truthiness test
    // (which would also have wrongly rejected a legitimate value of 0).
    // Check "missing or NaN" explicitly instead.
    const configuredNumber = config?.configurable?.number;
    if (typeof configuredNumber !== "number" || Number.isNaN(configuredNumber)) {
      throw new Error("configurable.number is NaN");
    }
    console.log(configuredNumber);
    addOneResult = input + configuredNumber;
  };
  const passthrough = new RunnablePassthrough<number>({
    func: addOne,
  });
  const result = await passthrough.invoke(1, {
    configurable: {
      number: 1,
    },
  });
  expect(result).toEqual(1);
  expect(addOneResult).toEqual(2);
  expect(wasCalled).toEqual(true);
});
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_stream_log.test.ts
/* eslint-disable no-promise-executor-return */
/* eslint-disable @typescript-eslint/no-explicit-any */
// Tests for `Runnable.streamLog`: patches streamed from a run are
// concatenated into a `RunLog` and the aggregated state is inspected.
import { test, expect } from "@jest/globals";
import {
  ChatPromptTemplate,
  HumanMessagePromptTemplate,
  PromptTemplate,
  SystemMessagePromptTemplate,
} from "../../prompts/index.js";
import { Document } from "../../documents/document.js";
import { RunLog } from "../../tracers/log_stream.js";
import { RunnableSequence, RunnableMap } from "../base.js";
import {
  FakeLLM,
  FakeChatModel,
  FakeRetriever,
} from "../../utils/testing/index.js";

test("Runnable streamLog method", async () => {
  const promptTemplate = PromptTemplate.fromTemplate("{input}");
  const llm = new FakeLLM({});
  const runnable = promptTemplate.pipe(llm);
  const result = await runnable.streamLog({ input: "Hello world!" });
  let finalState;
  // Accumulate every streamed patch chunk into a single RunLog state.
  for await (const chunk of result) {
    if (finalState === undefined) {
      finalState = chunk;
    } else {
      finalState = finalState.concat(chunk);
    }
  }
  // FakeLLM echoes the prompt, so the final output is the original input.
  expect((finalState as RunLog).state.final_output).toEqual({
    output: "Hello world!",
  });
});

test("Runnable streamLog method with a more complicated sequence", async () => {
  const promptTemplate = ChatPromptTemplate.fromMessages<{
    documents: string;
    question: string;
  }>([
    SystemMessagePromptTemplate.fromTemplate(`You are a nice assistant.`),
    HumanMessagePromptTemplate.fromTemplate(
      `Context:\n{documents}\n\nQuestion:\n{question}`
    ),
  ]);
  const llm = new FakeChatModel({});
  const retrieverOutputDocs = [
    new Document({ pageContent: "foo" }),
    new Document({ pageContent: "bar" }),
  ];
  // Map input: the question passes through, documents come from a retriever
  // sub-sequence (renamed via runName), and extraField is tagged so the
  // include filters below can select it.
  const inputs = {
    question: (input: string) => input,
    documents: RunnableSequence.from([
      new FakeRetriever({
        output: retrieverOutputDocs,
      }),
      (docs: Document[]) => JSON.stringify(docs),
    ]).withConfig({ runName: "CUSTOM_NAME" }),
    extraField: new FakeLLM({
      response: "testing",
    }).withConfig({ tags: ["only_one"] }),
  };
  const runnable = new RunnableMap({
    steps: inputs,
  })
    .pipe(promptTemplate)
    .pipe(llm);
  // Only runs matching these tag/name filters should appear in the logs.
  const stream = await runnable.streamLog(
    "Do you know the Muffin Man?",
    {},
    {
      includeTags: ["only_one"],
      includeNames: ["CUSTOM_NAME"],
    }
  );
  let finalState;
  for await (const chunk of stream) {
    if (finalState === undefined) {
      finalState = chunk;
    } else {
      finalState = finalState.concat(chunk);
    }
  }
  // The tagged FakeLLM run is captured with its configured response...
  expect((finalState as RunLog).state.logs.FakeLLM).toBeDefined();
  expect(
    (finalState as RunLog).state.logs.FakeLLM.final_output.generations[0][0]
      .text
  ).toEqual("testing");
  // ...and the renamed retriever sequence is captured under its custom name.
  expect((finalState as RunLog).state.logs.CUSTOM_NAME).toBeDefined();
  expect(
    (finalState as RunLog).state.logs.CUSTOM_NAME.final_output.output
  ).toEqual(JSON.stringify(retrieverOutputDocs));
});
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_with_fallbacks.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect } from "@jest/globals"; import { FakeLLM, FakeStreamingLLM } from "../../utils/testing/index.js"; test("RunnableWithFallbacks", async () => { const llm = new FakeLLM({ thrownErrorString: "Bad error!", }); await expect(async () => { const result1 = await llm.invoke("What up"); console.log(result1); }).rejects.toThrow(); const llmWithFallbacks = llm.withFallbacks({ fallbacks: [new FakeLLM({})], }); const result2 = await llmWithFallbacks.invoke("What up"); expect(result2).toEqual("What up"); }); test("RunnableWithFallbacks batch", async () => { const llm = new FakeLLM({ thrownErrorString: "Bad error!", }); await expect(async () => { const result1 = await llm.batch(["What up"]); console.log(result1); }).rejects.toThrow(); const llmWithFallbacks = llm.withFallbacks({ fallbacks: [new FakeLLM({})], }); const result2 = await llmWithFallbacks.batch([ "What up 1", "What up 2", "What up 3", ]); expect(result2).toEqual(["What up 1", "What up 2", "What up 3"]); }); test("RunnableWithFallbacks stream", async () => { const llm = new FakeStreamingLLM({ thrownErrorString: "Bad error!", }); await expect(async () => { await llm.stream("What up"); }).rejects.toThrow(); const llmWithFallbacks = llm.withFallbacks({ fallbacks: [new FakeStreamingLLM({})], }); const stream = await llmWithFallbacks.stream("What up"); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); expect(chunks.join("")).toEqual("What up"); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_remote.int.test.ts
// Integration tests for RemoteRunnable against live LangServe deployments.
// NOTE(review): these hit real network endpoints and will fail offline or if
// the hosted services change; several are intentionally `test.skip`ped
// because they target private URLs.
import { test, expect } from "@jest/globals";
import { HumanMessage } from "../../messages/index.js";
import { applyPatch } from "../../utils/json_patch.js";
import { RemoteRunnable } from "../remote.js";

test("invoke hosted langserve", async () => {
  const remote = new RemoteRunnable({
    url: `https://chat-langchain-backend.langchain.dev/chat`,
  });
  const result = await remote.invoke({
    question: "What is a document loader?",
  });
  console.log(result);
});

test("invoke hosted langserve error handling", async () => {
  // A nonexistent route should surface as a rejected promise.
  const remote = new RemoteRunnable({
    url: `https://chat-langchain-backend.langchain.dev/nonexistent`,
  });
  await expect(async () => {
    await remote.invoke({
      question: "What is a document loader?",
    });
  }).rejects.toThrowError();
});

test("stream hosted langserve", async () => {
  const remote = new RemoteRunnable({
    url: `https://chat-langchain-backend.langchain.dev/chat`,
  });
  const result = await remote.stream({
    question: "What is a document loader?",
  });
  // Sanity-check that a non-trivial amount of data was streamed.
  let totalByteSize = 0;
  for await (const chunk of result) {
    console.log(chunk);
    const jsonString = JSON.stringify(chunk);
    const byteSize = Buffer.byteLength(jsonString, "utf-8");
    totalByteSize += byteSize;
  }
  console.log("totalByteSize", totalByteSize);
});

test("stream error handling hosted langserve", async () => {
  const remote = new RemoteRunnable({
    url: `https://chat-langchain-backend.langchain.dev/nonexistent`,
  });
  await expect(async () => {
    const result = await remote.stream({
      question: "What is a document loader?",
    });
    for await (const chunk of result) {
      console.log(chunk);
    }
  }).rejects.toThrowError();
});

test("streamLog hosted langserve", async () => {
  const remote = new RemoteRunnable({
    url: `https://chat-langchain-backend.langchain.dev/chat`,
  });
  const result = await remote.streamLog({
    question: "What is a document loader?",
  });
  let totalByteSize = 0;
  // Rebuild the run state by applying each streamed JSON-patch chunk.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  let aggregate: any = {};
  for await (const chunk of result) {
    const jsonString = JSON.stringify(chunk);
    aggregate = applyPatch(aggregate, chunk.ops).newDocument;
    const byteSize = Buffer.byteLength(jsonString, "utf-8");
    totalByteSize += byteSize;
  }
  console.log("aggregate", aggregate);
  console.log("totalByteSize", totalByteSize);
});

test("streamLog error handling hosted langserve", async () => {
  const remote = new RemoteRunnable({
    url: `https://chat-langchain-backend.langchain.dev/nonexistent`,
  });
  const result = await remote.streamLog({
    question: "What is a document loader?",
  });
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  await expect(async () => {
    for await (const chunk of result) {
      console.log(chunk);
    }
  }).rejects.toThrowError();
});

test("streamLog hosted langserve with concat syntax", async () => {
  const remote = new RemoteRunnable({
    url: `https://chat-langchain-backend.langchain.dev/chat`,
  });
  const result = await remote.streamLog({
    question: "What is a document loader?",
  });
  let totalByteSize = 0;
  // Alternative aggregation style: RunLog.concat instead of applyPatch.
  let state;
  for await (const chunk of result) {
    if (!state) {
      state = chunk;
    } else {
      state = state.concat(chunk);
    }
    const jsonString = JSON.stringify(chunk);
    const byteSize = Buffer.byteLength(jsonString, "utf-8");
    totalByteSize += byteSize;
  }
  console.log("final state", state);
  console.log("totalByteSize", totalByteSize);
});

// Skipped: targets a private deployment URL.
test.skip("stream events hosted langserve with concat syntax", async () => {
  const remote = new RemoteRunnable({
    url: `https://privateurl.com/pirate-speak/`,
  });
  const result = await remote.streamEvents(
    {
      input: "What is a document loader?",
      chat_history: [new HumanMessage("What is a document loader?")],
    },
    { version: "v1" }
  );
  let totalByteSize = 0;
  const state = [];
  for await (const chunk of result) {
    console.log(chunk);
    state.push(chunk);
    const jsonString = JSON.stringify(chunk);
    const byteSize = Buffer.byteLength(jsonString, "utf-8");
    totalByteSize += byteSize;
  }
  // console.log("final state", state);
  console.log("totalByteSize", totalByteSize);
});

// Skipped: targets a specific private deployment; exercises streamLog with a
// raw message-array input and configurable thread/user ids.
test.skip("streamLog with raw messages", async () => {
  const chain = new RemoteRunnable({
    url: "https://aimor-deployment-bf1e4ebc87365334b3b8a6b175fb4151-ffoprvkqsa-uc.a.run.app/",
  });
  const events = chain.streamLog([new HumanMessage("I'd like some advice!")], {
    configurable: { thread_id: "THREAD_ID", user_id: "USER_ID" },
  });
  for await (const logEvent of events) {
    console.log(logEvent);
  }
});
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_interface.test.ts
/* eslint-disable no-promise-executor-return */
/* eslint-disable @typescript-eslint/no-explicit-any */
// Compatibility tests: the "V0" types below appear to be a pinned snapshot of
// an earlier public Runnable interface, kept verbatim so that current
// Runnables (`.pipe`, `RunnableSequence.from`) still accept objects that only
// implement the old contract. Do not "modernize" these types — their exact
// shape is what is being tested.
import { test, expect } from "@jest/globals";
import { StringOutputParser } from "../../output_parsers/string.js";
import { PromptTemplate } from "../../prompts/prompt.js";
import { RunnableSequence } from "../base.js";

type RunnableBatchOptionsV0 = {
  maxConcurrency?: number;
  returnExceptions?: boolean;
};

interface RunnableInterfaceV0<RunInput, RunOutput, CallOptions = any> {
  invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;

  batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptionsV0 & { returnExceptions?: false }
  ): Promise<RunOutput[]>;

  batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptionsV0 & { returnExceptions: true }
  ): Promise<(RunOutput | Error)[]>;

  batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptionsV0
  ): Promise<(RunOutput | Error)[]>;

  // NOTE(review): this overload duplicates the previous one; preserved as-is
  // from the snapshotted interface.
  batch(
    inputs: RunInput[],
    options?: Partial<CallOptions> | Partial<CallOptions>[],
    batchOptions?: RunnableBatchOptionsV0
  ): Promise<(RunOutput | Error)[]>;

  stream(
    input: RunInput,
    options?: Partial<CallOptions>
  ): Promise<IterableReadableStreamV0<RunOutput>>;

  transform(
    generator: AsyncGenerator<RunInput>,
    options: Partial<CallOptions>
  ): AsyncGenerator<RunOutput>;

  getName(suffix?: string): string;

  get lc_id(): string[];
}

// Snapshot of the old IterableReadableStream: a ReadableStream that is also
// an async iterator.
class IterableReadableStreamV0<T> extends ReadableStream<T> {
  public reader: ReadableStreamDefaultReader<T>;

  // Lazily acquire the stream's reader on first use.
  ensureReader() {
    if (!this.reader) {
      this.reader = this.getReader();
    }
  }

  async next() {
    this.ensureReader();
    try {
      const result = await this.reader.read();
      if (result.done) this.reader.releaseLock(); // release lock when stream becomes closed
      return {
        done: result.done,
        value: result.value as T, // Cloudflare Workers typing fix
      };
    } catch (e) {
      this.reader.releaseLock(); // release lock when stream becomes errored
      throw e;
    }
  }

  async return() {
    this.ensureReader();
    // If wrapped in a Node stream, cancel is already called.
    if (this.locked) {
      const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet
      this.reader.releaseLock(); // release lock first
      await cancelPromise; // now await it
    }
    return { done: true, value: undefined as T }; // This cast fixes TS typing, and convention is to ignore final chunk value anyway
  }

  async throw(e: any): Promise<IteratorResult<T>> {
    throw e;
  }

  [Symbol.asyncIterator]() {
    return this as any;
  }

  static fromReadableStream<T>(stream: ReadableStream<T>) {
    // From https://developer.mozilla.org/en-US/docs/Web/API/Streams_API/Using_readable_streams#reading_the_stream
    const reader = stream.getReader();
    return new IterableReadableStreamV0<T>({
      start(controller) {
        return pump();
        function pump(): Promise<T | undefined> {
          return reader.read().then(({ done, value }) => {
            // When no more data needs to be consumed, close the stream
            if (done) {
              controller.close();
              return;
            }
            // Enqueue the next data chunk into our target stream
            controller.enqueue(value);
            return pump();
          });
        }
      },
      cancel() {
        reader.releaseLock();
      },
    });
  }

  static fromAsyncGenerator<T>(generator: AsyncGenerator<T>) {
    return new IterableReadableStreamV0<T>({
      async pull(controller) {
        const { value, done } = await generator.next();
        // When no more data needs to be consumed, close the stream
        if (done) {
          controller.close();
        }
        // Fix: `else if (value)` will hang the streaming when nullish value (e.g. empty string) is pulled
        controller.enqueue(value);
      },
    });
  }
}

/**
 * Minimal stand-in for an AI chat message with the serialization markers
 * (`lc_namespace`, `lc_serializable`) the framework inspects.
 */
class AIMessageV0 {
  lc_namespace = ["langchain_core", "messages"];

  lc_serializable = true;

  /** The content of the message. */
  content: string;

  /** The name of the message sender in a multi-user chat. */
  name?: string;

  /** The type of the message. */
  _getType() {
    return "ai";
  }

  constructor(content: string) {
    this.content = content;
  }
}

// Minimal stand-in for a string prompt value; `toString` yields the prompt.
class StringPromptValueV0 {
  lc_namespace = ["langchain_core", "prompt_values"];

  lc_serializable = true;

  value: string;

  constructor(value: string) {
    this.value = value;
  }

  toString() {
    return this.value;
  }
}

// A plain class (NOT extending the current Runnable base) that satisfies only
// the V0 interface. `invoke` echoes the prompt as an AIMessageV0; the other
// methods are stubs, since only `invoke` is exercised below.
class RunnableV0
  implements RunnableInterfaceV0<StringPromptValueV0, AIMessageV0>
{
  lc_serializable = true;

  protected lc_runnable = true;

  async invoke(
    input: StringPromptValueV0,
    _options?: Partial<any> | undefined
  ): Promise<AIMessageV0> {
    return new AIMessageV0(input.toString());
  }

  async batch(
    inputs: StringPromptValueV0[],
    options?: Partial<any> | Partial<any>[] | undefined,
    batchOptions?:
      | (RunnableBatchOptionsV0 & { returnExceptions?: false | undefined })
      | undefined
  ): Promise<AIMessageV0[]>;

  async batch(
    inputs: StringPromptValueV0[],
    options?: Partial<any> | Partial<any>[] | undefined,
    batchOptions?:
      | (RunnableBatchOptionsV0 & { returnExceptions: true })
      | undefined
  ): Promise<AIMessageV0[]>;

  async batch(
    inputs: StringPromptValueV0[],
    options?: Partial<any> | Partial<any>[] | undefined,
    batchOptions?: RunnableBatchOptionsV0 | undefined
  ): Promise<AIMessageV0[]>;

  async batch(
    inputs: StringPromptValueV0[],
    options?: Partial<any> | Partial<any>[] | undefined,
    batchOptions?: RunnableBatchOptionsV0 | undefined
  ): Promise<AIMessageV0[]>;

  async batch(
    _inputs: unknown,
    _options?: unknown,
    _batchOptions?: unknown
  ): Promise<AIMessageV0[]> {
    return [];
  }

  async stream(
    _input: StringPromptValueV0,
    _options?: Partial<any> | undefined
  ): Promise<IterableReadableStreamV0<any>> {
    throw new Error("Not implemented");
  }

  // eslint-disable-next-line require-yield
  async *transform(
    _generator: AsyncGenerator<StringPromptValueV0>,
    _options: Partial<any>
  ): AsyncGenerator<AIMessageV0> {
    throw new Error("Not implemented");
  }

  getName(): string {
    return "TEST";
  }

  get lc_id(): string[] {
    return ["TEST"];
  }
}

test("Pipe with a class that implements a runnable interface", async () => {
  const promptTemplate = PromptTemplate.fromTemplate("{input}");
  const llm = new RunnableV0();
  const outputParser = new StringOutputParser();
  const runnable = promptTemplate.pipe(llm).pipe(outputParser);
  const result = await runnable.invoke({ input: "Hello world!!" });
  console.log(result);
  expect(result).toBe("Hello world!!");
});

test("Runnable sequence with a class that implements a runnable interface", async () => {
  const promptTemplate = PromptTemplate.fromTemplate("{input}");
  const llm = new RunnableV0();
  const outputParser = new StringOutputParser();
  const runnable = RunnableSequence.from([promptTemplate, llm, outputParser]);
  const result = await runnable.invoke({ input: "Hello sequence!!" });
  console.log(result);
  expect(result).toBe("Hello sequence!!");
});
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_remote.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { jest, test, expect, describe, beforeEach, afterEach, } from "@jest/globals"; import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, } from "../../messages/index.js"; import { RemoteRunnable } from "../remote.js"; import { ChatPromptValue } from "../../prompt_values.js"; import { PromptTemplate } from "../../prompts/prompt.js"; const BASE_URL = "http://my-langserve-endpoint"; function respToStream(resp: string): ReadableStream<Uint8Array> { const chunks = resp.split("\n"); return new ReadableStream<Uint8Array>({ start(controller) { for (const chunk of chunks) { controller.enqueue(Buffer.from(`${chunk}\n`)); } controller.close(); }, }); } const aResp = `event: data data: ["a", "b", "c", "d"] event: end`; const bResp = `event: data data: {"content": "", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "\\"", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "object", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "1", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": ",", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": " object", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "2", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": ",", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": " object", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "3", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": ",", "additional_kwargs": {}, "type": "AIMessageChunk", 
"example": false} event: data data: {"content": " object", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "4", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": ",", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": " object", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "5", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "\\"", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: data data: {"content": "", "additional_kwargs": {}, "type": "AIMessageChunk", "example": false} event: end`; const strangeTypesResp = `event: data data: {"content": "what is a document loader", "additional_kwargs": {}, "type": "human", "example": false} event: data data: {"messages":[{"content":"You are an expert programmer and problem-solver, tasked with answering any question about Langchain.","type":"system","additional_kwargs":{}},{"content":"I am an AI","type":"ai","additional_kwargs":{}}]} event: end`; describe("RemoteRunnable", () => { beforeEach(() => { // mock langserve service const returnDataByEndpoint: Record<string, BodyInit> = { "/a/invoke": JSON.stringify({ output: ["a", "b", "c"] }), "/a/batch": JSON.stringify({ output: [ ["a", "b", "c"], ["d", "e", "f"], ], }), "/a/stream": respToStream(aResp), "/b/stream": respToStream(bResp), "/strange_types/stream": respToStream(strangeTypesResp), }; const oldFetch = global.fetch; global.fetch = jest .fn() .mockImplementation(async (url: any, init?: any) => { if (!url.startsWith(BASE_URL)) return await oldFetch(url, init); const { pathname } = new URL(url); const resp: Response = new Response(returnDataByEndpoint[pathname]); return resp; }) as any; }); afterEach(() => { jest.clearAllMocks(); }); test("Invoke local langserve", async () => { // mock fetch, expect 
/invoke const remote = new RemoteRunnable({ url: `${BASE_URL}/a` }); const result = await remote.invoke({ text: "string" }); expect(fetch).toHaveBeenCalledWith( `${BASE_URL}/a/invoke`, expect.objectContaining({ body: '{"input":{"text":"string"},"config":{"tags":[],"metadata":{},"recursionLimit":25},"kwargs":{}}', }) ); expect(result).toEqual(["a", "b", "c"]); }); test("Invoke local langserve passing a configurable object", async () => { // mock fetch, expect /invoke const remote = new RemoteRunnable({ url: `${BASE_URL}/a` }); const result = await remote.invoke( { text: "string" }, { configurable: { destination: "destination", integration_id: "integration_id", user_id: "user_id", }, } ); expect(fetch).toHaveBeenCalledWith( `${BASE_URL}/a/invoke`, expect.objectContaining({ body: expect.any(String), }) ); expect(result).toEqual(["a", "b", "c"]); }); test("Batch local langserve", async () => { const returnData = [ ["a", "b", "c"], ["d", "e", "f"], ]; const remote = new RemoteRunnable({ url: `${BASE_URL}/a` }); const result = await remote.batch([{ text: "1" }, { text: "2" }]); expect(result).toEqual(returnData); }); test("Stream local langserve", async () => { const remote = new RemoteRunnable({ url: `${BASE_URL}/a` }); const stream = await remote.stream({ text: "What are the 5 best apples?" }); let chunkCount = 0; for await (const chunk of stream) { expect(chunk).toEqual(["a", "b", "c", "d"]); chunkCount += 1; } expect(chunkCount).toBe(1); }); test("Stream model output", async () => { const remote = new RemoteRunnable({ url: `${BASE_URL}/b` }); const stream = await remote.stream({ text: "What are the 5 best apples?" }); let chunkCount = 0; let accumulator: AIMessageChunk | null = null; for await (const chunk of stream) { const innerChunk = chunk as AIMessageChunk; accumulator = accumulator ? 
accumulator.concat(innerChunk) : innerChunk; chunkCount += 1; } expect(chunkCount).toBe(18); expect(accumulator?.content).toEqual( '"object1, object2, object3, object4, object5"' ); }); test("Stream legacy data type formats", async () => { const remote = new RemoteRunnable({ url: `${BASE_URL}/strange_types` }); const stream = await remote.stream({ text: "What are the 5 best apples?" }); const chunks = []; for await (const chunk of stream) { console.log(chunk); chunks.push(chunk); } expect(chunks[0]).toBeInstanceOf(HumanMessage); expect(chunks[1]).toBeInstanceOf(ChatPromptValue); expect((chunks[1] as ChatPromptValue).messages[0]).toBeInstanceOf( SystemMessage ); expect((chunks[1] as ChatPromptValue).messages[1]).toBeInstanceOf( AIMessage ); }); test("Streaming in a chain with model output", async () => { const remote = new RemoteRunnable({ url: `${BASE_URL}/b` }); const prompt = PromptTemplate.fromTemplate(""); const chunks = await prompt .pipe(remote) .stream({ text: "What are the 5 best apples?" }); let chunkCount = 0; let accumulator: AIMessageChunk | null = null; for await (const chunk of chunks) { const innerChunk = chunk as AIMessageChunk; accumulator = accumulator ? accumulator.concat(innerChunk) : innerChunk; chunkCount += 1; } expect(chunkCount).toBe(18); expect(accumulator?.content).toEqual( '"object1, object2, object3, object4, object5"' ); }); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_map.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect } from "@jest/globals"; import { StringOutputParser } from "../../output_parsers/string.js"; import { ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, } from "../../prompts/chat.js"; import { concat } from "../../utils/stream.js"; import { FakeLLM, FakeChatModel, FakeRetriever, FakeStreamingLLM, } from "../../utils/testing/index.js"; import { RunnableSequence, RunnableMap } from "../base.js"; import { RunnablePassthrough } from "../passthrough.js"; test("Create a runnable sequence with a runnable map", async () => { const promptTemplate = ChatPromptTemplate.fromMessages<{ documents: string; question: string; }>([ SystemMessagePromptTemplate.fromTemplate(`You are a nice assistant.`), HumanMessagePromptTemplate.fromTemplate( `Context:\n{documents}\n\nQuestion:\n{question}` ), ]); const llm = new FakeChatModel({}); const inputs = { question: (input: string) => input, documents: RunnableSequence.from([ new FakeRetriever(), (docs: Document[]) => JSON.stringify(docs), ]), extraField: new FakeLLM({}), }; const runnable = new RunnableMap({ steps: inputs }) .pipe(promptTemplate) .pipe(llm); const result = await runnable.invoke("Do you know the Muffin Man?"); console.log(result); expect(result.content).toEqual( `You are a nice assistant.\nContext:\n[{"pageContent":"foo","metadata":{}},{"pageContent":"bar","metadata":{}}]\n\nQuestion:\nDo you know the Muffin Man?` ); }); test("Test map inference in a sequence", async () => { const prompt = ChatPromptTemplate.fromTemplate( "context: {context}, question: {question}" ); const chain = RunnableSequence.from([ { question: new RunnablePassthrough(), context: async () => "SOME STUFF", }, prompt, new FakeLLM({}), new StringOutputParser(), ]); const response = await chain.invoke("Just passing through."); console.log(response); expect(response).toBe( `Human: context: SOME STUFF, 
question: Just passing through.` ); }); test("Should not allow mismatched inputs", async () => { const prompt = ChatPromptTemplate.fromTemplate( "context: {context}, question: {question}" ); const badChain = RunnableSequence.from([ { // @ts-expect-error TS compiler should flag mismatched input types question: new FakeLLM({}), context: async (input: number) => input, }, prompt, new FakeLLM({}), new StringOutputParser(), ]); console.log(badChain); }); test("Should not allow improper inputs into a map in a sequence", async () => { const prompt = ChatPromptTemplate.fromTemplate( "context: {context}, question: {question}" ); const map = RunnableMap.from({ question: new FakeLLM({}), context: async (_input: string) => 9, }); // @ts-expect-error TS compiler should flag mismatched output types const runnable = prompt.pipe(map); console.log(runnable); }); test("Should not allow improper outputs from a map into the next item in a sequence", async () => { const map = RunnableMap.from({ question: new FakeLLM({}), context: async (_input: string) => 9, }); // @ts-expect-error TS compiler should flag mismatched output types const runnable = map.pipe(new FakeLLM({})); console.log(runnable); }); test("Should stream chunks from each step as they are produced", async () => { const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a nice assistant."], "{question}", ]); const chat = new FakeChatModel({}); const llm = new FakeStreamingLLM({ sleep: 0 }); const chain = RunnableSequence.from([ prompt, RunnableMap.from({ passthrough: new RunnablePassthrough(), chat, llm, }), ]); const stream = await chain.stream({ question: "What is your name?" }); const chunks = []; for await (const chunk of stream) { if (chunk.chat?.id !== undefined) { chunk.chat.id = expect.any(String) as any; chunk.chat.lc_kwargs.id = expect.any(String); } chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(3); expect(chunks.reduce(concat)).toEqual( await chain.invoke({ question: "What is your name?" 
}) ); const chainWithSelect = chain.pipe((output) => output.llm); expect(await chainWithSelect.invoke({ question: "What is your name?" })) .toEqual(`System: You are a nice assistant. Human: What is your name?`); }); test("Should stream chunks through runnable passthrough and assign", async () => { const llm = new FakeStreamingLLM({ sleep: 0 }); const chain = RunnableSequence.from([ llm, RunnableMap.from({ llm: new RunnablePassthrough(), }), ]); const stream = await chain.stream("What is your name?"); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks).toEqual([ { llm: "W" }, { llm: "h" }, { llm: "a" }, { llm: "t" }, { llm: " " }, { llm: "i" }, { llm: "s" }, { llm: " " }, { llm: "y" }, { llm: "o" }, { llm: "u" }, { llm: "r" }, { llm: " " }, { llm: "n" }, { llm: "a" }, { llm: "m" }, { llm: "e" }, { llm: "?" }, ]); expect(chunks.reduce(concat)).toEqual( await chain.invoke("What is your name?") ); const chainWithAssign = chain.pipe( RunnablePassthrough.assign({ chat: RunnableSequence.from([(input) => input.llm, llm]), }) ); const stream2 = await chainWithAssign.stream("What is your name?"); const chunks2 = []; for await (const chunk of stream2) { chunks2.push(chunk); } expect(chunks2).toEqual([ { llm: "W" }, { llm: "h" }, { llm: "a" }, { llm: "t" }, { llm: " " }, { llm: "i" }, { llm: "s" }, { llm: " " }, { llm: "y" }, { llm: "o" }, { llm: "u" }, { llm: "r" }, { llm: " " }, { llm: "n" }, { llm: "a" }, { llm: "m" }, { llm: "e" }, { llm: "?" }, { chat: "W" }, { chat: "h" }, { chat: "a" }, { chat: "t" }, { chat: " " }, { chat: "i" }, { chat: "s" }, { chat: " " }, { chat: "y" }, { chat: "o" }, { chat: "u" }, { chat: "r" }, { chat: " " }, { chat: "n" }, { chat: "a" }, { chat: "m" }, { chat: "e" }, { chat: "?" 
}, ]); expect(chunks2.reduce(concat)).toEqual( await chainWithAssign.invoke("What is your name?") ); const chainWithPick = chainWithAssign.pick("llm"); const chunks3 = []; for await (const chunk of await chainWithPick.stream("What is your name?")) { chunks3.push(chunk); } expect(chunks3).toEqual([ "W", "h", "a", "t", " ", "i", "s", " ", "y", "o", "u", "r", " ", "n", "a", "m", "e", "?", ]); expect(chunks3.reduce(concat)).toEqual( await chainWithPick.invoke("What is your name?") ); const chainWithPickMulti = chainWithAssign.pick(["llm"]); const chunks4 = []; for await (const chunk of await chainWithPickMulti.stream( "What is your name?" )) { chunks4.push(chunk); } expect(chunks4).toEqual([ { llm: "W" }, { llm: "h" }, { llm: "a" }, { llm: "t" }, { llm: " " }, { llm: "i" }, { llm: "s" }, { llm: " " }, { llm: "y" }, { llm: "o" }, { llm: "u" }, { llm: "r" }, { llm: " " }, { llm: "n" }, { llm: "a" }, { llm: "m" }, { llm: "e" }, { llm: "?" }, ]); expect(chunks4.reduce(concat)).toEqual( await chainWithPickMulti.invoke("What is your name?") ); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_tools.test.ts
import { z } from "zod"; import { test, expect } from "@jest/globals"; import { RunnableLambda, RunnableToolLike } from "../base.js"; import { FakeRetriever } from "../../utils/testing/index.js"; import { Document } from "../../documents/document.js"; test("Runnable asTool works", async () => { const schema = z.object({ foo: z.string(), }); const runnable = RunnableLambda.from<z.infer<typeof schema>, string>( (input, config) => { return `${input.foo}${config?.configurable.foo}`; } ); const tool = runnable.asTool({ schema, }); expect(tool).toBeInstanceOf(RunnableToolLike); expect(tool.schema).toBe(schema); expect(tool.name).toBe(runnable.getName()); }); test("Runnable asTool works with all populated fields", async () => { const schema = z.object({ foo: z.string(), }); const runnable = RunnableLambda.from<z.infer<typeof schema>, string>( (input, config) => { return `${input.foo}${config?.configurable.foo}`; } ); const tool = runnable.asTool({ schema, name: "test", description: "test", }); expect(tool).toBeInstanceOf(RunnableToolLike); expect(tool.schema).toBe(schema); expect(tool.description).toBe("test"); expect(tool.name).toBe("test"); }); test("Runnable asTool can invoke", async () => { const schema = z.object({ foo: z.string(), }); const runnable = RunnableLambda.from<z.infer<typeof schema>, string>( (input, config) => { return `${input.foo}${config?.configurable.foo}`; } ); const tool = runnable.asTool({ schema, }); const toolResponse = await tool.invoke( { foo: "bar", }, { configurable: { foo: "bar", }, } ); expect(toolResponse).toBe("barbar"); }); test("asTool should type error with mismatched schema", async () => { // asTool infers the type of the Zod schema from the existing runnable's RunInput generic. // If the Zod schema does not match the RunInput, it should throw a type error. 
const schema = z.object({ foo: z.string(), }); const runnable = RunnableLambda.from<{ bar: string }, string>( (input, config) => { return `${input.bar}${config?.configurable.foo}`; } ); runnable.asTool({ // @ts-expect-error - Should error. If this does not give a type error, the generics/typing of `asTool` is broken. schema, }); }); test("Create a runnable tool directly from RunnableToolLike", async () => { const schema = z.object({ foo: z.string(), }); const adderFunc = (_: z.infer<typeof schema>): Promise<boolean> => { return Promise.resolve(true); }; const tool = new RunnableToolLike({ schema, name: "test", description: "test", bound: RunnableLambda.from(adderFunc), }); const result = await tool.invoke({ foo: "bar" }); expect(result).toBe(true); }); test("asTool can take a single string input", async () => { const firstRunnable = RunnableLambda.from<string, string>((input) => { return `${input}a`; }); const secondRunnable = RunnableLambda.from<string, string>((input) => { return `${input}z`; }); const runnable = firstRunnable.pipe(secondRunnable); const asTool = runnable.asTool({ schema: z.string(), }); const result = await asTool.invoke("b"); expect(result).toBe("baz"); }); test("Runnable asTool uses Zod schema description if not provided", async () => { const description = "Test schema"; const schema = z .object({ foo: z.string(), }) .describe(description); const runnable = RunnableLambda.from<z.infer<typeof schema>, string>( (input, config) => { return `${input.foo}${config?.configurable.foo}`; } ); const tool = runnable.asTool({ schema, }); expect(tool.description).toBe(description); }); test("Runnable asTool can accept a string zod schema", async () => { const lambda = RunnableLambda.from<string, string>((input) => { return `${input}a`; }).asTool({ name: "string_tool", description: "A tool that appends 'a' to the input string", schema: z.string(), }); const result = await lambda.invoke("b"); expect(result).toBe("ba"); }); test("Runnables which dont accept 
ToolCalls as inputs can accept ToolCalls", async () => { const pageContent = "Dogs are pretty cool, man!"; const retriever = new FakeRetriever({ output: [ new Document({ pageContent, }), ], }); const tool = retriever.asTool({ name: "pet_info_retriever", description: "Get information about pets.", schema: z.string(), }); const result = await tool.invoke({ type: "tool_call", name: "pet_info_retriever", args: { input: "dogs", }, id: "string", }); expect(result).toHaveLength(1); expect(result[0].pageContent).toBe(pageContent); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_stream_events.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect, afterEach } from "@jest/globals"; import { z } from "zod"; import { RunnableLambda, RunnableMap, RunnablePassthrough, RunnablePick, } from "../index.js"; import { ChatPromptTemplate } from "../../prompts/chat.js"; import { FakeListChatModel, FakeRetriever, FakeStreamingLLM, } from "../../utils/testing/index.js"; import { AIMessageChunk, HumanMessage, SystemMessage, } from "../../messages/index.js"; import { ChatGenerationChunk, GenerationChunk } from "../../outputs.js"; import { DynamicStructuredTool, DynamicTool } from "../../tools/index.js"; import { Document } from "../../documents/document.js"; function reverse(s: string) { // Reverse a string. return s.split("").reverse().join(""); } const anyString = expect.any(String) as unknown as string; const originalCallbackValue = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; afterEach(() => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalCallbackValue; }); test("Runnable streamEvents method", async () => { const chain = RunnableLambda.from(reverse).withConfig({ runName: "reverse", }); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v1" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "reverse", run_id: expect.any(String), tags: [], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "reverse", run_id: expect.any(String), tags: [], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "reverse", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with three runnables", async () => { const r = RunnableLambda.from(reverse); const chain = r .withConfig({ runName: "1" }) .pipe(r.withConfig({ runName: "2" })) .pipe(r.withConfig({ 
runName: "3" })); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v1" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, { data: {}, event: "on_chain_start", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: {}, event: "on_chain_start", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "hello" }, event: "on_chain_stream", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: {}, event: "on_chain_start", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { input: "olleh", output: "hello" }, event: "on_chain_end", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with three runnables with backgrounded callbacks set to true", async () => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "true"; const r = RunnableLambda.from(reverse); const 
chain = r .withConfig({ runName: "1" }) .pipe(r.withConfig({ runName: "2" })) .pipe(r.withConfig({ runName: "3" })); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v1" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, { data: {}, event: "on_chain_start", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: {}, event: "on_chain_start", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "hello" }, event: "on_chain_stream", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: {}, event: "on_chain_start", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { input: "olleh", output: "hello" }, event: "on_chain_end", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with three runnables with filtering", async () => { const r = 
RunnableLambda.from(reverse); const chain = r .withConfig({ runName: "1" }) .pipe(r.withConfig({ runName: "2", tags: ["my_tag"] })) .pipe(r.withConfig({ runName: "3", tags: ["my_tag"] })); const events = []; const eventStream = await chain.streamEvents( "hello", { version: "v1" }, { includeNames: ["1"], } ); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: {}, event: "on_chain_start", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, ]); const events2 = []; const eventStream2 = await chain.streamEvents( "hello", { version: "v1" }, { excludeNames: ["2"], includeTags: ["my_tag"], } ); for await (const event of eventStream2) { events2.push(event); } expect(events2).toEqual([ { data: {}, event: "on_chain_start", metadata: {}, name: "3", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:3", "my_tag"]), }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "3", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:3", "my_tag"]), }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "3", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:3", "my_tag"]), }, ]); }); test("Runnable streamEvents method with a runnable map", async () => { const r = RunnableLambda.from(reverse); const chain = RunnableMap.from({ reversed: r, original: new RunnablePassthrough(), }).pipe(new RunnablePick("reversed")); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v1" }); for await (const event of eventStream) { events.push(event); } console.log(events); expect(events).toEqual([ { run_id: 
expect.any(String), event: "on_chain_start", name: "RunnableSequence", tags: [], metadata: {}, data: { input: "hello" }, }, { event: "on_chain_start", name: "RunnableMap", run_id: expect.any(String), tags: ["seq:step:1"], metadata: {}, data: {}, }, { event: "on_chain_start", name: "RunnableLambda", run_id: expect.any(String), tags: ["map:key:reversed"], metadata: {}, data: {}, }, { event: "on_chain_start", name: "RunnablePassthrough", run_id: expect.any(String), tags: ["map:key:original"], metadata: {}, data: {}, }, { event: "on_chain_stream", name: "RunnablePassthrough", run_id: expect.any(String), tags: ["map:key:original"], metadata: {}, data: { chunk: "hello" }, }, { event: "on_chain_stream", name: "RunnableLambda", run_id: expect.any(String), tags: ["map:key:reversed"], metadata: {}, data: { chunk: "olleh" }, }, { event: "on_chain_stream", name: "RunnableMap", run_id: expect.any(String), tags: ["seq:step:1"], metadata: {}, data: { chunk: { original: "hello", }, }, }, { event: "on_chain_start", name: "RunnablePick", run_id: expect.any(String), tags: ["seq:step:2"], metadata: {}, data: {}, }, { event: "on_chain_stream", name: "RunnableMap", run_id: expect.any(String), tags: ["seq:step:1"], metadata: {}, data: { chunk: { reversed: "olleh", }, }, }, { event: "on_chain_end", name: "RunnablePassthrough", run_id: expect.any(String), tags: ["map:key:original"], metadata: {}, data: { input: "hello", output: "hello" }, }, { event: "on_chain_stream", name: "RunnablePick", run_id: expect.any(String), tags: ["seq:step:2"], metadata: {}, data: { chunk: "olleh" }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: [], metadata: {}, name: "RunnableSequence", data: { chunk: "olleh" }, }, { event: "on_chain_end", name: "RunnableLambda", run_id: expect.any(String), tags: ["map:key:reversed"], metadata: {}, data: { input: "hello", output: "olleh" }, }, { event: "on_chain_end", name: "RunnableMap", run_id: expect.any(String), tags: ["seq:step:1"], metadata: {}, data: 
{ input: "hello", output: { original: "hello", reversed: "olleh", }, }, }, { event: "on_chain_end", name: "RunnablePick", run_id: expect.any(String), tags: ["seq:step:2"], metadata: {}, data: { output: "olleh" }, }, { event: "on_chain_end", name: "RunnableSequence", run_id: expect.any(String), tags: [], metadata: {}, data: { output: "olleh" }, }, ]); }); test("Runnable streamEvents method with llm", async () => { const model = new FakeStreamingLLM({ responses: ["hey!"], }).withConfig({ metadata: { a: "b" }, tags: ["my_model"], runName: "my_model", }); const events = []; const eventStream = await model.streamEvents("hello", { version: "v1" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { event: "on_llm_start", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["my_model"]), metadata: { a: "b", }, data: { input: "hello", }, }, { event: "on_llm_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_model"]), metadata: { a: "b", }, name: "my_model", data: { chunk: "h" }, }, { event: "on_llm_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_model"]), metadata: { a: "b", }, name: "my_model", data: { chunk: "e" }, }, { event: "on_llm_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_model"]), metadata: { a: "b", }, name: "my_model", data: { chunk: "y" }, }, { event: "on_llm_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_model"]), metadata: { a: "b", }, name: "my_model", data: { chunk: "!" 
}, }, { event: "on_llm_end", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["my_model"]), metadata: { a: "b", }, data: { output: { generations: [ [ new GenerationChunk({ generationInfo: {}, text: "hey!", }), ], ], }, }, }, ]); }); test("Runnable streamEvents method with chat model chain", async () => { const template = ChatPromptTemplate.fromMessages([ ["system", "You are Godzilla"], ["human", "{question}"], ]).withConfig({ runName: "my_template", tags: ["my_template"], }); const model = new FakeListChatModel({ responses: ["ROAR"], }).withConfig({ metadata: { a: "b" }, tags: ["my_model"], runName: "my_model", }); const chain = template.pipe(model).withConfig({ metadata: { foo: "bar" }, tags: ["my_chain"], runName: "my_chain", }); const events = []; const eventStream = await chain.streamEvents( { question: "hello" }, { version: "v1" } ); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { run_id: expect.any(String), event: "on_chain_start", name: "my_chain", tags: ["my_chain"], metadata: { foo: "bar", }, data: { input: { question: "hello", }, }, }, { data: { input: { question: "hello" } }, event: "on_prompt_start", metadata: { foo: "bar" }, name: "my_template", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "seq:step:1", "my_template"]), }, { event: "on_prompt_end", name: "my_template", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:1", "my_template", "my_chain"]), metadata: { foo: "bar", }, data: { input: { question: "hello", }, output: await template.invoke({ question: "hello" }), }, }, { event: "on_llm_start", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]), metadata: { foo: "bar", a: "b", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, data: { input: { messages: [ [new SystemMessage("You are Godzilla"), new HumanMessage("hello")], ], }, }, }, { event: 
"on_llm_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "R" }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessageChunk({ id: anyString, content: "R" }) }, }, { event: "on_llm_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "O" }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessageChunk({ id: anyString, content: "O" }) }, }, { event: "on_llm_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "A" }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessageChunk({ id: anyString, content: "A" }) }, }, { event: "on_llm_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "R" }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { 
chunk: new AIMessageChunk({ id: anyString, content: "R" }) }, }, { event: "on_llm_end", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]), metadata: { foo: "bar", a: "b", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, data: { input: { messages: [ [new SystemMessage("You are Godzilla"), new HumanMessage("hello")], ], }, output: { generations: [ [ new ChatGenerationChunk({ generationInfo: {}, message: new AIMessageChunk({ id: anyString, content: "ROAR" }), text: "ROAR", }), ], ], }, }, }, { event: "on_chain_end", name: "my_chain", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, data: { output: new AIMessageChunk({ id: anyString, content: "ROAR" }), }, }, ]); }); test("Runnable streamEvents method with simple tools", async () => { const tool = new DynamicTool({ func: async () => "hello", name: "parameterless", description: "A tool that does nothing", }); const events = []; const eventStream = await tool.streamEvents({}, { version: "v1" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: {} }, event: "on_tool_start", metadata: {}, name: "parameterless", run_id: expect.any(String), tags: [], }, { data: { chunk: "hello" }, event: "on_tool_stream", metadata: {}, name: "parameterless", run_id: expect.any(String), tags: [], }, { data: { output: "hello" }, event: "on_tool_end", metadata: {}, name: "parameterless", run_id: expect.any(String), tags: [], }, ]); const toolWithParams = new DynamicStructuredTool({ func: async (params: { x: number; y: string }) => JSON.stringify({ x: params.x, y: params.y }), schema: z.object({ x: z.number(), y: z.string(), }), name: "with_parameters", description: "A tool that does nothing", }); const events2 = []; const eventStream2 = await toolWithParams.streamEvents( { x: 1, y: "2" }, { version: "v1" } ); for await (const event of eventStream2) { events2.push(event); } 
expect(events2).toEqual([ { data: { input: { x: 1, y: "2" } }, event: "on_tool_start", metadata: {}, name: "with_parameters", run_id: expect.any(String), tags: [], }, { data: { chunk: JSON.stringify({ x: 1, y: "2" }) }, event: "on_tool_stream", metadata: {}, name: "with_parameters", run_id: expect.any(String), tags: [], }, { data: { output: JSON.stringify({ x: 1, y: "2" }) }, event: "on_tool_end", metadata: {}, name: "with_parameters", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with a retriever", async () => { const retriever = new FakeRetriever({ output: [ new Document({ pageContent: "hello world!", metadata: { foo: "bar" } }), new Document({ pageContent: "goodbye world!", metadata: { food: "spare" }, }), ], }); const events = []; const eventStream = await retriever.streamEvents("hello", { version: "v1", }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello", }, event: "on_retriever_start", metadata: {}, name: "FakeRetriever", run_id: expect.any(String), tags: [], }, { data: { chunk: [ new Document({ pageContent: "hello world!", metadata: { foo: "bar" }, }), new Document({ pageContent: "goodbye world!", metadata: { food: "spare" }, }), ], }, event: "on_retriever_stream", metadata: {}, name: "FakeRetriever", run_id: expect.any(String), tags: [], }, { data: { output: { documents: [ new Document({ pageContent: "hello world!", metadata: { foo: "bar" }, }), new Document({ pageContent: "goodbye world!", metadata: { food: "spare" }, }), ], }, }, event: "on_retriever_end", metadata: {}, name: "FakeRetriever", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with text/event-stream encoding", async () => { const chain = RunnableLambda.from(reverse).withConfig({ runName: "reverse", }); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v1", encoding: "text/event-stream", runId: "1234", }); for await (const 
event of eventStream) { events.push(event); } const decoder = new TextDecoder(); expect(events.length).toEqual(4); const dataEvents = events .slice(0, 3) .map((event) => decoder.decode(event).split("event: data\ndata: ")[1]); const expectedPayloads = [ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "reverse", run_id: "1234", tags: [], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "reverse", run_id: "1234", tags: [], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "reverse", run_id: "1234", tags: [], }, ]; for (let i = 0; i < dataEvents.length; i += 1) { expect(dataEvents[i].endsWith("\n\n")).toBe(true); expect(JSON.parse(dataEvents[i].replace("\n\n", ""))).toEqual( expectedPayloads[i] ); } expect(decoder.decode(events[3])).toEqual("event: end\n\n"); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_tracing.int.test.ts
import { Client } from "langsmith"; import { test } from "@jest/globals"; import { LangChainTracer } from "../../tracers/tracer_langchain.js"; import { BaseOutputParser, FormatInstructionsOptions, } from "../../output_parsers/base.js"; import { FakeChatModel } from "../../utils/testing/index.js"; import { getEnvironmentVariable } from "../../utils/env.js"; class FakeDateOutputParser extends BaseOutputParser<Date> { lc_namespace = ["langchain_core", "output_parsers", "testing"]; async parse(_text: string): Promise<Date> { return new Date(); } getFormatInstructions(_options?: FormatInstructionsOptions): string { return ""; } } test("Should handle tracing with a date output", async () => { const client = new Client({ apiUrl: getEnvironmentVariable("LANGCHAIN_ENDPOINT"), apiKey: getEnvironmentVariable("LANGCHAIN_API_KEY"), }); const tracer = new LangChainTracer({ projectName: getEnvironmentVariable("LANGCHAIN_SESSION"), client, }); const model = new FakeChatModel({}); const parser = new FakeDateOutputParser(); const chain = model.pipe(parser); const result = await chain.invoke("test", { callbacks: [tracer] }); console.log(result); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_graph.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect } from "@jest/globals"; import { StringOutputParser } from "../../output_parsers/string.js"; import { FakeLLM } from "../../utils/testing/index.js"; import { PromptTemplate } from "../../prompts/prompt.js"; import { CommaSeparatedListOutputParser } from "../../output_parsers/list.js"; test("Test graph single runnable", async () => { const jsonOutputParser = new StringOutputParser(); const graph = jsonOutputParser.getGraph(); const firstNode = graph.firstNode(); expect(firstNode).not.toBeNull(); const lastNode = graph.lastNode(); expect(lastNode).not.toBeNull(); expect(graph.edges.length).toBe(2); expect(Object.keys(graph.nodes).length).toBe(3); }); test("Test graph sequence", async () => { const llm = new FakeLLM({}); const prompt = PromptTemplate.fromTemplate("Hello, {name}!"); const listParser = new CommaSeparatedListOutputParser(); const sequence = prompt.pipe(llm).pipe(listParser); const graph = sequence.getGraph(); const firstNode = graph.firstNode(); expect(firstNode).not.toBeNull(); const lastNode = graph.lastNode(); expect(lastNode).not.toBeNull(); expect(graph.edges.length).toBe(4); expect(Object.keys(graph.nodes).length).toBe(5); expect(graph.toJSON()).toStrictEqual({ nodes: [ { id: 0, type: "schema", data: { title: "PromptTemplateInput", $schema: "http://json-schema.org/draft-07/schema#", }, }, { id: 1, type: "runnable", data: { id: ["langchain_core", "prompts", "prompt", "PromptTemplate"], name: "PromptTemplate", }, }, { id: 2, type: "runnable", data: { id: ["langchain", "llms", "fake", "FakeLLM"], name: "FakeLLM", }, }, { id: 3, type: "runnable", data: { id: [ "langchain_core", "output_parsers", "list", "CommaSeparatedListOutputParser", ], name: "CommaSeparatedListOutputParser", }, }, { id: 4, type: "schema", data: { title: "CommaSeparatedListOutputParserOutput", $schema: "http://json-schema.org/draft-07/schema#", }, }, ], 
edges: [ { source: 0, target: 1 }, { source: 1, target: 2 }, { source: 3, target: 4 }, { source: 2, target: 3 }, ], }); expect(graph.drawMermaid()) .toEqual(`%%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; \tPromptTemplateInput([PromptTemplateInput]):::first \tPromptTemplate(PromptTemplate) \tFakeLLM(FakeLLM) \tCommaSeparatedListOutputParser(CommaSeparatedListOutputParser) \tCommaSeparatedListOutputParserOutput([CommaSeparatedListOutputParserOutput]):::last \tPromptTemplateInput --> PromptTemplate; \tPromptTemplate --> FakeLLM; \tCommaSeparatedListOutputParser --> CommaSeparatedListOutputParserOutput; \tFakeLLM --> CommaSeparatedListOutputParser; \tclassDef default fill:#f2f0ff,line-height:1.2; \tclassDef first fill-opacity:0; \tclassDef last fill:#bfb6fc; `); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ /* eslint-disable @typescript-eslint/no-unused-vars */ import { Run } from "langsmith"; import { v4 as uuidv4 } from "uuid"; import { jest, test, expect, describe } from "@jest/globals"; import { createChatMessageChunkEncoderStream } from "../../language_models/chat_models.js"; import { BaseMessage, HumanMessage } from "../../messages/index.js"; import { OutputParserException } from "../../output_parsers/base.js"; import { StringOutputParser } from "../../output_parsers/string.js"; import { ChatPromptTemplate, SystemMessagePromptTemplate, } from "../../prompts/chat.js"; import { PromptTemplate } from "../../prompts/prompt.js"; import { FakeLLM, FakeChatModel, FakeStreamingLLM, FakeSplitIntoListParser, FakeRunnable, FakeListChatModel, SingleRunExtractor, FakeStreamingChatModel, } from "../../utils/testing/index.js"; import { RunnableSequence, RunnableLambda } from "../base.js"; import { RouterRunnable } from "../router.js"; import { RunnableConfig } from "../config.js"; import { JsonOutputParser } from "../../output_parsers/json.js"; test("Test batch", async () => { const llm = new FakeLLM({}); const results = await llm.batch(["Hi there!", "Hey hey"]); expect(results.length).toBe(2); }); test("Test stream", async () => { const llm = new FakeLLM({}); const stream = await llm.stream("Hi there!"); const reader = stream .pipeThrough(new TextEncoderStream()) .pipeThrough(new TextDecoderStream()) .getReader(); let done = false; while (!done) { const chunk = await reader.read(); done = chunk.done; } }); test("Test stream with an immediate thrown error", async () => { const llm = new FakeStreamingLLM({ thrownErrorString: "testing", }); try { await llm.stream("Hi there!"); } catch (e: any) { expect(e.message).toEqual("testing"); } }); test("Test chat model stream", async () => { const llm = new FakeChatModel({}); const stream = await llm.stream("Hi there!"); const reader = 
stream .pipeThrough(createChatMessageChunkEncoderStream()) .pipeThrough(new TextDecoderStream()) .getReader(); let done = false; while (!done) { const chunk = await reader.read(); console.log(chunk); done = chunk.done; } }); test("Pipe from one runnable to the next", async () => { const promptTemplate = PromptTemplate.fromTemplate("{input}"); const llm = new FakeLLM({}); const runnable = promptTemplate.pipe(llm); const result = await runnable.invoke({ input: "Hello world!" }); console.log(result); expect(result).toBe("Hello world!"); }); test("Stream the entire way through", async () => { const llm = new FakeStreamingLLM({}); const stream = await llm.pipe(new StringOutputParser()).stream("Hi there!"); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); console.log(chunk); } expect(chunks.length).toEqual("Hi there!".length); expect(chunks.join("")).toEqual("Hi there!"); }); test("Callback order with transform streaming", async () => { const prompt = ChatPromptTemplate.fromTemplate(`{input}`); const llm = new FakeStreamingLLM({}); const order: string[] = []; const stream = await prompt .pipe(llm) .pipe(new StringOutputParser()) .stream( { input: "Hi there!" 
}, { callbacks: [ { handleChainStart: (chain) => order.push(chain.id[chain.id.length - 1]), handleLLMStart: (llm) => order.push(llm.id[llm.id.length - 1]), }, ], } ); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); console.log(chunk); } expect(order).toEqual([ "RunnableSequence", "ChatPromptTemplate", "FakeStreamingLLM", "StrOutputParser", ]); expect(chunks.length).toEqual("Human: Hi there!".length); expect(chunks.join("")).toEqual("Human: Hi there!"); }); test("Don't use intermediate streaming", async () => { const llm = new FakeStreamingLLM({}); const stream = await llm .pipe(new StringOutputParser()) .pipe(new FakeLLM({})) .stream("Hi there!"); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); console.log(chunk); } expect(chunks.length).toEqual(1); expect(chunks[0]).toEqual("Hi there!"); }); test("Router runnables", async () => { const mathLLM = new FakeLLM({}); mathLLM.response = "I am a math genius!"; const chain1 = PromptTemplate.fromTemplate( "You are a math genius. Answer the question: {question}" ).pipe(mathLLM); const englishLLM = new FakeLLM({}); englishLLM.response = "I am an English genius!"; const chain2 = PromptTemplate.fromTemplate( "You are an english major. 
Answer the question: {question}" ).pipe(englishLLM); const router = new RouterRunnable({ runnables: { math: chain1, english: chain2 }, }); type RouterChainInput = { key: string; question: string; }; const chain = RunnableSequence.from([ { key: (x: RouterChainInput) => x.key, input: { question: (x: RouterChainInput) => x.question }, }, router, ]); const result = await chain.invoke({ key: "math", question: "2 + 2" }); expect(result).toEqual("I am a math genius!"); const result2 = await chain.batch([ { key: "math", question: "2 + 2", }, { key: "english", question: "2 + 2", }, ]); expect(result2).toEqual(["I am a math genius!", "I am an English genius!"]); }); test("RunnableLambda that returns a runnable should invoke the runnable", async () => { const runnable = new RunnableLambda({ func: () => new RunnableLambda({ func: () => "testing", }), }); const result = await runnable.invoke({}); expect(result).toEqual("testing"); }); test("RunnableLambda that returns an async iterator should consume it", async () => { const runnable = new RunnableLambda({ async *func() { yield "test"; yield "ing"; }, }); const result = await runnable.invoke({}); expect(result).toEqual("testing"); const chunks = []; const stream = await runnable.stream({}); for await (const chunk of stream) { chunks.push(chunk); } expect(chunks).toEqual(["test", "ing"]); }); test("RunnableLambda that returns an async iterable should consume it", async () => { const runnable = new RunnableLambda({ func() { return new ReadableStream({ async start(controller) { controller.enqueue("test"); controller.enqueue("ing"); controller.close(); }, }); }, }); const result = await runnable.invoke({}); expect(result).toEqual("testing"); const chunks = []; const stream = await runnable.stream({}); for await (const chunk of stream) { chunks.push(chunk); } expect(chunks).toEqual(["test", "ing"]); }); test("RunnableLambda that returns a promise for async iterable should consume it", async () => { const runnable = new 
RunnableLambda({ async func() { return new ReadableStream({ async start(controller) { controller.enqueue("test"); controller.enqueue("ing"); controller.close(); }, }); }, }); const result = await runnable.invoke({}); expect(result).toEqual("testing"); const chunks = []; const stream = await runnable.stream({}); for await (const chunk of stream) { chunks.push(chunk); } expect(chunks).toEqual(["test", "ing"]); }); test("RunnableLambda that returns an iterator should consume it", async () => { const runnable = new RunnableLambda({ *func() { yield "test"; yield "ing"; }, }); const result = await runnable.invoke({}); expect(result).toEqual("testing"); const chunks = []; const stream = await runnable.stream({}); for await (const chunk of stream) { chunks.push(chunk); } expect(chunks).toEqual(["test", "ing"]); }); test("RunnableLambda that returns a streaming runnable should stream output from the inner runnable", async () => { const runnable = new RunnableLambda({ func: () => new FakeStreamingLLM({}), }); const stream = await runnable.stream("hello"); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks).toEqual(["h", "e", "l", "l", "o"]); }); test("RunnableEach", async () => { const parser = new FakeSplitIntoListParser(); expect(await parser.invoke("first item, second item")).toEqual([ "first item", "second item", ]); expect(await parser.map().invoke(["a, b", "c"])).toEqual([["a", "b"], ["c"]]); expect( await parser .map() .map() .invoke([["a, b", "c"], ["c, e"]]) ).toEqual([[["a", "b"], ["c"]], [["c", "e"]]]); }); test("Runnable withConfig", async () => { const fake = new FakeRunnable({ returnOptions: true, }); const result = await fake.withConfig({ tags: ["a-tag"] }).invoke("hello"); expect(result.tags).toEqual(["a-tag"]); const stream = await fake .withConfig({ metadata: { a: "b", b: "c", }, tags: ["a-tag"], }) .stream("hi", { tags: ["b-tag"], metadata: { a: "updated" } }); const chunks = []; for await (const chunk of stream) { 
chunks.push(chunk); } expect(chunks.length).toEqual(1); expect(chunks[0]?.tags).toEqual(["a-tag", "b-tag"]); expect(chunks[0]?.metadata).toEqual({ a: "updated", b: "c" }); }); test("Listeners work", async () => { const prompt = ChatPromptTemplate.fromMessages([ SystemMessagePromptTemplate.fromTemplate("You are a nice assistant."), ["human", "{question}"], ]); const model = new FakeListChatModel({ responses: ["foo"], }); const chain = prompt.pipe(model); const mockStart = jest.fn(); const mockEnd = jest.fn(); await chain .withListeners({ onStart: (run: Run) => { mockStart(run); }, onEnd: (run: Run) => { mockEnd(run); }, }) .invoke({ question: "What is the meaning of life?" }); expect(mockStart).toHaveBeenCalledTimes(1); expect((mockStart.mock.calls[0][0] as { name: string }).name).toBe( "RunnableSequence" ); expect(mockEnd).toHaveBeenCalledTimes(1); }); test("Listeners work with async handlers", async () => { const prompt = ChatPromptTemplate.fromMessages([ SystemMessagePromptTemplate.fromTemplate("You are a nice assistant."), ["human", "{question}"], ]); const model = new FakeListChatModel({ responses: ["foo"], }); const chain = prompt.pipe(model); const mockStart = jest.fn(); const mockEnd = jest.fn(); await chain .withListeners({ onStart: async (run: Run) => { const promise = new Promise((resolve) => setTimeout(resolve, 2000)); await promise; mockStart(run); }, // eslint-disable-next-line @typescript-eslint/no-misused-promises onEnd: async (run: Run) => { const promise = new Promise((resolve) => setTimeout(resolve, 2000)); await promise; mockEnd(run); }, }) .invoke({ question: "What is the meaning of life?" 
}); expect(mockStart).toHaveBeenCalledTimes(1); expect((mockStart.mock.calls[0][0] as { name: string }).name).toBe( "RunnableSequence" ); expect(mockEnd).toHaveBeenCalledTimes(1); }); test("Create a runnable sequence and run it", async () => { const promptTemplate = PromptTemplate.fromTemplate("{input}"); const llm = new FakeChatModel({}); const parser = new StringOutputParser(); const text = `Jello world`; const runnable = promptTemplate.pipe(llm).pipe(parser); const result = await runnable.invoke({ input: text }); console.log(result); expect(result).toEqual("Jello world"); }); test("Create a runnable sequence with a static method with invalid output and catch the error", async () => { const promptTemplate = PromptTemplate.fromTemplate("{input}"); const llm = new FakeChatModel({}); const parser = (input: BaseMessage) => { console.log(input); try { const parsedInput = typeof input.content === "string" ? JSON.parse(input.content) : input.content; if ( !("outputValue" in parsedInput) || parsedInput.outputValue !== "Hello sequence!" ) { throw new Error("Test failed!"); } else { return input; } } catch (e) { throw new OutputParserException("Invalid output"); } }; const runnable = RunnableSequence.from([promptTemplate, llm, parser]); let error: any | undefined; try { await runnable.invoke({ input: "Hello sequence!" 
}); } catch (e: any) { error = e; } expect(error).toBeInstanceOf(OutputParserException); expect(error?.lc_error_code).toEqual("OUTPUT_PARSING_FAILURE"); }); test("Create a runnable sequence with a static method with no tags", async () => { const seq = RunnableSequence.from([() => "foo", () => "bar"], { omitSequenceTags: true, }); const events = []; for await (const event of seq.streamEvents({}, { version: "v2" })) { events.push(event); } expect(events.length).toBeGreaterThan(1); for (const event of events) { expect(event.tags?.find((tag) => tag.startsWith("seq:"))).toBeUndefined(); } }); test("RunnableSequence can pass config to every step in batched request", async () => { let numSeen = 0; const addOne = (x: number, options?: RunnableConfig) => { if (options?.configurable?.isPresent === true) { numSeen += 1; } return x + 1; }; const addTwo = (x: number, options?: RunnableConfig) => { if (options?.configurable?.isPresent === true) { numSeen += 1; } return x + 2; }; const addThree = (x: number, options?: RunnableConfig) => { if (options?.configurable?.isPresent === true) { numSeen += 1; } return x + 3; }; const sequence = RunnableSequence.from([addOne, addTwo, addThree]); await sequence.batch([1], { configurable: { isPresent: true, }, }); expect(numSeen).toBe(3); }); test("Should aggregate properly", async () => { const model = new FakeStreamingLLM({ responses: [ `{"countries": [{"name": "France", "population": 67391582}, {"name": "Spain", "population": 46754778}, {"name": "Japan", "population": 126476461}]}`, ], }); // A function that does not operates on input streams and breaks streaming. const extractCountryNames = (inputs: Record<string, any>) => { if (!Array.isArray(inputs.countries)) { return ""; } return inputs.countries.map((country) => country.name); }; const chain = model.pipe(new JsonOutputParser()).pipe(extractCountryNames); const stream = await chain.stream( `output a list of the countries france, spain and japan and their populations in JSON format. 
Use a dict with an outer key of "countries" which contains a list of countries. Each country should have the key "name" and "population"` ); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks.length).toEqual(1); expect(chunks[0]).toEqual(["France", "Spain", "Japan"]); }); describe("runId config", () => { test("invoke", async () => { const tracer = new SingleRunExtractor(); const llm = new FakeChatModel({}); const testId = uuidv4(); await llm.invoke("gg", { callbacks: [tracer], runId: testId, }); const run = await tracer.extract(); expect(run.id).toBe(testId); }); test("batch", async () => { jest.spyOn(console, "warn").mockImplementation(() => {}); const tracer = new SingleRunExtractor(); const llm = new FakeChatModel({}); const message = new HumanMessage("hello world"); const testId = uuidv4(); const res = await llm.batch([[message], [message]], { callbacks: [tracer], runId: testId, }); const run = await tracer.extract(); expect(run.id).toBe(testId); expect(res.length).toBe(2); // .batch will warn if a runId is passed // along with multiple messages expect(console.warn).toBeCalled(); }); test("stream", async () => { const tracer = new SingleRunExtractor(); const llm = new FakeStreamingChatModel({}); const testId = uuidv4(); const stream = await llm.stream("gg", { callbacks: [tracer], runId: testId, }); for await (const _ of stream) { // no-op } const run = await tracer.extract(); expect(run.id).toBe(testId); }); test("stream (via llm)", async () => { const tracer = new SingleRunExtractor(); const llm = new FakeStreamingLLM({}); const testId = uuidv4(); const stream = await llm.stream("gg", { callbacks: [tracer], runId: testId, }); for await (const _ of stream) { // no-op } const run = await tracer.extract(); expect(run.id).toBe(testId); }); test("invoke (via llm)", async () => { const tracer = new SingleRunExtractor(); const llm = new FakeLLM({}); const testId = uuidv4(); await llm.invoke("gg", { callbacks: [tracer], runId: 
testId, }); const run = await tracer.extract(); expect(run.id).toBe(testId); }); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_branch.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect } from "@jest/globals"; import { RunnableBranch } from "../branch.js"; import { ChatPromptTemplate } from "../../prompts/chat.js"; import { FakeStreamingLLM } from "../../utils/testing/index.js"; import { RunnableSequence } from "../base.js"; import { StringOutputParser } from "../../output_parsers/string.js"; test("RunnableBranch invoke", async () => { const condition = (x: number) => x > 0; const add = (x: number) => x + 1; const subtract = (x: number) => x - 1; const branch = RunnableBranch.from([ [condition, add], [condition, add], subtract, ]); const result = await branch.invoke(1); expect(result).toEqual(2); const result2 = await branch.invoke(-1); expect(result2).toEqual(-2); }); test("RunnableBranch batch", async () => { const branch = RunnableBranch.from([ [(x: number) => x > 0 && x < 5, (x: number) => x + 1], [(x: number) => x > 5, (x: number) => x * 10], (x: number) => x - 1, ]); const batchResult = await branch.batch([1, 10, 0]); expect(batchResult).toEqual([2, 100, -1]); }); test("RunnableBranch handles error", async () => { let error; const branch = RunnableBranch.from([ [ (x: string) => x.startsWith("a"), () => { throw new Error("Testing"); }, ], (x) => `${x} passed`, ]); const result = await branch.invoke("branch", { callbacks: [ { handleChainError: (e) => { error = e; }, }, ], }); // If callbacks are backgrounded await new Promise((resolve) => setTimeout(resolve, 1000)); expect(result).toBe("branch passed"); expect(error).toBeUndefined(); await expect(async () => { await branch.invoke("alpha", { callbacks: [ { handleChainError: (e) => { error = e; }, }, ], }); }).rejects.toThrow(); expect(error).toBeDefined(); }); test("RunnableBranch invoke", async () => { const promptTemplate = ChatPromptTemplate.fromTemplate(`{question}`); const model = new FakeStreamingLLM({ sleep: 1, }); const classificationChain = 
RunnableSequence.from([ promptTemplate, model, new StringOutputParser(), ]); const generalChain = ChatPromptTemplate.fromTemplate(`GENERAL CHAIN`).pipe(model); const langChainChain = ChatPromptTemplate.fromTemplate(`LANGCHAIN CHAIN`).pipe(model); const branch = RunnableBranch.from([ [ (x: { topic: string; question: string }) => x.topic.toLowerCase().includes("langchain"), langChainChain, ], generalChain, ]); const fullChain = RunnableSequence.from([ { topic: classificationChain, question: (input: { question: string }) => input.question, }, branch, new StringOutputParser(), ]); const stream = await fullChain.stream({ question: "How do I use langchain? Explain in one sentence", }); const chunks = []; for await (const chunk of stream) { console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); expect(chunks.join("")).toContain("LANGCHAIN"); const stream2 = await fullChain.stream({ question: "What is up? Explain in one sentence", }); const chunks2 = []; for await (const chunk of stream2) { console.log(chunk); chunks2.push(chunk); } expect(chunks2.length).toBeGreaterThan(1); expect(chunks2.join("")).toContain("GENERAL"); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_binding.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect } from "@jest/globals"; import { StringOutputParser } from "../../output_parsers/string.js"; import { FakeChatModel, FakeStreamingLLM } from "../../utils/testing/index.js"; test("Bind kwargs to a runnable", async () => { const llm = new FakeChatModel({}); const result = await llm .bind({ stop: ["testing"] }) .pipe(new StringOutputParser()) .invoke("Hi there!"); console.log(result); expect(result).toEqual("testing"); }); test("Bind kwargs to a runnable with a batch call", async () => { const llm = new FakeChatModel({}); const result = await llm .bind({ stop: ["testing"] }) .pipe(new StringOutputParser()) .batch(["Hi there!", "hey hey", "Hi there!", "hey hey"]); console.log(result); expect(result).toEqual(["testing", "testing", "testing", "testing"]); }); test("Stream with RunnableBinding", async () => { const llm = new FakeStreamingLLM({}).bind({ stop: ["dummy"] }); const stream = await llm.pipe(new StringOutputParser()).stream("Hi there!"); const chunks: string[] = []; for await (const chunk of stream) { chunks.push(chunk); console.log(chunk); } expect(chunks.length).toEqual("Hi there!".length); expect(chunks.join("")).toEqual("Hi there!"); }); test("Stream through a RunnableBinding if the bound runnable implements transform", async () => { const llm = new FakeStreamingLLM({}).bind({ stop: ["dummy"] }); const outputParser = new StringOutputParser().bind({ callbacks: [] }); const stream = await llm.pipe(outputParser).stream("Hi there!"); const chunks: string[] = []; for await (const chunk of stream) { chunks.push(chunk); console.log(chunk); } expect(chunks.length).toEqual("Hi there!".length); expect(chunks.join("")).toEqual("Hi there!"); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_history.test.ts
import { test, expect } from "@jest/globals";
import {
  AIMessage,
  AIMessageChunk,
  BaseMessage,
  HumanMessage,
} from "../../messages/index.js";
import { RunnableLambda } from "../base.js";
import { RunnableConfig } from "../config.js";
import { RunnableWithMessageHistory } from "../history.js";
import {
  BaseChatMessageHistory,
  BaseListChatMessageHistory,
} from "../../chat_history.js";
import {
  FakeChatMessageHistory,
  FakeLLM,
  FakeListChatMessageHistory,
  FakeListChatModel,
  FakeStreamingLLM,
} from "../../utils/testing/index.js";
import { ChatPromptTemplate, MessagesPlaceholder } from "../../prompts/chat.js";
import { StringOutputParser } from "../../output_parsers/string.js";

// Jest asymmetric matcher cast to `string` so it can sit where a string
// message id is expected inside toEqual() fixtures below.
const anyString = expect.any(String) as unknown as string;

// For `BaseChatMessageHistory`
// Returns a sessionId -> history factory backed by an in-memory store,
// so each session id maps to exactly one FakeChatMessageHistory instance.
async function getGetSessionHistory(): Promise<
  (sessionId: string) => Promise<BaseChatMessageHistory>
> {
  const chatHistoryStore: { [key: string]: BaseChatMessageHistory } = {};
  async function getSessionHistory(
    sessionId: string
  ): Promise<BaseChatMessageHistory> {
    // Lazily create a history the first time a session id is seen.
    if (!(sessionId in chatHistoryStore)) {
      chatHistoryStore[sessionId] = new FakeChatMessageHistory();
    }
    return chatHistoryStore[sessionId];
  }
  return getSessionHistory;
}

// Extends `BaseListChatMessageHistory`
// Same shape as getGetSessionHistory, but produces the list-based
// history variant instead.
async function getListSessionHistory(): Promise<
  (sessionId: string) => Promise<BaseListChatMessageHistory>
> {
  const chatHistoryStore: { [key: string]: BaseListChatMessageHistory } = {};
  async function getSessionHistory(
    sessionId: string
  ): Promise<BaseListChatMessageHistory> {
    if (!(sessionId in chatHistoryStore)) {
      chatHistoryStore[sessionId] = new FakeListChatMessageHistory();
    }
    return chatHistoryStore[sessionId];
  }
  return getSessionHistory;
}

// History wrapper around a plain lambda: the second invoke must also see
// the human message recorded during the first invoke.
test("Runnable with message history", async () => {
  const runnable = new RunnableLambda({
    func: (messages: BaseMessage[]) =>
      `you said: ${messages
        .filter((m) => m._getType() === "human")
        .map((m) => m.content)
        .join("\n")}`,
  });
  const getMessageHistory = await getGetSessionHistory();
  const withHistory = new RunnableWithMessageHistory({
    runnable,
    config: {},
    getMessageHistory,
  });
  const config: RunnableConfig = { configurable: { sessionId: "1" } };
  let output = await withHistory.invoke([new HumanMessage("hello")], config);
  expect(output).toBe("you said: hello");
  output = await withHistory.invoke([new HumanMessage("good bye")], config);
  expect(output).toBe("you said: hello\ngood bye");
});

// A chat model wrapped directly: both invoke and stream should append the
// exchanged messages (AIMessage for invoke, AIMessageChunk for stream) to
// the same session's history, in order.
test("Runnable with message history with a chat model", async () => {
  const runnable = new FakeListChatModel({
    responses: ["Hello world!"],
  });
  const getMessageHistory = await getGetSessionHistory();
  const withHistory = new RunnableWithMessageHistory({
    runnable,
    config: {},
    getMessageHistory,
  });
  const config: RunnableConfig = { configurable: { sessionId: "2" } };
  const output = await withHistory.invoke([new HumanMessage("hello")], config);
  expect(output.content).toBe("Hello world!");
  const stream = await withHistory.stream(
    [new HumanMessage("good bye")],
    config
  );
  const chunks = [];
  for await (const chunk of stream) {
    console.log(chunk);
    chunks.push(chunk);
  }
  expect(chunks.map((chunk) => chunk.content).join("")).toEqual("Hello world!");
  const sessionHistory = await getMessageHistory("2");
  expect(await sessionHistory.getMessages()).toEqual([
    new HumanMessage("hello"),
    new AIMessage({
      id: anyString,
      content: "Hello world!",
    }),
    new HumanMessage("good bye"),
    new AIMessageChunk({
      id: anyString,
      content: "Hello world!",
    }),
  ]);
});

// Same shape as the previous test, but the runnable is a prompt -> model
// chain whose input and output are both message lists (via the
// "placeholder" message in the prompt).
test("Runnable with message history with a messages in, messages out chain", async () => {
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "you are a robot"],
    ["placeholder", "{messages}"],
  ]);
  const model = new FakeListChatModel({
    responses: ["So long and thanks for the fish!!"],
  });
  const runnable = prompt.pipe(model);
  const getMessageHistory = await getGetSessionHistory();
  const withHistory = new RunnableWithMessageHistory({
    runnable,
    config: {},
    getMessageHistory,
  });
  const config: RunnableConfig = { configurable: { sessionId: "2" } };
  const output = await withHistory.invoke([new HumanMessage("hello")], config);
  expect(output.content).toBe("So long and thanks for the fish!!");
  const stream = await withHistory.stream(
    [new HumanMessage("good bye")],
    config
  );
  const chunks = [];
  for await (const chunk of stream) {
    console.log(chunk);
    chunks.push(chunk);
  }
  expect(chunks.map((chunk) => chunk.content).join("")).toEqual(
    "So long and thanks for the fish!!"
  );
  const sessionHistory = await getMessageHistory("2");
  expect(await sessionHistory.getMessages()).toEqual([
    new HumanMessage("hello"),
    new AIMessage({
      id: anyString,
      content: "So long and thanks for the fish!!",
    }),
    new HumanMessage("good bye"),
    new AIMessageChunk({
      id: anyString,
      content: "So long and thanks for the fish!!",
    }),
  ]);
});

// The lambda scenario again, but backed by the list-based history class
// to cover the BaseListChatMessageHistory code path.
test("Runnable with message history work with chat list memory", async () => {
  const runnable = new RunnableLambda({
    func: (messages: BaseMessage[]) =>
      `you said: ${messages
        .filter((m) => m._getType() === "human")
        .map((m) => m.content)
        .join("\n")}`,
  });
  const getListMessageHistory = await getListSessionHistory();
  const withHistory = new RunnableWithMessageHistory({
    runnable,
    config: {},
    getMessageHistory: getListMessageHistory,
  });
  const config: RunnableConfig = { configurable: { sessionId: "3" } };
  let output = await withHistory.invoke([new HumanMessage("hello")], config);
  expect(output).toBe("you said: hello");
  output = await withHistory.invoke([new HumanMessage("good bye")], config);
  expect(output).toBe("you said: hello\ngood bye");
});

// Dict-shaped input: inputMessagesKey/historyMessagesKey route the "input"
// field and the accumulated history into the prompt's placeholders. FakeLLM
// echoes the formatted prompt, so the second invoke's expected string shows
// the first exchange interleaved into the history.
test("Runnable with message history and RunnableSequence", async () => {
  const prompt = ChatPromptTemplate.fromMessages([
    ["ai", "You are a helpful assistant"],
    new MessagesPlaceholder("history"),
    ["human", "{input}"],
  ]);
  const model = new FakeLLM({});
  const chain = prompt.pipe(model);
  const getListMessageHistory = await getListSessionHistory();
  const withHistory = new RunnableWithMessageHistory({
    runnable: chain,
    config: {},
    getMessageHistory: getListMessageHistory,
    inputMessagesKey: "input",
    historyMessagesKey: "history",
  });
  const config: RunnableConfig = { configurable: { sessionId: "4" } };
  let output = await withHistory.invoke({ input: "hello" }, config);
  expect(output).toBe("AI: You are a helpful assistant\nHuman: hello");
  output = await withHistory.invoke({ input: "good bye" }, config);
  expect(output).toBe(`AI: You are a helpful assistant
Human: hello
AI: AI: You are a helpful assistant
Human: hello
Human: good bye`);
});

// A history-wrapped chain piped into a parser must still stream chunk by
// chunk (more than one chunk) rather than buffering into a single output.
test("Runnable with message history should stream through", async () => {
  const prompt = ChatPromptTemplate.fromMessages([
    ["ai", "You are a helpful assistant"],
    new MessagesPlaceholder("history"),
    ["human", "{input}"],
  ]);
  const model = new FakeStreamingLLM({});
  const chain = prompt.pipe(model);
  const getListMessageHistory = await getListSessionHistory();
  const withHistory = new RunnableWithMessageHistory({
    runnable: chain,
    config: {},
    getMessageHistory: getListMessageHistory,
    inputMessagesKey: "input",
    historyMessagesKey: "history",
  }).pipe(new StringOutputParser());
  const config: RunnableConfig = { configurable: { sessionId: "5" } };
  const stream = await withHistory.stream({ input: "hello" }, config);
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  expect(chunks.length).toBeGreaterThan(1);
});
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_stream_events_v2.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect, afterEach } from "@jest/globals"; import { z } from "zod"; import { AsyncLocalStorage } from "node:async_hooks"; import { RunnableLambda, RunnableMap, RunnablePassthrough, RunnablePick, } from "../index.js"; import { ChatPromptTemplate } from "../../prompts/chat.js"; import { FakeChatModel, FakeLLM, FakeListChatModel, FakeRetriever, FakeStreamingLLM, } from "../../utils/testing/index.js"; import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, } from "../../messages/index.js"; import { DynamicStructuredTool, DynamicTool, tool } from "../../tools/index.js"; import { Document } from "../../documents/document.js"; import { PromptTemplate } from "../../prompts/prompt.js"; import { GenerationChunk } from "../../outputs.js"; // Import from web to avoid top-level side-effects from AsyncLocalStorage import { dispatchCustomEvent } from "../../callbacks/dispatch/web.js"; import { AsyncLocalStorageProviderSingleton } from "../../singletons/index.js"; function reverse(s: string) { // Reverse a string. 
return s.split("").reverse().join(""); } const originalCallbackValue = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; afterEach(() => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalCallbackValue; }); test("Runnable streamEvents method", async () => { const chain = RunnableLambda.from(reverse).withConfig({ runName: "reverse", }); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v2" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "reverse", run_id: expect.any(String), tags: [], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "reverse", run_id: expect.any(String), tags: [], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "reverse", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method on a chat model", async () => { const model = new FakeListChatModel({ responses: ["abc"], }); const events = []; const eventStream = await model.streamEvents("hello", { version: "v2" }); for await (const event of eventStream) { events.push(event); } // used here to avoid casting every ID const anyString = expect.any(String) as unknown as string; expect(events).toMatchObject([ { data: { input: "hello" }, event: "on_chat_model_start", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, { data: { chunk: new AIMessageChunk({ id: anyString, content: "a" }) }, event: "on_chat_model_stream", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, { data: { chunk: new AIMessageChunk({ id: anyString, content: "b" }) }, event: "on_chat_model_stream", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, { data: { chunk: new AIMessageChunk({ id: anyString, content: "c" }) }, event: "on_chat_model_stream", name: 
"FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, { data: { output: new AIMessageChunk({ id: anyString, content: "abc" }) }, event: "on_chat_model_end", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents call nested in another runnable + passed callbacks should still work", async () => { AsyncLocalStorageProviderSingleton.initializeGlobalInstance( new AsyncLocalStorage() ); const model = new FakeListChatModel({ responses: ["abc"], }); const events: any[] = []; const container = RunnableLambda.from(async (_) => { const eventStream = model.streamEvents("hello", { version: "v2" }); for await (const event of eventStream) { events.push(event); } return events; }); await container.invoke({}, { callbacks: [{ handleLLMStart: () => {} }] }); // used here to avoid casting every ID const anyString = expect.any(String) as unknown as string; expect(events).toMatchObject([ { data: { input: "hello" }, event: "on_chat_model_start", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, { data: { chunk: new AIMessageChunk({ id: anyString, content: "a" }) }, event: "on_chat_model_stream", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, { data: { chunk: new AIMessageChunk({ id: anyString, content: "b" }) }, event: "on_chat_model_stream", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, { data: { chunk: new AIMessageChunk({ id: anyString, content: "c" }) }, event: "on_chat_model_stream", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, { data: { output: new AIMessageChunk({ id: anyString, content: "abc" }) }, event: "on_chat_model_end", name: "FakeListChatModel", metadata: expect.any(Object), run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method 
with three runnables", async () => { const r = RunnableLambda.from(reverse); const chain = r .withConfig({ runName: "1" }) .pipe(r.withConfig({ runName: "2" })) .pipe(r.withConfig({ runName: "3" })); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v2" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, { data: {}, event: "on_chain_start", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: {}, event: "on_chain_start", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "hello" }, event: "on_chain_stream", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: {}, event: "on_chain_start", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { input: "olleh", output: "hello" }, event: "on_chain_end", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents 
method with three runnables with backgrounded callbacks set to true", async () => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "true"; const r = RunnableLambda.from(reverse); const chain = r .withConfig({ runName: "1" }) .pipe(r.withConfig({ runName: "2" })) .pipe(r.withConfig({ runName: "3" })); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v2" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, { data: {}, event: "on_chain_start", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: {}, event: "on_chain_start", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "hello" }, event: "on_chain_stream", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: {}, event: "on_chain_start", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { input: "olleh", output: "hello" }, event: "on_chain_end", metadata: {}, name: "2", run_id: expect.any(String), tags: ["seq:step:2"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "RunnableSequence", run_id: expect.any(String), tags: [], }, { data: { input: "hello", output: "olleh" }, event: "on_chain_end", metadata: {}, name: "3", run_id: expect.any(String), tags: ["seq:step:3"], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, 
name: "RunnableSequence", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with three runnables with filtering", async () => { const r = RunnableLambda.from(reverse); const chain = r .withConfig({ runName: "1" }) .pipe(r.withConfig({ runName: "2", tags: ["my_tag"] })) .pipe(r.withConfig({ runName: "3", tags: ["my_tag"] })); const events = []; const eventStream = await chain.streamEvents( "hello", { version: "v2" }, { includeNames: ["1"], } ); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "1", run_id: expect.any(String), tags: ["seq:step:1"], }, ]); const events2 = []; const eventStream2 = await chain.streamEvents( "hello", { version: "v2" }, { excludeNames: ["2"], includeTags: ["my_tag"], } ); for await (const event of eventStream2) { events2.push(event); } expect(events2).toEqual([ { data: { input: "hello", }, event: "on_chain_start", metadata: {}, name: "3", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:3", "my_tag"]), }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "3", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:3", "my_tag"]), }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "3", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:3", "my_tag"]), }, ]); }); test("Runnable streamEvents method with a runnable map", async () => { const r = RunnableLambda.from(reverse); const chain = RunnableMap.from({ reversed: r, original: new RunnablePassthrough(), }).pipe(new RunnablePick("reversed")); const events = []; const eventStream = await 
chain.streamEvents("hello", { version: "v2" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { run_id: expect.any(String), event: "on_chain_start", name: "RunnableSequence", tags: [], metadata: {}, data: { input: "hello" }, }, { event: "on_chain_start", name: "RunnableMap", run_id: expect.any(String), tags: ["seq:step:1"], metadata: {}, data: {}, }, { event: "on_chain_start", name: "RunnableLambda", run_id: expect.any(String), tags: ["map:key:reversed"], metadata: {}, data: {}, }, { event: "on_chain_start", name: "RunnablePassthrough", run_id: expect.any(String), tags: ["map:key:original"], metadata: {}, data: {}, }, { event: "on_chain_stream", name: "RunnablePassthrough", run_id: expect.any(String), tags: ["map:key:original"], metadata: {}, data: { chunk: "hello" }, }, { event: "on_chain_stream", name: "RunnableLambda", run_id: expect.any(String), tags: ["map:key:reversed"], metadata: {}, data: { chunk: "olleh" }, }, { event: "on_chain_stream", name: "RunnableMap", run_id: expect.any(String), tags: ["seq:step:1"], metadata: {}, data: { chunk: { original: "hello", }, }, }, { event: "on_chain_start", name: "RunnablePick", run_id: expect.any(String), tags: ["seq:step:2"], metadata: {}, data: {}, }, { event: "on_chain_stream", name: "RunnableMap", run_id: expect.any(String), tags: ["seq:step:1"], metadata: {}, data: { chunk: { reversed: "olleh", }, }, }, { event: "on_chain_end", name: "RunnablePassthrough", run_id: expect.any(String), tags: ["map:key:original"], metadata: {}, data: { input: "hello", output: "hello" }, }, { event: "on_chain_stream", name: "RunnablePick", run_id: expect.any(String), tags: ["seq:step:2"], metadata: {}, data: { chunk: "olleh" }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: [], metadata: {}, name: "RunnableSequence", data: { chunk: "olleh" }, }, { event: "on_chain_end", name: "RunnableLambda", run_id: expect.any(String), tags: ["map:key:reversed"], metadata: {}, data: { input: 
"hello", output: "olleh" }, }, { event: "on_chain_end", name: "RunnableMap", run_id: expect.any(String), tags: ["seq:step:1"], metadata: {}, data: { input: "hello", output: { original: "hello", reversed: "olleh", }, }, }, { event: "on_chain_end", name: "RunnablePick", run_id: expect.any(String), tags: ["seq:step:2"], metadata: {}, data: { input: { original: "hello", reversed: "olleh", }, output: "olleh", }, }, { event: "on_chain_end", name: "RunnableSequence", run_id: expect.any(String), tags: [], metadata: {}, data: { output: "olleh" }, }, ]); }); test("Runnable streamEvents method with llm", async () => { const model = new FakeStreamingLLM({ responses: ["hey!"], }).withConfig({ metadata: { a: "b" }, tags: ["my_model"], runName: "my_model", }); const events = []; const eventStream = await model.streamEvents("hello", { version: "v2" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { event: "on_llm_start", data: { input: "hello", }, name: "my_model", tags: ["my_model"], run_id: expect.any(String), metadata: { a: "b", }, }, { event: "on_llm_stream", data: { chunk: { text: "h", }, }, run_id: expect.any(String), name: "my_model", tags: ["my_model"], metadata: { a: "b", }, }, { event: "on_llm_stream", data: { chunk: { text: "e", }, }, run_id: expect.any(String), name: "my_model", tags: ["my_model"], metadata: { a: "b", }, }, { event: "on_llm_stream", data: { chunk: { text: "y", }, }, run_id: expect.any(String), name: "my_model", tags: ["my_model"], metadata: { a: "b", }, }, { event: "on_llm_stream", data: { chunk: { text: "!", }, }, run_id: expect.any(String), name: "my_model", tags: ["my_model"], metadata: { a: "b", }, }, { event: "on_llm_end", data: { output: { generations: [ [ { text: "hey!", generationInfo: {}, }, ], ], llmOutput: {}, }, }, run_id: expect.any(String), name: "my_model", tags: ["my_model"], metadata: { a: "b", }, }, ]); }); test("Runnable streamEvents method with chat model chain", async () => { const 
template = ChatPromptTemplate.fromMessages([ ["system", "You are Godzilla"], ["human", "{question}"], ]).withConfig({ runName: "my_template", tags: ["my_template"], }); const model = new FakeListChatModel({ responses: ["ROAR"], }).withConfig({ metadata: { a: "b" }, tags: ["my_model"], runName: "my_model", }); const chain = template.pipe(model).withConfig({ metadata: { foo: "bar" }, tags: ["my_chain"], runName: "my_chain", }); const events = []; const eventStream = await chain.streamEvents( { question: "hello" }, { version: "v2" } ); for await (const event of eventStream) { events.push(event); } // used here to avoid casting every ID const anyString = expect.any(String) as unknown as string; expect(events).toEqual([ { run_id: expect.any(String), event: "on_chain_start", name: "my_chain", tags: ["my_chain"], metadata: { foo: "bar", }, data: { input: { question: "hello", }, }, }, { data: { input: { question: "hello" } }, event: "on_prompt_start", metadata: { foo: "bar" }, name: "my_template", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "seq:step:1", "my_template"]), }, { event: "on_prompt_end", name: "my_template", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:1", "my_template", "my_chain"]), metadata: { foo: "bar", }, data: { input: { question: "hello", }, output: await template.invoke({ question: "hello" }), }, }, { event: "on_chat_model_start", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]), metadata: { foo: "bar", a: "b", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, data: { input: { messages: [ [new SystemMessage("You are Godzilla"), new HumanMessage("hello")], ], }, }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, name: 
"my_model", data: { chunk: new AIMessageChunk({ content: "R", id: anyString }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessageChunk({ content: "R", id: anyString }) }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", data: { chunk: new AIMessageChunk({ content: "O", id: anyString }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessageChunk({ content: "O", id: anyString }) }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", data: { chunk: new AIMessageChunk({ content: "A", id: anyString }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessageChunk({ content: "A", id: anyString }) }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", data: { chunk: new AIMessageChunk({ content: "R", id: anyString }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessageChunk({ content: "R", id: anyString }) }, }, { event: "on_chat_model_end", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]), 
metadata: { foo: "bar", a: "b", ls_model_type: "chat", ls_provider: model.getName(), ls_stop: undefined, }, data: { input: { messages: [ [new SystemMessage("You are Godzilla"), new HumanMessage("hello")], ], }, output: new AIMessageChunk({ content: "ROAR", id: anyString }), }, }, { event: "on_chain_end", name: "my_chain", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, data: { output: new AIMessageChunk({ content: "ROAR", id: anyString }) }, }, ]); }); test("Chat model that supports streaming, but is invoked, should still emit on_stream events", async () => { const template = ChatPromptTemplate.fromMessages([ ["system", "You are Godzilla"], ["human", "{question}"], ]).withConfig({ runName: "my_template", tags: ["my_template"], }); const model = new FakeListChatModel({ responses: ["ROAR"], }).withConfig({ metadata: { a: "b" }, tags: ["my_model"], runName: "my_model", }); const chain = template .pipe(async (val, config) => { const result = await model.invoke(val, config); return result; }) .withConfig({ metadata: { foo: "bar" }, tags: ["my_chain"], runName: "my_chain", }); const events = []; const eventStream = await chain.streamEvents( { question: "hello" }, { version: "v2" } ); for await (const event of eventStream) { events.push(event); } // used here to avoid casting every ID const anyString = expect.any(String) as unknown as string; expect(events).toEqual([ { run_id: expect.any(String), event: "on_chain_start", name: "my_chain", tags: ["my_chain"], metadata: { foo: "bar", }, data: { input: { question: "hello", }, }, }, { data: { input: { question: "hello" } }, event: "on_prompt_start", metadata: { foo: "bar" }, name: "my_template", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "seq:step:1", "my_template"]), }, { event: "on_prompt_end", name: "my_template", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:1", "my_template", "my_chain"]), metadata: { foo: "bar", }, data: { input: { question: 
"hello", }, output: await template.invoke({ question: "hello" }), }, }, { event: "on_chain_start", data: {}, name: "RunnableLambda", tags: ["seq:step:2", "my_chain"], run_id: expect.any(String), metadata: { foo: "bar", }, }, { event: "on_chat_model_start", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["my_model", "my_chain"]), metadata: { foo: "bar", a: "b", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, data: { input: { messages: [ [new SystemMessage("You are Godzilla"), new HumanMessage("hello")], ], }, }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "R" }) }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "O" }) }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "A" }) }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "R" }) }, }, { event: "on_chat_model_end", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["my_model", "my_chain"]), metadata: { foo: "bar", a: 
"b", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, data: { input: { messages: [ [new SystemMessage("You are Godzilla"), new HumanMessage("hello")], ], }, output: new AIMessageChunk({ id: anyString, content: "ROAR" }), }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:2", "my_chain"]), metadata: { foo: "bar", }, name: "RunnableLambda", data: { chunk: new AIMessageChunk({ id: anyString, content: "ROAR" }) }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessageChunk({ id: anyString, content: "ROAR" }) }, }, { event: "on_chain_end", name: "RunnableLambda", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:2", "my_chain"]), metadata: { foo: "bar", }, data: { input: await template.invoke({ question: "hello" }), output: new AIMessageChunk({ id: anyString, content: "ROAR" }), }, }, { event: "on_chain_end", name: "my_chain", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, data: { output: new AIMessageChunk({ id: anyString, content: "ROAR" }), }, }, ]); }); test("Chat model that doesn't support streaming, but is invoked, should emit one on_stream event", async () => { const template = ChatPromptTemplate.fromMessages([ ["system", "You are Godzilla"], ["human", "{question}"], ]).withConfig({ runName: "my_template", tags: ["my_template"], }); const model = new FakeChatModel({}).withConfig({ metadata: { a: "b" }, tags: ["my_model"], runName: "my_model", }); const chain = template .pipe(async (val, config) => { const result = await model.invoke(val, config); return result; }) .withConfig({ metadata: { foo: "bar" }, tags: ["my_chain"], runName: "my_chain", }); const events = []; const eventStream = await chain.streamEvents( { question: "hello" }, { version: "v2" } ); for await (const event of eventStream) { events.push(event); } const anyString = 
expect.any(String) as unknown as string; expect(events).toEqual([ { run_id: expect.any(String), event: "on_chain_start", name: "my_chain", tags: ["my_chain"], metadata: { foo: "bar", }, data: { input: { question: "hello", }, }, }, { data: { input: { question: "hello" } }, event: "on_prompt_start", metadata: { foo: "bar" }, name: "my_template", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "seq:step:1", "my_template"]), }, { event: "on_prompt_end", name: "my_template", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:1", "my_template", "my_chain"]), metadata: { foo: "bar", }, data: { input: { question: "hello", }, output: await template.invoke({ question: "hello" }), }, }, { event: "on_chain_start", data: {}, name: "RunnableLambda", tags: ["seq:step:2", "my_chain"], run_id: expect.any(String), metadata: { foo: "bar", }, }, { event: "on_chat_model_start", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["my_model", "my_chain"]), metadata: { foo: "bar", a: "b", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, data: { input: { messages: [ [new SystemMessage("You are Godzilla"), new HumanMessage("hello")], ], }, }, }, { event: "on_chat_model_stream", run_id: expect.any(String), tags: expect.arrayContaining(["my_chain", "my_model"]), metadata: { a: "b", foo: "bar", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "You are Godzilla\nhello", }), }, }, { event: "on_chat_model_end", name: "my_model", run_id: expect.any(String), tags: expect.arrayContaining(["my_model", "my_chain"]), metadata: { foo: "bar", a: "b", ls_model_type: "chat", ls_stop: undefined, ls_provider: model.getName(), }, data: { input: { messages: [ [new SystemMessage("You are Godzilla"), new HumanMessage("hello")], ], }, output: new AIMessage({ id: anyString, content: "You are Godzilla\nhello", }), }, }, { 
event: "on_chain_stream", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:2", "my_chain"]), metadata: { foo: "bar", }, name: "RunnableLambda", data: { chunk: new AIMessage({ id: anyString, content: "You are Godzilla\nhello", }), }, }, { event: "on_chain_stream", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, name: "my_chain", data: { chunk: new AIMessage({ id: anyString, content: "You are Godzilla\nhello", }), }, }, { event: "on_chain_end", name: "RunnableLambda", run_id: expect.any(String), tags: expect.arrayContaining(["seq:step:2", "my_chain"]), metadata: { foo: "bar", }, data: { input: await template.invoke({ question: "hello" }), output: new AIMessage({ id: anyString, content: "You are Godzilla\nhello", }), }, }, { event: "on_chain_end", name: "my_chain", run_id: expect.any(String), tags: ["my_chain"], metadata: { foo: "bar", }, data: { output: new AIMessage({ id: anyString, content: "You are Godzilla\nhello", }), }, }, ]); }); test("LLM that supports streaming, but is invoked, should still emit on_stream events", async () => { const template = PromptTemplate.fromTemplate( `You are Godzilla\n{question}` ).withConfig({ runName: "my_template", tags: ["my_template"], }); const model = new FakeStreamingLLM({ responses: ["ROAR"], }).withConfig({ metadata: { a: "b" }, tags: ["my_model"], runName: "my_model", }); const chain = template .pipe(async (val, config) => { const result = await model.invoke(val, config); return result; }) .withConfig({ metadata: { foo: "bar" }, tags: ["my_chain"], runName: "my_chain", }); const events = []; const eventStream = await chain.streamEvents( { question: "hello" }, { version: "v2" } ); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { event: "on_chain_start", data: { input: { question: "hello", }, }, name: "my_chain", tags: ["my_chain"], run_id: expect.any(String), metadata: { foo: "bar", }, }, { event: "on_prompt_start", data: { input: { 
question: "hello", }, }, name: "my_template", tags: ["seq:step:1", "my_template", "my_chain"], run_id: expect.any(String), metadata: { foo: "bar", }, }, { event: "on_prompt_end", data: { output: await template.invoke({ question: "hello" }), input: { question: "hello", }, }, run_id: expect.any(String), name: "my_template", tags: ["seq:step:1", "my_template", "my_chain"], metadata: { foo: "bar", }, }, { event: "on_chain_start", data: {}, name: "RunnableLambda", tags: ["seq:step:2", "my_chain"], run_id: expect.any(String), metadata: { foo: "bar", }, }, { event: "on_llm_start", data: { input: { prompts: ["You are Godzilla\nhello"], }, }, name: "my_model", tags: ["my_model", "my_chain"], run_id: expect.any(String), metadata: { foo: "bar", a: "b", }, }, { event: "on_llm_stream", data: { chunk: new GenerationChunk({ text: "R", }), }, run_id: expect.any(String), name: "my_model", tags: ["my_model", "my_chain"], metadata: { foo: "bar", a: "b", }, }, { event: "on_llm_stream", data: { chunk: new GenerationChunk({ text: "O", }), }, run_id: expect.any(String), name: "my_model", tags: ["my_model", "my_chain"], metadata: { foo: "bar", a: "b", }, }, { event: "on_llm_stream", data: { chunk: new GenerationChunk({ text: "A", }), }, run_id: expect.any(String), name: "my_model", tags: ["my_model", "my_chain"], metadata: { foo: "bar", a: "b", }, }, { event: "on_llm_stream", data: { chunk: new GenerationChunk({ text: "R", }), }, run_id: expect.any(String), name: "my_model", tags: ["my_model", "my_chain"], metadata: { foo: "bar", a: "b", }, }, { event: "on_llm_end", data: { output: { generations: [ [ { text: "ROAR", generationInfo: {}, }, ], ], llmOutput: {}, }, input: { prompts: ["You are Godzilla\nhello"], }, }, run_id: expect.any(String), name: "my_model", tags: ["my_model", "my_chain"], metadata: { foo: "bar", a: "b", }, }, { event: "on_chain_stream", run_id: expect.any(String), name: "RunnableLambda", tags: ["seq:step:2", "my_chain"], metadata: { foo: "bar", }, data: { chunk: "ROAR", 
}, }, { event: "on_chain_stream", run_id: expect.any(String), name: "my_chain", tags: ["my_chain"], metadata: { foo: "bar", }, data: { chunk: "ROAR", }, }, { event: "on_chain_end", data: { output: "ROAR", input: await template.invoke({ question: "hello" }), }, run_id: expect.any(String), name: "RunnableLambda", tags: ["seq:step:2", "my_chain"], metadata: { foo: "bar", }, }, { event: "on_chain_end", data: { output: "ROAR", }, run_id: expect.any(String), name: "my_chain", tags: ["my_chain"], metadata: { foo: "bar", }, }, ]); }); test("LLM that doesn't support streaming, but is invoked, should emit one on_stream event", async () => { const template = PromptTemplate.fromTemplate( `You are Godzilla\n{question}` ).withConfig({ runName: "my_template", tags: ["my_template"], }); const model = new FakeLLM({}).withConfig({ metadata: { a: "b" }, tags: ["my_model"], runName: "my_model", }); const chain = template .pipe(async (val, config) => { const result = await model.invoke(val, config); return result; }) .withConfig({ metadata: { foo: "bar" }, tags: ["my_chain"], runName: "my_chain", }); const events = []; const eventStream = await chain.streamEvents( { question: "hello" }, { version: "v2" } ); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { event: "on_chain_start", data: { input: { question: "hello", }, }, name: "my_chain", tags: ["my_chain"], run_id: expect.any(String), metadata: { foo: "bar", }, }, { event: "on_prompt_start", data: { input: { question: "hello", }, }, name: "my_template", tags: ["seq:step:1", "my_template", "my_chain"], run_id: expect.any(String), metadata: { foo: "bar", }, }, { event: "on_prompt_end", data: { output: await template.invoke({ question: "hello" }), input: { question: "hello", }, }, run_id: expect.any(String), name: "my_template", tags: ["seq:step:1", "my_template", "my_chain"], metadata: { foo: "bar", }, }, { event: "on_chain_start", data: {}, name: "RunnableLambda", tags: ["seq:step:2", 
"my_chain"], run_id: expect.any(String), metadata: { foo: "bar", }, }, { event: "on_llm_start", data: { input: { prompts: ["You are Godzilla\nhello"], }, }, name: "my_model", tags: ["my_model", "my_chain"], run_id: expect.any(String), metadata: { foo: "bar", a: "b", }, }, { event: "on_llm_stream", data: { chunk: new GenerationChunk({ text: "You are Godzilla\nhello", }), }, run_id: expect.any(String), name: "my_model", tags: ["my_model", "my_chain"], metadata: { foo: "bar", a: "b", }, }, { event: "on_llm_end", data: { output: { generations: [ [ { text: "You are Godzilla\nhello", generationInfo: undefined, }, ], ], llmOutput: {}, }, input: { prompts: ["You are Godzilla\nhello"], }, }, run_id: expect.any(String), name: "my_model", tags: ["my_model", "my_chain"], metadata: { foo: "bar", a: "b", }, }, { event: "on_chain_stream", run_id: expect.any(String), name: "RunnableLambda", tags: ["seq:step:2", "my_chain"], metadata: { foo: "bar", }, data: { chunk: "You are Godzilla\nhello", }, }, { event: "on_chain_stream", run_id: expect.any(String), name: "my_chain", tags: ["my_chain"], metadata: { foo: "bar", }, data: { chunk: "You are Godzilla\nhello", }, }, { event: "on_chain_end", data: { output: "You are Godzilla\nhello", input: await template.invoke({ question: "hello" }), }, run_id: expect.any(String), name: "RunnableLambda", tags: ["seq:step:2", "my_chain"], metadata: { foo: "bar", }, }, { event: "on_chain_end", data: { output: "You are Godzilla\nhello", }, run_id: expect.any(String), name: "my_chain", tags: ["my_chain"], metadata: { foo: "bar", }, }, ]); }); test("Runnable streamEvents method with simple tools", async () => { const tool = new DynamicTool({ func: async () => "hello", name: "parameterless", description: "A tool that does nothing", }); const events = []; const eventStream = await tool.streamEvents({}, { version: "v2" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: {} }, event: "on_tool_start", 
metadata: {}, name: "parameterless", run_id: expect.any(String), tags: [], }, { data: { output: "hello" }, event: "on_tool_end", metadata: {}, name: "parameterless", run_id: expect.any(String), tags: [], }, ]); const toolWithParams = new DynamicStructuredTool({ func: async (params: { x: number; y: string }) => JSON.stringify({ x: params.x, y: params.y }), schema: z.object({ x: z.number(), y: z.string(), }), name: "with_parameters", description: "A tool that does nothing", }); const events2 = []; const eventStream2 = await toolWithParams.streamEvents( { x: 1, y: "2" }, { version: "v2" } ); for await (const event of eventStream2) { events2.push(event); } expect(events2).toEqual([ { data: { input: { x: 1, y: "2" } }, event: "on_tool_start", metadata: {}, name: "with_parameters", run_id: expect.any(String), tags: [], }, { data: { output: JSON.stringify({ x: 1, y: "2" }) }, event: "on_tool_end", metadata: {}, name: "with_parameters", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with a custom event", async () => { const lambda = RunnableLambda.from( async (params: { x: number; y: string }, config) => { await dispatchCustomEvent("testEvent", { someval: "test" }, config); await dispatchCustomEvent("testEvent", { someval: "test2" }, config); return JSON.stringify({ x: params.x, y: params.y }); } ); const events = []; const eventStream = await lambda.streamEvents( { x: 1, y: "2" }, { version: "v2" } ); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { event: "on_chain_start", data: { input: { x: 1, y: "2" } }, name: "RunnableLambda", tags: [], run_id: expect.any(String), metadata: {}, }, { event: "on_custom_event", run_id: expect.any(String), name: "testEvent", tags: [], metadata: {}, data: { someval: "test" }, }, { event: "on_custom_event", run_id: expect.any(String), name: "testEvent", tags: [], metadata: {}, data: { someval: "test2" }, }, { event: "on_chain_stream", run_id: expect.any(String), 
name: "RunnableLambda", tags: [], metadata: {}, data: { chunk: '{"x":1,"y":"2"}' }, }, { event: "on_chain_end", data: { output: '{"x":1,"y":"2"}' }, run_id: expect.any(String), name: "RunnableLambda", tags: [], metadata: {}, }, ]); }); test("Custom event inside a custom tool", async () => { const customTool = tool( async (params: { x: number; y: string }, config) => { await dispatchCustomEvent("testEvent", { someval: "test" }, config); await dispatchCustomEvent("testEvent", { someval: "test2" }, config); return JSON.stringify({ x: params.x, y: params.y }); }, { schema: z.object({ x: z.number(), y: z.string() }), name: "testtool", } ); const events = []; const eventStream = await customTool.streamEvents( { x: 1, y: "2" }, { version: "v2" } ); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { event: "on_tool_start", data: { input: { x: 1, y: "2" } }, name: "testtool", tags: [], run_id: expect.any(String), metadata: {}, }, { event: "on_custom_event", run_id: expect.any(String), name: "testEvent", tags: [], metadata: {}, data: { someval: "test" }, }, { event: "on_custom_event", run_id: expect.any(String), name: "testEvent", tags: [], metadata: {}, data: { someval: "test2" }, }, { event: "on_tool_end", data: { output: '{"x":1,"y":"2"}' }, run_id: expect.any(String), name: "testtool", tags: [], metadata: {}, }, ]); }); test("Runnable streamEvents method with tools that return objects", async () => { const adderFunc = (_params: { x: number; y: number }) => { return JSON.stringify({ sum: 3 }); }; const parameterlessTool = tool(adderFunc, { name: "parameterless", }); const events = []; const eventStream = parameterlessTool.streamEvents({}, { version: "v2" }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: {} }, event: "on_tool_start", metadata: {}, name: "parameterless", run_id: expect.any(String), tags: [], }, { data: { output: JSON.stringify({ sum: 3 }), }, event: 
"on_tool_end", metadata: {}, name: "parameterless", run_id: expect.any(String), tags: [], }, ]); const adderTool = tool(adderFunc, { name: "with_parameters", description: "A tool that does nothing", schema: z.object({ x: z.number(), y: z.number(), }), }); const events2 = []; const eventStream2 = adderTool.streamEvents( { x: 1, y: 2 }, { version: "v2" } ); for await (const event of eventStream2) { events2.push(event); } expect(events2).toEqual([ { data: { input: { x: 1, y: 2 } }, event: "on_tool_start", metadata: {}, name: "with_parameters", run_id: expect.any(String), tags: [], }, { data: { output: JSON.stringify({ sum: 3 }) }, event: "on_tool_end", metadata: {}, name: "with_parameters", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with a retriever", async () => { const retriever = new FakeRetriever({ output: [ new Document({ pageContent: "hello world!", metadata: { foo: "bar" } }), new Document({ pageContent: "goodbye world!", metadata: { food: "spare" }, }), ], }); const events = []; const eventStream = await retriever.streamEvents("hello", { version: "v2", }); for await (const event of eventStream) { events.push(event); } expect(events).toEqual([ { data: { input: "hello", }, event: "on_retriever_start", metadata: {}, name: "FakeRetriever", run_id: expect.any(String), tags: [], }, { data: { output: [ new Document({ pageContent: "hello world!", metadata: { foo: "bar" }, }), new Document({ pageContent: "goodbye world!", metadata: { food: "spare" }, }), ], }, event: "on_retriever_end", metadata: {}, name: "FakeRetriever", run_id: expect.any(String), tags: [], }, ]); }); test("Runnable streamEvents method with text/event-stream encoding", async () => { const chain = RunnableLambda.from(reverse).withConfig({ runName: "reverse", }); const events = []; const eventStream = await chain.streamEvents("hello", { version: "v2", encoding: "text/event-stream", runId: "1234", }); for await (const event of eventStream) { events.push(event); 
} const decoder = new TextDecoder(); expect(events.length).toEqual(4); const dataEvents = events .slice(0, 3) .map((event) => decoder.decode(event).split("event: data\ndata: ")[1]); const expectedPayloads = [ { data: { input: "hello" }, event: "on_chain_start", metadata: {}, name: "reverse", run_id: "1234", tags: [], }, { data: { chunk: "olleh" }, event: "on_chain_stream", metadata: {}, name: "reverse", run_id: "1234", tags: [], }, { data: { output: "olleh" }, event: "on_chain_end", metadata: {}, name: "reverse", run_id: "1234", tags: [], }, ]; for (let i = 0; i < dataEvents.length; i += 1) { expect(dataEvents[i].endsWith("\n\n")).toBe(true); expect(JSON.parse(dataEvents[i].replace("\n\n", ""))).toEqual( expectedPayloads[i] ); } expect(decoder.decode(events[3])).toEqual("event: end\n\n"); }); test("Runnable streamEvents method should respect passed signal", async () => { const r = RunnableLambda.from(reverse); const chain = r .withConfig({ runName: "1" }) .pipe(r.withConfig({ runName: "2" })) .pipe(r.withConfig({ runName: "3" })); const controller = new AbortController(); const eventStream = await chain.streamEvents("hello", { version: "v2", signal: controller.signal, }); await expect(async () => { for await (const _ of eventStream) { // Abort after the first chunk controller.abort(); } }).rejects.toThrowError(); });
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/signal.test.ts
/* eslint-disable no-promise-executor-return */
/* eslint-disable @typescript-eslint/no-explicit-any */
// Tests that AbortSignal passed via RunnableConfig propagates through the
// various Runnable composition types for invoke, stream, and batch.
import { test, describe, expect } from "@jest/globals";
import {
  Runnable,
  RunnableLambda,
  RunnableMap,
  RunnablePassthrough,
  RunnableSequence,
  RunnableWithMessageHistory,
} from "../index.js";
import {
  FakeChatMessageHistory,
  FakeListChatModel,
} from "../../utils/testing/index.js";
import { StringOutputParser } from "../../output_parsers/string.js";
import { Document } from "../../documents/document.js";
import { ChatPromptTemplate } from "../../prompts/chat.js";

// Shared fake model; the 500ms sleep keeps each run in flight long enough
// for an AbortController to fire mid-execution in the tests below.
const chatModel = new FakeListChatModel({ responses: ["hey"], sleep: 500 });

// One entry per runnable variety whose signal handling is covered.
// Each case supplies the runnable, a matching input, and optionally
// `skipStream` for cases where streaming is not exercised.
const TEST_CASES = {
  map: {
    runnable: RunnableMap.from({
      question: new RunnablePassthrough(),
      context: async () => {
        await new Promise((resolve) => setTimeout(resolve, 500));
        return "SOME STUFF";
      },
    }),
    input: "testing",
  },
  binding: {
    runnable: RunnableLambda.from(
      () => new Promise((resolve) => setTimeout(resolve, 500))
    ),
    input: "testing",
  },
  fallbacks: {
    // Primary model always throws; the fallback is the same slow model.
    runnable: chatModel
      .bind({ thrownErrorString: "expected" })
      .withFallbacks({ fallbacks: [chatModel] }),
    input: "testing",
    skipStream: true,
  },
  sequence: {
    runnable: RunnableSequence.from([
      RunnablePassthrough.assign({
        test: () => chatModel,
      }),
      () => {},
    ]),
    input: { question: "testing" },
  },
  lambda: {
    runnable: RunnableLambda.from(
      () => new Promise((resolve) => setTimeout(resolve, 500))
    ),
    input: {},
  },
  history: {
    runnable: new RunnableWithMessageHistory({
      runnable: chatModel,
      config: {},
      getMessageHistory: () => new FakeChatMessageHistory(),
    }),
    input: "testing",
  },
};

describe.each(Object.keys(TEST_CASES))("Test runnable %s", (name) => {
  const {
    runnable,
    input,
    skipStream,
  }: { runnable: Runnable; input: any; skipStream?: boolean } =
    TEST_CASES[name as keyof typeof TEST_CASES];
  // Aborting immediately (before the runnable settles) must reject.
  test("Test invoke with signal", async () => {
    await expect(async () => {
      const controller = new AbortController();
      await Promise.all([
        runnable.invoke(input, {
          signal: controller.signal,
        }),
        new Promise<void>((resolve) => {
          controller.abort();
          resolve();
        }),
      ]);
    }).rejects.toThrowError();
  });
  // Aborting mid-flight (250ms into a ~500ms run) must also reject.
  test("Test invoke with signal with a delay", async () => {
    await expect(async () => {
      const controller = new AbortController();
      await Promise.all([
        runnable.invoke(input, {
          signal: controller.signal,
        }),
        new Promise<void>((resolve) => {
          setTimeout(() => {
            controller.abort();
            resolve();
          }, 250);
        }),
      ]);
    }).rejects.toThrowError();
  });
  // Aborting while consuming a stream must terminate iteration with a throw.
  test("Test stream with signal", async () => {
    if (skipStream) {
      return;
    }
    const controller = new AbortController();
    await expect(async () => {
      const stream = await runnable.stream(input, {
        signal: controller.signal,
      });
      for await (const _ of stream) {
        controller.abort();
      }
    }).rejects.toThrowError();
  });
  // Batch variants mirror the invoke tests with two concurrent inputs.
  test("Test batch with signal", async () => {
    await expect(async () => {
      const controller = new AbortController();
      await Promise.all([
        runnable.batch([input, input], {
          signal: controller.signal,
        }),
        new Promise<void>((resolve) => {
          controller.abort();
          resolve();
        }),
      ]);
    }).rejects.toThrowError();
  });
  test("Test batch with signal with a delay", async () => {
    await expect(async () => {
      const controller = new AbortController();
      await Promise.all([
        runnable.batch([input, input], {
          signal: controller.signal,
        }),
        new Promise<void>((resolve) => {
          setTimeout(() => {
            controller.abort();
            resolve();
          }, 250);
        }),
      ]);
    }).rejects.toThrowError();
  });
});

// Passes a never-aborted signal through a RAG-style chain and drains the
// stream. NOTE(review): presumably guards against Node emitting
// MaxListeners/AbortSignal warnings when signals are attached — confirm.
test("Should not raise node warning", async () => {
  const formatDocumentsAsString = (documents: Document[]) => {
    return documents.map((doc) => doc.pageContent).join("\n\n");
  };
  const retriever = RunnableLambda.from(() => {
    return [
      new Document({ pageContent: "test1" }),
      new Document({ pageContent: "test2" }),
      new Document({ pageContent: "test4" }),
      new Document({ pageContent: "test5" }),
    ];
  });
  const ragChainWithSources = RunnableMap.from({
    // Return raw documents here for now since we want to return them at
    // the end - we'll format in the next step of the chain
    context: retriever,
    question: new RunnablePassthrough(),
  }).assign({
    answer: RunnableSequence.from([
      (input) => {
        return {
          // Now we format the documents as strings for the prompt
          context: formatDocumentsAsString(input.context as Document[]),
          question: input.question,
        };
      },
      ChatPromptTemplate.fromTemplate("Hello"),
      new FakeListChatModel({ responses: ["test"] }),
      new StringOutputParser(),
    ]),
  });
  const stream = await ragChainWithSources.stream(
    {
      question: "What is the capital of France?",
    },
    {
      signal: new AbortController().signal,
    }
  );
  for await (const _ of stream) {
    // console.log(_);
  }
});
0
lc_public_repos/langchainjs/langchain-core/src/runnables
lc_public_repos/langchainjs/langchain-core/src/runnables/tests/runnable_retry.test.ts
/* eslint-disable no-promise-executor-return */
/* eslint-disable @typescript-eslint/no-explicit-any */
// Tests for Runnable.withRetry(): retry counts, failed-attempt callbacks,
// and batch behavior (including returnExceptions and not re-running
// already-successful items).
import { test, expect } from "@jest/globals";
import { RunnableLambda } from "../base.js";

test("RunnableRetry invoke", async () => {
  let attemptCount = 0;
  // Fails on the first two attempts, then returns the attempt number.
  const runnable = new RunnableLambda({
    func: (_thing: unknown) => {
      attemptCount += 1;
      if (attemptCount < 3) {
        throw new Error("TEST ERROR");
      } else {
        return attemptCount;
      }
    },
  });
  const runnableRetry = runnable.withRetry();
  const result = await runnableRetry.invoke("");
  // The default retry policy retries far enough to reach the 3rd attempt.
  expect(result).toEqual(3);
});

test("RunnableRetry invoke with a failed attempt handler", async () => {
  let attemptCount = 0;
  const runnable = new RunnableLambda({
    func: (_thing: unknown) => {
      attemptCount += 1;
      if (attemptCount < 3) {
        throw new Error("TEST ERROR");
      } else {
        return attemptCount;
      }
    },
  });
  const runnableRetry = runnable.withRetry({
    // The handler receives both the thrown error and the original input.
    onFailedAttempt: (error, input) => {
      expect(error.message).toBe("TEST ERROR");
      expect(input).toBe("test");
    },
  });
  const result = await runnableRetry.invoke("test");
  expect(result).toEqual(3);
});

test("RunnableRetry batch with thrown errors", async () => {
  // Always fails; with a single attempt allowed, batch must reject.
  const runnable = new RunnableLambda({
    func: (_thing: unknown) => {
      throw new Error("TEST ERROR");
    },
  });
  const runnableRetry = runnable.withRetry({
    stopAfterAttempt: 1,
  });
  await expect(async () => {
    await runnableRetry.batch(["", "", ""]);
  }).rejects.toThrow();
});

test("RunnableRetry batch with all returned errors", async () => {
  let attemptCount = 0;
  // attemptCount is shared across all 3 batch items; with only one
  // attempt each, all 3 calls land in the throwing range (< 5).
  const runnable = new RunnableLambda({
    func: (_thing: unknown) => {
      attemptCount += 1;
      if (attemptCount < 5) {
        throw new Error("TEST ERROR");
      } else {
        return attemptCount;
      }
    },
  });
  const runnableRetry = runnable.withRetry({
    stopAfterAttempt: 1,
  });
  // returnExceptions surfaces per-item errors instead of rejecting.
  const result = await runnableRetry.batch(["", "", ""], undefined, {
    returnExceptions: true,
  });
  expect(result).toEqual([
    new Error("TEST ERROR"),
    new Error("TEST ERROR"),
    new Error("TEST ERROR"),
  ]);
});

test("RunnableRetry batch should not retry successful requests", async () => {
  let attemptCount = 0;
  const runnable = new RunnableLambda({
    func: (_thing: unknown) => {
      attemptCount += 1;
      if (attemptCount < 3) {
        throw new Error("TEST ERROR");
      } else {
        return attemptCount;
      }
    },
  });
  const runnableRetry = runnable.withRetry({
    stopAfterAttempt: 2,
  });
  const result = await runnableRetry.batch(["", "", ""]);
  // 3 first-round attempts + 2 retries for the two early failures = 5;
  // the item that succeeded on the first round is not re-run.
  expect(attemptCount).toEqual(5);
  expect(result.sort()).toEqual([3, 4, 5]);
});

test("RunnableRetry batch with an onFailedAttempt handler", async () => {
  let attemptCount = 0;
  const runnable = new RunnableLambda({
    func: (_thing: unknown) => {
      attemptCount += 1;
      if (attemptCount < 3) {
        throw new Error("TEST ERROR");
      } else {
        return attemptCount;
      }
    },
  });
  const runnableRetry = runnable.withRetry({
    stopAfterAttempt: 2,
    // Only the first batch item fails here, so the handler always sees
    // "test1" as the input.
    onFailedAttempt: (error, input) => {
      expect(error.message).toEqual("TEST ERROR");
      expect(input).toEqual("test1");
    },
  });
  const result = await runnableRetry.batch(["test1", "test2", "test3"]);
  expect(attemptCount).toEqual(5);
  expect(result.sort()).toEqual([3, 4, 5]);
});
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/types/type-utils.ts
// Utility for marking only some keys of an interface as optional
// Compare to Partial<T> which marks all keys as optional
// Example: Optional<{ a: string; b: number }, "b"> == { a: string; b?: number }
export type Optional<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/types/stream.ts
// Make this a type to override ReadableStream's async iterator type in case
// the popular web-streams-polyfill is imported - the supplied types
// in that case don't quite match.
// A ReadableStream<T> that can also be consumed with `for await … of`.
export type IterableReadableStreamInterface<T> = ReadableStream<T> &
  AsyncIterable<T>;
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/load/serializable.ts
import { type SerializedFields, keyToJson, mapKeys } from "./map_keys.js"; export interface BaseSerialized<T extends string> { lc: number; type: T; id: string[]; name?: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any graph?: Record<string, any>; } export interface SerializedConstructor extends BaseSerialized<"constructor"> { kwargs: SerializedFields; } export interface SerializedSecret extends BaseSerialized<"secret"> {} export interface SerializedNotImplemented extends BaseSerialized<"not_implemented"> {} export type Serialized = | SerializedConstructor | SerializedSecret | SerializedNotImplemented; function shallowCopy<T extends object>(obj: T): T { return Array.isArray(obj) ? ([...obj] as T) : ({ ...obj } as T); } function replaceSecrets( root: SerializedFields, secretsMap: { [key: string]: string } ): SerializedFields { const result = shallowCopy(root); for (const [path, secretId] of Object.entries(secretsMap)) { const [last, ...partsReverse] = path.split(".").reverse(); // eslint-disable-next-line @typescript-eslint/no-explicit-any let current: any = result; for (const part of partsReverse.reverse()) { if (current[part] === undefined) { break; } current[part] = shallowCopy(current[part]); current = current[part]; } if (current[last] !== undefined) { current[last] = { lc: 1, type: "secret", id: [secretId], }; } } return result; } /** * Get a unique name for the module, rather than parent class implementations. * Should not be subclassed, subclass lc_name above instead. */ export function get_lc_unique_name( // eslint-disable-next-line @typescript-eslint/no-use-before-define serializableClass: typeof Serializable ): string { // "super" here would refer to the parent class of Serializable, // when we want the parent class of the module actually calling this method. 
const parentClass = Object.getPrototypeOf(serializableClass); const lcNameIsSubclassed = typeof serializableClass.lc_name === "function" && (typeof parentClass.lc_name !== "function" || serializableClass.lc_name() !== parentClass.lc_name()); if (lcNameIsSubclassed) { return serializableClass.lc_name(); } else { return serializableClass.name; } } export interface SerializableInterface { get lc_id(): string[]; } export abstract class Serializable implements SerializableInterface { lc_serializable = false; lc_kwargs: SerializedFields; /** * A path to the module that contains the class, eg. ["langchain", "llms"] * Usually should be the same as the entrypoint the class is exported from. */ abstract lc_namespace: string[]; /** * The name of the serializable. Override to provide an alias or * to preserve the serialized module name in minified environments. * * Implemented as a static method to support loading logic. */ static lc_name(): string { return this.name; } /** * The final serialized identifier for the module. */ get lc_id(): string[] { return [ ...this.lc_namespace, get_lc_unique_name(this.constructor as typeof Serializable), ]; } /** * A map of secrets, which will be omitted from serialization. * Keys are paths to the secret in constructor args, e.g. "foo.bar.baz". * Values are the secret ids, which will be used when deserializing. */ get lc_secrets(): { [key: string]: string } | undefined { return undefined; } /** * A map of additional attributes to merge with constructor args. * Keys are the attribute names, e.g. "foo". * Values are the attribute values, which will be serialized. * These attributes need to be accepted by the constructor as arguments. */ get lc_attributes(): SerializedFields | undefined { return undefined; } /** * A map of aliases for constructor args. * Keys are the attribute names, e.g. "foo". * Values are the alias that will replace the key in serialization. * This is used to eg. make argument names match Python. 
*/ get lc_aliases(): { [key: string]: string } | undefined { return undefined; } constructor(kwargs?: SerializedFields, ..._args: never[]) { this.lc_kwargs = kwargs || {}; } toJSON(): Serialized { if (!this.lc_serializable) { return this.toJSONNotImplemented(); } if ( // eslint-disable-next-line no-instanceof/no-instanceof this.lc_kwargs instanceof Serializable || typeof this.lc_kwargs !== "object" || Array.isArray(this.lc_kwargs) ) { // We do not support serialization of classes with arg not a POJO // I'm aware the check above isn't as strict as it could be return this.toJSONNotImplemented(); } const aliases: { [key: string]: string } = {}; const secrets: { [key: string]: string } = {}; const kwargs = Object.keys(this.lc_kwargs).reduce((acc, key) => { acc[key] = key in this ? this[key as keyof this] : this.lc_kwargs[key]; return acc; }, {} as SerializedFields); // get secrets, attributes and aliases from all superclasses for ( // eslint-disable-next-line @typescript-eslint/no-this-alias let current = Object.getPrototypeOf(this); current; current = Object.getPrototypeOf(current) ) { Object.assign(aliases, Reflect.get(current, "lc_aliases", this)); Object.assign(secrets, Reflect.get(current, "lc_secrets", this)); Object.assign(kwargs, Reflect.get(current, "lc_attributes", this)); } // include all secrets used, even if not in kwargs, // will be replaced with sentinel value in replaceSecrets Object.keys(secrets).forEach((keyPath) => { // eslint-disable-next-line @typescript-eslint/no-this-alias, @typescript-eslint/no-explicit-any let read: any = this; // eslint-disable-next-line @typescript-eslint/no-explicit-any let write: any = kwargs; const [last, ...partsReverse] = keyPath.split(".").reverse(); for (const key of partsReverse.reverse()) { if (!(key in read) || read[key] === undefined) return; if (!(key in write) || write[key] === undefined) { if (typeof read[key] === "object" && read[key] != null) { write[key] = {}; } else if (Array.isArray(read[key])) { write[key] 
= []; } } read = read[key]; write = write[key]; } if (last in read && read[last] !== undefined) { write[last] = write[last] || read[last]; } }); return { lc: 1, type: "constructor", id: this.lc_id, kwargs: mapKeys( Object.keys(secrets).length ? replaceSecrets(kwargs, secrets) : kwargs, keyToJson, aliases ), }; } toJSONNotImplemented(): SerializedNotImplemented { return { lc: 1, type: "not_implemented", id: this.lc_id, }; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/load/import_constants.ts
// Auto-generated by `scripts/create-entrypoints.js`. Do not edit manually.
// Entrypoints whose modules are loaded optionally at deserialization time;
// currently none for this package.
export const optionalImportEntrypoints: string[] = [];
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/load/map_keys.ts
import snakeCase from "decamelize"; import camelCase from "camelcase"; export interface SerializedFields { // eslint-disable-next-line @typescript-eslint/no-explicit-any [key: string]: any; } export interface SerializedKeyAlias { [key: string]: string; } export function keyToJson(key: string, map?: SerializedKeyAlias): string { return map?.[key] || snakeCase(key); } export function keyFromJson(key: string, map?: SerializedKeyAlias): string { return map?.[key] || camelCase(key); } export function mapKeys( fields: SerializedFields, mapper: typeof keyToJson, map?: SerializedKeyAlias ): SerializedFields { const mapped: SerializedFields = {}; for (const key in fields) { if (Object.hasOwn(fields, key)) { mapped[mapper(key, map)] = fields[key]; } } return mapped; }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/load/import_type.ts
// Auto-generated by `scripts/create-entrypoints.js`. Do not edit manually.

// Maps optional entrypoint paths to lazily-imported modules; currently empty.
export interface OptionalImportMap {}

// Maps secret key names to their values for deserialization; currently empty.
export interface SecretMap {}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/load/import_map.ts
// Auto-generated by `scripts/create-entrypoints.js`. Do not edit manually. export * as agents from "../agents.js"; export * as caches from "../caches/base.js"; export * as callbacks__base from "../callbacks/base.js"; export * as callbacks__manager from "../callbacks/manager.js"; export * as callbacks__promises from "../callbacks/promises.js"; export * as chat_history from "../chat_history.js"; export * as documents from "../documents/index.js"; export * as embeddings from "../embeddings.js"; export * as example_selectors from "../example_selectors/index.js"; export * as language_models__base from "../language_models/base.js"; export * as language_models__chat_models from "../language_models/chat_models.js"; export * as language_models__llms from "../language_models/llms.js"; export * as load__serializable from "../load/serializable.js"; export * as memory from "../memory.js"; export * as messages from "../messages/index.js"; export * as output_parsers from "../output_parsers/index.js"; export * as outputs from "../outputs.js"; export * as prompts from "../prompts/index.js"; export * as prompt_values from "../prompt_values.js"; export * as runnables from "../runnables/index.js"; export * as retrievers from "../retrievers/index.js"; export * as stores from "../stores.js"; export * as tools from "../tools/index.js"; export * as tracers__base from "../tracers/base.js"; export * as tracers__console from "../tracers/console.js"; export * as tracers__initialize from "../tracers/initialize.js"; export * as tracers__log_stream from "../tracers/log_stream.js"; export * as tracers__run_collector from "../tracers/run_collector.js"; export * as tracers__tracer_langchain from "../tracers/tracer_langchain.js"; export * as tracers__tracer_langchain_v1 from "../tracers/tracer_langchain_v1.js"; export * as utils__async_caller from "../utils/async_caller.js"; export * as utils__chunk_array from "../utils/chunk_array.js"; export * as utils__env from "../utils/env.js"; export * as 
utils__function_calling from "../utils/function_calling.js"; export * as utils__hash from "../utils/hash.js"; export * as utils__json_patch from "../utils/json_patch.js"; export * as utils__json_schema from "../utils/json_schema.js"; export * as utils__math from "../utils/math.js"; export * as utils__stream from "../utils/stream.js"; export * as utils__testing from "../utils/testing/index.js"; export * as utils__tiktoken from "../utils/tiktoken.js"; export * as utils__types from "../utils/types/index.js"; export * as vectorstores from "../vectorstores.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/load/index.ts
import {
  Serializable,
  SerializedConstructor,
  SerializedNotImplemented,
  SerializedSecret,
  get_lc_unique_name,
} from "./serializable.js";
import { optionalImportEntrypoints as defaultOptionalImportEntrypoints } from "./import_constants.js";
import * as coreImportMap from "./import_map.js";
import type { OptionalImportMap, SecretMap } from "./import_type.js";
import { type SerializedFields, keyFromJson, mapKeys } from "./map_keys.js";
import { getEnvironmentVariable } from "../utils/env.js";

/**
 * Collects `lc_aliases` from the class's whole prototype chain and inverts
 * the result, yielding a map of serialized alias -> constructor arg name
 * suitable for deserialization.
 */
function combineAliasesAndInvert(constructor: typeof Serializable) {
  const aliases: { [key: string]: string } = {};
  for (
    // eslint-disable-next-line @typescript-eslint/no-this-alias
    let current = constructor;
    current && current.prototype;
    current = Object.getPrototypeOf(current)
  ) {
    Object.assign(aliases, Reflect.get(current.prototype, "lc_aliases"));
  }
  return Object.entries(aliases).reduce((acc, [key, value]) => {
    acc[value] = key;
    return acc;
  }, {} as Record<string, string>);
}

/**
 * Recursively revives a parsed serialized value. Dispatches on the shape of
 * `value`: secret sentinels are resolved from `secretsMap` or the
 * environment; "not_implemented" markers throw; "constructor" payloads are
 * looked up in the import maps and re-instantiated; plain arrays/objects
 * are revived element-wise. `this` carries the lookup maps plus the current
 * `path` for error reporting.
 */
async function reviver(
  this: {
    optionalImportsMap?: OptionalImportMap;
    optionalImportEntrypoints?: string[];
    secretsMap?: SecretMap;
    importMap?: Record<string, unknown>;
    path?: string[];
  },
  value: unknown
): Promise<unknown> {
  const {
    optionalImportsMap = {},
    optionalImportEntrypoints = [],
    importMap = {},
    secretsMap = {},
    path = ["$"],
  } = this;
  const pathStr = path.join(".");
  if (
    typeof value === "object" &&
    value !== null &&
    !Array.isArray(value) &&
    "lc" in value &&
    "type" in value &&
    "id" in value &&
    value.lc === 1 &&
    value.type === "secret"
  ) {
    // Secret sentinel: resolve from the caller-supplied map first, then
    // fall back to an environment variable of the same name.
    const serialized = value as SerializedSecret;
    const [key] = serialized.id;
    if (key in secretsMap) {
      return secretsMap[key as keyof SecretMap];
    } else {
      const secretValueInEnv = getEnvironmentVariable(key);
      if (secretValueInEnv) {
        return secretValueInEnv;
      } else {
        throw new Error(
          `Missing key "${key}" for ${pathStr} in load(secretsMap={})`
        );
      }
    }
  } else if (
    typeof value === "object" &&
    value !== null &&
    !Array.isArray(value) &&
    "lc" in value &&
    "type" in value &&
    "id" in value &&
    value.lc === 1 &&
    value.type === "not_implemented"
  ) {
    // The object was serialized by a class that opted out of serialization;
    // it cannot be revived.
    const serialized = value as SerializedNotImplemented;
    const str = JSON.stringify(serialized);
    throw new Error(
      `Trying to load an object that doesn't implement serialization: ${pathStr} -> ${str}`
    );
  } else if (
    typeof value === "object" &&
    value !== null &&
    !Array.isArray(value) &&
    "lc" in value &&
    "type" in value &&
    "id" in value &&
    "kwargs" in value &&
    value.lc === 1
  ) {
    const serialized = value as SerializedConstructor;
    const str = JSON.stringify(serialized);
    // id is [ ...namespace, className ]; peel the class name off the end.
    const [name, ...namespaceReverse] = serialized.id.slice().reverse();
    const namespace = namespaceReverse.reverse();
    const importMaps = { langchain_core: coreImportMap, langchain: importMap };

    let module:
      | (typeof importMaps)["langchain_core"][keyof (typeof importMaps)["langchain_core"]]
      | (typeof importMaps)["langchain"][keyof (typeof importMaps)["langchain"]]
      | OptionalImportMap[keyof OptionalImportMap]
      | null = null;

    const optionalImportNamespaceAliases = [namespace.join("/")];
    if (namespace[0] === "langchain_community") {
      // Community classes may have been registered under the legacy
      // "langchain/..." alias as well.
      optionalImportNamespaceAliases.push(
        ["langchain", ...namespace.slice(1)].join("/")
      );
    }
    const matchingNamespaceAlias = optionalImportNamespaceAliases.find(
      (alias) => alias in optionalImportsMap
    );
    if (
      defaultOptionalImportEntrypoints
        .concat(optionalImportEntrypoints)
        .includes(namespace.join("/")) ||
      matchingNamespaceAlias
    ) {
      // Optional-import path. Note: if the namespace is a declared optional
      // entrypoint but the caller did not supply it in optionalImportsMap,
      // this intentionally throws rather than falling through to the
      // regular import maps.
      if (matchingNamespaceAlias !== undefined) {
        module = await optionalImportsMap[
          matchingNamespaceAlias as keyof typeof optionalImportsMap
        ];
      } else {
        throw new Error(
          `Missing key "${namespace.join(
            "/"
          )}" for ${pathStr} in load(optionalImportsMap={})`
        );
      }
    } else {
      let finalImportMap:
        | (typeof importMaps)["langchain"]
        | (typeof importMaps)["langchain_core"];
      // Currently, we only support langchain and langchain_core imports.
      if (namespace[0] === "langchain" || namespace[0] === "langchain_core") {
        finalImportMap = importMaps[namespace[0]];
        namespace.shift();
      } else {
        throw new Error(`Invalid namespace: ${pathStr} -> ${str}`);
      }
      // The root namespace "langchain" is not a valid import.
      if (namespace.length === 0) {
        throw new Error(`Invalid namespace: ${pathStr} -> ${str}`);
      }
      // Find the longest matching namespace.
      let importMapKey: string;
      do {
        importMapKey = namespace.join("__");
        if (importMapKey in finalImportMap) {
          break;
        } else {
          namespace.pop();
        }
      } while (namespace.length > 0);
      // If no matching namespace is found, throw an error.
      if (importMapKey in finalImportMap) {
        module = finalImportMap[importMapKey as keyof typeof finalImportMap];
      }
    }
    if (typeof module !== "object" || module === null) {
      throw new Error(`Invalid namespace: ${pathStr} -> ${str}`);
    }
    // Extract the builder from the import map.
    const builder =
      // look for a named export with the same name as the class
      module[name as keyof typeof module] ??
      // look for an export with a lc_name property matching the class name
      // this is necessary for classes that are minified
      Object.values(module).find(
        (v) =>
          typeof v === "function" &&
          get_lc_unique_name(v as typeof Serializable) === name
      );
    if (typeof builder !== "function") {
      // NOTE(review): "identifer" is a typo, but it is a runtime error
      // message that callers/tests may match on — left unchanged here.
      throw new Error(`Invalid identifer: ${pathStr} -> ${str}`);
    }
    // Recurse on the arguments, which may be serialized objects themselves
    const kwargs = await reviver.call(
      { ...this, path: [...path, "kwargs"] },
      serialized.kwargs
    );
    // Construct the object
    if (serialized.type === "constructor") {
      // eslint-disable-next-line new-cap, @typescript-eslint/no-explicit-any
      const instance = new (builder as any)(
        mapKeys(
          kwargs as SerializedFields,
          keyFromJson,
          combineAliasesAndInvert(builder)
        )
      );
      // Minification in serverless/edge runtimes will mangle the
      // name of classes presented in traces. As the names in import map
      // are present as-is even with minification, use these names instead
      Object.defineProperty(instance.constructor, "name", { value: name });
      return instance;
    } else {
      throw new Error(`Invalid type: ${pathStr} -> ${str}`);
    }
  } else if (typeof value === "object" && value !== null) {
    // Plain container: revive each element, extending the error path.
    if (Array.isArray(value)) {
      return Promise.all(
        value.map((v, i) =>
          reviver.call({ ...this, path: [...path, `${i}`] }, v)
        )
      );
    } else {
      return Object.fromEntries(
        await Promise.all(
          Object.entries(value).map(async ([key, value]) => [
            key,
            await reviver.call({ ...this, path: [...path, key] }, value),
          ])
        )
      );
    }
  }
  // Primitives pass through untouched.
  return value;
}

/**
 * Parses a serialized LangChain payload and revives it into live objects.
 *
 * @param text JSON string produced by `Serializable.toJSON()`.
 * @param mappings Optional secret values, optional-import modules and
 *   extra entrypoints/import maps used during revival.
 * @returns The revived object graph, typed as `T` by the caller.
 */
export async function load<T>(
  text: string,
  mappings?: {
    secretsMap?: SecretMap;
    optionalImportsMap?: OptionalImportMap;
    optionalImportEntrypoints?: string[];
    importMap?: Record<string, unknown>;
  }
): Promise<T> {
  const json = JSON.parse(text);
  return reviver.call({ ...mappings }, json) as Promise<T>;
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/caches/base.ts
import { insecureHash } from "../utils/hash.js";
import type { Generation, ChatGeneration } from "../outputs.js";
import { mapStoredMessageToChatMessage } from "../messages/utils.js";
import { type StoredGeneration } from "../messages/base.js";

/**
 * This cache key should be consistent across all versions of LangChain.
 * It is currently NOT consistent across versions of LangChain.
 *
 * A huge benefit of having a remote cache (like redis) is that you can
 * access the cache from different processes/machines. The allows you to
 * separate concerns and scale horizontally.
 *
 * TODO: Make cache key consistent across versions of LangChain.
 */
export const getCacheKey = (...strings: string[]): string =>
  insecureHash(strings.join("_"));

/**
 * Rebuilds a generation from its stored form, rehydrating the chat
 * message when one was stored alongside the text.
 */
export function deserializeStoredGeneration(
  storedGeneration: StoredGeneration
) {
  const { text, message } = storedGeneration;
  return message === undefined
    ? { text }
    : { text, message: mapStoredMessageToChatMessage(message) };
}

/**
 * Converts a generation into its storable form; chat generations also
 * persist their message as a plain dict.
 */
export function serializeGeneration(generation: Generation) {
  const stored: StoredGeneration = { text: generation.text };
  const maybeMessage = (generation as ChatGeneration).message;
  if (maybeMessage !== undefined) {
    stored.message = maybeMessage.toDict();
  }
  return stored;
}

/**
 * Base class for all caches. All caches should extend this class.
 */
export abstract class BaseCache<T = Generation[]> {
  abstract lookup(prompt: string, llmKey: string): Promise<T | null>;

  abstract update(prompt: string, llmKey: string, value: T): Promise<void>;
}

// Shared backing store for InMemoryCache.global().
const GLOBAL_MAP = new Map();

/**
 * A cache for storing LLM generations that stores data in memory.
 */
export class InMemoryCache<T = Generation[]> extends BaseCache<T> {
  private cache: Map<string, T>;

  constructor(map?: Map<string, T>) {
    super();
    this.cache = map ?? new Map();
  }

  /**
   * Retrieves data from the cache using a prompt and an LLM key. If the
   * data is not found, it returns null.
   * @param prompt The prompt used to find the data.
   * @param llmKey The LLM key used to find the data.
   * @returns The data corresponding to the prompt and LLM key, or null if not found.
   */
  lookup(prompt: string, llmKey: string): Promise<T | null> {
    const hit = this.cache.get(getCacheKey(prompt, llmKey));
    return Promise.resolve(hit === undefined ? null : hit);
  }

  /**
   * Updates the cache with new data using a prompt and an LLM key.
   * @param prompt The prompt used to store the data.
   * @param llmKey The LLM key used to store the data.
   * @param value The data to be stored.
   */
  async update(prompt: string, llmKey: string, value: T): Promise<void> {
    this.cache.set(getCacheKey(prompt, llmKey), value);
  }

  /**
   * Returns a global instance of InMemoryCache using a predefined global
   * map as the initial cache.
   * @returns A global instance of InMemoryCache.
   */
  static global(): InMemoryCache {
    return new InMemoryCache(GLOBAL_MAP);
  }
}
0
lc_public_repos/langchainjs/langchain-core/src/caches
lc_public_repos/langchainjs/langchain-core/src/caches/tests/in_memory_cache.test.ts
import { test, expect } from "@jest/globals";
import { MessageContentComplex } from "../../messages/base.js";
import { InMemoryCache } from "../base.js";

// Round-trip smoke tests for InMemoryCache: store under a (prompt, llmKey)
// pair and read the same pair back.

test("InMemoryCache works", async () => {
  const cache = new InMemoryCache();
  await cache.update("prompt", "key1", [
    {
      text: "text1",
    },
  ]);
  const result = await cache.lookup("prompt", "key1");
  expect(result).toBeDefined();
  if (!result) {
    return;
  }
  expect(result[0].text).toBe("text1");
});

test("InMemoryCache works with complex message types", async () => {
  // The cache is generic; verify it preserves non-default value types too.
  const cache = new InMemoryCache<MessageContentComplex[]>();
  await cache.update("prompt", "key1", [
    {
      type: "text",
      text: "text1",
    },
  ]);
  const result = await cache.lookup("prompt", "key1");
  expect(result).toBeDefined();
  if (!result) {
    return;
  }
  expect(result[0]).toEqual({
    type: "text",
    text: "text1",
  });
});
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/image.ts
import { MessageContent, MessageContentComplex } from "../messages/index.js"; import { ImagePromptValue, ImageContent } from "../prompt_values.js"; import type { InputValues, PartialValues } from "../utils/types/index.js"; import { BasePromptTemplate, BasePromptTemplateInput, TypedPromptInputValues, } from "./base.js"; import { TemplateFormat, checkValidTemplate, renderTemplate, } from "./template.js"; /** * Inputs to create a {@link ImagePromptTemplate} * @augments BasePromptTemplateInput */ export interface ImagePromptTemplateInput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any PartialVariableName extends string = any > extends BasePromptTemplateInput<RunInput, PartialVariableName> { /** * The prompt template */ template: Record<string, unknown>; /** * The format of the prompt template. Options are 'f-string' * * @defaultValue 'f-string' */ templateFormat?: TemplateFormat; /** * Whether or not to try validating the template on initialization * * @defaultValue `true` */ validateTemplate?: boolean; /** * Additional fields which should be included inside * the message content array if using a complex message * content. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any additionalContentFields?: MessageContentComplex; } /** * An image prompt template for a multimodal model. 
*/ export class ImagePromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any PartialVariableName extends string = any > extends BasePromptTemplate<RunInput, ImagePromptValue, PartialVariableName> { static lc_name() { return "ImagePromptTemplate"; } lc_namespace = ["langchain_core", "prompts", "image"]; template: Record<string, unknown>; templateFormat: TemplateFormat = "f-string"; validateTemplate = true; /** * Additional fields which should be included inside * the message content array if using a complex message * content. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any additionalContentFields?: MessageContentComplex; constructor(input: ImagePromptTemplateInput<RunInput, PartialVariableName>) { super(input); this.template = input.template; this.templateFormat = input.templateFormat ?? this.templateFormat; this.validateTemplate = input.validateTemplate ?? this.validateTemplate; this.additionalContentFields = input.additionalContentFields; if (this.validateTemplate) { let totalInputVariables: string[] = this.inputVariables; if (this.partialVariables) { totalInputVariables = totalInputVariables.concat( Object.keys(this.partialVariables) ); } checkValidTemplate( [ { type: "image_url", image_url: this.template }, ] as unknown as MessageContent, this.templateFormat, totalInputVariables ); } } _getPromptType(): "prompt" { return "prompt"; } /** * Partially applies values to the prompt template. * @param values The values to be partially applied to the prompt template. * @returns A new instance of ImagePromptTemplate with the partially applied values. 
*/ async partial<NewPartialVariableName extends string>( values: PartialValues<NewPartialVariableName> ) { const newInputVariables = this.inputVariables.filter( (iv) => !(iv in values) ) as Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>[]; const newPartialVariables = { ...(this.partialVariables ?? {}), ...values, } as PartialValues<PartialVariableName | NewPartialVariableName>; const promptDict = { ...this, inputVariables: newInputVariables, partialVariables: newPartialVariables, }; return new ImagePromptTemplate< InputValues< Exclude<Extract<keyof RunInput, string>, NewPartialVariableName> > >(promptDict); } /** * Formats the prompt template with the provided values. * @param values The values to be used to format the prompt template. * @returns A promise that resolves to a string which is the formatted prompt. */ async format<FormatOutput = ImageContent>( values: TypedPromptInputValues<RunInput> ): Promise<FormatOutput> { // eslint-disable-next-line @typescript-eslint/no-explicit-any const formatted: Record<string, any> = {}; for (const [key, value] of Object.entries(this.template)) { if (typeof value === "string") { formatted[key] = renderTemplate(value, this.templateFormat, values); } else { formatted[key] = value; } } const url = values.url || formatted.url; const detail = values.detail || formatted.detail; if (!url) { throw new Error("Must provide either an image URL."); } if (typeof url !== "string") { throw new Error("url must be a string."); } const output: ImageContent = { url }; if (detail) { output.detail = detail; } return output as FormatOutput; } /** * Formats the prompt given the input values and returns a formatted * prompt value. * @param values The input values to format the prompt. * @returns A Promise that resolves to a formatted prompt value. 
*/ async formatPromptValue( values: TypedPromptInputValues<RunInput> ): Promise<ImagePromptValue> { const formattedPrompt = await this.format(values); return new ImagePromptValue(formattedPrompt); } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/chat.ts
// Default generic "any" values are for backwards compatibility. // Replace with "string" when we are comfortable with a breaking change. import type { BaseCallbackConfig } from "../callbacks/manager.js"; import { AIMessage, HumanMessage, SystemMessage, BaseMessage, ChatMessage, type BaseMessageLike, coerceMessageLikeToMessage, isBaseMessage, MessageContent, MessageContentComplex, } from "../messages/index.js"; import { type ChatPromptValueInterface, ChatPromptValue, } from "../prompt_values.js"; import type { InputValues, PartialValues } from "../utils/types/index.js"; import { Runnable } from "../runnables/base.js"; import { BaseStringPromptTemplate } from "./string.js"; import { BasePromptTemplate, type BasePromptTemplateInput, type TypedPromptInputValues, } from "./base.js"; import { PromptTemplate, type ParamsFromFString, PromptTemplateInput, ExtractedFStringParams, } from "./prompt.js"; import { ImagePromptTemplate } from "./image.js"; import { ParsedTemplateNode, TemplateFormat, parseFString, parseMustache, } from "./template.js"; import { addLangChainErrorFields } from "../errors/index.js"; /** * Abstract class that serves as a base for creating message prompt * templates. It defines how to format messages for different roles in a * conversation. */ export abstract class BaseMessagePromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, RunOutput extends BaseMessage[] = BaseMessage[] > extends Runnable<RunInput, RunOutput> { lc_namespace = ["langchain_core", "prompts", "chat"]; lc_serializable = true; abstract inputVariables: Array<Extract<keyof RunInput, string>>; /** * Method that takes an object of TypedPromptInputValues and returns a * promise that resolves to an array of BaseMessage instances. 
* @param values Object of TypedPromptInputValues * @returns Formatted array of BaseMessages */ abstract formatMessages( values: TypedPromptInputValues<RunInput> ): Promise<RunOutput>; /** * Calls the formatMessages method with the provided input and options. * @param input Input for the formatMessages method * @param options Optional BaseCallbackConfig * @returns Formatted output messages */ async invoke( input: RunInput, options?: BaseCallbackConfig ): Promise<RunOutput> { return this._callWithConfig( (input: RunInput) => this.formatMessages(input), input, { ...options, runType: "prompt" } ); } } /** * Interface for the fields of a MessagePlaceholder. */ export interface MessagesPlaceholderFields<T extends string> { variableName: T; optional?: boolean; } /** * Class that represents a placeholder for messages in a chat prompt. It * extends the BaseMessagePromptTemplate. */ export class MessagesPlaceholder< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any > extends BaseMessagePromptTemplate<RunInput> implements MessagesPlaceholderFields<Extract<keyof RunInput, string>> { static lc_name() { return "MessagesPlaceholder"; } variableName: Extract<keyof RunInput, string>; optional: boolean; constructor(variableName: Extract<keyof RunInput, string>); constructor( fields: MessagesPlaceholderFields<Extract<keyof RunInput, string>> ); constructor( fields: | Extract<keyof RunInput, string> | MessagesPlaceholderFields<Extract<keyof RunInput, string>> ) { if (typeof fields === "string") { // eslint-disable-next-line no-param-reassign fields = { variableName: fields }; } super(fields); this.variableName = fields.variableName; this.optional = fields.optional ?? 
false; } get inputVariables() { return [this.variableName]; } async formatMessages( values: TypedPromptInputValues<RunInput> ): Promise<BaseMessage[]> { const input = values[this.variableName]; if (this.optional && !input) { return []; } else if (!input) { const error = new Error( `Field "${this.variableName}" in prompt uses a MessagesPlaceholder, which expects an array of BaseMessages as an input value. Received: undefined` ); error.name = "InputFormatError"; throw error; } let formattedMessages; try { if (Array.isArray(input)) { formattedMessages = input.map(coerceMessageLikeToMessage); } else { formattedMessages = [coerceMessageLikeToMessage(input)]; } // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { const readableInput = typeof input === "string" ? input : JSON.stringify(input, null, 2); const error = new Error( [ `Field "${this.variableName}" in prompt uses a MessagesPlaceholder, which expects an array of BaseMessages or coerceable values as input.`, `Received value: ${readableInput}`, `Additional message: ${e.message}`, ].join("\n\n") ); error.name = "InputFormatError"; // eslint-disable-next-line @typescript-eslint/no-explicit-any (error as any).lc_error_code = e.lc_error_code; throw error; } return formattedMessages; } } /** * Interface for the fields of a MessageStringPromptTemplate. */ export interface MessageStringPromptTemplateFields< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends InputValues = any > { prompt: BaseStringPromptTemplate<T, string>; } /** * Abstract class that serves as a base for creating message string prompt * templates. It extends the BaseMessagePromptTemplate. 
*/ export abstract class BaseMessageStringPromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any > extends BaseMessagePromptTemplate<RunInput> { prompt: BaseStringPromptTemplate< InputValues<Extract<keyof RunInput, string>>, string >; constructor( prompt: BaseStringPromptTemplate< InputValues<Extract<keyof RunInput, string>> > ); constructor( fields: MessageStringPromptTemplateFields< InputValues<Extract<keyof RunInput, string>> > ); constructor( fields: | MessageStringPromptTemplateFields< InputValues<Extract<keyof RunInput, string>> > | BaseStringPromptTemplate< InputValues<Extract<keyof RunInput, string>>, string > ) { if (!("prompt" in fields)) { // eslint-disable-next-line no-param-reassign fields = { prompt: fields }; } super(fields); this.prompt = fields.prompt; } get inputVariables() { return this.prompt.inputVariables; } abstract format( values: TypedPromptInputValues<RunInput> ): Promise<BaseMessage>; async formatMessages( values: TypedPromptInputValues<RunInput> ): Promise<BaseMessage[]> { return [await this.format(values)]; } } /** * Abstract class that serves as a base for creating chat prompt * templates. It extends the BasePromptTemplate. 
*/ export abstract class BaseChatPromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any PartialVariableName extends string = any > extends BasePromptTemplate< RunInput, ChatPromptValueInterface, PartialVariableName > { constructor(input: BasePromptTemplateInput<RunInput, PartialVariableName>) { super(input); } abstract formatMessages( values: TypedPromptInputValues<RunInput> ): Promise<BaseMessage[]>; async format(values: TypedPromptInputValues<RunInput>): Promise<string> { return (await this.formatPromptValue(values)).toString(); } async formatPromptValue( values: TypedPromptInputValues<RunInput> ): Promise<ChatPromptValueInterface> { const resultMessages = await this.formatMessages(values); return new ChatPromptValue(resultMessages); } } /** * Interface for the fields of a ChatMessagePromptTemplate. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export interface ChatMessagePromptTemplateFields<T extends InputValues = any> extends MessageStringPromptTemplateFields<T> { role: string; } /** * Class that represents a chat message prompt template. It extends the * BaseMessageStringPromptTemplate. 
*/ export class ChatMessagePromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any > extends BaseMessageStringPromptTemplate<RunInput> { static lc_name() { return "ChatMessagePromptTemplate"; } role: string; constructor( prompt: BaseStringPromptTemplate< InputValues<Extract<keyof RunInput, string>> >, role: string ); constructor( fields: ChatMessagePromptTemplateFields< InputValues<Extract<keyof RunInput, string>> > ); constructor( fields: | ChatMessagePromptTemplateFields< InputValues<Extract<keyof RunInput, string>> > | BaseStringPromptTemplate<InputValues<Extract<keyof RunInput, string>>>, role?: string ) { if (!("prompt" in fields)) { // eslint-disable-next-line no-param-reassign, @typescript-eslint/no-non-null-assertion fields = { prompt: fields, role: role! }; } super(fields); this.role = fields.role; } async format(values: RunInput): Promise<BaseMessage> { return new ChatMessage(await this.prompt.format(values), this.role); } static fromTemplate( template: string, role: string, options?: { templateFormat?: TemplateFormat } ) { return new this( PromptTemplate.fromTemplate(template, { templateFormat: options?.templateFormat, }), role ); } } interface _TextTemplateParam { // eslint-disable-next-line @typescript-eslint/no-explicit-any text?: string | Record<string, any>; } interface _ImageTemplateParam { // eslint-disable-next-line @typescript-eslint/no-explicit-any image_url?: string | Record<string, any>; } type MessageClass = | typeof HumanMessage | typeof AIMessage | typeof SystemMessage; type ChatMessageClass = typeof ChatMessage; interface _StringImageMessagePromptTemplateOptions< Format extends TemplateFormat = TemplateFormat > extends Record<string, unknown> { templateFormat?: Format; } class _StringImageMessagePromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, RunOutput extends BaseMessage[] = BaseMessage[] > extends 
BaseMessagePromptTemplate<RunInput, RunOutput> { lc_namespace = ["langchain_core", "prompts", "chat"]; lc_serializable = true; inputVariables: Array<Extract<keyof RunInput, string>> = []; additionalOptions: _StringImageMessagePromptTemplateOptions = {}; prompt: | BaseStringPromptTemplate< InputValues<Extract<keyof RunInput, string>>, string > | Array< | BaseStringPromptTemplate< InputValues<Extract<keyof RunInput, string>>, string > | ImagePromptTemplate< InputValues<Extract<keyof RunInput, string>>, string > | MessageStringPromptTemplateFields< InputValues<Extract<keyof RunInput, string>> > >; protected messageClass?: MessageClass; static _messageClass(): MessageClass { throw new Error( "Can not invoke _messageClass from inside _StringImageMessagePromptTemplate" ); } // ChatMessage contains role field, others don't. // Because of this, we have a separate class property for ChatMessage. protected chatMessageClass?: ChatMessageClass; constructor( /** @TODO When we come up with a better way to type prompt templates, fix this */ // eslint-disable-next-line @typescript-eslint/no-explicit-any fields: any, additionalOptions?: _StringImageMessagePromptTemplateOptions ) { if (!("prompt" in fields)) { // eslint-disable-next-line no-param-reassign fields = { prompt: fields }; } super(fields); this.prompt = fields.prompt; if (Array.isArray(this.prompt)) { let inputVariables: Extract<keyof RunInput, string>[] = []; this.prompt.forEach((prompt) => { if ("inputVariables" in prompt) { inputVariables = inputVariables.concat(prompt.inputVariables); } }); this.inputVariables = inputVariables; } else { this.inputVariables = this.prompt.inputVariables; } this.additionalOptions = additionalOptions ?? 
this.additionalOptions; } createMessage(content: MessageContent) { // eslint-disable-next-line @typescript-eslint/no-explicit-any const constructor = this.constructor as any; if (constructor._messageClass()) { const MsgClass = constructor._messageClass(); return new MsgClass({ content }); } else if (constructor.chatMessageClass) { const MsgClass = constructor.chatMessageClass(); // Assuming ChatMessage constructor also takes a content argument return new MsgClass({ content, role: this.getRoleFromMessageClass(MsgClass.lc_name()), }); } else { throw new Error("No message class defined"); } } getRoleFromMessageClass(name: string) { switch (name) { case "HumanMessage": return "human"; case "AIMessage": return "ai"; case "SystemMessage": return "system"; case "ChatMessage": return "chat"; default: throw new Error("Invalid message class name"); } } static fromTemplate( template: string | Array<string | _TextTemplateParam | _ImageTemplateParam>, additionalOptions?: _StringImageMessagePromptTemplateOptions ) { if (typeof template === "string") { return new this(PromptTemplate.fromTemplate(template, additionalOptions)); } const prompt: Array< PromptTemplate<InputValues> | ImagePromptTemplate<InputValues> > = []; for (const item of template) { if ( typeof item === "string" || (typeof item === "object" && "text" in item) ) { let text = ""; if (typeof item === "string") { text = item; } else if (typeof item.text === "string") { text = item.text ?? ""; } const options = { ...additionalOptions, ...(typeof item !== "string" ? { additionalContentFields: item } : {}), }; prompt.push(PromptTemplate.fromTemplate(text, options)); } else if (typeof item === "object" && "image_url" in item) { let imgTemplate = item.image_url ?? 
""; let imgTemplateObject: ImagePromptTemplate<InputValues>; let inputVariables: string[] = []; if (typeof imgTemplate === "string") { let parsedTemplate: ParsedTemplateNode[]; if (additionalOptions?.templateFormat === "mustache") { parsedTemplate = parseMustache(imgTemplate); } else { parsedTemplate = parseFString(imgTemplate); } const variables = parsedTemplate.flatMap((item) => item.type === "variable" ? [item.name] : [] ); if ((variables?.length ?? 0) > 0) { if (variables.length > 1) { throw new Error( `Only one format variable allowed per image template.\nGot: ${variables}\nFrom: ${imgTemplate}` ); } inputVariables = [variables[0]]; } else { inputVariables = []; } imgTemplate = { url: imgTemplate }; imgTemplateObject = new ImagePromptTemplate<InputValues>({ template: imgTemplate, inputVariables, templateFormat: additionalOptions?.templateFormat, additionalContentFields: item, }); } else if (typeof imgTemplate === "object") { if ("url" in imgTemplate) { let parsedTemplate: ParsedTemplateNode[]; if (additionalOptions?.templateFormat === "mustache") { parsedTemplate = parseMustache(imgTemplate.url); } else { parsedTemplate = parseFString(imgTemplate.url); } inputVariables = parsedTemplate.flatMap((item) => item.type === "variable" ? 
[item.name] : [] ); } else { inputVariables = []; } imgTemplateObject = new ImagePromptTemplate<InputValues>({ template: imgTemplate, inputVariables, templateFormat: additionalOptions?.templateFormat, additionalContentFields: item, }); } else { throw new Error("Invalid image template"); } prompt.push(imgTemplateObject); } } return new this({ prompt, additionalOptions }); } async format(input: TypedPromptInputValues<RunInput>): Promise<BaseMessage> { // eslint-disable-next-line no-instanceof/no-instanceof if (this.prompt instanceof BaseStringPromptTemplate) { const text = await this.prompt.format(input); return this.createMessage(text); } else { const content: MessageContent = []; for (const prompt of this.prompt) { // eslint-disable-next-line @typescript-eslint/no-explicit-any let inputs: Record<string, any> = {}; if (!("inputVariables" in prompt)) { throw new Error( `Prompt ${prompt} does not have inputVariables defined.` ); } for (const item of prompt.inputVariables) { if (!inputs) { inputs = { [item]: input[item] }; } inputs = { ...inputs, [item]: input[item] }; } // eslint-disable-next-line no-instanceof/no-instanceof if (prompt instanceof BaseStringPromptTemplate) { const formatted = await prompt.format( inputs as TypedPromptInputValues<RunInput> ); let additionalContentFields: MessageContentComplex | undefined; if ("additionalContentFields" in prompt) { // eslint-disable-next-line @typescript-eslint/no-explicit-any additionalContentFields = prompt.additionalContentFields as any; } content.push({ ...additionalContentFields, type: "text", text: formatted, }); /** @TODO replace this */ // eslint-disable-next-line no-instanceof/no-instanceof } else if (prompt instanceof ImagePromptTemplate) { const formatted = await prompt.format( inputs as TypedPromptInputValues<RunInput> ); let additionalContentFields: MessageContentComplex | undefined; if ("additionalContentFields" in prompt) { // eslint-disable-next-line @typescript-eslint/no-explicit-any 
additionalContentFields = prompt.additionalContentFields as any; } content.push({ ...additionalContentFields, type: "image_url", image_url: formatted, }); } } return this.createMessage(content); } } async formatMessages(values: RunInput): Promise<RunOutput> { return [await this.format(values)] as BaseMessage[] as RunOutput; } } /** * Class that represents a human message prompt template. It extends the * BaseMessageStringPromptTemplate. * @example * ```typescript * const message = HumanMessagePromptTemplate.fromTemplate("{text}"); * const formatted = await message.format({ text: "Hello world!" }); * * const chatPrompt = ChatPromptTemplate.fromMessages([message]); * const formattedChatPrompt = await chatPrompt.invoke({ * text: "Hello world!", * }); * ``` */ export class HumanMessagePromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any > extends _StringImageMessagePromptTemplate<RunInput> { static _messageClass(): typeof HumanMessage { return HumanMessage; } static lc_name() { return "HumanMessagePromptTemplate"; } } /** * Class that represents an AI message prompt template. It extends the * BaseMessageStringPromptTemplate. */ export class AIMessagePromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any > extends _StringImageMessagePromptTemplate<RunInput> { static _messageClass(): typeof AIMessage { return AIMessage; } static lc_name() { return "AIMessagePromptTemplate"; } } /** * Class that represents a system message prompt template. It extends the * BaseMessageStringPromptTemplate. * @example * ```typescript * const message = SystemMessagePromptTemplate.fromTemplate("{text}"); * const formatted = await message.format({ text: "Hello world!" 
}); * * const chatPrompt = ChatPromptTemplate.fromMessages([message]); * const formattedChatPrompt = await chatPrompt.invoke({ * text: "Hello world!", * }); * ``` */ export class SystemMessagePromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any > extends _StringImageMessagePromptTemplate<RunInput> { static _messageClass(): typeof SystemMessage { return SystemMessage; } static lc_name() { return "SystemMessagePromptTemplate"; } } /** * Interface for the input of a ChatPromptTemplate. */ export interface ChatPromptTemplateInput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any PartialVariableName extends string = any > extends BasePromptTemplateInput<RunInput, PartialVariableName> { /** * The prompt messages */ promptMessages: Array<BaseMessagePromptTemplate | BaseMessage>; /** * Whether to try validating the template on initialization * * @defaultValue `true` */ validateTemplate?: boolean; /** * The formatting method to use on the prompt. 
* @default "f-string" */ templateFormat?: TemplateFormat; } export type BaseMessagePromptTemplateLike = | BaseMessagePromptTemplate | BaseMessageLike; function _isBaseMessagePromptTemplate( baseMessagePromptTemplateLike: BaseMessagePromptTemplateLike ): baseMessagePromptTemplateLike is BaseMessagePromptTemplate { return ( typeof (baseMessagePromptTemplateLike as BaseMessagePromptTemplate) .formatMessages === "function" ); } function _coerceMessagePromptTemplateLike< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, Extra extends Omit< ChatPromptTemplateInput<RunInput>, "inputVariables" | "promptMessages" | "partialVariables" > = Omit< ChatPromptTemplateInput<RunInput>, "inputVariables" | "promptMessages" | "partialVariables" > >( messagePromptTemplateLike: BaseMessagePromptTemplateLike, extra?: Extra ): BaseMessagePromptTemplate | BaseMessage { if ( _isBaseMessagePromptTemplate(messagePromptTemplateLike) || isBaseMessage(messagePromptTemplateLike) ) { return messagePromptTemplateLike; } if ( Array.isArray(messagePromptTemplateLike) && messagePromptTemplateLike[0] === "placeholder" ) { const messageContent = messagePromptTemplateLike[1]; if ( typeof messageContent !== "string" || messageContent[0] !== "{" || messageContent[messageContent.length - 1] !== "}" ) { throw new Error( `Invalid placeholder template: "${messagePromptTemplateLike[1]}". Expected a variable name surrounded by curly braces.` ); } const variableName = messageContent.slice(1, -1); return new MessagesPlaceholder({ variableName, optional: true }); } const message = coerceMessageLikeToMessage(messagePromptTemplateLike); let templateData: | string | ( | string | _TextTemplateParam | _ImageTemplateParam // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any> )[]; if (typeof message.content === "string") { templateData = message.content; } else { // Assuming message.content is an array of complex objects, transform it. 
templateData = message.content.map((item) => { if ("text" in item) { return { ...item, text: item.text }; } else if ("image_url" in item) { return { ...item, image_url: item.image_url }; } else { return item; } }); } if (message._getType() === "human") { return HumanMessagePromptTemplate.fromTemplate(templateData, extra); } else if (message._getType() === "ai") { return AIMessagePromptTemplate.fromTemplate(templateData, extra); } else if (message._getType() === "system") { return SystemMessagePromptTemplate.fromTemplate(templateData, extra); } else if (ChatMessage.isInstance(message)) { return ChatMessagePromptTemplate.fromTemplate( message.content as string, message.role, extra ); } else { throw new Error( `Could not coerce message prompt template from input. Received message type: "${message._getType()}".` ); } } function isMessagesPlaceholder( x: BaseMessagePromptTemplate | BaseMessage ): x is MessagesPlaceholder { // eslint-disable-next-line @typescript-eslint/no-explicit-any return (x.constructor as any).lc_name() === "MessagesPlaceholder"; } /** * Class that represents a chat prompt. It extends the * BaseChatPromptTemplate and uses an array of BaseMessagePromptTemplate * instances to format a series of messages for a conversation. 
* @example * ```typescript * const message = SystemMessagePromptTemplate.fromTemplate("{text}"); * const chatPrompt = ChatPromptTemplate.fromMessages([ * ["ai", "You are a helpful assistant."], * message, * ]); * const formattedChatPrompt = await chatPrompt.invoke({ * text: "Hello world!", * }); * ``` */ export class ChatPromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any PartialVariableName extends string = any > extends BaseChatPromptTemplate<RunInput, PartialVariableName> implements ChatPromptTemplateInput<RunInput, PartialVariableName> { static lc_name() { return "ChatPromptTemplate"; } get lc_aliases(): Record<string, string> { return { promptMessages: "messages", }; } promptMessages: Array<BaseMessagePromptTemplate | BaseMessage>; validateTemplate = true; templateFormat: TemplateFormat = "f-string"; constructor(input: ChatPromptTemplateInput<RunInput, PartialVariableName>) { super(input); // If input is mustache and validateTemplate is not defined, set it to false if ( input.templateFormat === "mustache" && input.validateTemplate === undefined ) { this.validateTemplate = false; } Object.assign(this, input); if (this.validateTemplate) { const inputVariablesMessages = new Set<string>(); for (const promptMessage of this.promptMessages) { // eslint-disable-next-line no-instanceof/no-instanceof if (promptMessage instanceof BaseMessage) continue; for (const inputVariable of promptMessage.inputVariables) { inputVariablesMessages.add(inputVariable); } } const totalInputVariables = this.inputVariables as string[]; const inputVariablesInstance = new Set( this.partialVariables ? 
totalInputVariables.concat(Object.keys(this.partialVariables)) : totalInputVariables ); const difference = new Set( [...inputVariablesInstance].filter( (x) => !inputVariablesMessages.has(x) ) ); if (difference.size > 0) { throw new Error( `Input variables \`${[ ...difference, ]}\` are not used in any of the prompt messages.` ); } const otherDifference = new Set( [...inputVariablesMessages].filter( (x) => !inputVariablesInstance.has(x) ) ); if (otherDifference.size > 0) { throw new Error( `Input variables \`${[ ...otherDifference, ]}\` are used in prompt messages but not in the prompt template.` ); } } } _getPromptType(): "chat" { return "chat"; } private async _parseImagePrompts( message: BaseMessage, inputValues: InputValues< PartialVariableName | Extract<keyof RunInput, string> > ): Promise<BaseMessage> { if (typeof message.content === "string") { return message; } const formattedMessageContent = await Promise.all( message.content.map(async (item) => { if (item.type !== "image_url") { return item; } let imageUrl = ""; if (typeof item.image_url === "string") { imageUrl = item.image_url; } else { imageUrl = item.image_url.url; } const promptTemplatePlaceholder = PromptTemplate.fromTemplate( imageUrl, { templateFormat: this.templateFormat, } ); const formattedUrl = await promptTemplatePlaceholder.format( inputValues ); if (typeof item.image_url !== "string" && "url" in item.image_url) { // eslint-disable-next-line no-param-reassign item.image_url.url = formattedUrl; } else { // eslint-disable-next-line no-param-reassign item.image_url = formattedUrl; } return item; }) ); // eslint-disable-next-line no-param-reassign message.content = formattedMessageContent; return message; } async formatMessages( values: TypedPromptInputValues<RunInput> ): Promise<BaseMessage[]> { const allValues = await this.mergePartialAndUserVariables(values); let resultMessages: BaseMessage[] = []; for (const promptMessage of this.promptMessages) { // eslint-disable-next-line 
no-instanceof/no-instanceof if (promptMessage instanceof BaseMessage) { resultMessages.push( await this._parseImagePrompts(promptMessage, allValues) ); } else { const inputValues = promptMessage.inputVariables.reduce( (acc, inputVariable) => { if ( !(inputVariable in allValues) && !(isMessagesPlaceholder(promptMessage) && promptMessage.optional) ) { const error = addLangChainErrorFields( new Error( `Missing value for input variable \`${inputVariable.toString()}\`` ), "INVALID_PROMPT_INPUT" ); throw error; } acc[inputVariable] = allValues[inputVariable]; return acc; }, {} as InputValues ); const message = await promptMessage.formatMessages(inputValues); resultMessages = resultMessages.concat(message); } } return resultMessages; } async partial<NewPartialVariableName extends string>( values: PartialValues<NewPartialVariableName> ) { // This is implemented in a way it doesn't require making // BaseMessagePromptTemplate aware of .partial() const newInputVariables = this.inputVariables.filter( (iv) => !(iv in values) ) as Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>[]; const newPartialVariables = { ...(this.partialVariables ?? 
{}), ...values, } as PartialValues<PartialVariableName | NewPartialVariableName>; const promptDict = { ...this, inputVariables: newInputVariables, partialVariables: newPartialVariables, }; return new ChatPromptTemplate< InputValues< Exclude<Extract<keyof RunInput, string>, NewPartialVariableName> > >(promptDict); } /** * Load prompt template from a template f-string */ static fromTemplate< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol, T extends string = string >( template: T, options?: Omit< PromptTemplateInput<RunInput, string, "f-string">, "template" | "inputVariables" > ): ChatPromptTemplate<ExtractedFStringParams<T, RunInput>>; static fromTemplate< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol, T extends string = string >( template: T, options?: Omit< PromptTemplateInput<RunInput, string>, "template" | "inputVariables" > ): ChatPromptTemplate<ExtractedFStringParams<T, RunInput>>; static fromTemplate< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol, T extends string = string >( template: T, options?: Omit< PromptTemplateInput<RunInput, string, "mustache">, "template" | "inputVariables" > ): ChatPromptTemplate<InputValues>; static fromTemplate< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol, T extends string = string >( template: T, options?: Omit< PromptTemplateInput<RunInput, string, TemplateFormat>, "template" | "inputVariables" > ): ChatPromptTemplate<ExtractedFStringParams<T, RunInput> | InputValues> { const prompt = PromptTemplate.fromTemplate(template, options); const humanTemplate = new HumanMessagePromptTemplate({ prompt }); return this.fromMessages< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends Symbol ? 
ParamsFromFString<T> : RunInput >([humanTemplate]); } /** * Create a chat model-specific prompt from individual chat messages * or message-like tuples. * @param promptMessages Messages to be passed to the chat model * @returns A new ChatPromptTemplate */ static fromMessages< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, Extra extends ChatPromptTemplateInput<RunInput> = ChatPromptTemplateInput<RunInput> >( promptMessages: ( | ChatPromptTemplate<InputValues, string> | BaseMessagePromptTemplateLike )[], extra?: Omit< Extra, "inputVariables" | "promptMessages" | "partialVariables" > ): ChatPromptTemplate<RunInput> { const flattenedMessages = promptMessages.reduce( (acc: Array<BaseMessagePromptTemplate | BaseMessage>, promptMessage) => acc.concat( // eslint-disable-next-line no-instanceof/no-instanceof promptMessage instanceof ChatPromptTemplate ? promptMessage.promptMessages : [ _coerceMessagePromptTemplateLike< RunInput, Omit< Extra, "inputVariables" | "promptMessages" | "partialVariables" > >(promptMessage, extra), ] ), [] ); const flattenedPartialVariables = promptMessages.reduce( (acc, promptMessage) => // eslint-disable-next-line no-instanceof/no-instanceof promptMessage instanceof ChatPromptTemplate ? 
Object.assign(acc, promptMessage.partialVariables) : acc, Object.create(null) as PartialValues ); const inputVariables = new Set<string>(); for (const promptMessage of flattenedMessages) { // eslint-disable-next-line no-instanceof/no-instanceof if (promptMessage instanceof BaseMessage) continue; for (const inputVariable of promptMessage.inputVariables) { if (inputVariable in flattenedPartialVariables) { continue; } inputVariables.add(inputVariable); } } return new this<RunInput>({ ...extra, inputVariables: [...inputVariables] as Extract<keyof RunInput, string>[], promptMessages: flattenedMessages, partialVariables: flattenedPartialVariables, templateFormat: extra?.templateFormat, }); } /** @deprecated Renamed to .fromMessages */ // eslint-disable-next-line @typescript-eslint/no-explicit-any static fromPromptMessages<RunInput extends InputValues = any>( promptMessages: ( | ChatPromptTemplate<InputValues, string> | BaseMessagePromptTemplateLike )[] ): ChatPromptTemplate<RunInput> { return this.fromMessages(promptMessages); } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/string.ts
// Default generic "any" values are for backwards compatibility. // Replace with "string" when we are comfortable with a breaking change. import type { InputValues } from "../utils/types/index.js"; import { type StringPromptValueInterface, StringPromptValue, } from "../prompt_values.js"; import { BasePromptTemplate, type TypedPromptInputValues } from "./base.js"; /** * Base class for string prompt templates. It extends the * BasePromptTemplate class and overrides the formatPromptValue method to * return a StringPromptValue. */ export abstract class BaseStringPromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any PartialVariableName extends string = any > extends BasePromptTemplate< RunInput, StringPromptValueInterface, PartialVariableName > { /** * Formats the prompt given the input values and returns a formatted * prompt value. * @param values The input values to format the prompt. * @returns A Promise that resolves to a formatted prompt value. */ async formatPromptValue( values: TypedPromptInputValues<RunInput> ): Promise<StringPromptValueInterface> { const formattedPrompt = await this.format(values); return new StringPromptValue(formattedPrompt); } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/structured.ts
import { ChatPromptValueInterface } from "../prompt_values.js";
import {
  RunnableLike,
  Runnable,
  type RunnableBinding,
} from "../runnables/base.js";
import { RunnableConfig } from "../runnables/config.js";
import { InputValues } from "../utils/types/index.js";
import {
  BaseMessagePromptTemplateLike,
  ChatPromptTemplate,
  ChatPromptTemplateInput,
} from "./chat.js";

/**
 * Duck-typed guard: true when `x` exposes a callable `withStructuredOutput`
 * method (i.e. looks like a chat model supporting structured output).
 */
function isWithStructuredOutput(
  x: unknown
  // eslint-disable-next-line @typescript-eslint/ban-types
): x is {
  withStructuredOutput: (...arg: unknown[]) => Runnable;
} {
  return (
    typeof x === "object" &&
    x != null &&
    "withStructuredOutput" in x &&
    typeof x.withStructuredOutput === "function"
  );
}

/**
 * Guard for RunnableBinding based on its serialized `lc_id` path rather
 * than `instanceof`, so it works across module instances.
 */
function isRunnableBinding(x: unknown): x is RunnableBinding<unknown, unknown> {
  return (
    typeof x === "object" &&
    x != null &&
    "lc_id" in x &&
    Array.isArray(x.lc_id) &&
    x.lc_id.join("/") === "langchain_core/runnables/RunnableBinding"
  );
}

/**
 * Interface for the input of a ChatPromptTemplate.
 */
export interface StructuredPromptInput<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunInput extends InputValues = any,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  PartialVariableName extends string = any
> extends ChatPromptTemplateInput<RunInput, PartialVariableName> {
  // Schema describing the structured output the downstream model must emit.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  schema: Record<string, any>;
}

/**
 * Chat prompt template that carries an output schema and, when piped,
 * automatically applies `withStructuredOutput(schema)` to the model.
 */
export class StructuredPrompt<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunInput extends InputValues = any,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    PartialVariableName extends string = any
  >
  extends ChatPromptTemplate<RunInput, PartialVariableName>
  implements StructuredPromptInput<RunInput, PartialVariableName>
{
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  schema: Record<string, any>;

  lc_namespace = ["langchain_core", "prompts", "structured"];

  get lc_aliases(): Record<string, string> {
    // Serialize `schema` under "schema_" for cross-language compatibility.
    return {
      ...super.lc_aliases,
      schema: "schema_",
    };
  }

  constructor(input: StructuredPromptInput<RunInput, PartialVariableName>) {
    super(input);
    this.schema = input.schema;
  }

  /**
   * Pipe this prompt into a language model, applying this prompt's schema
   * via `withStructuredOutput`. Accepts either a model directly or a
   * RunnableBinding wrapping one (binding kwargs/config are re-applied).
   * @throws If the target does not support `withStructuredOutput()`.
   */
  pipe<NewRunOutput>(
    coerceable: RunnableLike<ChatPromptValueInterface, NewRunOutput>
  ): Runnable<RunInput, Exclude<NewRunOutput, Error>, RunnableConfig> {
    if (isWithStructuredOutput(coerceable)) {
      return super.pipe(coerceable.withStructuredOutput(this.schema));
    }
    if (
      isRunnableBinding(coerceable) &&
      isWithStructuredOutput(coerceable.bound)
    ) {
      // Unwrap the binding, apply the schema to the bound model, then
      // restore the binding's kwargs and config.
      return super.pipe(
        coerceable.bound
          .withStructuredOutput(this.schema)
          .bind(coerceable.kwargs ?? {})
          .withConfig(coerceable.config)
      );
    }
    throw new Error(
      `Structured prompts need to be piped to a language model that supports the "withStructuredOutput()" method.`
    );
  }

  /**
   * Build a StructuredPrompt from messages plus an output schema.
   * @param promptMessages Messages to be passed to the chat model
   * @param schema Output schema to enforce on the downstream model
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static fromMessagesAndSchema<RunInput extends InputValues = any>(
    promptMessages: (
      | ChatPromptTemplate<InputValues, string>
      | BaseMessagePromptTemplateLike
    )[],
    schema: StructuredPromptInput["schema"]
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
  ): ChatPromptTemplate<RunInput, any> {
    return StructuredPrompt.fromMessages<
      RunInput,
      StructuredPromptInput<RunInput>
    >(promptMessages, { schema });
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/template.ts
import mustache from "mustache"; import { MessageContent } from "../messages/index.js"; import type { InputValues } from "../utils/types/index.js"; import { addLangChainErrorFields } from "../errors/index.js"; function configureMustache() { // Use unescaped HTML // https://github.com/janl/mustache.js?tab=readme-ov-file#variables mustache.escape = (text) => text; } /** * Type that specifies the format of a template. */ export type TemplateFormat = "f-string" | "mustache"; /** * Type that represents a node in a parsed format string. It can be either * a literal text or a variable name. */ export type ParsedTemplateNode = | { type: "literal"; text: string } | { type: "variable"; name: string }; /** * Alias for `ParsedTemplateNode` since it is the same for * both f-string and mustache templates. */ export type ParsedFStringNode = ParsedTemplateNode; export const parseFString = (template: string): ParsedTemplateNode[] => { // Core logic replicated from internals of pythons built in Formatter class. 
// https://github.com/python/cpython/blob/135ec7cefbaffd516b77362ad2b2ad1025af462e/Objects/stringlib/unicode_format.h#L700-L706 const chars = template.split(""); const nodes: ParsedTemplateNode[] = []; const nextBracket = (bracket: "}" | "{" | "{}", start: number) => { for (let i = start; i < chars.length; i += 1) { if (bracket.includes(chars[i])) { return i; } } return -1; }; let i = 0; while (i < chars.length) { if (chars[i] === "{" && i + 1 < chars.length && chars[i + 1] === "{") { nodes.push({ type: "literal", text: "{" }); i += 2; } else if ( chars[i] === "}" && i + 1 < chars.length && chars[i + 1] === "}" ) { nodes.push({ type: "literal", text: "}" }); i += 2; } else if (chars[i] === "{") { const j = nextBracket("}", i); if (j < 0) { throw new Error("Unclosed '{' in template."); } nodes.push({ type: "variable", name: chars.slice(i + 1, j).join(""), }); i = j + 1; } else if (chars[i] === "}") { throw new Error("Single '}' in template."); } else { const next = nextBracket("{}", i); const text = (next < 0 ? chars.slice(i) : chars.slice(i, next)).join(""); nodes.push({ type: "literal", text }); i = next < 0 ? chars.length : next; } } return nodes; }; /** * Convert the result of mustache.parse into an array of ParsedTemplateNode, * to make it compatible with other LangChain string parsing template formats. * * @param {mustache.TemplateSpans} template The result of parsing a mustache template with the mustache.js library. * @returns {ParsedTemplateNode[]} */ const mustacheTemplateToNodes = ( template: mustache.TemplateSpans ): ParsedTemplateNode[] => template.map((temp) => { if (temp[0] === "name") { const name = temp[1].includes(".") ? temp[1].split(".")[0] : temp[1]; return { type: "variable", name }; } else if (["#", "&", "^", ">"].includes(temp[0])) { // # represents a section, "&" represents an unescaped variable. // These should both be considered variables. 
return { type: "variable", name: temp[1] }; } else { return { type: "literal", text: temp[1] }; } }); export const parseMustache = (template: string) => { configureMustache(); const parsed = mustache.parse(template); return mustacheTemplateToNodes(parsed); }; export const interpolateFString = (template: string, values: InputValues) => { return parseFString(template).reduce((res, node) => { if (node.type === "variable") { if (node.name in values) { const stringValue = typeof values[node.name] === "string" ? values[node.name] : JSON.stringify(values[node.name]); return res + stringValue; } throw new Error(`(f-string) Missing value for input ${node.name}`); } return res + node.text; }, ""); }; export const interpolateMustache = (template: string, values: InputValues) => { configureMustache(); return mustache.render(template, values); }; /** * Type that represents a function that takes a template string and a set * of input values, and returns a string where all variables in the * template have been replaced with their corresponding values. */ type Interpolator = (template: string, values: InputValues) => string; /** * Type that represents a function that takes a template string and * returns an array of `ParsedTemplateNode`. 
*/ type Parser = (template: string) => ParsedTemplateNode[]; export const DEFAULT_FORMATTER_MAPPING: Record<TemplateFormat, Interpolator> = { "f-string": interpolateFString, mustache: interpolateMustache, }; export const DEFAULT_PARSER_MAPPING: Record<TemplateFormat, Parser> = { "f-string": parseFString, mustache: parseMustache, }; export const renderTemplate = ( template: string, templateFormat: TemplateFormat, inputValues: InputValues ) => { try { return DEFAULT_FORMATTER_MAPPING[templateFormat](template, inputValues); } catch (e) { const error = addLangChainErrorFields(e, "INVALID_PROMPT_INPUT"); throw error; } }; export const parseTemplate = ( template: string, templateFormat: TemplateFormat ) => DEFAULT_PARSER_MAPPING[templateFormat](template); export const checkValidTemplate = ( template: MessageContent, templateFormat: TemplateFormat, inputVariables: string[] ) => { if (!(templateFormat in DEFAULT_FORMATTER_MAPPING)) { const validFormats = Object.keys(DEFAULT_FORMATTER_MAPPING); throw new Error(`Invalid template format. Got \`${templateFormat}\`; should be one of ${validFormats}`); } try { const dummyInputs: InputValues = inputVariables.reduce((acc, v) => { acc[v] = "foo"; return acc; }, {} as Record<string, string>); if (Array.isArray(template)) { template.forEach((message) => { if (message.type === "text") { renderTemplate(message.text, templateFormat, dummyInputs); } else if (message.type === "image_url") { if (typeof message.image_url === "string") { renderTemplate(message.image_url, templateFormat, dummyInputs); } else { const imageUrl = message.image_url.url; renderTemplate(imageUrl, templateFormat, dummyInputs); } } else { throw new Error( `Invalid message template received. ${JSON.stringify( message, null, 2 )}` ); } }); } else { renderTemplate(template, templateFormat, dummyInputs); } // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { throw new Error(`Invalid prompt schema: ${e.message}`); } };
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/serde.ts
import { MessageContent } from "../messages/index.js";
import type { TemplateFormat } from "./template.js";

/**
 * Represents a serialized version of a prompt template. This type is used
 * to create dynamic prompts for language models. It contains an optional
 * `_type` field which, if present, is set to 'prompt'. It also includes
 * `input_variables`, an array of strings representing the variables to be
 * used in the prompt, an optional `template_format` specifying the format
 * of the template, and an optional `template` which is the actual
 * template string.
 */
export type SerializedPromptTemplate = {
  _type?: "prompt";
  input_variables: string[];
  template_format?: TemplateFormat;
  template?: MessageContent;
};

/**
 * Represents a serialized version of a few-shot template. This type
 * includes an `_type` field set to 'few_shot', `input_variables` which
 * are an array of strings representing the variables to be used in the
 * template, `examples` which can be a string or an array of Example
 * objects, an optional `example_prompt` which is a
 * SerializedPromptTemplate, `example_separator` which is a string,
 * optional `prefix` and `suffix` strings, and `template_format` which
 * specifies the format of the template.
 */
export type SerializedFewShotTemplate = {
  _type: "few_shot";
  input_variables: string[];
  // NOTE(review): typed as string | any[], but FewShotPromptTemplate.deserialize
  // only accepts the array form and throws on strings — confirm before relying
  // on the string variant.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  examples: string | any[];
  example_prompt?: SerializedPromptTemplate;
  example_separator: string;
  prefix?: string;
  suffix?: string;
  template_format: TemplateFormat;
};

/**
 * Represents a serialized version of a base prompt template. This type
 * can be either a SerializedFewShotTemplate or a
 * SerializedPromptTemplate.
 */
export type SerializedBasePromptTemplate =
  | SerializedFewShotTemplate
  | SerializedPromptTemplate;
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/index.ts
// Barrel module: re-exports the public surface of the prompts package
// (base classes, chat/few-shot/pipeline/structured templates, template
// parsing utilities, and serialization types).
export * from "./base.js";
export * from "./chat.js";
export * from "./few_shot.js";
export * from "./pipeline.js";
export * from "./prompt.js";
export * from "./serde.js";
export * from "./string.js";
export * from "./template.js";
export * from "./image.js";
export * from "./structured.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/few_shot.ts
import { BaseStringPromptTemplate } from "./string.js";
import type {
  BasePromptTemplateInput,
  TypedPromptInputValues,
  Example,
} from "./base.js";
import type { BaseExampleSelector } from "../example_selectors/base.js";
import {
  type TemplateFormat,
  checkValidTemplate,
  renderTemplate,
} from "./template.js";
import { PromptTemplate } from "./prompt.js";
import type { SerializedFewShotTemplate } from "./serde.js";
import type { InputValues, PartialValues } from "../utils/types/index.js";
import type { BaseMessage } from "../messages/index.js";
import {
  BaseChatPromptTemplate,
  type BaseMessagePromptTemplate,
} from "./chat.js";

export interface FewShotPromptTemplateInput
  extends BasePromptTemplateInput<InputValues> {
  /**
   * Examples to format into the prompt. Exactly one of this or
   * {@link exampleSelector} must be
   * provided.
   */
  examples?: Example[];

  /**
   * An {@link BaseExampleSelector} Examples to format into the prompt. Exactly one of this or
   * {@link examples} must be
   * provided.
   */
  exampleSelector?: BaseExampleSelector;

  /**
   * An {@link PromptTemplate} used to format a single example.
   */
  examplePrompt: PromptTemplate;

  /**
   * String separator used to join the prefix, the examples, and suffix.
   */
  exampleSeparator?: string;

  /**
   * A prompt template string to put before the examples.
   *
   * @defaultValue `""`
   */
  prefix?: string;

  /**
   * A prompt template string to put after the examples.
   */
  suffix?: string;

  /**
   * The format of the prompt template. Options are: 'f-string'
   */
  templateFormat?: TemplateFormat;

  /**
   * Whether or not to try validating the template on initialization.
   */
  validateTemplate?: boolean;
}

/**
 * Prompt template that contains few-shot examples.
 * @augments BasePromptTemplate
 * @augments FewShotPromptTemplateInput
 * @example
 * ```typescript
 * const examplePrompt = PromptTemplate.fromTemplate(
 *   "Input: {input}\nOutput: {output}",
 * );
 *
 * const exampleSelector = await SemanticSimilarityExampleSelector.fromExamples(
 *   [
 *     { input: "happy", output: "sad" },
 *     { input: "tall", output: "short" },
 *     { input: "energetic", output: "lethargic" },
 *     { input: "sunny", output: "gloomy" },
 *     { input: "windy", output: "calm" },
 *   ],
 *   new OpenAIEmbeddings(),
 *   HNSWLib,
 *   { k: 1 },
 * );
 *
 * const dynamicPrompt = new FewShotPromptTemplate({
 *   exampleSelector,
 *   examplePrompt,
 *   prefix: "Give the antonym of every input",
 *   suffix: "Input: {adjective}\nOutput:",
 *   inputVariables: ["adjective"],
 * });
 *
 * // Format the dynamic prompt with the input 'rainy'
 * console.log(await dynamicPrompt.format({ adjective: "rainy" }));
 *
 * ```
 */
export class FewShotPromptTemplate
  extends BaseStringPromptTemplate
  implements FewShotPromptTemplateInput
{
  lc_serializable = false;

  examples?: InputValues[];

  exampleSelector?: BaseExampleSelector | undefined;

  examplePrompt: PromptTemplate;

  suffix = "";

  exampleSeparator = "\n\n";

  prefix = "";

  templateFormat: TemplateFormat = "f-string";

  validateTemplate = true;

  constructor(input: FewShotPromptTemplateInput) {
    super(input);
    // Copy all provided fields onto the instance in one step.
    Object.assign(this, input);

    // Exactly one source of examples must be configured.
    if (this.examples !== undefined && this.exampleSelector !== undefined) {
      throw new Error(
        "Only one of 'examples' and 'example_selector' should be provided"
      );
    }

    if (this.examples === undefined && this.exampleSelector === undefined) {
      throw new Error(
        "One of 'examples' and 'example_selector' should be provided"
      );
    }

    if (this.validateTemplate) {
      // Validate prefix + suffix together against every variable the
      // template could receive (inputs plus partials).
      let totalInputVariables: string[] = this.inputVariables;
      if (this.partialVariables) {
        totalInputVariables = totalInputVariables.concat(
          Object.keys(this.partialVariables)
        );
      }
      checkValidTemplate(
        this.prefix + this.suffix,
        this.templateFormat,
        totalInputVariables
      );
    }
  }

  _getPromptType(): "few_shot" {
    return "few_shot";
  }

  static lc_name() {
    return "FewShotPromptTemplate";
  }

  /**
   * Resolve the examples to use: the static list when configured,
   * otherwise those chosen by the example selector for these inputs.
   */
  private async getExamples(
    inputVariables: InputValues
  ): Promise<InputValues[]> {
    if (this.examples !== undefined) {
      return this.examples;
    }
    if (this.exampleSelector !== undefined) {
      return this.exampleSelector.selectExamples(inputVariables);
    }
    throw new Error(
      "One of 'examples' and 'example_selector' should be provided"
    );
  }

  /**
   * Return a copy of this template with the given values pre-filled,
   * removing them from the remaining required input variables.
   */
  async partial<NewPartialVariableName extends string>(
    values: PartialValues<NewPartialVariableName>
  ) {
    const newInputVariables = this.inputVariables.filter(
      (iv) => !(iv in values)
    );
    const newPartialVariables = {
      ...(this.partialVariables ?? {}),
      ...values,
    };
    const promptDict = {
      ...this,
      inputVariables: newInputVariables,
      partialVariables: newPartialVariables,
    };
    return new FewShotPromptTemplate(promptDict);
  }

  /**
   * Formats the prompt with the given values.
   * @param values The values to format the prompt with.
   * @returns A promise that resolves to a string representing the formatted prompt.
   */
  async format(values: InputValues): Promise<string> {
    const allValues = await this.mergePartialAndUserVariables(values);
    const examples = await this.getExamples(allValues);

    const exampleStrings = await Promise.all(
      examples.map((example) => this.examplePrompt.format(example))
    );
    // prefix, examples and suffix are joined first, then rendered as one
    // template so the suffix/prefix variables are interpolated too.
    const template = [this.prefix, ...exampleStrings, this.suffix].join(
      this.exampleSeparator
    );
    return renderTemplate(template, this.templateFormat, allValues);
  }

  serialize(): SerializedFewShotTemplate {
    if (this.exampleSelector || !this.examples) {
      throw new Error(
        "Serializing an example selector is not currently supported"
      );
    }
    if (this.outputParser !== undefined) {
      throw new Error(
        "Serializing an output parser is not currently supported"
      );
    }
    return {
      _type: this._getPromptType(),
      input_variables: this.inputVariables,
      example_prompt: this.examplePrompt.serialize(),
      example_separator: this.exampleSeparator,
      suffix: this.suffix,
      prefix: this.prefix,
      template_format: this.templateFormat,
      examples: this.examples,
    };
  }

  static async deserialize(
    data: SerializedFewShotTemplate
  ): Promise<FewShotPromptTemplate> {
    const { example_prompt } = data;
    if (!example_prompt) {
      throw new Error("Missing example prompt");
    }
    const examplePrompt = await PromptTemplate.deserialize(example_prompt);

    let examples: Example[];

    // Only the array form is accepted here, despite the serialized type
    // allowing a string.
    if (Array.isArray(data.examples)) {
      examples = data.examples;
    } else {
      throw new Error(
        "Invalid examples format. Only list or string are supported."
      );
    }

    return new FewShotPromptTemplate({
      inputVariables: data.input_variables,
      examplePrompt,
      examples,
      exampleSeparator: data.example_separator,
      prefix: data.prefix,
      suffix: data.suffix,
      templateFormat: data.template_format,
    });
  }
}

export interface FewShotChatMessagePromptTemplateInput
  extends BasePromptTemplateInput<InputValues> {
  /**
   * Examples to format into the prompt. Exactly one of this or
   * {@link exampleSelector} must be
   * provided.
   */
  examples?: Example[];

  /**
   * An {@link BaseMessagePromptTemplate} | {@link BaseChatPromptTemplate} used to format a single example.
   */
  examplePrompt: BaseMessagePromptTemplate | BaseChatPromptTemplate;

  /**
   * String separator used to join the prefix, the examples, and suffix.
   *
   * @defaultValue `"\n\n"`
   */
  exampleSeparator?: string;

  /**
   * An {@link BaseExampleSelector} Examples to format into the prompt. Exactly one of this or
   * {@link examples} must be
   * provided.
   */
  exampleSelector?: BaseExampleSelector | undefined;

  /**
   * A prompt template string to put before the examples.
   *
   * @defaultValue `""`
   */
  prefix?: string;

  /**
   * A prompt template string to put after the examples.
   *
   * @defaultValue `""`
   */
  suffix?: string;

  /**
   * The format of the prompt template. Options are: 'f-string'
   *
   * @defaultValue `f-string`
   */
  templateFormat?: TemplateFormat;

  /**
   * Whether or not to try validating the template on initialization.
   *
   * @defaultValue `true`
   */
  validateTemplate?: boolean;
}

/**
 * Chat prompt template that contains few-shot examples.
 * @augments BasePromptTemplateInput
 * @augments FewShotChatMessagePromptTemplateInput
 */
export class FewShotChatMessagePromptTemplate<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunInput extends InputValues = any,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    PartialVariableName extends string = any
  >
  extends BaseChatPromptTemplate
  implements FewShotChatMessagePromptTemplateInput
{
  lc_serializable = true;

  examples?: InputValues[];

  exampleSelector?: BaseExampleSelector | undefined;

  examplePrompt: BaseMessagePromptTemplate | BaseChatPromptTemplate;

  suffix = "";

  exampleSeparator = "\n\n";

  prefix = "";

  templateFormat: TemplateFormat = "f-string";

  validateTemplate = true;

  _getPromptType(): "few_shot_chat" {
    return "few_shot_chat";
  }

  static lc_name() {
    return "FewShotChatMessagePromptTemplate";
  }

  constructor(fields: FewShotChatMessagePromptTemplateInput) {
    super(fields);
    this.examples = fields.examples;
    this.examplePrompt = fields.examplePrompt;
    this.exampleSeparator = fields.exampleSeparator ?? "\n\n";
    this.exampleSelector = fields.exampleSelector;
    this.prefix = fields.prefix ?? "";
    this.suffix = fields.suffix ?? "";
    this.templateFormat = fields.templateFormat ?? "f-string";
    this.validateTemplate = fields.validateTemplate ?? true;

    // Exactly one source of examples must be configured.
    if (this.examples !== undefined && this.exampleSelector !== undefined) {
      throw new Error(
        "Only one of 'examples' and 'example_selector' should be provided"
      );
    }

    if (this.examples === undefined && this.exampleSelector === undefined) {
      throw new Error(
        "One of 'examples' and 'example_selector' should be provided"
      );
    }

    if (this.validateTemplate) {
      let totalInputVariables: string[] = this.inputVariables;
      if (this.partialVariables) {
        totalInputVariables = totalInputVariables.concat(
          Object.keys(this.partialVariables)
        );
      }
      checkValidTemplate(
        this.prefix + this.suffix,
        this.templateFormat,
        totalInputVariables
      );
    }
  }

  /**
   * Resolve the examples to use: the static list when configured,
   * otherwise those chosen by the example selector for these inputs.
   */
  private async getExamples(
    inputVariables: InputValues
  ): Promise<InputValues[]> {
    if (this.examples !== undefined) {
      return this.examples;
    }
    if (this.exampleSelector !== undefined) {
      return this.exampleSelector.selectExamples(inputVariables);
    }
    throw new Error(
      "One of 'examples' and 'example_selector' should be provided"
    );
  }

  /**
   * Formats the list of values and returns a list of formatted messages.
   * @param values The values to format the prompt with.
   * @returns A promise that resolves to a string representing the formatted prompt.
   */
  async formatMessages(
    values: TypedPromptInputValues<RunInput>
  ): Promise<BaseMessage[]> {
    const allValues = await this.mergePartialAndUserVariables(values);
    let examples = await this.getExamples(allValues);

    // Keep only the keys the example prompt actually consumes.
    examples = examples.map((example) => {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const result: Record<string, any> = {};
      this.examplePrompt.inputVariables.forEach((inputVariable) => {
        result[inputVariable] = example[inputVariable];
      });
      return result;
    });

    const messages: BaseMessage[] = [];
    for (const example of examples) {
      const exampleMessages = await this.examplePrompt.formatMessages(example);
      messages.push(...exampleMessages);
    }
    return messages;
  }

  /**
   * Formats the prompt with the given values.
   * @param values The values to format the prompt with.
   * @returns A promise that resolves to a string representing the formatted prompt.
   */
  async format(values: TypedPromptInputValues<RunInput>): Promise<string> {
    const allValues = await this.mergePartialAndUserVariables(values);
    const examples = await this.getExamples(allValues);
    const exampleMessages = await Promise.all(
      examples.map((example) => this.examplePrompt.formatMessages(example))
    );
    // Flatten messages and use their raw content as example strings.
    const exampleStrings = exampleMessages
      .flat()
      .map((message) => message.content);
    const template = [this.prefix, ...exampleStrings, this.suffix].join(
      this.exampleSeparator
    );
    return renderTemplate(template, this.templateFormat, allValues);
  }

  /**
   * Partially formats the prompt with the given values.
   * @param values The values to partially format the prompt with.
   * @returns A promise that resolves to an instance of `FewShotChatMessagePromptTemplate` with the given values partially formatted.
   */
  async partial(
    values: PartialValues<PartialVariableName>
  ): Promise<FewShotChatMessagePromptTemplate<RunInput, PartialVariableName>> {
    const newInputVariables = this.inputVariables.filter(
      (variable) => !(variable in values)
    ) as Exclude<Extract<keyof RunInput, string>, PartialVariableName>[];
    const newPartialVariables = {
      ...(this.partialVariables ?? {}),
      ...values,
      // NOTE(review): the union below is redundant (both members are
      // PartialVariableName) — kept as-is; simplifying is type-only.
    } as PartialValues<PartialVariableName | PartialVariableName>;
    const promptDict = {
      ...this,
      inputVariables: newInputVariables,
      partialVariables: newPartialVariables,
    };
    return new FewShotChatMessagePromptTemplate<
      InputValues<Exclude<Extract<keyof RunInput, string>, PartialVariableName>>
    >(promptDict);
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/base.ts
// Default generic "any" values are for backwards compatibility.
// Replace with "string" when we are comfortable with a breaking change.
import type {
  InputValues,
  PartialValues,
  StringWithAutocomplete,
} from "../utils/types/index.js";
import { type BasePromptValueInterface } from "../prompt_values.js";
import { BaseOutputParser } from "../output_parsers/index.js";
import type { SerializedFields } from "../load/map_keys.js";
import { Runnable } from "../runnables/base.js";
import { BaseCallbackConfig } from "../callbacks/manager.js";
import type { SerializedBasePromptTemplate } from "../prompts/serde.js";

// Input values keyed by the RunInput's string keys, with autocomplete.
export type TypedPromptInputValues<RunInput> = InputValues<
  StringWithAutocomplete<Extract<keyof RunInput, string>>
>;

// A single few-shot example: a flat string-to-string record.
export type Example = Record<string, string>;

/**
 * Input common to all prompt templates.
 */
export interface BasePromptTemplateInput<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  InputVariables extends InputValues = any,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  PartialVariableName extends string = any
> {
  /**
   * A list of variable names the prompt template expects
   */
  inputVariables: Array<Extract<keyof InputVariables, string>>;

  /**
   * How to parse the output of calling an LLM on this formatted prompt
   */
  outputParser?: BaseOutputParser;

  /** Partial variables */
  partialVariables?: PartialValues<PartialVariableName>;
}

/**
 * Base class for prompt templates. Exposes a format method that returns a
 * string prompt given a set of input values.
 */
export abstract class BasePromptTemplate<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunInput extends InputValues = any,
    RunOutput extends BasePromptValueInterface = BasePromptValueInterface,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    PartialVariableName extends string = any
  >
  extends Runnable<RunInput, RunOutput>
  implements BasePromptTemplateInput
{
  declare PromptValueReturnType: RunOutput;

  lc_serializable = true;

  // Note: evaluated at field-initialization time, so subclasses must make
  // _getPromptType() safe to call before their constructor body runs.
  lc_namespace = ["langchain_core", "prompts", this._getPromptType()];

  get lc_attributes(): SerializedFields | undefined {
    return {
      partialVariables: undefined, // python doesn't support this yet
    };
  }

  inputVariables: Array<Extract<keyof RunInput, string>>;

  outputParser?: BaseOutputParser;

  partialVariables: PartialValues<PartialVariableName>;

  constructor(input: BasePromptTemplateInput) {
    super(input);
    const { inputVariables } = input;
    // "stop" is reserved: it is forwarded to the model as a stop-sequence.
    if (inputVariables.includes("stop")) {
      throw new Error(
        "Cannot have an input variable named 'stop', as it is used internally, please rename."
      );
    }
    Object.assign(this, input);
  }

  abstract partial(
    values: PartialValues
  ): Promise<BasePromptTemplate<RunInput, RunOutput, PartialVariableName>>;

  /**
   * Merges partial variables and user variables.
   * Partial values may be plain strings or zero-argument async functions
   * that produce a string; user-supplied values take precedence on key
   * collision.
   * @param userVariables The user variables to merge with the partial variables.
   * @returns A Promise that resolves to an object containing the merged variables.
   */
  async mergePartialAndUserVariables(
    userVariables: TypedPromptInputValues<RunInput>
  ): Promise<
    InputValues<Extract<keyof RunInput, string> | PartialVariableName>
  > {
    const partialVariables = this.partialVariables ?? {};
    const partialValues: Record<string, string> = {};

    for (const [key, value] of Object.entries(partialVariables)) {
      if (typeof value === "string") {
        partialValues[key] = value;
      } else {
        // Lazily-computed partial: invoke the thunk now.
        partialValues[key] = await (value as () => Promise<string>)();
      }
    }

    const allKwargs = {
      ...(partialValues as Record<PartialVariableName, string>),
      ...userVariables,
    };
    return allKwargs;
  }

  /**
   * Invokes the prompt template with the given input and options.
   * @param input The input to invoke the prompt template with.
   * @param options Optional configuration for the callback.
   * @returns A Promise that resolves to the output of the prompt template.
   */
  async invoke(
    input: RunInput,
    options?: BaseCallbackConfig
  ): Promise<RunOutput> {
    // Wrapped so tracing/callbacks record this as a "prompt" run.
    return this._callWithConfig(
      (input: RunInput) => this.formatPromptValue(input),
      input,
      { ...options, runType: "prompt" }
    );
  }

  /**
   * Format the prompt given the input values.
   *
   * @param values - A dictionary of arguments to be passed to the prompt template.
   * @returns A formatted prompt string.
   *
   * @example
   * ```ts
   * prompt.format({ foo: "bar" });
   * ```
   */
  abstract format(values: TypedPromptInputValues<RunInput>): Promise<string>;

  /**
   * Format the prompt given the input values and return a formatted prompt value.
   * @param values
   * @returns A formatted PromptValue.
   */
  abstract formatPromptValue(
    values: TypedPromptInputValues<RunInput>
  ): Promise<RunOutput>;

  /**
   * Return the string type key uniquely identifying this class of prompt template.
   */
  abstract _getPromptType(): string;

  /**
   * Return a json-like object representing this prompt template.
   * @deprecated
   */
  serialize(): SerializedBasePromptTemplate {
    throw new Error("Use .toJSON() instead");
  }

  /**
   * @deprecated
   * Load a prompt template from a json-like object describing it.
   *
   * @remarks
   * Deserializing needs to be async because templates (e.g. {@link FewShotPromptTemplate}) can
   * reference remote resources that we read asynchronously with a web
   * request.
   */
  static async deserialize(
    data: SerializedBasePromptTemplate
  ): Promise<
    BasePromptTemplate<InputValues, BasePromptValueInterface, string>
  > {
    // Dynamic imports avoid a circular dependency with the concrete
    // template modules.
    switch (data._type) {
      case "prompt": {
        const { PromptTemplate } = await import("./prompt.js");
        return PromptTemplate.deserialize(data);
      }
      case undefined: {
        // Missing _type defaults to a plain prompt template.
        const { PromptTemplate } = await import("./prompt.js");
        return PromptTemplate.deserialize({ ...data, _type: "prompt" });
      }
      case "few_shot": {
        const { FewShotPromptTemplate } = await import("./few_shot.js");
        return FewShotPromptTemplate.deserialize(data);
      }
      default:
        throw new Error(
          `Invalid prompt type in config: ${
            (data as SerializedBasePromptTemplate)._type
          }`
        );
    }
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/pipeline.ts
import type { InputValues, PartialValues } from "../utils/types/index.js"; import type { SerializedBasePromptTemplate } from "./serde.js"; import { BasePromptTemplate, type BasePromptTemplateInput } from "./base.js"; import { ChatPromptTemplate } from "./chat.js"; /** * Type that includes the name of the prompt and the prompt itself. */ export type PipelinePromptParams< PromptTemplateType extends BasePromptTemplate > = { name: string; prompt: PromptTemplateType; }; /** * Type that extends the BasePromptTemplateInput type, excluding the * inputVariables property. It includes an array of pipelinePrompts and a * finalPrompt. */ export type PipelinePromptTemplateInput< PromptTemplateType extends BasePromptTemplate > = Omit<BasePromptTemplateInput, "inputVariables"> & { pipelinePrompts: PipelinePromptParams<PromptTemplateType>[]; finalPrompt: PromptTemplateType; }; /** * Class that handles a sequence of prompts, each of which may require * different input variables. Includes methods for formatting these * prompts, extracting required input values, and handling partial * prompts. * @example * ```typescript * const composedPrompt = new PipelinePromptTemplate({ * pipelinePrompts: [ * { * name: "introduction", * prompt: PromptTemplate.fromTemplate(`You are impersonating {person}.`), * }, * { * name: "example", * prompt: PromptTemplate.fromTemplate( * `Here's an example of an interaction: * Q: {example_q} * A: {example_a}`, * ), * }, * { * name: "start", * prompt: PromptTemplate.fromTemplate( * `Now, do this for real! 
* Q: {input} * A:`, * ), * }, * ], * finalPrompt: PromptTemplate.fromTemplate( * `{introduction} * {example} * {start}`, * ), * }); * * const formattedPrompt = await composedPrompt.format({ * person: "Elon Musk", * example_q: `What's your favorite car?`, * example_a: "Tesla", * input: `What's your favorite social media site?`, * }); * ``` */ export class PipelinePromptTemplate< PromptTemplateType extends BasePromptTemplate > extends BasePromptTemplate { static lc_name() { return "PipelinePromptTemplate"; } pipelinePrompts: PipelinePromptParams<PromptTemplateType>[]; finalPrompt: PromptTemplateType; constructor(input: PipelinePromptTemplateInput<PromptTemplateType>) { super({ ...input, inputVariables: [] }); this.pipelinePrompts = input.pipelinePrompts; this.finalPrompt = input.finalPrompt; this.inputVariables = this.computeInputValues(); } /** * Computes the input values required by the pipeline prompts. * @returns Array of input values required by the pipeline prompts. */ protected computeInputValues() { const intermediateValues = this.pipelinePrompts.map( (pipelinePrompt) => pipelinePrompt.name ); const inputValues = this.pipelinePrompts .map((pipelinePrompt) => pipelinePrompt.prompt.inputVariables.filter( (inputValue) => !intermediateValues.includes(inputValue) ) ) .flat(); return [...new Set(inputValues)]; } protected static extractRequiredInputValues( allValues: InputValues, requiredValueNames: string[] ) { return requiredValueNames.reduce((requiredValues, valueName) => { // eslint-disable-next-line no-param-reassign requiredValues[valueName] = allValues[valueName]; return requiredValues; }, {} as InputValues); } /** * Formats the pipeline prompts based on the provided input values. * @param values Input values to format the pipeline prompts. * @returns Promise that resolves with the formatted input values. 
*/ protected async formatPipelinePrompts( values: InputValues ): Promise<InputValues> { const allValues = await this.mergePartialAndUserVariables(values); for (const { name: pipelinePromptName, prompt: pipelinePrompt } of this .pipelinePrompts) { const pipelinePromptInputValues = PipelinePromptTemplate.extractRequiredInputValues( allValues, pipelinePrompt.inputVariables ); // eslint-disable-next-line no-instanceof/no-instanceof if (pipelinePrompt instanceof ChatPromptTemplate) { allValues[pipelinePromptName] = await pipelinePrompt.formatMessages( pipelinePromptInputValues ); } else { allValues[pipelinePromptName] = await pipelinePrompt.format( pipelinePromptInputValues ); } } return PipelinePromptTemplate.extractRequiredInputValues( allValues, this.finalPrompt.inputVariables ); } /** * Formats the final prompt value based on the provided input values. * @param values Input values to format the final prompt value. * @returns Promise that resolves with the formatted final prompt value. */ async formatPromptValue( values: InputValues ): Promise<PromptTemplateType["PromptValueReturnType"]> { return this.finalPrompt.formatPromptValue( await this.formatPipelinePrompts(values) ); } async format(values: InputValues): Promise<string> { return this.finalPrompt.format(await this.formatPipelinePrompts(values)); } /** * Handles partial prompts, which are prompts that have been partially * filled with input values. * @param values Partial input values. * @returns Promise that resolves with a new PipelinePromptTemplate instance with updated input variables. */ async partial( values: PartialValues ): Promise<PipelinePromptTemplate<PromptTemplateType>> { const promptDict = { ...this }; promptDict.inputVariables = this.inputVariables.filter( (iv) => !(iv in values) ); promptDict.partialVariables = { ...(this.partialVariables ?? 
{}), ...values, }; return new PipelinePromptTemplate<PromptTemplateType>(promptDict); } serialize(): SerializedBasePromptTemplate { throw new Error("Not implemented."); } _getPromptType(): string { return "pipeline"; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/prompts/prompt.ts
// Default generic "any" values are for backwards compatibility. // Replace with "string" when we are comfortable with a breaking change. import { BaseStringPromptTemplate } from "./string.js"; import type { BasePromptTemplateInput, TypedPromptInputValues, } from "./base.js"; import { checkValidTemplate, parseTemplate, renderTemplate, type TemplateFormat, } from "./template.js"; import type { SerializedPromptTemplate } from "./serde.js"; import type { InputValues, PartialValues } from "../utils/types/index.js"; import { MessageContent, MessageContentComplex } from "../messages/index.js"; /** * Inputs to create a {@link PromptTemplate} * @augments BasePromptTemplateInput */ export interface PromptTemplateInput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any PartialVariableName extends string = any, Format extends TemplateFormat = TemplateFormat > extends BasePromptTemplateInput<RunInput, PartialVariableName> { /** * The prompt template */ template: MessageContent; /** * The format of the prompt template. Options are "f-string" and "mustache" */ templateFormat?: Format; /** * Whether or not to try validating the template on initialization * * @defaultValue `true` */ validateTemplate?: boolean; /** * Additional fields which should be included inside * the message content array if using a complex message * content. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any additionalContentFields?: MessageContentComplex; } type NonAlphanumeric = | " " | "\t" | "\n" | "\r" | '"' | "'" | "{" | "[" | "(" | "`" | ":" | ";"; /** * Recursive type to extract template parameters from a string. * @template T - The input string. * @template Result - The resulting array of extracted template parameters. */ type ExtractTemplateParamsRecursive< T extends string, Result extends string[] = [] > = T extends `${string}{${infer Param}}${infer Rest}` ? 
Param extends `${NonAlphanumeric}${string}` ? ExtractTemplateParamsRecursive<Rest, Result> // for non-template variables that look like template variables e.g. see https://github.com/langchain-ai/langchainjs/blob/main/langchain/src/chains/query_constructor/prompt.ts : ExtractTemplateParamsRecursive<Rest, [...Result, Param]> : Result; export type ParamsFromFString<T extends string> = { [Key in | ExtractTemplateParamsRecursive<T>[number] // eslint-disable-next-line @typescript-eslint/no-explicit-any | (string & Record<never, never>)]: any; }; export type ExtractedFStringParams< T extends string, // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol // eslint-disable-next-line @typescript-eslint/ban-types > = RunInput extends Symbol ? ParamsFromFString<T> : RunInput; /** * Schema to represent a basic prompt for an LLM. * @augments BasePromptTemplate * @augments PromptTemplateInput * * @example * ```ts * import { PromptTemplate } from "langchain/prompts"; * * const prompt = new PromptTemplate({ * inputVariables: ["foo"], * template: "Say {foo}", * }); * ``` */ export class PromptTemplate< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends InputValues = any, // eslint-disable-next-line @typescript-eslint/no-explicit-any PartialVariableName extends string = any > extends BaseStringPromptTemplate<RunInput, PartialVariableName> implements PromptTemplateInput<RunInput, PartialVariableName> { static lc_name() { return "PromptTemplate"; } template: MessageContent; templateFormat: TemplateFormat = "f-string"; validateTemplate = true; /** * Additional fields which should be included inside * the message content array if using a complex message * content. 
*/ // eslint-disable-next-line @typescript-eslint/no-explicit-any additionalContentFields?: MessageContentComplex; constructor(input: PromptTemplateInput<RunInput, PartialVariableName>) { super(input); // If input is mustache and validateTemplate is not defined, set it to false if ( input.templateFormat === "mustache" && input.validateTemplate === undefined ) { this.validateTemplate = false; } Object.assign(this, input); if (this.validateTemplate) { if (this.templateFormat === "mustache") { throw new Error("Mustache templates cannot be validated."); } let totalInputVariables: string[] = this.inputVariables; if (this.partialVariables) { totalInputVariables = totalInputVariables.concat( Object.keys(this.partialVariables) ); } checkValidTemplate( this.template, this.templateFormat, totalInputVariables ); } } _getPromptType(): "prompt" { return "prompt"; } /** * Formats the prompt template with the provided values. * @param values The values to be used to format the prompt template. * @returns A promise that resolves to a string which is the formatted prompt. */ async format(values: TypedPromptInputValues<RunInput>): Promise<string> { const allValues = await this.mergePartialAndUserVariables(values); return renderTemplate( this.template as string, this.templateFormat, allValues ); } /** * Take examples in list format with prefix and suffix to create a prompt. * * Intended to be used a a way to dynamically create a prompt from examples. * * @param examples - List of examples to use in the prompt. * @param suffix - String to go after the list of examples. Should generally set up the user's input. * @param inputVariables - A list of variable names the final prompt template will expect * @param exampleSeparator - The separator to use in between examples * @param prefix - String that should go before any examples. Generally includes examples. * * @returns The final prompt template generated. 
*/ static fromExamples( examples: string[], suffix: string, inputVariables: string[], exampleSeparator = "\n\n", prefix = "" ) { const template = [prefix, ...examples, suffix].join(exampleSeparator); return new PromptTemplate({ inputVariables, template, }); } /** * Load prompt template from a template f-string */ static fromTemplate< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol, T extends string = string >( template: T, options?: Omit< PromptTemplateInput<RunInput, string, "f-string">, "template" | "inputVariables" > ): PromptTemplate<ExtractedFStringParams<T, RunInput>>; static fromTemplate< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol, T extends string = string >( template: T, options?: Omit< PromptTemplateInput<RunInput, string>, "template" | "inputVariables" > ): PromptTemplate<ExtractedFStringParams<T, RunInput>>; static fromTemplate< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol, T extends string = string >( template: T, options?: Omit< PromptTemplateInput<RunInput, string, "mustache">, "template" | "inputVariables" > ): PromptTemplate<InputValues>; static fromTemplate< // eslint-disable-next-line @typescript-eslint/ban-types RunInput extends InputValues = Symbol, T extends string = string >( template: T, options?: Omit< PromptTemplateInput<RunInput, string, TemplateFormat>, "template" | "inputVariables" > ): PromptTemplate<ExtractedFStringParams<T, RunInput> | InputValues> { const { templateFormat = "f-string", ...rest } = options ?? 
{}; const names = new Set<string>(); parseTemplate(template, templateFormat).forEach((node) => { if (node.type === "variable") { names.add(node.name); } }); return new PromptTemplate({ // Rely on extracted types // eslint-disable-next-line @typescript-eslint/no-explicit-any inputVariables: [...names] as any[], templateFormat, template, ...rest, }); } /** * Partially applies values to the prompt template. * @param values The values to be partially applied to the prompt template. * @returns A new instance of PromptTemplate with the partially applied values. */ async partial<NewPartialVariableName extends string>( values: PartialValues<NewPartialVariableName> ) { const newInputVariables = this.inputVariables.filter( (iv) => !(iv in values) ) as Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>[]; const newPartialVariables = { ...(this.partialVariables ?? {}), ...values, } as PartialValues<PartialVariableName | NewPartialVariableName>; const promptDict = { ...this, inputVariables: newInputVariables, partialVariables: newPartialVariables, }; return new PromptTemplate< InputValues< Exclude<Extract<keyof RunInput, string>, NewPartialVariableName> > >(promptDict); } serialize(): SerializedPromptTemplate { if (this.outputParser !== undefined) { throw new Error( "Cannot serialize a prompt template with an output parser" ); } return { _type: this._getPromptType(), input_variables: this.inputVariables, template: this.template, template_format: this.templateFormat, }; } static async deserialize( data: SerializedPromptTemplate ): Promise<PromptTemplate> { if (!data.template) { throw new Error("Prompt template must have a template"); } const res = new PromptTemplate({ inputVariables: data.input_variables, template: data.template, templateFormat: data.template_format, }); return res; } // TODO(from file) }
0
lc_public_repos/langchainjs/langchain-core/src/prompts
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/prompt.mustache.test.ts
import { test, expect } from "@jest/globals"; import { PromptTemplate } from "../prompt.js"; import { parseTemplate } from "../template.js"; test("Single input variable.", async () => { const template = "This is a {{foo}} test."; const prompt = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); const formattedPrompt = await prompt.format({ foo: "bar" }); expect(formattedPrompt).toBe("This is a bar test."); expect(prompt.inputVariables).toEqual(["foo"]); }); test("Multiple input variables.", async () => { const template = "This {{bar}} is a {{foo}} test."; const prompt = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); const formattedPrompt = await prompt.format({ bar: "baz", foo: "bar" }); expect(formattedPrompt).toBe("This baz is a bar test."); expect(prompt.inputVariables).toEqual(["bar", "foo"]); }); test("Multiple input variables with repeats.", async () => { const template = "This {{bar}} is a {{foo}} test {{foo}}."; const prompt = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); const formattedPrompt = await prompt.format({ bar: "baz", foo: "bar" }); expect(formattedPrompt).toBe("This baz is a bar test bar."); expect(prompt.inputVariables).toEqual(["bar", "foo"]); }); test("Ignores f-string inputs input variables with repeats.", async () => { const template = "This {bar} is a {foo} test {foo}."; const prompt = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); const formattedPrompt = await prompt.format({ bar: "baz", foo: "bar" }); expect(formattedPrompt).toBe("This {bar} is a {foo} test {foo}."); expect(prompt.inputVariables).toEqual([]); }); test("Nested variables.", async () => { const template = "This {{obj.bar}} is a {{obj.foo}} test {{foo.bar.baz}}. 
Single: {{single}}"; const prompt = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); const formattedPrompt = await prompt.format({ obj: { bar: "foo", foo: "bar" }, foo: { bar: { baz: "baz", }, }, single: "one", }); expect(formattedPrompt).toBe("This foo is a bar test baz. Single: one"); expect(prompt.inputVariables).toEqual(["obj", "foo", "single"]); }); test("section/context variables", async () => { const template = `This{{#foo}} {{bar}} {{/foo}}is a test.`; const prompt = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); const formattedPrompt = await prompt.format({ foo: { bar: "yo" } }); expect(formattedPrompt).toEqual(`This yo is a test.`); expect(prompt.inputVariables).toEqual(["foo"]); }); test("section/context variables with repeats", async () => { const template = `This{{#foo}} {{bar}} {{/foo}}is a test.`; const promptWithRepeats = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); const formattedPrompt = await promptWithRepeats.format({ foo: [{ bar: "yo" }, { bar: "hello" }], }); expect(formattedPrompt).toEqual(`This yo hello is a test.`); expect(promptWithRepeats.inputVariables).toEqual(["foo"]); }); test("Escaped variables", async () => { const template = `test: {{{text}}}`; const parsed = parseTemplate(template, "mustache"); expect(parsed[0]).toStrictEqual({ type: "literal", text: "test: ", }); expect(parsed[1]).toStrictEqual({ type: "variable", name: "text", }); const promptTemplate = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); const result = await promptTemplate.invoke({ text: `hello i have a "quote`, }); expect(result.value).toBe(`test: hello i have a "quote`); });
0
lc_public_repos/langchainjs/langchain-core/src/prompts
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/prompt.test.ts
import { expect, test } from "@jest/globals"; import { PromptTemplate } from "../prompt.js"; import { Document } from "../../documents/document.js"; test("Test using partial", async () => { const prompt = new PromptTemplate({ template: "{foo}{bar}", inputVariables: ["foo"], partialVariables: { bar: "baz" }, }); expect(await prompt.format({ foo: "foo" })).toBe("foobaz"); }); test("Test using partial with an extra variable", async () => { const prompt = new PromptTemplate({ template: "{foo}{bar}", inputVariables: ["foo"], partialVariables: { bar: "baz" }, }); expect(await prompt.format({ foo: "foo", unused: "nada" })).toBe("foobaz"); }); test("Test fromTemplate", async () => { const prompt = PromptTemplate.fromTemplate("{foo}{bar}"); expect( (await prompt.invoke({ foo: "foo", bar: "baz", unused: "eee" })).value ).toBe("foobaz"); }); test("Test fromTemplate with a non-string value", async () => { const prompt = PromptTemplate.fromTemplate("{foo}{bar}"); expect( ( await prompt.invoke({ foo: ["barbar"], bar: [new Document({ pageContent: "bar" })], }) ).value ).toBe(`["barbar"][{"pageContent":"bar","metadata":{}}]`); }); test("Test fromTemplate with escaped strings", async () => { const prompt = PromptTemplate.fromTemplate("{{foo}}{{bar}}"); expect(await prompt.format({ unused: "eee" })).toBe("{foo}{bar}"); }); test("Test fromTemplate with type parameter", async () => { const prompt = PromptTemplate.fromTemplate<{ foo: string }>("test"); // @ts-expect-error TS compiler should flag expect(await prompt.format({ unused: "eee" })).toBe("test"); }); test("Test fromTemplate with missing variable should raise compiler error", async () => { const prompt = PromptTemplate.fromTemplate("{foo}"); await expect(async () => { // @ts-expect-error TS compiler should flag missing variable await prompt.format({ unused: "eee" }); }).rejects.toThrow(); await expect(async () => { // @ts-expect-error TS compiler should flag missing variable await prompt.invoke({ unused: "eee" }); 
}).rejects.toThrow(); }); test("Test fromTemplate with extra variable should work", async () => { const prompt = PromptTemplate.fromTemplate("{foo}"); expect(await prompt.format({ foo: "test", unused: "eee" })).toBe("test"); expect((await prompt.invoke({ foo: "test", unused: "eee" })).value).toBe( "test" ); }); test("Test using full partial", async () => { const prompt = new PromptTemplate({ template: "{foo}{bar}", inputVariables: [], partialVariables: { bar: "baz", foo: "boo" }, }); expect(await prompt.format({})).toBe("boobaz"); }); test("Test partial", async () => { const prompt = new PromptTemplate({ template: "{foo}{bar}", inputVariables: ["foo", "bar"], }); expect(prompt.inputVariables).toEqual(["foo", "bar"]); const partialPrompt = await prompt.partial({ foo: "foo" }); // original prompt is not modified expect(prompt.inputVariables).toEqual(["foo", "bar"]); // partial prompt has only remaining variables expect(partialPrompt.inputVariables).toEqual(["bar"]); expect(await partialPrompt.format({ bar: "baz" })).toBe("foobaz"); }); test("Test partial with function", async () => { const prompt = new PromptTemplate({ template: "{foo}{bar}", inputVariables: ["foo", "bar"], }); const partialPrompt = await prompt.partial({ foo: () => Promise.resolve("boo"), }); expect(await partialPrompt.format({ bar: "baz" })).toBe("boobaz"); });
0
lc_public_repos/langchainjs/langchain-core/src/prompts
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/pipeline.test.ts
import { expect, test } from "@jest/globals"; import { PromptTemplate } from "../prompt.js"; import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, } from "../chat.js"; import { PipelinePromptTemplate } from "../pipeline.js"; test("Test pipeline input variables", async () => { const prompt = new PipelinePromptTemplate({ pipelinePrompts: [ { name: "bar", prompt: PromptTemplate.fromTemplate("{foo}"), }, ], finalPrompt: PromptTemplate.fromTemplate("{bar}"), }); expect(prompt.inputVariables).toEqual(["foo"]); }); test("Test simple pipeline", async () => { const prompt = new PipelinePromptTemplate({ pipelinePrompts: [ { name: "bar", prompt: PromptTemplate.fromTemplate("{foo}"), }, ], finalPrompt: PromptTemplate.fromTemplate("{bar}"), }); expect( await prompt.format({ foo: "jim", }) ).toEqual("jim"); }); test("Test multi variable pipeline", async () => { const prompt = new PipelinePromptTemplate({ pipelinePrompts: [ { name: "bar", prompt: PromptTemplate.fromTemplate("{foo}"), }, ], finalPrompt: PromptTemplate.fromTemplate("okay {bar} {baz}"), }); expect( await prompt.format({ foo: "jim", baz: "halpert", }) ).toEqual("okay jim halpert"); }); test("Test longer pipeline", async () => { const prompt = new PipelinePromptTemplate({ pipelinePrompts: [ { name: "bar", prompt: PromptTemplate.fromTemplate("{foo}"), }, { name: "qux", prompt: PromptTemplate.fromTemplate("hi {bar}"), }, ], finalPrompt: PromptTemplate.fromTemplate("okay {qux} {baz}"), }); expect( await prompt.format({ foo: "pam", baz: "beasley", }) ).toEqual("okay hi pam beasley"); }); test("Test with .partial", async () => { const prompt = new PipelinePromptTemplate({ pipelinePrompts: [ { name: "bar", prompt: PromptTemplate.fromTemplate("{foo}"), }, ], finalPrompt: PromptTemplate.fromTemplate("okay {bar} {baz}"), }); const partialPrompt = await prompt.partial({ baz: "schrute", }); expect( await partialPrompt.format({ foo: "dwight", }) ).toEqual("okay dwight 
schrute"); }); test("Test with chat prompts", async () => { const prompt = new PipelinePromptTemplate({ pipelinePrompts: [ { name: "foo", prompt: ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate(`{name} halpert`), ]), }, ], finalPrompt: ChatPromptTemplate.fromMessages([ SystemMessagePromptTemplate.fromTemplate("What is your name?"), new MessagesPlaceholder("foo"), ]), }); const formattedPromptValue = await prompt.formatPromptValue({ name: "pam", }); expect(formattedPromptValue.messages[1].content).toEqual("pam halpert"); });
0
lc_public_repos/langchainjs/langchain-core/src/prompts
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/structured.test.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ import { ZodType, ZodTypeDef } from "zod"; import { test, expect } from "@jest/globals"; import { StructuredOutputMethodParams, StructuredOutputMethodOptions, BaseLanguageModelInput, } from "../../language_models/base.js"; import { BaseMessage } from "../../messages/index.js"; import { Runnable, RunnableLambda } from "../../runnables/base.js"; import { RunnableConfig } from "../../runnables/config.js"; import { FakeListChatModel } from "../../utils/testing/index.js"; import { StructuredPrompt } from "../structured.js"; import { load } from "../../load/index.js"; class FakeStructuredChatModel extends FakeListChatModel { withStructuredOutput< RunOutput extends Record<string, any> = Record<string, any> >( _params: | Record<string, any> | StructuredOutputMethodParams<RunOutput, false> | ZodType<RunOutput, ZodTypeDef, RunOutput>, config?: StructuredOutputMethodOptions<false> | undefined ): Runnable<BaseLanguageModelInput, RunOutput, RunnableConfig>; withStructuredOutput< RunOutput extends Record<string, any> = Record<string, any> >( _params: | Record<string, any> | StructuredOutputMethodParams<RunOutput, true> | ZodType<RunOutput, ZodTypeDef, RunOutput>, config?: StructuredOutputMethodOptions<true> | undefined ): Runnable< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }, RunnableConfig >; withStructuredOutput< RunOutput extends Record<string, any> = Record<string, any> >( _params: | Record<string, any> | StructuredOutputMethodParams<RunOutput, boolean> | ZodType<RunOutput, ZodTypeDef, RunOutput>, _config?: StructuredOutputMethodOptions<boolean> | undefined ): | Runnable<BaseLanguageModelInput, RunOutput, RunnableConfig> | Runnable< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }, RunnableConfig > { if (!_config?.includeRaw) { if (typeof _params === "object") { const func = RunnableLambda.from( (_: BaseLanguageModelInput) => _params ); return func as any; } } throw new Error("Invalid 
schema"); } } test("Test format", async () => { const schema = { name: "yo", description: "a structured output", parameters: { name: { type: "string" }, value: { type: "integer" }, }, }; const prompt = StructuredPrompt.fromMessagesAndSchema( [["human", "I'm very structured, how about you?"]], schema ); const model = new FakeStructuredChatModel({ responses: [] }); const chain = prompt.pipe(model); await chain.invoke({}); await expect(chain.invoke({})).resolves.toEqual(schema); const revived: StructuredPrompt = await load(JSON.stringify(prompt)); expect(JSON.stringify(prompt)).toEqual( '{"lc":1,"type":"constructor","id":["langchain_core","prompts","structured","StructuredPrompt"],"kwargs":{"schema_":{"name":"yo","description":"a structured output","parameters":{"name":{"type":"string"},"value":{"type":"integer"}}},"input_variables":[],"messages":[{"lc":1,"type":"constructor","id":["langchain_core","prompts","chat","HumanMessagePromptTemplate"],"kwargs":{"prompt":{"lc":1,"type":"constructor","id":["langchain_core","prompts","prompt","PromptTemplate"],"kwargs":{"input_variables":[],"template_format":"f-string","template":"I\'m very structured, how about you?","schema":{"name":"yo","description":"a structured output","parameters":{"name":{"type":"string"},"value":{"type":"integer"}}}}}}}]}}' ); const revivedChain = revived.pipe(model); await expect(revivedChain.invoke({})).resolves.toEqual(schema); const boundModel = model.bind({ runName: "boundModel" }); const chainWithBoundModel = prompt.pipe(boundModel); await expect(chainWithBoundModel.invoke({})).resolves.toEqual(schema); });
0
lc_public_repos/langchainjs/langchain-core/src/prompts
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/chat.test.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ import { expect, test } from "@jest/globals"; import { AIMessagePromptTemplate, ChatPromptTemplate, ChatMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, MessagesPlaceholder, } from "../chat.js"; import { PromptTemplate } from "../prompt.js"; import { SystemMessage, HumanMessage, AIMessage, ChatMessage, FunctionMessage, } from "../../messages/index.js"; import { Document } from "../../documents/document.js"; function createChatPromptTemplate() { const systemPrompt = new PromptTemplate({ template: "Here's some context: {context}", inputVariables: ["context"], }); const userPrompt = new PromptTemplate({ template: "Hello {foo}, I'm {bar}. Thanks for the {context}", inputVariables: ["foo", "bar", "context"], }); const aiPrompt = new PromptTemplate({ template: "I'm an AI. I'm {foo}. I'm {bar}.", inputVariables: ["foo", "bar"], }); const genericPrompt = new PromptTemplate({ template: "I'm a generic message. I'm {foo}. 
I'm {bar}.", inputVariables: ["foo", "bar"], }); // return new ChatPromptTemplate({ // promptMessages: [ // new SystemMessagePromptTemplate(systemPrompt), // new HumanMessagePromptTemplate(userPrompt), // new AIMessagePromptTemplate({ prompt: aiPrompt }), // new ChatMessagePromptTemplate(genericPrompt, "test"), // ], // inputVariables: ["context", "foo", "bar"], // }); return ChatPromptTemplate.fromMessages<{ foo: string; bar: string; context: string; }>([ new SystemMessagePromptTemplate(systemPrompt), new HumanMessagePromptTemplate(userPrompt), new AIMessagePromptTemplate({ prompt: aiPrompt }), new ChatMessagePromptTemplate(genericPrompt, "test"), ]); } test("Test format", async () => { const chatPrompt = createChatPromptTemplate(); const messages = await chatPrompt.formatPromptValue({ context: "This is a context", foo: "Foo", bar: "Bar", unused: "extra", }); expect(messages.toChatMessages()).toEqual([ new SystemMessage("Here's some context: This is a context"), new HumanMessage("Hello Foo, I'm Bar. Thanks for the This is a context"), new AIMessage("I'm an AI. I'm Foo. I'm Bar."), new ChatMessage("I'm a generic message. I'm Foo. 
I'm Bar.", "test"), ]); }); test("Test format with invalid input values", async () => { const chatPrompt = createChatPromptTemplate(); let error: any | undefined; try { // @ts-expect-error TS compiler should flag missing input variables await chatPrompt.formatPromptValue({ context: "This is a context", foo: "Foo", }); } catch (e) { error = e; } expect(error?.message).toContain("Missing value for input variable `bar`"); expect(error?.lc_error_code).toEqual("INVALID_PROMPT_INPUT"); }); test("Test format with invalid input variables", async () => { const systemPrompt = new PromptTemplate({ template: "Here's some context: {context}", inputVariables: ["context"], }); const userPrompt = new PromptTemplate({ template: "Hello {foo}, I'm {bar}", inputVariables: ["foo", "bar"], }); expect( () => new ChatPromptTemplate({ promptMessages: [ new SystemMessagePromptTemplate(systemPrompt), new HumanMessagePromptTemplate(userPrompt), ], inputVariables: ["context", "foo", "bar", "baz"], }) ).toThrow( "Input variables `baz` are not used in any of the prompt messages." ); expect( () => new ChatPromptTemplate({ promptMessages: [ new SystemMessagePromptTemplate(systemPrompt), new HumanMessagePromptTemplate(userPrompt), ], inputVariables: ["context", "foo"], }) ).toThrow( "Input variables `bar` are used in prompt messages but not in the prompt template." 
); }); test("Test fromTemplate", async () => { const chatPrompt = ChatPromptTemplate.fromTemplate("Hello {foo}, I'm {bar}"); expect(chatPrompt.inputVariables).toEqual(["foo", "bar"]); const messages = await chatPrompt.formatPromptValue({ foo: "Foo", bar: "Bar", }); expect(messages.toChatMessages()).toEqual([ new HumanMessage("Hello Foo, I'm Bar"), ]); }); test("Test fromTemplate", async () => { const chatPrompt = ChatPromptTemplate.fromTemplate("Hello {foo}, I'm {bar}"); expect(chatPrompt.inputVariables).toEqual(["foo", "bar"]); expect( ( await chatPrompt.invoke({ foo: ["barbar"], bar: [new Document({ pageContent: "bar" })], }) ).toChatMessages() ).toEqual([ new HumanMessage( `Hello ["barbar"], I'm [{"pageContent":"bar","metadata":{}}]` ), ]); }); test("Test fromMessages", async () => { const systemPrompt = new PromptTemplate({ template: "Here's some context: {context}", inputVariables: ["context"], }); const userPrompt = new PromptTemplate({ template: "Hello {foo}, I'm {bar}", inputVariables: ["foo", "bar"], }); // TODO: Fix autocomplete for the fromMessages method const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), new HumanMessagePromptTemplate(userPrompt), ]); expect(chatPrompt.inputVariables).toEqual(["context", "foo", "bar"]); const messages = await chatPrompt.formatPromptValue({ context: "This is a context", foo: "Foo", bar: "Bar", }); expect(messages.toChatMessages()).toEqual([ new SystemMessage("Here's some context: This is a context"), new HumanMessage("Hello Foo, I'm Bar"), ]); }); test("Test fromMessages with non-string inputs", async () => { const systemPrompt = new PromptTemplate({ template: "Here's some context: {context}", inputVariables: ["context"], }); const userPrompt = new PromptTemplate({ template: "Hello {foo}, I'm {bar}", inputVariables: ["foo", "bar"], }); // TODO: Fix autocomplete for the fromMessages method const chatPrompt = ChatPromptTemplate.fromMessages([ new 
SystemMessagePromptTemplate(systemPrompt), new HumanMessagePromptTemplate(userPrompt), ]); expect(chatPrompt.inputVariables).toEqual(["context", "foo", "bar"]); const messages = await chatPrompt.formatPromptValue({ context: [new Document({ pageContent: "bar" })], foo: "Foo", bar: "Bar", }); expect(messages.toChatMessages()).toEqual([ new SystemMessage( `Here's some context: [{"pageContent":"bar","metadata":{}}]` ), new HumanMessage("Hello Foo, I'm Bar"), ]); }); test("Test fromMessages with a variety of ways to declare prompt messages", async () => { const systemPrompt = new PromptTemplate({ template: "Here's some context: {context}", inputVariables: ["context"], }); // TODO: Fix autocomplete for the fromMessages method const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), "Hello {foo}, I'm {bar}", ["assistant", "Nice to meet you, {bar}!"], ["human", "Thanks {foo}!!"], ]); const messages = await chatPrompt.formatPromptValue({ context: "This is a context", foo: "Foo", bar: "Bar", }); expect(messages.toChatMessages()).toEqual([ new SystemMessage("Here's some context: This is a context"), new HumanMessage("Hello Foo, I'm Bar"), new AIMessage("Nice to meet you, Bar!"), new HumanMessage("Thanks Foo!!"), ]); }); test("Test fromMessages with an extra input variable", async () => { const systemPrompt = new PromptTemplate({ template: "Here's some context: {context}", inputVariables: ["context"], }); const userPrompt = new PromptTemplate({ template: "Hello {foo}, I'm {bar}", inputVariables: ["foo", "bar"], }); // TODO: Fix autocomplete for the fromMessages method const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), new HumanMessagePromptTemplate(userPrompt), ]); expect(chatPrompt.inputVariables).toEqual(["context", "foo", "bar"]); const messages = await chatPrompt.formatPromptValue({ context: "This is a context", foo: "Foo", bar: "Bar", unused: "No problemo!", }); 
expect(messages.toChatMessages()).toEqual([ new SystemMessage("Here's some context: This is a context"), new HumanMessage("Hello Foo, I'm Bar"), ]); }); test("Test fromMessages is composable", async () => { const systemPrompt = new PromptTemplate({ template: "Here's some context: {context}", inputVariables: ["context"], }); const userPrompt = new PromptTemplate({ template: "Hello {foo}, I'm {bar}", inputVariables: ["foo", "bar"], }); const chatPromptInner = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), new HumanMessagePromptTemplate(userPrompt), ]); const chatPrompt = ChatPromptTemplate.fromMessages([ chatPromptInner, AIMessagePromptTemplate.fromTemplate("I'm an AI. I'm {foo}. I'm {bar}."), ]); expect(chatPrompt.inputVariables).toEqual(["context", "foo", "bar"]); const messages = await chatPrompt.formatPromptValue({ context: "This is a context", foo: "Foo", bar: "Bar", }); expect(messages.toChatMessages()).toEqual([ new SystemMessage("Here's some context: This is a context"), new HumanMessage("Hello Foo, I'm Bar"), new AIMessage("I'm an AI. I'm Foo. I'm Bar."), ]); }); test("Test fromMessages is composable with partial vars", async () => { const systemPrompt = new PromptTemplate({ template: "Here's some context: {context}", inputVariables: ["context"], }); const userPrompt = new PromptTemplate({ template: "Hello {foo}, I'm {bar}", inputVariables: ["foo", "bar"], }); const chatPromptInner = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), new HumanMessagePromptTemplate(userPrompt), ]); const chatPrompt = ChatPromptTemplate.fromMessages([ await chatPromptInner.partial({ context: "This is a context", foo: "Foo", }), AIMessagePromptTemplate.fromTemplate("I'm an AI. I'm {foo}. 
I'm {bar}."), ]); expect(chatPrompt.inputVariables).toEqual(["bar"]); const messages = await chatPrompt.formatPromptValue({ bar: "Bar", }); expect(messages.toChatMessages()).toEqual([ new SystemMessage("Here's some context: This is a context"), new HumanMessage("Hello Foo, I'm Bar"), new AIMessage("I'm an AI. I'm Foo. I'm Bar."), ]); }); test("Test SimpleMessagePromptTemplate", async () => { const prompt = new MessagesPlaceholder("foo"); const values = { foo: [new HumanMessage("Hello Foo, I'm Bar")] }; const messages = await prompt.formatMessages(values); expect(messages).toEqual([new HumanMessage("Hello Foo, I'm Bar")]); }); test("Test MessagesPlaceholder optional", async () => { const prompt = new MessagesPlaceholder({ variableName: "foo", optional: true, }); // eslint-disable-next-line @typescript-eslint/no-explicit-any const messages = await prompt.formatMessages({} as any); expect(messages).toEqual([]); }); test("Test MessagesPlaceholder optional in a chat prompt template", async () => { const prompt = ChatPromptTemplate.fromMessages([ new MessagesPlaceholder({ variableName: "foo", optional: true, }), ]); const messages = await prompt.formatMessages({}); expect(messages).toEqual([]); }); test("Test MessagesPlaceholder not optional", async () => { const prompt = new MessagesPlaceholder({ variableName: "foo", }); // eslint-disable-next-line @typescript-eslint/no-explicit-any await expect(prompt.formatMessages({} as any)).rejects.toThrow( 'Field "foo" in prompt uses a MessagesPlaceholder, which expects an array of BaseMessages as an input value. 
Received: undefined' ); }); test("Test MessagesPlaceholder not optional with invalid input should throw", async () => { const prompt = new MessagesPlaceholder({ variableName: "foo", }); const badInput = [new Document({ pageContent: "barbar", metadata: {} })]; await expect( prompt.formatMessages({ foo: [new Document({ pageContent: "barbar", metadata: {} })], }) ).rejects.toThrow( `Field "foo" in prompt uses a MessagesPlaceholder, which expects an array of BaseMessages or coerceable values as input.\n\nReceived value: ${JSON.stringify( badInput, null, 2 )}\n\nAdditional message: Unable to coerce message from array: only human, AI, system, or tool message coercion is currently supported.` ); }); test("Test MessagesPlaceholder shorthand in a chat prompt template should throw for invalid syntax", async () => { expect(() => ChatPromptTemplate.fromMessages([["placeholder", "foo"]]) ).toThrow(); }); test("Test MessagesPlaceholder shorthand in a chat prompt template", async () => { const prompt = ChatPromptTemplate.fromMessages([["placeholder", "{foo}"]]); const messages = await prompt.formatMessages({ foo: [new HumanMessage("Hi there!"), new AIMessage("how r u")], }); expect(messages).toEqual([ new HumanMessage("Hi there!"), new AIMessage("how r u"), ]); }); test("Test MessagesPlaceholder shorthand in a chat prompt template with object format", async () => { const prompt = ChatPromptTemplate.fromMessages([["placeholder", "{foo}"]]); const messages = await prompt.formatMessages({ foo: [ { type: "system", content: "some initial content", }, { type: "human", content: [ { text: "page: 1\ndescription: One Purchase Flow\ntimestamp: '2024-06-04T14:46:46.062Z'\ntype: navigate\nscreenshot_present: true\n", type: "text", }, { text: "page: 3\ndescription: intent_str=buy,mode_str=redirect,screenName_str=order-completed,\ntimestamp: '2024-06-04T14:46:58.846Z'\ntype: Screen View\nscreenshot_present: false\n", type: "text", }, ], }, { type: "assistant", content: "some captivating 
response", }, ], }); expect(messages).toEqual([ new SystemMessage("some initial content"), new HumanMessage({ content: [ { text: "page: 1\ndescription: One Purchase Flow\ntimestamp: '2024-06-04T14:46:46.062Z'\ntype: navigate\nscreenshot_present: true\n", type: "text", }, { text: "page: 3\ndescription: intent_str=buy,mode_str=redirect,screenName_str=order-completed,\ntimestamp: '2024-06-04T14:46:58.846Z'\ntype: Screen View\nscreenshot_present: false\n", type: "text", }, ], }), new AIMessage("some captivating response"), ]); }); test("Test MessagesPlaceholder with invalid shorthand should throw", async () => { const prompt = ChatPromptTemplate.fromMessages([["placeholder", "{foo}"]]); await expect(() => prompt.formatMessages({ foo: [{ badFormatting: true }], }) ).rejects.toThrow(); }); test("Test using partial", async () => { const userPrompt = new PromptTemplate({ template: "{foo}{bar}", inputVariables: ["foo", "bar"], }); const prompt = new ChatPromptTemplate({ promptMessages: [new HumanMessagePromptTemplate(userPrompt)], inputVariables: ["foo", "bar"], }); const partialPrompt = await prompt.partial({ foo: "foo" }); // original prompt is not modified expect(prompt.inputVariables).toEqual(["foo", "bar"]); // partial prompt has only remaining variables expect(partialPrompt.inputVariables).toEqual(["bar"]); expect(await partialPrompt.format({ bar: "baz" })).toMatchInlineSnapshot( `"Human: foobaz"` ); }); test("Test BaseMessage", async () => { const prompt = ChatPromptTemplate.fromMessages([ new SystemMessage("You are a chatbot {mock_variable}"), AIMessagePromptTemplate.fromTemplate("{name} is my name."), new FunctionMessage({ content: "{}", name: "get_weather" }), ]); const messages = await prompt.formatPromptValue({ name: "Bob" }); expect(prompt.inputVariables).toEqual(["name"]); expect(prompt.partialVariables).toEqual({}); expect(messages.toChatMessages()).toEqual([ new SystemMessage("You are a chatbot {mock_variable}"), new AIMessage("Bob is my name."), new 
FunctionMessage({ content: "{}", name: "get_weather" }), ]); }); test("Throws if trying to pass non BaseMessage inputs to MessagesPlaceholder", async () => { const prompt = ChatPromptTemplate.fromMessages([ ["system", "some string"], new MessagesPlaceholder("chatHistory"), ["human", "{question}"], ]); const value = "this is not a valid input type!"; try { await prompt.formatMessages({ chatHistory: value, question: "What is the meaning of life?", }); } catch (e) { // eslint-disable-next-line no-instanceof/no-instanceof if (e instanceof Error) { expect(e.name).toBe("InputFormatError"); } else { throw e; } } }); test("Does not throws if null or undefined is passed as input to MessagesPlaceholder", async () => { const prompt = ChatPromptTemplate.fromMessages([ ["system", "some string"], new MessagesPlaceholder("chatHistory"), new MessagesPlaceholder("chatHistory2"), ["human", "{question}"], ]); const value1 = null; const value2 = undefined; try { await prompt.formatMessages({ chatHistory: value1, chatHistory2: value2, question: "What is the meaning of life?", }); } catch (e) { // eslint-disable-next-line no-instanceof/no-instanceof if (e instanceof Error) { expect(e.name).toBe("InputFormatError"); } else { throw e; } } }); test("Multi part chat prompt template", async () => { const name = "Bob"; const objectName = "chair"; const template = ChatPromptTemplate.fromMessages([ ["system", "You are an AI assistant named {name}"], [ "human", [ { type: "text", text: "What is in this object {objectName}", }, ], ], ]); const messages = await template.formatMessages({ name, objectName, }); expect(messages).toEqual([ new SystemMessage("You are an AI assistant named Bob"), new HumanMessage({ content: [ { type: "text", text: "What is in this object chair", }, ], }), ]); }); test("Multi part chat prompt template with image", async () => { const name = "Bob"; const objectName = "chair"; const myImage = "iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA"; const myUrl = 
"https://www.example.com/image.png"; const template = ChatPromptTemplate.fromMessages([ ["system", "You are an AI assistant named {name}"], [ "human", [ { type: "image_url", image_url: "data:image/jpeg;base64,{myImage}", }, { type: "text", text: "What is in this object {objectName}", }, { type: "image_url", image_url: { url: "{myUrl}", detail: "high", }, }, ], ], ]); const messages = await template.formatMessages({ name, objectName, myImage, myUrl, }); expect(messages).toEqual([ new SystemMessage("You are an AI assistant named Bob"), new HumanMessage({ content: [ { type: "image_url", image_url: { url: `data:image/jpeg;base64,${myImage}`, }, }, { type: "text", text: `What is in this object ${objectName}`, }, { type: "image_url", image_url: { url: `${myUrl}`, detail: "high", }, }, ], }), ]); }); test("Multi-modal, multi part chat prompt works with instances of BaseMessage", async () => { const name = "Bob"; const objectName = "chair"; const myImage = "iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA"; const myUrl = "https://www.example.com/image.png"; const inlineImageUrl = new HumanMessage({ content: [ { type: "image_url", image_url: "data:image/jpeg;base64,{myImage}", }, ], }); const objectImageUrl = new HumanMessage({ content: [ { type: "image_url", image_url: { url: "data:image/jpeg;base64,{myImage}", detail: "high", }, }, ], }); const normalMessage = new HumanMessage({ content: [ { type: "text", text: "What is in this object {objectName}", }, ], }); const template = ChatPromptTemplate.fromMessages([ ["system", "You are an AI assistant named {name}"], inlineImageUrl, normalMessage, objectImageUrl, [ "human", [ { type: "text", text: "What is in this object {objectName}", }, { type: "image_url", image_url: { url: "{myUrl}", detail: "high", }, }, ], ], ]); const messages = await template.formatMessages({ name, objectName, myImage, myUrl, }); expect(messages).toMatchSnapshot(); }); test("Format complex messages and keep additional fields", async () => { const 
examplePrompt = ChatPromptTemplate.fromMessages([ [ "human", [ { type: "text", text: "{input}", cache_control: { type: "ephemeral" }, }, ], ], [ "ai", [ { type: "text", text: "{output}", cache_control: { type: "ephemeral" }, }, ], ], ]); const formatted = await examplePrompt.formatMessages({ input: "hello", output: "ciao", }); expect(formatted).toHaveLength(2); expect(formatted[0]._getType()).toBe("human"); expect(formatted[0].content[0]).toHaveProperty("cache_control"); // eslint-disable-next-line @typescript-eslint/no-explicit-any expect((formatted[0].content[0] as any).cache_control).toEqual({ type: "ephemeral", }); expect(formatted[1]._getType()).toBe("ai"); expect(formatted[1].content[0]).toHaveProperty("cache_control"); // eslint-disable-next-line @typescript-eslint/no-explicit-any expect((formatted[1].content[0] as any).cache_control).toEqual({ type: "ephemeral", }); }); test("Format image content messages and keep additional fields", async () => { const examplePrompt = ChatPromptTemplate.fromMessages([ [ "human", [ { type: "image_url", image_url: "{image_url}", cache_control: { type: "ephemeral" }, }, ], ], ]); const formatted = await examplePrompt.formatMessages({ image_url: "image_url", }); expect(formatted).toHaveLength(1); expect(formatted[0]._getType()).toBe("human"); expect(formatted[0].content[0]).toHaveProperty("cache_control"); // eslint-disable-next-line @typescript-eslint/no-explicit-any expect((formatted[0].content[0] as any).cache_control).toEqual({ type: "ephemeral", }); });
0
lc_public_repos/langchainjs/langchain-core/src/prompts
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/chat.mustache.test.ts
// Tests for ChatPromptTemplate / HumanMessagePromptTemplate with the
// "mustache" template format: {{var}} placeholders are substituted, while
// f-string-style {var} placeholders must pass through untouched.
import { test, expect } from "@jest/globals";
import { AIMessage } from "../../messages/ai.js";
import { HumanMessage } from "../../messages/human.js";
import { SystemMessage } from "../../messages/system.js";
import { ChatPromptTemplate, HumanMessagePromptTemplate } from "../chat.js";

test("Test creating a chat prompt template from role string messages", async () => {
  // Role/content tuples; only the system message and the final human
  // message contain mustache variables.
  const template = ChatPromptTemplate.fromMessages(
    [
      ["system", "You are a helpful AI bot. Your name is {{name}}."],
      ["human", "Hello, how are you doing?"],
      ["ai", "I'm doing well, thanks!"],
      ["human", "{{userInput}}"],
    ],
    {
      templateFormat: "mustache",
    }
  );
  const messages = await template.formatMessages({
    name: "Bob",
    userInput: "What is your name?",
  });
  // Each role string maps to the corresponding message class; variable-free
  // messages come through verbatim.
  expect(messages).toEqual([
    new SystemMessage({
      content: "You are a helpful AI bot. Your name is Bob.",
    }),
    new HumanMessage({
      content: "Hello, how are you doing?",
    }),
    new AIMessage({
      content: "I'm doing well, thanks!",
    }),
    new HumanMessage({
      content: "What is your name?",
    }),
  ]);
});

test("Multiple input variables with repeats.", async () => {
  // A repeated variable ({{foo}} twice) must appear only once in
  // inputVariables but be substituted at every occurrence.
  const template = "This {{bar}} is a {{foo}} test {{foo}}.";
  const prompt = ChatPromptTemplate.fromTemplate(template, {
    templateFormat: "mustache",
  });
  expect(prompt.inputVariables).toEqual(["bar", "foo"]);
  const formattedPrompt = await prompt.formatPromptValue({
    bar: "baz",
    foo: "bar",
  });
  expect(formattedPrompt.toChatMessages()).toEqual([
    new HumanMessage("This baz is a bar test bar."),
  ]);
});

test("Ignores f-string inputs input variables with repeats.", async () => {
  // Single-brace {var} syntax is not mustache: no variables are detected
  // and the template text is emitted unchanged.
  const template = "This {bar} is a {foo} test {foo}.";
  const prompt = ChatPromptTemplate.fromTemplate(template, {
    templateFormat: "mustache",
  });
  expect(prompt.inputVariables).toEqual([]);
  const formattedPrompt = await prompt.formatPromptValue({
    bar: "baz",
    foo: "bar",
  });
  expect(formattedPrompt.toChatMessages()).toEqual([
    new HumanMessage("This {bar} is a {foo} test {foo}."),
  ]);
});

test("Mustache template with image and chat prompts inside one template (fromMessages)", async () => {
  // Mixed-content human message: mustache substitution must work inside
  // both image_url and text content parts.
  const template = ChatPromptTemplate.fromMessages(
    [
      [
        "human",
        [
          {
            type: "image_url",
            image_url: "{{image_url}}",
          },
          {
            type: "text",
            text: "{{other_var}}",
          },
        ],
      ],
      ["human", "hello {{name}}"],
    ],
    {
      templateFormat: "mustache",
    }
  );
  const messages = await template.formatMessages({
    name: "Bob",
    image_url: "https://foo.com/bar.png",
    other_var: "bar",
  });
  // Note: a string-valued image_url is normalized to { url: ... } form.
  expect(messages).toEqual([
    new HumanMessage({
      content: [
        { type: "image_url", image_url: { url: "https://foo.com/bar.png" } },
        { type: "text", text: "bar" },
      ],
    }),
    new HumanMessage({
      content: "hello Bob",
    }),
  ]);
  // sort() because variable discovery order across content parts is not
  // what this test is pinning down.
  expect(template.inputVariables.sort()).toEqual([
    "image_url",
    "name",
    "other_var",
  ]);
});

test("Mustache image template with nested URL and chat prompts HumanMessagePromptTemplate.fromTemplate", async () => {
  // Same as above, but via HumanMessagePromptTemplate.fromTemplate with the
  // image URL nested under an object ({ image_url: { url } }).
  const template = HumanMessagePromptTemplate.fromTemplate(
    [
      {
        text: "{{name}}",
      },
      {
        image_url: {
          url: "{{image_url}}",
        },
      },
    ],
    {
      templateFormat: "mustache",
    }
  );
  const messages = await template.formatMessages({
    name: "Bob",
    image_url: "https://foo.com/bar.png",
  });
  expect(messages).toEqual([
    new HumanMessage({
      content: [
        { type: "text", text: "Bob" },
        { type: "image_url", image_url: { url: "https://foo.com/bar.png" } },
      ],
    }),
  ]);
  expect(template.inputVariables.sort()).toEqual(["image_url", "name"]);
});
0
lc_public_repos/langchainjs/langchain-core/src/prompts
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/few_shot.test.ts
// Tests for FewShotPromptTemplate (string output) and
// FewShotChatMessagePromptTemplate (message output), focused on partial
// variables: string partials, async-function partials, and interaction
// with inline examples and an example selector.
import { expect, describe, test } from "@jest/globals";
import {
  FewShotChatMessagePromptTemplate,
  FewShotPromptTemplate,
} from "../few_shot.js";
import { ChatPromptTemplate } from "../index.js";
import { PromptTemplate } from "../prompt.js";
import { LengthBasedExampleSelector } from "../../example_selectors/length_based.js";
import { AIMessage, HumanMessage } from "../../messages/index.js";

describe("FewShotPromptTemplate", () => {
  test("Test using partial", async () => {
    // `bar` is pre-filled at construction time, so only `foo` remains an
    // input variable. With no examples, output is prefix + separator.
    const examplePrompt = PromptTemplate.fromTemplate("{foo}{bar}");
    const prompt = new FewShotPromptTemplate({
      prefix: "{foo}{bar}",
      examples: [],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo"],
      partialVariables: { bar: "baz" },
    });
    expect(await prompt.format({ foo: "foo" })).toBe("foobaz\n");
  });

  test("Test using full partial", async () => {
    // Every variable is partialed, so format() takes an empty object.
    const examplePrompt = PromptTemplate.fromTemplate("{foo}{bar}");
    const prompt = new FewShotPromptTemplate({
      prefix: "{foo}{bar}",
      examples: [],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: [],
      partialVariables: { bar: "baz", foo: "boo" },
    });
    expect(await prompt.format({})).toBe("boobaz\n");
  });

  test("Test partial with string", async () => {
    // partial() returns a new template; the original must keep its full
    // inputVariables list (no mutation).
    const examplePrompt = PromptTemplate.fromTemplate("{foo}{bar}");
    const prompt = new FewShotPromptTemplate({
      prefix: "{foo}{bar}",
      examples: [],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo", "bar"],
    });
    const partialPrompt = await prompt.partial({ foo: "foo" });
    expect(await partialPrompt.format({ bar: "baz" })).toBe("foobaz\n");
    expect(prompt.inputVariables).toEqual(["foo", "bar"]);
  });

  test("Test partial with function", async () => {
    // A partial value may be an async function, resolved at format time.
    const examplePrompt = PromptTemplate.fromTemplate("{foo}{bar}");
    const prompt = new FewShotPromptTemplate({
      prefix: "{foo}{bar}",
      examples: [],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo", "bar"],
    });
    const partialPrompt = await prompt.partial({
      foo: () => Promise.resolve("boo"),
    });
    expect(await partialPrompt.format({ bar: "baz" })).toBe("boobaz\n");
  });

  test("Test partial with function and examples", async () => {
    // Each example is rendered through examplePrompt and joined with the
    // exampleSeparator between prefix and suffix.
    const examplePrompt = PromptTemplate.fromTemplate("An example about {x}");
    const prompt = new FewShotPromptTemplate({
      prefix: "{foo}{bar}",
      examples: [{ x: "foo" }, { x: "bar" }],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo", "bar"],
    });
    const partialPrompt = await prompt.partial({
      foo: () => Promise.resolve("boo"),
    });
    // NOTE(review): line breaks inside this expected literal were
    // reconstructed from the "\n" exampleSeparator — confirm against the
    // original file before relying on exact whitespace.
    expect(await partialPrompt.format({ bar: "baz" })).toBe(
      `boobaz
An example about foo
An example about bar
`
    );
  });

  test("Test partial with function and example selector", async () => {
    // Same expected output as the inline-examples case, but the examples
    // are chosen by a LengthBasedExampleSelector (maxLength keeps both).
    const examplePrompt = PromptTemplate.fromTemplate("An example about {x}");
    const exampleSelector = await LengthBasedExampleSelector.fromExamples(
      [{ x: "foo" }, { x: "bar" }],
      { examplePrompt, maxLength: 200 }
    );
    const prompt = new FewShotPromptTemplate({
      prefix: "{foo}{bar}",
      exampleSelector,
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo", "bar"],
    });
    const partialPrompt = await prompt.partial({
      foo: () => Promise.resolve("boo"),
    });
    // NOTE(review): reconstructed multi-line literal — see note above.
    expect(await partialPrompt.format({ bar: "baz" })).toBe(
      `boobaz
An example about foo
An example about bar
`
    );
  });
});

describe("FewShotChatMessagePromptTemplate", () => {
  test("Format messages", async () => {
    // Each example is expanded through the chat examplePrompt, producing an
    // (ai, human) message pair per example, in example order.
    const examplePrompt = ChatPromptTemplate.fromMessages([
      ["ai", "{ai_input_var}"],
      ["human", "{human_input_var}"],
    ]);
    const examples = [
      {
        ai_input_var: "ai-foo",
        human_input_var: "human-bar",
      },
      {
        ai_input_var: "ai-foo2",
        human_input_var: "human-bar2",
      },
    ];
    const prompt = new FewShotChatMessagePromptTemplate({
      examplePrompt,
      inputVariables: ["ai_input_var", "human_input_var"],
      examples,
    });
    const messages = await prompt.formatMessages({});
    expect(messages).toEqual([
      new AIMessage("ai-foo"),
      new HumanMessage("human-bar"),
      new AIMessage("ai-foo2"),
      new HumanMessage("human-bar2"),
    ]);
  });

  // The remaining tests mirror the FewShotPromptTemplate partials suite
  // above, exercising the same behavior on the chat variant's string
  // format() output.
  test("Test using partial", async () => {
    const examplePrompt = ChatPromptTemplate.fromMessages([
      ["ai", "{foo}{bar}"],
    ]);
    const prompt = new FewShotChatMessagePromptTemplate({
      prefix: "{foo}{bar}",
      examples: [],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo"],
      partialVariables: { bar: "baz" },
    });
    expect(await prompt.format({ foo: "foo" })).toBe("foobaz\n");
  });

  test("Test using full partial", async () => {
    const examplePrompt = ChatPromptTemplate.fromMessages([
      ["ai", "{foo}{bar}"],
    ]);
    const prompt = new FewShotChatMessagePromptTemplate({
      prefix: "{foo}{bar}",
      examples: [],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: [],
      partialVariables: { bar: "baz", foo: "boo" },
    });
    expect(await prompt.format({})).toBe("boobaz\n");
  });

  test("Test partial with string", async () => {
    const examplePrompt = ChatPromptTemplate.fromMessages([
      ["ai", "{foo}{bar}"],
    ]);
    const prompt = new FewShotChatMessagePromptTemplate({
      prefix: "{foo}{bar}",
      examples: [],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo", "bar"],
    });
    const partialPrompt = await prompt.partial({ foo: "foo" });
    expect(await partialPrompt.format({ bar: "baz" })).toBe("foobaz\n");
    expect(prompt.inputVariables).toEqual(["foo", "bar"]);
  });

  test("Test partial with function", async () => {
    const examplePrompt = ChatPromptTemplate.fromMessages([
      ["ai", "{foo}{bar}"],
    ]);
    const prompt = new FewShotChatMessagePromptTemplate({
      prefix: "{foo}{bar}",
      examples: [],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo", "bar"],
    });
    const partialPrompt = await prompt.partial({
      foo: () => Promise.resolve("boo"),
    });
    expect(await partialPrompt.format({ bar: "baz" })).toBe("boobaz\n");
  });

  test("Test partial with function and examples", async () => {
    const examplePrompt = ChatPromptTemplate.fromMessages([
      ["ai", "An example about {x}"],
    ]);
    const prompt = new FewShotChatMessagePromptTemplate({
      prefix: "{foo}{bar}",
      examples: [{ x: "foo" }, { x: "bar" }],
      suffix: "",
      templateFormat: "f-string",
      exampleSeparator: "\n",
      examplePrompt,
      inputVariables: ["foo", "bar"],
    });
    const partialPrompt = await prompt.partial({
      foo: () => Promise.resolve("boo"),
    });
    // NOTE(review): reconstructed multi-line literal — confirm exact
    // whitespace against the original file.
    expect(await partialPrompt.format({ bar: "baz" })).toBe(
      `boobaz
An example about foo
An example about bar
`
    );
  });
});
0
lc_public_repos/langchainjs/langchain-core/src/prompts
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/template.test.ts
// Table-driven tests for the f-string interpolator: valid templates must
// render exactly, malformed brace sequences must throw.
import { expect, test, describe } from "@jest/globals";
import { interpolateFString } from "../template.js";

// Each row is [template, values, expected output]. Doubled braces are the
// escape form: "{{" renders as a literal "{" and "}}" as "}".
describe.each([
  ["{foo}", { foo: "bar" }, "bar"],
  ["pre{foo}post", { foo: "bar" }, "prebarpost"],
  ["{{pre{foo}post}}", { foo: "bar" }, "{prebarpost}"],
  ["text", {}, "text"],
  ["}}{{", {}, "}{"],
  ["{first}_{second}", { first: "foo", second: "bar" }, "foo_bar"],
])("Valid f-string", (tpl, values, expected) => {
  test(`Interpolation works: ${tpl}`, () => {
    const rendered = interpolateFString(tpl, values);
    expect(rendered).toBe(expected);
  });
});

// Unbalanced single braces are malformed input and must raise instead of
// producing partial output.
describe.each([
  ["{", {}],
  ["}", {}],
  ["{foo", {}],
  ["foo}", {}],
])("Invalid f-string", (tpl, values) => {
  test(`Interpolation throws: ${tpl}`, () => {
    expect(() => interpolateFString(tpl, values)).toThrow();
  });
});
0
lc_public_repos/langchainjs/langchain-core/src/prompts/tests
lc_public_repos/langchainjs/langchain-core/src/prompts/tests/__snapshots__/chat.test.ts.snap
// Jest Snapshot v1, https://goo.gl/fbAQLP exports[`Multi-modal, multi part chat prompt works with instances of BaseMessage 1`] = ` [ { "id": [ "langchain_core", "messages", "SystemMessage", ], "kwargs": { "additional_kwargs": {}, "content": "You are an AI assistant named Bob", "response_metadata": {}, }, "lc": 1, "type": "constructor", }, { "id": [ "langchain_core", "messages", "HumanMessage", ], "kwargs": { "additional_kwargs": {}, "content": [ { "image_url": "data:image/jpeg;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA", "type": "image_url", }, ], "response_metadata": {}, }, "lc": 1, "type": "constructor", }, { "id": [ "langchain_core", "messages", "HumanMessage", ], "kwargs": { "additional_kwargs": {}, "content": [ { "text": "What is in this object {objectName}", "type": "text", }, ], "response_metadata": {}, }, "lc": 1, "type": "constructor", }, { "id": [ "langchain_core", "messages", "HumanMessage", ], "kwargs": { "additional_kwargs": {}, "content": [ { "image_url": { "detail": "high", "url": "data:image/jpeg;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA", }, "type": "image_url", }, ], "response_metadata": {}, }, "lc": 1, "type": "constructor", }, { "id": [ "langchain_core", "messages", "HumanMessage", ], "kwargs": { "additional_kwargs": {}, "content": [ { "text": "What is in this object chair", "type": "text", }, { "image_url": { "detail": "high", "url": "https://www.example.com/image.png", }, "type": "image_url", }, ], "response_metadata": {}, }, "lc": 1, "type": "constructor", }, ] `;
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tools/index.ts
import { z } from "zod"; import { CallbackManager, CallbackManagerForToolRun, Callbacks, parseCallbackConfigArg, } from "../callbacks/manager.js"; import { BaseLangChain, type BaseLangChainParams, } from "../language_models/base.js"; import { ensureConfig, patchConfig, pickRunnableConfigKeys, type RunnableConfig, } from "../runnables/config.js"; import type { RunnableFunc, RunnableInterface } from "../runnables/base.js"; import { ToolCall, ToolMessage } from "../messages/tool.js"; import { MessageContent } from "../messages/base.js"; import { AsyncLocalStorageProviderSingleton } from "../singletons/index.js"; import { _isToolCall, ToolInputParsingException } from "./utils.js"; import { isZodSchema } from "../utils/types/is_zod_schema.js"; export { ToolInputParsingException }; export type ResponseFormat = "content" | "content_and_artifact" | string; // eslint-disable-next-line @typescript-eslint/no-explicit-any type ToolReturnType = any; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type ContentAndArtifact = [MessageContent, any]; // eslint-disable-next-line @typescript-eslint/no-explicit-any type ZodObjectAny = z.ZodObject<any, any, any, any>; /** * Parameters for the Tool classes. */ export interface ToolParams extends BaseLangChainParams { /** * The tool response format. * * If "content" then the output of the tool is interpreted as the contents of a * ToolMessage. If "content_and_artifact" then the output is expected to be a * two-tuple corresponding to the (content, artifact) of a ToolMessage. * * @default "content" */ responseFormat?: ResponseFormat; /** * Whether to show full details in the thrown parsing errors. * * @default false */ verboseParsingErrors?: boolean; } /** * Schema for defining tools. * * @version 0.2.19 */ export interface StructuredToolParams extends Pick<StructuredToolInterface, "name" | "schema"> { /** * An optional description of the tool to pass to the model. 
*/ description?: string; } export interface StructuredToolInterface<T extends ZodObjectAny = ZodObjectAny> extends RunnableInterface< (z.output<T> extends string ? string : never) | z.input<T> | ToolCall, ToolReturnType > { lc_namespace: string[]; /** * A Zod schema representing the parameters of the tool. */ schema: T | z.ZodEffects<T>; /** * @deprecated Use .invoke() instead. Will be removed in 0.3.0. * * Calls the tool with the provided argument, configuration, and tags. It * parses the input according to the schema, handles any errors, and * manages callbacks. * @param arg The input argument for the tool. * @param configArg Optional configuration or callbacks for the tool. * @param tags Optional tags for the tool. * @returns A Promise that resolves with a string. */ call( arg: (z.output<T> extends string ? string : never) | z.input<T> | ToolCall, configArg?: Callbacks | RunnableConfig, /** @deprecated */ tags?: string[] ): Promise<ToolReturnType>; /** * The name of the tool. */ name: string; /** * A description of the tool. */ description: string; returnDirect: boolean; } /** * Base class for Tools that accept input of any shape defined by a Zod schema. */ export abstract class StructuredTool< T extends ZodObjectAny = ZodObjectAny > extends BaseLangChain< (z.output<T> extends string ? string : never) | z.input<T> | ToolCall, ToolReturnType > { abstract name: string; abstract description: string; abstract schema: T | z.ZodEffects<T>; returnDirect = false; // TODO: Make default in 0.3 verboseParsingErrors = false; get lc_namespace() { return ["langchain", "tools"]; } /** * The tool response format. * * If "content" then the output of the tool is interpreted as the contents of a * ToolMessage. If "content_and_artifact" then the output is expected to be a * two-tuple corresponding to the (content, artifact) of a ToolMessage. * * @default "content" */ responseFormat?: ResponseFormat = "content"; constructor(fields?: ToolParams) { super(fields ?? 
{}); this.verboseParsingErrors = fields?.verboseParsingErrors ?? this.verboseParsingErrors; this.responseFormat = fields?.responseFormat ?? this.responseFormat; } protected abstract _call( arg: z.output<T>, runManager?: CallbackManagerForToolRun, parentConfig?: RunnableConfig ): Promise<ToolReturnType>; /** * Invokes the tool with the provided input and configuration. * @param input The input for the tool. * @param config Optional configuration for the tool. * @returns A Promise that resolves with a string. */ async invoke( input: | (z.output<T> extends string ? string : never) | z.input<T> | ToolCall, config?: RunnableConfig ): Promise<ToolReturnType> { let tool_call_id: string | undefined; let toolInput: | (z.output<T> extends string ? string : never) | z.input<T> | ToolCall | undefined; if (_isToolCall(input)) { tool_call_id = input.id; toolInput = input.args; } else { toolInput = input; } const ensuredConfig = ensureConfig(config); return this.call(toolInput, { ...ensuredConfig, configurable: { ...ensuredConfig.configurable, tool_call_id, }, }); } /** * @deprecated Use .invoke() instead. Will be removed in 0.3.0. * * Calls the tool with the provided argument, configuration, and tags. It * parses the input according to the schema, handles any errors, and * manages callbacks. * @param arg The input argument for the tool. * @param configArg Optional configuration or callbacks for the tool. * @param tags Optional tags for the tool. * @returns A Promise that resolves with a string. */ async call( arg: (z.output<T> extends string ? 
string : never) | z.input<T> | ToolCall, configArg?: Callbacks | RunnableConfig, /** @deprecated */ tags?: string[] ): Promise<ToolReturnType> { let parsed; try { parsed = await this.schema.parseAsync(arg); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { let message = `Received tool input did not match expected schema`; if (this.verboseParsingErrors) { message = `${message}\nDetails: ${e.message}`; } throw new ToolInputParsingException(message, JSON.stringify(arg)); } const config = parseCallbackConfigArg(configArg); const callbackManager_ = await CallbackManager.configure( config.callbacks, this.callbacks, config.tags || tags, this.tags, config.metadata, this.metadata, { verbose: this.verbose } ); const runManager = await callbackManager_?.handleToolStart( this.toJSON(), typeof parsed === "string" ? parsed : JSON.stringify(parsed), config.runId, undefined, undefined, undefined, config.runName ); delete config.runId; let result; try { result = await this._call(parsed, runManager, config); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { await runManager?.handleToolError(e); throw e; } let content; let artifact; if (this.responseFormat === "content_and_artifact") { if (Array.isArray(result) && result.length === 2) { [content, artifact] = result; } else { throw new Error( `Tool response format is "content_and_artifact" but the output was not a two-tuple.\nResult: ${JSON.stringify( result )}` ); } } else { content = result; } let toolCallId: string | undefined; if (config && "configurable" in config) { toolCallId = (config.configurable as Record<string, string | undefined>) .tool_call_id; } const formattedOutput = _formatToolOutput({ content, artifact, toolCallId, name: this.name, }); await runManager?.handleToolEnd(formattedOutput); return formattedOutput; } } export interface ToolInterface<T extends ZodObjectAny = ZodObjectAny> extends StructuredToolInterface<T> { /** * @deprecated Use .invoke() 
instead. Will be removed in 0.3.0. * * Calls the tool with the provided argument and callbacks. It handles * string inputs specifically. * @param arg The input argument for the tool, which can be a string, undefined, or an input of the tool's schema. * @param callbacks Optional callbacks for the tool. * @returns A Promise that resolves with a string. */ call( arg: string | undefined | z.input<this["schema"]> | ToolCall, callbacks?: Callbacks | RunnableConfig ): Promise<ToolReturnType>; } /** * Base class for Tools that accept input as a string. */ export abstract class Tool extends StructuredTool<ZodObjectAny> { schema = z .object({ input: z.string().optional() }) .transform((obj) => obj.input); constructor(fields?: ToolParams) { super(fields); } /** * @deprecated Use .invoke() instead. Will be removed in 0.3.0. * * Calls the tool with the provided argument and callbacks. It handles * string inputs specifically. * @param arg The input argument for the tool, which can be a string, undefined, or an input of the tool's schema. * @param callbacks Optional callbacks for the tool. * @returns A Promise that resolves with a string. */ call( arg: string | undefined | z.input<this["schema"]> | ToolCall, callbacks?: Callbacks | RunnableConfig ): Promise<ToolReturnType> { return super.call( typeof arg === "string" || !arg ? { input: arg } : arg, callbacks ); } } export interface BaseDynamicToolInput extends ToolParams { name: string; description: string; returnDirect?: boolean; } /** * Interface for the input parameters of the DynamicTool class. */ export interface DynamicToolInput extends BaseDynamicToolInput { func: ( input: string, runManager?: CallbackManagerForToolRun, config?: RunnableConfig ) => Promise<ToolReturnType>; } /** * Interface for the input parameters of the DynamicStructuredTool class. 
*/ export interface DynamicStructuredToolInput< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends ZodObjectAny | Record<string, any> = ZodObjectAny > extends BaseDynamicToolInput { func: ( input: BaseDynamicToolInput["responseFormat"] extends "content_and_artifact" ? ToolCall : T extends ZodObjectAny ? z.infer<T> : T, runManager?: CallbackManagerForToolRun, config?: RunnableConfig ) => Promise<ToolReturnType>; schema: T extends ZodObjectAny ? T : T; } /** * A tool that can be created dynamically from a function, name, and description. */ export class DynamicTool extends Tool { static lc_name() { return "DynamicTool"; } name: string; description: string; func: DynamicToolInput["func"]; constructor(fields: DynamicToolInput) { super(fields); this.name = fields.name; this.description = fields.description; this.func = fields.func; this.returnDirect = fields.returnDirect ?? this.returnDirect; } /** * @deprecated Use .invoke() instead. Will be removed in 0.3.0. */ async call( arg: string | undefined | z.input<this["schema"]> | ToolCall, configArg?: RunnableConfig | Callbacks ): Promise<ToolReturnType> { const config = parseCallbackConfigArg(configArg); if (config.runName === undefined) { config.runName = this.name; } return super.call(arg, config); } /** @ignore */ async _call( input: string, runManager?: CallbackManagerForToolRun, parentConfig?: RunnableConfig ): Promise<ToolReturnType> { return this.func(input, runManager, parentConfig); } } /** * A tool that can be created dynamically from a function, name, and * description, designed to work with structured data. It extends the * StructuredTool class and overrides the _call method to execute the * provided function when the tool is called. * * Schema can be passed as Zod or JSON schema. The tool will not validate * input if JSON schema is passed. 
*/ export class DynamicStructuredTool< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends ZodObjectAny | Record<string, any> = ZodObjectAny > extends StructuredTool<T extends ZodObjectAny ? T : ZodObjectAny> { static lc_name() { return "DynamicStructuredTool"; } name: string; description: string; func: DynamicStructuredToolInput<T>["func"]; schema: T extends ZodObjectAny ? T : ZodObjectAny; constructor(fields: DynamicStructuredToolInput<T>) { super(fields); this.name = fields.name; this.description = fields.description; this.func = fields.func; this.returnDirect = fields.returnDirect ?? this.returnDirect; this.schema = ( isZodSchema(fields.schema) ? fields.schema : z.object({}).passthrough() ) as T extends ZodObjectAny ? T : ZodObjectAny; } /** * @deprecated Use .invoke() instead. Will be removed in 0.3.0. */ async call( arg: (T extends ZodObjectAny ? z.output<T> : T) | ToolCall, configArg?: RunnableConfig | Callbacks, /** @deprecated */ tags?: string[] ): Promise<ToolReturnType> { const config = parseCallbackConfigArg(configArg); if (config.runName === undefined) { config.runName = this.name; } return super.call(arg, config, tags); } protected _call( arg: (T extends ZodObjectAny ? z.output<T> : T) | ToolCall, runManager?: CallbackManagerForToolRun, parentConfig?: RunnableConfig ): Promise<ToolReturnType> { // eslint-disable-next-line @typescript-eslint/no-explicit-any return this.func(arg as any, runManager, parentConfig); } } /** * Abstract base class for toolkits in LangChain. Toolkits are collections * of tools that agents can use. Subclasses must implement the `tools` * property to provide the specific tools for the toolkit. */ export abstract class BaseToolkit { abstract tools: StructuredToolInterface[]; getTools(): StructuredToolInterface[] { return this.tools; } } /** * Parameters for the tool function. * Schema can be provided as Zod or JSON schema. * If you pass JSON schema, tool inputs will not be validated. 
* @template {ZodObjectAny | z.ZodString | Record<string, any> = ZodObjectAny} RunInput The input schema for the tool. Either any Zod object, a Zod string, or JSON schema. */ interface ToolWrapperParams< RunInput extends | ZodObjectAny | z.ZodString // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any> = ZodObjectAny > extends ToolParams { /** * The name of the tool. If using with an LLM, this * will be passed as the tool name. */ name: string; /** * The description of the tool. * @default `${fields.name} tool` */ description?: string; /** * The input schema for the tool. If using an LLM, this * will be passed as the tool schema to generate arguments * for. */ schema?: RunInput; /** * The tool response format. * * If "content" then the output of the tool is interpreted as the contents of a * ToolMessage. If "content_and_artifact" then the output is expected to be a * two-tuple corresponding to the (content, artifact) of a ToolMessage. * * @default "content" */ responseFormat?: ResponseFormat; } /** * Creates a new StructuredTool instance with the provided function, name, description, and schema. * * Schema can be provided as Zod or JSON schema. * If you pass JSON schema, tool inputs will not be validated. * * @function * @template {ZodObjectAny | z.ZodString | Record<string, any> = ZodObjectAny} T The input schema for the tool. Either any Zod object, a Zod string, or JSON schema instance. * * @param {RunnableFunc<z.output<T>, ToolReturnType>} func - The function to invoke when the tool is called. * @param {ToolWrapperParams<T>} fields - An object containing the following properties: * @param {string} fields.name The name of the tool. * @param {string | undefined} fields.description The description of the tool. Defaults to either the description on the Zod schema, or `${fields.name} tool`. * @param {ZodObjectAny | z.ZodString | undefined} fields.schema The Zod schema defining the input for the tool. 
If undefined, it will default to a Zod string schema. * * @returns {DynamicStructuredTool<T>} A new StructuredTool instance. */ export function tool<T extends z.ZodString>( func: RunnableFunc<z.output<T>, ToolReturnType>, fields: ToolWrapperParams<T> ): DynamicTool; export function tool<T extends ZodObjectAny>( func: RunnableFunc<z.output<T>, ToolReturnType>, fields: ToolWrapperParams<T> ): DynamicStructuredTool<T>; // eslint-disable-next-line @typescript-eslint/no-explicit-any export function tool<T extends Record<string, any>>( func: RunnableFunc<T, ToolReturnType>, fields: ToolWrapperParams<T> ): DynamicStructuredTool<T>; export function tool< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends ZodObjectAny | z.ZodString | Record<string, any> = ZodObjectAny >( func: RunnableFunc<T extends ZodObjectAny ? z.output<T> : T, ToolReturnType>, fields: ToolWrapperParams<T> ): | DynamicStructuredTool<T extends ZodObjectAny ? T : ZodObjectAny> | DynamicTool { // If the schema is not provided, or it's a string schema, create a DynamicTool if ( !fields.schema || (isZodSchema(fields.schema) && (!("shape" in fields.schema) || !fields.schema.shape)) ) { return new DynamicTool({ ...fields, description: fields.description ?? fields.schema?.description ?? `${fields.name} tool`, func: async (input, runManager, config) => { return new Promise((resolve, reject) => { const childConfig = patchConfig(config, { callbacks: runManager?.getChild(), }); void AsyncLocalStorageProviderSingleton.runWithConfig( pickRunnableConfigKeys(childConfig), async () => { try { // TS doesn't restrict the type here based on the guard above // eslint-disable-next-line @typescript-eslint/no-explicit-any resolve(func(input as any, childConfig)); } catch (e) { reject(e); } } ); }); }, }); } const description = fields.description ?? fields.schema.description ?? `${fields.name} tool`; return new DynamicStructuredTool<T extends ZodObjectAny ? 
T : ZodObjectAny>({ ...fields, description, // eslint-disable-next-line @typescript-eslint/no-explicit-any schema: fields.schema as any, // TODO: Consider moving into DynamicStructuredTool constructor func: async (input, runManager, config) => { return new Promise((resolve, reject) => { const childConfig = patchConfig(config, { callbacks: runManager?.getChild(), }); void AsyncLocalStorageProviderSingleton.runWithConfig( pickRunnableConfigKeys(childConfig), async () => { try { // TS doesn't restrict the type here based on the guard above // eslint-disable-next-line @typescript-eslint/no-explicit-any resolve(func(input as any, childConfig)); } catch (e) { reject(e); } } ); }); }, }); } function _formatToolOutput(params: { content: unknown; name: string; artifact?: unknown; toolCallId?: string; }): ToolReturnType { const { content, artifact, toolCallId } = params; if (toolCallId) { if ( typeof content === "string" || (Array.isArray(content) && content.every((item) => typeof item === "object")) ) { return new ToolMessage({ content, artifact, tool_call_id: toolCallId, name: params.name, }); } else { return new ToolMessage({ content: _stringify(content), artifact, tool_call_id: toolCallId, name: params.name, }); } } else { return content; } } function _stringify(content: unknown): string { try { return JSON.stringify(content, null, 2); } catch (_noOp) { return `${content}`; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tools/utils.ts
import { ToolCall } from "../messages/tool.js";

/**
 * Type guard that checks whether a value looks like a ToolCall: a non-null
 * object whose `type` property is the literal string `"tool_call"`.
 * @param toolCall The value to inspect.
 * @returns True when the value is a ToolCall-shaped object.
 */
export function _isToolCall(toolCall?: unknown): toolCall is ToolCall {
  if (toolCall == null || typeof toolCall !== "object") {
    return false;
  }
  return "type" in toolCall && toolCall.type === "tool_call";
}

/**
 * Custom error class used to handle exceptions related to tool input parsing.
 * It extends the built-in `Error` class and adds an optional `output`
 * property that can hold the output that caused the exception.
 */
export class ToolInputParsingException extends Error {
  /**
   * @param message Human-readable description of the parsing failure.
   * @param output The raw output that caused the exception, if available.
   */
  constructor(message: string, public output?: string) {
    super(message);
  }
}
0
lc_public_repos/langchainjs/langchain-core/src/tools
lc_public_repos/langchainjs/langchain-core/src/tools/tests/tools.test.ts
import { test, expect } from "@jest/globals"; import { z } from "zod"; import { DynamicStructuredTool, tool } from "../index.js"; import { ToolMessage } from "../../messages/tool.js"; test("Tool should error if responseFormat is content_and_artifact but the function doesn't return a tuple", async () => { const weatherSchema = z.object({ location: z.string(), }); const weatherTool = tool( (_) => { return "str"; }, { name: "weather", schema: weatherSchema, responseFormat: "content_and_artifact", } ); await expect(async () => { await weatherTool.invoke({ location: "San Francisco" }); }).rejects.toThrow(); }); test("Tool works if responseFormat is content_and_artifact and returns a tuple", async () => { const weatherSchema = z.object({ location: z.string(), }); const weatherTool = tool( (input) => { return ["msg_content", input]; }, { name: "weather", schema: weatherSchema, responseFormat: "content_and_artifact", } ); const toolResult = await weatherTool.invoke({ location: "San Francisco" }); expect(toolResult).not.toBeInstanceOf(ToolMessage); expect(toolResult).toBe("msg_content"); }); test("Does not return tool message if responseFormat is content_and_artifact and returns a tuple and a tool call with no id is passed in", async () => { const weatherSchema = z.object({ location: z.string(), }); const weatherTool = tool( (input) => { return ["msg_content", input]; }, { name: "weather", schema: weatherSchema, responseFormat: "content_and_artifact", } ); const toolResult = await weatherTool.invoke({ args: { location: "San Francisco" }, name: "weather", type: "tool_call", }); expect(toolResult).toBe("msg_content"); }); test("Returns tool message if responseFormat is content_and_artifact and returns a tuple and a tool call with id is passed in", async () => { const weatherSchema = z.object({ location: z.string(), }); const weatherTool = tool( (input) => { return ["msg_content", input]; }, { name: "weather", schema: weatherSchema, responseFormat: "content_and_artifact", } ); 
const toolResult = await weatherTool.invoke({ id: "testid", args: { location: "San Francisco" }, name: "weather", type: "tool_call", }); expect(toolResult).toBeInstanceOf(ToolMessage); expect(toolResult.content).toBe("msg_content"); expect(toolResult.artifact).toEqual({ location: "San Francisco" }); expect(toolResult.name).toBe("weather"); }); test("Tool can accept single string input", async () => { const stringTool = tool<z.ZodString>( (input: string, config): string => { expect(config).toMatchObject({ configurable: { foo: "bar" } }); return `${input}a`; }, { name: "string_tool", description: "A tool that appends 'a' to the input string", schema: z.string(), } ); const result = await stringTool.invoke("b", { configurable: { foo: "bar" } }); expect(result).toBe("ba"); }); test("Tool declared with JSON schema", async () => { const weatherSchema = { type: "object", properties: { location: { type: "string", description: "A place", }, }, required: ["location"], }; const weatherTool = tool( (input) => { // even without validation expect input to be passed expect(input).toEqual({ somethingSilly: true, }); return "Sunny"; }, { name: "weather", schema: weatherSchema, } ); expect(weatherTool).toBeInstanceOf(DynamicStructuredTool); const weatherTool2 = new DynamicStructuredTool({ name: "weather", description: "get the weather", func: async (input) => { // even without validation expect input to be passed expect(input).toEqual({ somethingSilly: true, }); return "Sunny"; }, schema: weatherSchema, }); // No validation on JSON schema tools await weatherTool.invoke({ somethingSilly: true, }); await weatherTool2.invoke({ somethingSilly: true, }); }); test("Tool input typing is enforced", async () => { const weatherSchema = z.object({ location: z.string(), }); const weatherTool = tool( (_) => { return "Sunny"; }, { name: "weather", schema: weatherSchema, } ); const weatherTool2 = new DynamicStructuredTool({ name: "weather", description: "get the weather", func: async (_) => { 
return "Sunny"; }, schema: weatherSchema, }); const weatherTool3 = tool( async (_) => { return "Sunny"; }, { name: "weather", description: "get the weather", schema: z.string(), } ); await expect(async () => { await weatherTool.invoke({ // @ts-expect-error Invalid argument badval: "someval", }); }).rejects.toThrow(); const res = await weatherTool.invoke({ location: "somewhere", }); expect(res).toEqual("Sunny"); await expect(async () => { await weatherTool2.invoke({ // @ts-expect-error Invalid argument badval: "someval", }); }).rejects.toThrow(); const res2 = await weatherTool2.invoke({ location: "someval", }); expect(res2).toEqual("Sunny"); const res3 = await weatherTool3.invoke("blah"); expect(res3).toEqual("Sunny"); }); test("Tool can throw detailed errors", async () => { const weatherSchema = z.object({ location: z.string(), }); const stringTool = tool( (input) => { return JSON.stringify(input); }, { name: "string_tool", description: "A tool that appends 'a' to the input string", schema: weatherSchema, verboseParsingErrors: true, } ); await expect( stringTool.invoke({ // @ts-expect-error Testing parsing errors location: 8, }) ).rejects.toThrow(`Received tool input did not match expected schema Details: [ { "code": "invalid_type", "expected": "string", "received": "number", "path": [ "location" ], "message": "Expected string, received number" } ]`); });
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/list.ts
import { BaseMessage } from "../messages/index.js"; import { OutputParserException } from "./base.js"; import { BaseTransformOutputParser } from "./transform.js"; /** * Class to parse the output of an LLM call to a list. * @augments BaseOutputParser */ export abstract class ListOutputParser extends BaseTransformOutputParser< string[] > { re?: RegExp; async *_transform( inputGenerator: AsyncGenerator<string | BaseMessage> ): AsyncGenerator<string[]> { let buffer = ""; for await (const input of inputGenerator) { if (typeof input === "string") { // add current chunk to buffer buffer += input; } else { // extract message content and add to buffer buffer += input.content; } // get parts in buffer if (!this.re) { const parts = await this.parse(buffer); if (parts.length > 1) { // if there are multiple parts, yield all but the last one for (const part of parts.slice(0, -1)) { yield [part]; } // keep the last part in the buffer buffer = parts[parts.length - 1]; } } else { // if there is a regex, get all matches const matches = [...buffer.matchAll(this.re)]; if (matches.length > 1) { let doneIdx = 0; // if there are multiple matches, yield all but the last one for (const match of matches.slice(0, -1)) { yield [match[1]]; doneIdx += (match.index ?? 0) + match[0].length; } // keep the last match in the buffer buffer = buffer.slice(doneIdx); } } } // yield the last part for (const part of await this.parse(buffer)) { yield [part]; } } } /** * Class to parse the output of an LLM call as a comma-separated list. * @augments ListOutputParser */ export class CommaSeparatedListOutputParser extends ListOutputParser { static lc_name() { return "CommaSeparatedListOutputParser"; } lc_namespace = ["langchain_core", "output_parsers", "list"]; lc_serializable = true; /** * Parses the given text into an array of strings, using a comma as the * separator. If the parsing fails, throws an OutputParserException. * @param text The text to parse. 
* @returns An array of strings obtained by splitting the input text at each comma. */ async parse(text: string): Promise<string[]> { try { return text .trim() .split(",") .map((s) => s.trim()); } catch (e) { throw new OutputParserException(`Could not parse output: ${text}`, text); } } /** * Provides instructions on the expected format of the response for the * CommaSeparatedListOutputParser. * @returns A string containing instructions on the expected format of the response. */ getFormatInstructions(): string { return `Your response should be a list of comma separated values, eg: \`foo, bar, baz\``; } } /** * Class to parse the output of an LLM call to a list with a specific length and separator. * @augments ListOutputParser */ export class CustomListOutputParser extends ListOutputParser { lc_namespace = ["langchain_core", "output_parsers", "list"]; private length: number | undefined; private separator: string; constructor({ length, separator }: { length?: number; separator?: string }) { super(...arguments); this.length = length; this.separator = separator || ","; } /** * Parses the given text into an array of strings, using the specified * separator. If the parsing fails or the number of items in the list * doesn't match the expected length, throws an OutputParserException. * @param text The text to parse. * @returns An array of strings obtained by splitting the input text at each occurrence of the specified separator. */ async parse(text: string): Promise<string[]> { try { const items = text .trim() .split(this.separator) .map((s) => s.trim()); if (this.length !== undefined && items.length !== this.length) { throw new OutputParserException( `Incorrect number of items. 
Expected ${this.length}, got ${items.length}.` ); } return items; } catch (e) { if (Object.getPrototypeOf(e) === OutputParserException.prototype) { throw e; } throw new OutputParserException(`Could not parse output: ${text}`); } } /** * Provides instructions on the expected format of the response for the * CustomListOutputParser, including the number of items and the * separator. * @returns A string containing instructions on the expected format of the response. */ getFormatInstructions(): string { return `Your response should be a list of ${ this.length === undefined ? "" : `${this.length} ` }items separated by "${this.separator}" (eg: \`foo${this.separator} bar${ this.separator } baz\`)`; } } export class NumberedListOutputParser extends ListOutputParser { static lc_name() { return "NumberedListOutputParser"; } lc_namespace = ["langchain_core", "output_parsers", "list"]; lc_serializable = true; getFormatInstructions(): string { return `Your response should be a numbered list with each item on a new line. For example: \n\n1. foo\n\n2. bar\n\n3. baz`; } re = /\d+\.\s([^\n]+)/g; async parse(text: string): Promise<string[]> { return [...(text.matchAll(this.re) ?? [])].map((m) => m[1]); } } export class MarkdownListOutputParser extends ListOutputParser { static lc_name() { return "NumberedListOutputParser"; } lc_namespace = ["langchain_core", "output_parsers", "list"]; lc_serializable = true; getFormatInstructions(): string { return `Your response should be a numbered list with each item on a new line. For example: \n\n1. foo\n\n2. bar\n\n3. baz`; } re = /^\s*[-*]\s([^\n]+)$/gm; async parse(text: string): Promise<string[]> { return [...(text.matchAll(this.re) ?? [])].map((m) => m[1]); } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/string.ts
import { BaseTransformOutputParser } from "./transform.js";
import {
  MessageContentComplex,
  MessageContentImageUrl,
  MessageContentText,
} from "../messages/index.js";

/**
 * OutputParser that parses LLMResult into the top likely string.
 * @example
 * ```typescript
 * const promptTemplate = PromptTemplate.fromTemplate(
 *   "Tell me a joke about {topic}",
 * );
 *
 * const chain = RunnableSequence.from([
 *   promptTemplate,
 *   new ChatOpenAI({}),
 *   new StringOutputParser(),
 * ]);
 *
 * const result = await chain.invoke({ topic: "bears" });
 * console.log("What do you call a bear with no teeth? A gummy bear!");
 * ```
 */
export class StringOutputParser extends BaseTransformOutputParser<string> {
  static lc_name() {
    return "StrOutputParser";
  }

  lc_namespace = ["langchain_core", "output_parsers", "string"];

  lc_serializable = true;

  /**
   * Parses a string output from an LLM call. This method is meant to be
   * implemented by subclasses to define how a string output from an LLM
   * should be parsed.
   * @param text The string output from an LLM call.
   * @param callbacks Optional callbacks.
   * @returns A promise of the parsed output.
   */
  parse(text: string): Promise<string> {
    // Identity parse: the text is already the desired output.
    return Promise.resolve(text);
  }

  /**
   * No formatting constraints: any string output is acceptable.
   * @returns An empty string.
   */
  getFormatInstructions(): string {
    return "";
  }

  /**
   * Extracts the plain text from a "text" content part.
   */
  protected _textContentToString(content: MessageContentText): string {
    return content.text;
  }

  /**
   * Image parts cannot be represented as a string; always throws.
   * @throws Error for any "image_url" content part.
   */
  protected _imageUrlContentToString(_content: MessageContentImageUrl): string {
    throw new Error(
      `Cannot coerce a multimodal "image_url" message part into a string.`
    );
  }

  /**
   * Converts a single complex message content part to a string.
   * Only "text"/"text_delta" parts are stringified; "image_url" parts
   * throw via _imageUrlContentToString; unknown types throw immediately.
   * A recognized type that lacks its expected payload field falls through
   * the switch and hits the final "Invalid content type" throw.
   */
  protected _messageContentComplexToString(
    content: MessageContentComplex
  ): string {
    switch (content.type) {
      case "text":
      case "text_delta":
        if ("text" in content) {
          // Type guard for MessageContentText
          return this._textContentToString(content as MessageContentText);
        }
        break;
      case "image_url":
        if ("image_url" in content) {
          // Type guard for MessageContentImageUrl
          return this._imageUrlContentToString(
            content as MessageContentImageUrl
          );
        }
        break;
      default:
        throw new Error(
          `Cannot coerce "${content.type}" message part into a string.`
        );
    }
    // Reached only when the type matched a case above but the payload
    // field ("text" / "image_url") was missing.
    throw new Error(`Invalid content type: ${content.type}`);
  }

  /**
   * Concatenates all complex content parts into one string, in order.
   */
  protected _baseMessageContentToString(
    content: MessageContentComplex[]
  ): string {
    return content.reduce(
      (acc: string, item: MessageContentComplex) =>
        acc + this._messageContentComplexToString(item),
      ""
    );
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/structured.ts
import { z } from "zod"; import { zodToJsonSchema, JsonSchema7Type, JsonSchema7ArrayType, JsonSchema7ObjectType, JsonSchema7StringType, JsonSchema7NumberType, JsonSchema7NullableType, } from "zod-to-json-schema"; import { BaseOutputParser, FormatInstructionsOptions, OutputParserException, } from "./base.js"; export type JsonMarkdownStructuredOutputParserInput = { interpolationDepth?: number; }; export interface JsonMarkdownFormatInstructionsOptions extends FormatInstructionsOptions { interpolationDepth?: number; } export class StructuredOutputParser< T extends z.ZodTypeAny > extends BaseOutputParser<z.infer<T>> { static lc_name() { return "StructuredOutputParser"; } lc_namespace = ["langchain", "output_parsers", "structured"]; toJSON() { return this.toJSONNotImplemented(); } constructor(public schema: T) { super(schema); } /** * Creates a new StructuredOutputParser from a Zod schema. * @param schema The Zod schema which the output should match * @returns A new instance of StructuredOutputParser. */ static fromZodSchema<T extends z.ZodTypeAny>(schema: T) { return new this(schema); } /** * Creates a new StructuredOutputParser from a set of names and * descriptions. * @param schemas An object where each key is a name and each value is a description * @returns A new instance of StructuredOutputParser. */ static fromNamesAndDescriptions<S extends { [key: string]: string }>( schemas: S ) { const zodSchema = z.object( Object.fromEntries( Object.entries(schemas).map( ([name, description]) => [name, z.string().describe(description)] as const ) ) ); return new this(zodSchema); } /** * Returns a markdown code snippet with a JSON object formatted according * to the schema. * @param options Optional. The options for formatting the instructions * @returns A markdown code snippet with a JSON object formatted according to the schema. */ getFormatInstructions(): string { return `You must format your output as a JSON value that adheres to a given "JSON Schema" instance. 
"JSON Schema" is a declarative language that allows you to annotate and validate JSON documents. For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}} would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings. Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas! Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock: \`\`\`json ${JSON.stringify(zodToJsonSchema(this.schema))} \`\`\` `; } /** * Parses the given text according to the schema. * @param text The text to parse * @returns The parsed output. */ async parse(text: string): Promise<z.infer<T>> { try { const json = text.includes("```") ? text.trim().split(/```(?:json)?/)[1] : text.trim(); const escapedJson = json .replace(/"([^"\\]*(\\.[^"\\]*)*)"/g, (_match, capturedGroup) => { const escapedInsideQuotes = capturedGroup.replace(/\n/g, "\\n"); return `"${escapedInsideQuotes}"`; }) .replace(/\n/g, ""); return await this.schema.parseAsync(JSON.parse(escapedJson)); } catch (e) { throw new OutputParserException( `Failed to parse. Text: "${text}". Error: ${e}`, text ); } } } /** * A specific type of `StructuredOutputParser` that parses JSON data * formatted as a markdown code snippet. 
*/ export class JsonMarkdownStructuredOutputParser< T extends z.ZodTypeAny > extends StructuredOutputParser<T> { static lc_name() { return "JsonMarkdownStructuredOutputParser"; } getFormatInstructions( options?: JsonMarkdownFormatInstructionsOptions ): string { const interpolationDepth = options?.interpolationDepth ?? 1; if (interpolationDepth < 1) { throw new Error("f string interpolation depth must be at least 1"); } return `Return a markdown code snippet with a JSON object formatted to look like:\n\`\`\`json\n${this._schemaToInstruction( zodToJsonSchema(this.schema) ) .replaceAll("{", "{".repeat(interpolationDepth)) .replaceAll("}", "}".repeat(interpolationDepth))}\n\`\`\``; } private _schemaToInstruction( schemaInput: JsonSchema7Type, indent = 2 ): string { const schema = schemaInput as Extract< JsonSchema7Type, | JsonSchema7ObjectType | JsonSchema7ArrayType | JsonSchema7StringType | JsonSchema7NumberType | JsonSchema7NullableType >; if ("type" in schema) { let nullable = false; let type: string; if (Array.isArray(schema.type)) { const nullIdx = schema.type.findIndex((type) => type === "null"); if (nullIdx !== -1) { nullable = true; schema.type.splice(nullIdx, 1); } type = schema.type.join(" | ") as string; } else { type = schema.type; } if (schema.type === "object" && schema.properties) { const description = schema.description ? ` // ${schema.description}` : ""; const properties = Object.entries(schema.properties) .map(([key, value]) => { const isOptional = schema.required?.includes(key) ? "" : " (optional)"; return `${" ".repeat(indent)}"${key}": ${this._schemaToInstruction( value, indent + 2 )}${isOptional}`; }) .join("\n"); return `{\n${properties}\n${" ".repeat(indent - 2)}}${description}`; } if (schema.type === "array" && schema.items) { const description = schema.description ? 
` // ${schema.description}` : ""; return `array[\n${" ".repeat(indent)}${this._schemaToInstruction( schema.items, indent + 2 )}\n${" ".repeat(indent - 2)}] ${description}`; } const isNullable = nullable ? " (nullable)" : ""; const description = schema.description ? ` // ${schema.description}` : ""; return `${type}${description}${isNullable}`; } if ("anyOf" in schema) { return schema.anyOf .map((s) => this._schemaToInstruction(s, indent)) .join(`\n${" ".repeat(indent - 2)}`); } throw new Error("unsupported schema type"); } static fromZodSchema<T extends z.ZodTypeAny>(schema: T) { return new this<T>(schema); } static fromNamesAndDescriptions<S extends { [key: string]: string }>( schemas: S ) { const zodSchema = z.object( Object.fromEntries( Object.entries(schemas).map( ([name, description]) => [name, z.string().describe(description)] as const ) ) ); return new this<typeof zodSchema>(zodSchema); } } export interface AsymmetricStructuredOutputParserFields< T extends z.ZodTypeAny > { inputSchema: T; } /** * A type of `StructuredOutputParser` that handles asymmetric input and * output schemas. */ export abstract class AsymmetricStructuredOutputParser< T extends z.ZodTypeAny, Y = unknown > extends BaseOutputParser<Y> { private structuredInputParser: JsonMarkdownStructuredOutputParser<T>; constructor({ inputSchema }: AsymmetricStructuredOutputParserFields<T>) { super(...arguments); this.structuredInputParser = new JsonMarkdownStructuredOutputParser( inputSchema ); } /** * Processes the parsed input into the desired output format. Must be * implemented by subclasses. * @param input The parsed input * @returns The processed output. */ abstract outputProcessor(input: z.infer<T>): Promise<Y>; async parse(text: string): Promise<Y> { let parsedInput; try { parsedInput = await this.structuredInputParser.parse(text); } catch (e) { throw new OutputParserException( `Failed to parse. Text: "${text}". 
Error: ${e}`, text ); } return this.outputProcessor(parsedInput); } getFormatInstructions(): string { return this.structuredInputParser.getFormatInstructions(); } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/xml.ts
import { BaseCumulativeTransformOutputParser, BaseCumulativeTransformOutputParserInput, } from "./transform.js"; import { Operation, compare } from "../utils/json_patch.js"; import { sax } from "../utils/sax-js/sax.js"; import { ChatGeneration, Generation } from "../outputs.js"; export const XML_FORMAT_INSTRUCTIONS = `The output should be formatted as a XML file. 1. Output should conform to the tags below. 2. If tags are not given, make them on your own. 3. Remember to always open and close all the tags. As an example, for the tags ["foo", "bar", "baz"]: 1. String "<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>" is a well-formatted instance of the schema. 2. String "<foo>\n <bar>\n </foo>" is a badly-formatted instance. 3. String "<foo>\n <tag>\n </tag>\n</foo>" is a badly-formatted instance. Here are the output tags: \`\`\` {tags} \`\`\``; export interface XMLOutputParserFields extends BaseCumulativeTransformOutputParserInput { /** * Optional list of tags that the output should conform to. * Only used in formatting of the prompt. 
*/ tags?: string[]; } export type Content = string | undefined | Array<{ [key: string]: Content }>; export type XMLResult = { [key: string]: Content; }; export class XMLOutputParser extends BaseCumulativeTransformOutputParser<XMLResult> { tags?: string[]; constructor(fields?: XMLOutputParserFields) { super(fields); this.tags = fields?.tags; } static lc_name() { return "XMLOutputParser"; } lc_namespace = ["langchain_core", "output_parsers"]; lc_serializable = true; protected _diff( prev: unknown | undefined, next: unknown ): Operation[] | undefined { if (!next) { return undefined; } if (!prev) { return [{ op: "replace", path: "", value: next }]; } return compare(prev, next); } async parsePartialResult( generations: ChatGeneration[] | Generation[] ): Promise<XMLResult | undefined> { return parseXMLMarkdown(generations[0].text); } async parse(text: string): Promise<XMLResult> { return parseXMLMarkdown(text); } getFormatInstructions(): string { const withTags = !!(this.tags && this.tags.length > 0); return withTags ? XML_FORMAT_INSTRUCTIONS.replace("{tags}", this.tags?.join(", ") ?? "") : XML_FORMAT_INSTRUCTIONS; } } const strip = (text: string) => text .split("\n") .map((line) => line.replace(/^\s+/, "")) .join("\n") .trim(); type ParsedResult = { name: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any attributes: Record<string, any>; children: Array<ParsedResult>; text?: string; isSelfClosing: boolean; }; const parseParsedResult = (input: ParsedResult): XMLResult => { if (Object.keys(input).length === 0) { return {}; } const result: XMLResult = {}; if (input.children.length > 0) { result[input.name] = input.children.map(parseParsedResult); return result; } else { result[input.name] = input.text ?? 
undefined; return result; } }; export function parseXMLMarkdown(s: string): XMLResult { const cleanedString = strip(s); const parser = sax.parser(true); let parsedResult: ParsedResult = {} as ParsedResult; const elementStack: ParsedResult[] = []; // eslint-disable-next-line @typescript-eslint/no-explicit-any parser.onopentag = (node: any) => { const element = { name: node.name, attributes: node.attributes, children: [], text: "", isSelfClosing: node.isSelfClosing, }; if (elementStack.length > 0) { const parentElement = elementStack[elementStack.length - 1]; parentElement.children.push(element); } else { parsedResult = element as ParsedResult; } if (!node.isSelfClosing) { elementStack.push(element); } }; parser.onclosetag = () => { if (elementStack.length > 0) { const lastElement = elementStack.pop(); if (elementStack.length === 0 && lastElement) { parsedResult = lastElement as ParsedResult; } } }; // eslint-disable-next-line @typescript-eslint/no-explicit-any parser.ontext = (text: any) => { if (elementStack.length > 0) { const currentElement = elementStack[elementStack.length - 1]; currentElement.text += text; } }; // eslint-disable-next-line @typescript-eslint/no-explicit-any parser.onattribute = (attr: any) => { if (elementStack.length > 0) { const currentElement = elementStack[elementStack.length - 1]; currentElement.attributes[attr.name] = attr.value; } }; // Try to find XML string within triple backticks. const match = /```(xml)?(.*)```/s.exec(cleanedString); const xmlString = match ? match[2] : cleanedString; parser.write(xmlString).close(); // Remove the XML declaration if present if (parsedResult && parsedResult.name === "?xml") { parsedResult = parsedResult.children[0] as ParsedResult; } return parseParsedResult(parsedResult); }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/index.ts
export * from "./base.js"; export * from "./bytes.js"; export * from "./list.js"; export * from "./string.js"; export * from "./structured.js"; export * from "./transform.js"; export * from "./json.js"; export * from "./xml.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/json.ts
import { BaseCumulativeTransformOutputParser } from "./transform.js";
import { Operation, compare } from "../utils/json_patch.js";
import { ChatGeneration, Generation } from "../outputs.js";
import { parseJsonMarkdown, parsePartialJson } from "../utils/json.js";

/**
 * Class for parsing the output of an LLM into a JSON object.
 */
export class JsonOutputParser<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  T extends Record<string, any> = Record<string, any>
> extends BaseCumulativeTransformOutputParser<T> {
  static lc_name() {
    return "JsonOutputParser";
  }

  lc_namespace = ["langchain_core", "output_parsers"];

  lc_serializable = true;

  /**
   * Computes a JSON-patch style diff between two successive partial
   * parses; used when the parser is constructed with `diff: true`.
   * @param prev The previously emitted parse result, if any.
   * @param next The latest parse result.
   * @returns Patch operations, or undefined when `next` is falsy.
   */
  protected _diff(
    prev: unknown | undefined,
    next: unknown
  ): Operation[] | undefined {
    if (!next) {
      return undefined;
    }
    if (!prev) {
      // First parsed value: emit a single "replace everything" operation.
      return [{ op: "replace", path: "", value: next }];
    }
    return compare(prev, next);
  }

  // This should actually return Partial<T>, but there's no way
  // to specify emitted chunks as instances separate from the main output type.
  async parsePartialResult(
    generations: ChatGeneration[] | Generation[]
  ): Promise<T | undefined> {
    // Lenient parse of possibly-incomplete streamed JSON.
    return parseJsonMarkdown(generations[0].text);
  }

  /**
   * Parses a complete LLM output, using strict JSON.parse (unlike the
   * lenient partial parser used while streaming).
   * @param text The full text to parse.
   * @returns The parsed JSON object.
   */
  async parse(text: string): Promise<T> {
    return parseJsonMarkdown(text, JSON.parse);
  }

  getFormatInstructions(): string {
    return "";
  }
}

export { parsePartialJson, parseJsonMarkdown };
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/base.ts
import { Runnable } from "../runnables/index.js"; import type { RunnableConfig } from "../runnables/config.js"; import type { BasePromptValueInterface } from "../prompt_values.js"; import type { BaseMessage, MessageContentComplex } from "../messages/index.js"; import type { Callbacks } from "../callbacks/manager.js"; import type { Generation, ChatGeneration } from "../outputs.js"; import { addLangChainErrorFields } from "../errors/index.js"; /** * Options for formatting instructions. */ export interface FormatInstructionsOptions {} /** * Abstract base class for parsing the output of a Large Language Model * (LLM) call. It provides methods for parsing the result of an LLM call * and invoking the parser with a given input. */ export abstract class BaseLLMOutputParser<T = unknown> extends Runnable< string | BaseMessage, T > { /** * Parses the result of an LLM call. This method is meant to be * implemented by subclasses to define how the output from the LLM should * be parsed. * @param generations The generations from an LLM call. * @param callbacks Optional callbacks. * @returns A promise of the parsed output. */ abstract parseResult( generations: Generation[] | ChatGeneration[], callbacks?: Callbacks ): Promise<T>; /** * Parses the result of an LLM call with a given prompt. By default, it * simply calls `parseResult`. * @param generations The generations from an LLM call. * @param _prompt The prompt used in the LLM call. * @param callbacks Optional callbacks. * @returns A promise of the parsed output. */ parseResultWithPrompt( generations: Generation[] | ChatGeneration[], _prompt: BasePromptValueInterface, callbacks?: Callbacks ): Promise<T> { return this.parseResult(generations, callbacks); } protected _baseMessageToString(message: BaseMessage): string { return typeof message.content === "string" ? 
message.content : this._baseMessageContentToString(message.content); } protected _baseMessageContentToString( content: MessageContentComplex[] ): string { return JSON.stringify(content); } /** * Calls the parser with a given input and optional configuration options. * If the input is a string, it creates a generation with the input as * text and calls `parseResult`. If the input is a `BaseMessage`, it * creates a generation with the input as a message and the content of the * input as text, and then calls `parseResult`. * @param input The input to the parser, which can be a string or a `BaseMessage`. * @param options Optional configuration options. * @returns A promise of the parsed output. */ async invoke( input: string | BaseMessage, options?: RunnableConfig ): Promise<T> { if (typeof input === "string") { return this._callWithConfig( async (input: string, options): Promise<T> => this.parseResult([{ text: input }], options?.callbacks), input, { ...options, runType: "parser" } ); } else { return this._callWithConfig( async (input: BaseMessage, options): Promise<T> => this.parseResult( [ { message: input, text: this._baseMessageToString(input), }, ], options?.callbacks ), input, { ...options, runType: "parser" } ); } } } /** * Class to parse the output of an LLM call. */ export abstract class BaseOutputParser< T = unknown > extends BaseLLMOutputParser<T> { parseResult( generations: Generation[] | ChatGeneration[], callbacks?: Callbacks ): Promise<T> { return this.parse(generations[0].text, callbacks); } /** * Parse the output of an LLM call. * * @param text - LLM output to parse. * @returns Parsed output. */ abstract parse(text: string, callbacks?: Callbacks): Promise<T>; async parseWithPrompt( text: string, _prompt: BasePromptValueInterface, callbacks?: Callbacks ): Promise<T> { return this.parse(text, callbacks); } /** * Return a string describing the format of the output. * @returns Format instructions. * @param options - Options for formatting instructions. 
* @example * ```json * { * "foo": "bar" * } * ``` */ abstract getFormatInstructions(options?: FormatInstructionsOptions): string; /** * Return the string type key uniquely identifying this class of parser */ _type(): string { throw new Error("_type not implemented"); } } /** * Exception that output parsers should raise to signify a parsing error. * * This exists to differentiate parsing errors from other code or execution errors * that also may arise inside the output parser. OutputParserExceptions will be * available to catch and handle in ways to fix the parsing error, while other * errors will be raised. * * @param message - The error that's being re-raised or an error message. * @param llmOutput - String model output which is error-ing. * @param observation - String explanation of error which can be passed to a * model to try and remediate the issue. * @param sendToLLM - Whether to send the observation and llm_output back to an Agent * after an OutputParserException has been raised. This gives the underlying * model driving the agent the context that the previous output was improperly * structured, in the hopes that it will update the output to the correct * format. */ export class OutputParserException extends Error { llmOutput?: string; observation?: string; sendToLLM: boolean; constructor( message: string, llmOutput?: string, observation?: string, sendToLLM = false ) { super(message); this.llmOutput = llmOutput; this.observation = observation; this.sendToLLM = sendToLLM; if (sendToLLM) { if (observation === undefined || llmOutput === undefined) { throw new Error( "Arguments 'observation' & 'llmOutput' are required if 'sendToLlm' is true" ); } } addLangChainErrorFields(this, "OUTPUT_PARSING_FAILURE"); } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/bytes.ts
import { BaseTransformOutputParser } from "./transform.js"; /** * OutputParser that parses LLMResult into the top likely string and * encodes it into bytes. */ export class BytesOutputParser extends BaseTransformOutputParser<Uint8Array> { static lc_name() { return "BytesOutputParser"; } lc_namespace = ["langchain_core", "output_parsers", "bytes"]; lc_serializable = true; // TODO: Figure out why explicit typing is needed // eslint-disable-next-line @typescript-eslint/no-explicit-any protected textEncoder: any = new TextEncoder(); parse(text: string): Promise<Uint8Array> { return Promise.resolve(this.textEncoder.encode(text)); } getFormatInstructions(): string { return ""; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/output_parsers/transform.ts
import { BaseOutputParser } from "./base.js"; import { type BaseMessage, isBaseMessage, isBaseMessageChunk, } from "../messages/base.js"; import { convertToChunk } from "../messages/utils.js"; import type { BaseCallbackConfig } from "../callbacks/manager.js"; import { type Generation, type ChatGeneration, GenerationChunk, ChatGenerationChunk, } from "../outputs.js"; import { deepCompareStrict } from "../utils/@cfworker/json-schema/index.js"; /** * Class to parse the output of an LLM call that also allows streaming inputs. */ export abstract class BaseTransformOutputParser< T = unknown > extends BaseOutputParser<T> { async *_transform( inputGenerator: AsyncGenerator<string | BaseMessage> ): AsyncGenerator<T> { for await (const chunk of inputGenerator) { if (typeof chunk === "string") { yield this.parseResult([{ text: chunk }]); } else { yield this.parseResult([ { message: chunk, text: this._baseMessageToString(chunk), }, ]); } } } /** * Transforms an asynchronous generator of input into an asynchronous * generator of parsed output. * @param inputGenerator An asynchronous generator of input. * @param options A configuration object. * @returns An asynchronous generator of parsed output. */ async *transform( inputGenerator: AsyncGenerator<string | BaseMessage>, options: BaseCallbackConfig ): AsyncGenerator<T> { yield* this._transformStreamWithConfig( inputGenerator, this._transform.bind(this), { ...options, runType: "parser", } ); } } export type BaseCumulativeTransformOutputParserInput = { diff?: boolean }; /** * A base class for output parsers that can handle streaming input. It * extends the `BaseTransformOutputParser` class and provides a method for * converting parsed outputs into a diff format. */ export abstract class BaseCumulativeTransformOutputParser< T = unknown > extends BaseTransformOutputParser<T> { protected diff = false; constructor(fields?: BaseCumulativeTransformOutputParserInput) { super(fields); this.diff = fields?.diff ?? 
this.diff; } // eslint-disable-next-line @typescript-eslint/no-explicit-any protected abstract _diff(prev: any | undefined, next: any): any; abstract parsePartialResult( generations: Generation[] | ChatGeneration[] ): Promise<T | undefined>; async *_transform( inputGenerator: AsyncGenerator<string | BaseMessage> ): AsyncGenerator<T> { let prevParsed: T | undefined; let accGen: GenerationChunk | undefined; for await (const chunk of inputGenerator) { if (typeof chunk !== "string" && typeof chunk.content !== "string") { throw new Error("Cannot handle non-string output."); } let chunkGen: GenerationChunk; if (isBaseMessageChunk(chunk)) { if (typeof chunk.content !== "string") { throw new Error("Cannot handle non-string message output."); } chunkGen = new ChatGenerationChunk({ message: chunk, text: chunk.content, }); } else if (isBaseMessage(chunk)) { if (typeof chunk.content !== "string") { throw new Error("Cannot handle non-string message output."); } chunkGen = new ChatGenerationChunk({ message: convertToChunk(chunk), text: chunk.content, }); } else { chunkGen = new GenerationChunk({ text: chunk }); } if (accGen === undefined) { accGen = chunkGen; } else { accGen = accGen.concat(chunkGen); } const parsed = await this.parsePartialResult([accGen]); if ( parsed !== undefined && parsed !== null && !deepCompareStrict(parsed, prevParsed) ) { if (this.diff) { yield this._diff(prevParsed, parsed); } else { yield parsed; } prevParsed = parsed; } } } getFormatInstructions(): string { return ""; } }
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/tests/output_parser.test.ts
/* eslint-disable no-loop-func */ /* eslint-disable no-promise-executor-return */ import { test, expect } from "@jest/globals"; import { FakeStreamingLLM } from "../../utils/testing/index.js"; import { BytesOutputParser } from "../bytes.js"; import { CommaSeparatedListOutputParser, ListOutputParser, MarkdownListOutputParser, NumberedListOutputParser, } from "../list.js"; test("BytesOutputParser", async () => { const llm = new FakeStreamingLLM({}); const stream = await llm.pipe(new BytesOutputParser()).stream("Hi there!"); const chunks = []; const decoder = new TextDecoder(); for await (const chunk of stream) { chunks.push(decoder.decode(chunk)); } expect(chunks.length).toEqual("Hi there!".length); expect(chunks.join("")).toEqual("Hi there!"); }); async function acc(iter: AsyncGenerator<string[]>): Promise<string[][]> { const acc = []; for await (const chunk of iter) { acc.push(chunk); } return acc; } const listTestCases: [new () => ListOutputParser, string, string[]][] = [ [CommaSeparatedListOutputParser, "a,b,c", ["a", "b", "c"]], [CommaSeparatedListOutputParser, "a,b,c,", ["a", "b", "c", ""]], [CommaSeparatedListOutputParser, "a", ["a"]], [NumberedListOutputParser, "1. a\n2. b\n3. c", ["a", "b", "c"]], [ NumberedListOutputParser, "Items:\n\n1. apple\n\n2. banana\n\n3. cherry", ["apple", "banana", "cherry"], ], [ NumberedListOutputParser, `Your response should be a numbered list with each item on a new line. For example: \n\n1. foo\n\n2. bar\n\n3. baz`, ["foo", "bar", "baz"], ], [NumberedListOutputParser, "No items in the list.", []], [MarkdownListOutputParser, "- a\n - b\n- c", ["a", "b", "c"]], [ MarkdownListOutputParser, "Items:\n\n- apple\n\n- banana\n\n- cherry", ["apple", "banana", "cherry"], ], [ MarkdownListOutputParser, `Your response should be a numbered - not an item - list with each item on a new line. 
For example: \n\n- foo\n\n- bar\n\n- baz`, ["foo", "bar", "baz"], ], [MarkdownListOutputParser, "No items in the list.", []], [MarkdownListOutputParser, "* a\n * b\n* c", ["a", "b", "c"]], [ MarkdownListOutputParser, "Items:\n\n* apple\n\n* banana\n\n* cherry", ["apple", "banana", "cherry"], ], [ MarkdownListOutputParser, `Your response should be a numbered list with each item on a new line. For example: \n\n* foo\n\n* bar\n\n* baz`, ["foo", "bar", "baz"], ], [MarkdownListOutputParser, "No items in the list.", []], ]; for (const [Parser, input, output] of listTestCases) { test(`${Parser.name} parses ${input} to ${output}`, async () => { async function* generator() { for (const char of input) { yield char; } } const parser = new Parser(); const chunks = await acc(parser.transform(generator(), {})); expect(chunks).toEqual(output.map((x) => [x])); await expect(parser.parse(input)).resolves.toEqual(output); }); }
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/tests/json.test.ts
// NOTE(review): this file was recovered from a whitespace-collapsed extraction.
// Statement layout and — in particular — the line breaks inside the multi-line
// template literals below are reconstructions; verify against the original file.
import { test, expect } from "@jest/globals";
import { ChatPromptTemplate } from "../../prompts/chat.js";
import { RunnableSequence } from "../../runnables/base.js";
import { RunnablePassthrough } from "../../runnables/passthrough.js";
import { FakeStreamingLLM } from "../../utils/testing/index.js";
import { JsonOutputParser } from "../json.js";

// A JSON document pre-split into the fragment stream an LLM might emit:
// one fragment per line (via `.split("\n")`), so keys and string values
// arrive piecemeal and the parser must handle partial JSON at every step.
// NOTE(review): the per-line fragment boundaries below were reconstructed to
// be consistent with EXPECTED_STREAMED_JSON — confirm against the original.
const STREAMED_TOKENS = `
{
 "
setup
": "
Why
 did
 the
 bears
 start
 a
 band
 called
 Bears
 Bears
 Bears
 ?
"
,
 "
punchline
": "
Because
 they
 wanted
 to
 play
 bear
 -y
 good
 music
 !
"
,
 "
audience
": [
 "
Haha
"
,
 "
So
 funny
"
]
}
`.split("\n");

// One entry per distinct partial-parse state the streaming parser should emit.
const EXPECTED_STREAMED_JSON = [
  {},
  { setup: "" },
  { setup: "Why" },
  { setup: "Why did" },
  { setup: "Why did the" },
  { setup: "Why did the bears" },
  { setup: "Why did the bears start" },
  { setup: "Why did the bears start a" },
  { setup: "Why did the bears start a band" },
  { setup: "Why did the bears start a band called" },
  { setup: "Why did the bears start a band called Bears" },
  { setup: "Why did the bears start a band called Bears Bears" },
  { setup: "Why did the bears start a band called Bears Bears Bears" },
  { setup: "Why did the bears start a band called Bears Bears Bears ?" },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they wanted",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they wanted to",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they wanted to play",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they wanted to play bear",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they wanted to play bear -y",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they wanted to play bear -y good",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they wanted to play bear -y good music",
  },
  {
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    punchline: "Because they wanted to play bear -y good music !",
  },
  {
    punchline: "Because they wanted to play bear -y good music !",
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    audience: [],
  },
  {
    punchline: "Because they wanted to play bear -y good music !",
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    audience: [""],
  },
  {
    punchline: "Because they wanted to play bear -y good music !",
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    audience: ["Haha"],
  },
  {
    punchline: "Because they wanted to play bear -y good music !",
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    audience: ["Haha", ""],
  },
  {
    punchline: "Because they wanted to play bear -y good music !",
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    audience: ["Haha", "So"],
  },
  {
    punchline: "Because they wanted to play bear -y good music !",
    setup: "Why did the bears start a band called Bears Bears Bears ?",
    audience: ["Haha", "So funny"],
  },
];

// The same stream expressed as JSON-Patch-style operations between
// successive partial-parse states (the parser's `diff: true` mode).
const EXPECTED_STREAMED_JSON_DIFF = [
  [{ op: "replace", path: "", value: {} }],
  [{ op: "add", path: "/setup", value: "" }],
  [{ op: "replace", path: "/setup", value: "Why" }],
  [{ op: "replace", path: "/setup", value: "Why did" }],
  [{ op: "replace", path: "/setup", value: "Why did the" }],
  [{ op: "replace", path: "/setup", value: "Why did the bears" }],
  [{ op: "replace", path: "/setup", value: "Why did the bears start" }],
  [{ op: "replace", path: "/setup", value: "Why did the bears start a" }],
  [{ op: "replace", path: "/setup", value: "Why did the bears start a band" }],
  [
    {
      op: "replace",
      path: "/setup",
      value: "Why did the bears start a band called",
    },
  ],
  [
    {
      op: "replace",
      path: "/setup",
      value: "Why did the bears start a band called Bears",
    },
  ],
  [
    {
      op: "replace",
      path: "/setup",
      value: "Why did the bears start a band called Bears Bears",
    },
  ],
  [
    {
      op: "replace",
      path: "/setup",
      value: "Why did the bears start a band called Bears Bears Bears",
    },
  ],
  [
    {
      op: "replace",
      path: "/setup",
      value: "Why did the bears start a band called Bears Bears Bears ?",
    },
  ],
  [{ op: "add", path: "/punchline", value: "" }],
  [{ op: "replace", path: "/punchline", value: "Because" }],
  [{ op: "replace", path: "/punchline", value: "Because they" }],
  [{ op: "replace", path: "/punchline", value: "Because they wanted" }],
  [{ op: "replace", path: "/punchline", value: "Because they wanted to" }],
  [{ op: "replace", path: "/punchline", value: "Because they wanted to play" }],
  [
    {
      op: "replace",
      path: "/punchline",
      value: "Because they wanted to play bear",
    },
  ],
  [
    {
      op: "replace",
      path: "/punchline",
      value: "Because they wanted to play bear -y",
    },
  ],
  [
    {
      op: "replace",
      path: "/punchline",
      value: "Because they wanted to play bear -y good",
    },
  ],
  [
    {
      op: "replace",
      path: "/punchline",
      value: "Because they wanted to play bear -y good music",
    },
  ],
  [
    {
      op: "replace",
      path: "/punchline",
      value: "Because they wanted to play bear -y good music !",
    },
  ],
  [{ op: "add", path: "/audience", value: [] }],
  [{ op: "add", path: "/audience/0", value: "" }],
  [{ op: "replace", path: "/audience/0", value: "Haha" }],
  [{ op: "add", path: "/audience/1", value: "" }],
  [{ op: "replace", path: "/audience/1", value: "So" }],
  [{ op: "replace", path: "/audience/1", value: "So funny" }],
];

/** Drain an async iterator so the full emission sequence can be asserted. */
async function acc(iter: AsyncGenerator<object>): Promise<object[]> {
  const acc = [];
  for await (const chunk of iter) {
    acc.push(chunk);
  }
  return acc;
}

// Streaming mode: every emitted chunk matches the expected partial states,
// and a one-shot parse of the joined tokens equals the final state.
test("JSONOutputParser parses streamed JSON", async () => {
  async function* generator() {
    for (const token of STREAMED_TOKENS) {
      yield token;
    }
  }
  const parser = new JsonOutputParser();
  const result = await acc(parser.transform(generator(), {}));
  expect(result).toEqual(EXPECTED_STREAMED_JSON);
  await expect(parser.parse(STREAMED_TOKENS.join(""))).resolves.toEqual(
    EXPECTED_STREAMED_JSON[EXPECTED_STREAMED_JSON.length - 1]
  );
});

// Diff mode: the same stream emits patch-operation lists instead of snapshots.
test("JSONOutputParser parses streamed JSON diff", async () => {
  async function* generator() {
    for (const token of STREAMED_TOKENS) {
      yield token;
    }
  }
  const parser = new JsonOutputParser({ diff: true });
  const result = await acc(parser.transform(generator(), {}));
  expect(result).toEqual(EXPECTED_STREAMED_JSON_DIFF);
});

// The generic parameter should flow through a RunnableSequence unchanged.
test("JsonOutputParser supports a type param", async () => {
  type CypherEvaluationChainInput = {
    question: string;
    cypher: string;
    schema: string;
    errors: string[];
  };
  type CypherEvaluationChainOutput = {
    cypher: string;
    errors: string[];
  };
  const prompt = ChatPromptTemplate.fromTemplate(
    `{errors} {question} {cypher} {schema}`
  );
  const llm = new FakeStreamingLLM({
    responses: [`{"cypher":"testoutput","errors":["testerror"]}`],
  });
  const chain = RunnableSequence.from<
    CypherEvaluationChainInput,
    CypherEvaluationChainOutput
  >([
    RunnablePassthrough.assign<CypherEvaluationChainInput>({
      // Convert array of strings into single string
      errors: ({ errors }) =>
        Array.isArray(errors) ? errors.join("\n") : errors,
    }),
    prompt,
    llm,
    new JsonOutputParser<CypherEvaluationChainOutput>(),
  ]);
  const result = await chain.invoke({
    question: "test",
    cypher: "test",
    schema: "test",
    errors: ["test"],
  });
  expect(result).toEqual({
    cypher: "testoutput",
    errors: ["testerror"],
  });
});

// Fixtures covering the ways models wrap JSON in markdown fences and prose.
// NOTE(review): internal blank lines/indentation reconstructed — verify.
const GOOD_JSON = `\`\`\`json
{
    "foo": "bar"
}
\`\`\``;

const JSON_WITH_NEW_LINES = `

\`\`\`json
{
    "foo": "bar"
}
\`\`\`

`;

const JSON_WITH_NEW_LINES_INSIDE = `\`\`\`json
{

    "foo": "bar"

}
\`\`\``;

const JSON_WITH_NEW_LINES_EVERYWHERE = `

\`\`\`json

{

    "foo": "bar"

}

\`\`\`

`;

const TICKS_WITH_NEW_LINES_EVERYWHERE = `

\`\`\`

{

    "foo": "bar"

}

\`\`\`

`;

const JSON_WITH_ESCAPED_DOUBLE_QUOTES_IN_NESTED_JSON = `\`\`\`json
{
    "action": "Final Answer",
    "action_input": "{\\"foo\\": \\"bar\\", \\"bar\\": \\"foo\\"}"
}
\`\`\``;

const NO_TICKS = `{
    "foo": "bar"
}`;

const NO_TICKS_WHITE_SPACE = `
{
    "foo": "bar"
}
`;

const TEXT_BEFORE = `Thought: I need to use the search tool
Action:
\`\`\`
{
    "foo": "bar"
}
\`\`\``;

const TEXT_AFTER = `\`\`\`
{
    "foo": "bar"
}
\`\`\`
This should do the trick`;

const TEXT_BEFORE_AND_AFTER = `Action: Testing
\`\`\`
{
    "foo": "bar"
}
\`\`\`
This should do the trick`;

const TEST_CASES = [
  GOOD_JSON,
  JSON_WITH_NEW_LINES,
  JSON_WITH_NEW_LINES_INSIDE,
  JSON_WITH_NEW_LINES_EVERYWHERE,
  TICKS_WITH_NEW_LINES_EVERYWHERE,
  NO_TICKS,
  NO_TICKS_WHITE_SPACE,
  TEXT_BEFORE,
  TEXT_AFTER,
  TEXT_BEFORE_AND_AFTER,
];

const EXPECTED_JSON = {
  foo: "bar",
};

// All wrapping variants must stream and one-shot parse to the same object.
for (const test_case of TEST_CASES) {
  // eslint-disable-next-line no-loop-func
  test(`JSONOutputParser parses ${test_case}`, async () => {
    async function* generator() {
      for (const token of test_case) {
        yield token;
      }
    }
    const parser = new JsonOutputParser();
    const result = await acc(parser.transform(generator(), {}));
    expect(result[result.length - 1]).toEqual(EXPECTED_JSON);
    await expect(parser.parse(test_case)).resolves.toEqual(EXPECTED_JSON);
  });
}

// Escaped double quotes inside a nested JSON string must survive parsing.
const TEST_CASES_ESCAPED_QUOTES = [
  JSON_WITH_ESCAPED_DOUBLE_QUOTES_IN_NESTED_JSON,
];

const EXPECTED_JSON_ESCAPED_QUOTES = {
  action: "Final Answer",
  action_input: '{"foo": "bar", "bar": "foo"}',
};

for (const test_case of TEST_CASES_ESCAPED_QUOTES) {
  // eslint-disable-next-line no-loop-func
  test(`JSONOutputParser parses ${test_case}`, async () => {
    async function* generator() {
      for (const token of test_case) {
        yield token;
      }
    }
    const parser = new JsonOutputParser();
    const result = await acc(parser.transform(generator(), {}));
    expect(result[result.length - 1]).toEqual(EXPECTED_JSON_ESCAPED_QUOTES);
    await expect(parser.parse(test_case)).resolves.toEqual(
      EXPECTED_JSON_ESCAPED_QUOTES
    );
  });
}
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/tests/structured.test.ts
// NOTE(review): this file was recovered from a whitespace-collapsed extraction.
// The exact line breaks and indentation inside the jest inline snapshots below
// were reconstructed and MUST be verified against the original file — jest
// compares snapshot strings exactly.
import { z } from "zod";
import { expect, test } from "@jest/globals";
import { OutputParserException } from "../base.js";
import { StructuredOutputParser } from "../structured.js";

// Name/description map form: parses fenced JSON and emits format instructions
// embedding the derived JSON Schema.
test("StructuredOutputParser.fromNamesAndDescriptions", async () => {
  const parser = StructuredOutputParser.fromNamesAndDescriptions({
    url: "A link to the resource",
  });

  expect(await parser.parse('```\n{"url": "value"}```')).toEqual({
    url: "value",
  });

  expect(parser.getFormatInstructions()).toMatchInlineSnapshot(`
    "You must format your output as a JSON value that adheres to a given "JSON Schema" instance.

    "JSON Schema" is a declarative language that allows you to annotate and validate JSON documents.

    For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}}
    would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings. Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.

    Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!

    Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock:
    \`\`\`json
    {"type":"object","properties":{"url":{"type":"string","description":"A link to the resource"}},"required":["url"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}
    \`\`\`
    "
  `);
});

// Native TS enum used by the nested-schema test further down.
enum StateProvinceEnum {
  Alabama = "AL",
  Alaska = "AK",
  Arizona = "AZ",
}

// Zod form with a single string field.
// NOTE(review): several tests below share this exact title in the original.
test("StructuredOutputParser.fromZodSchema", async () => {
  const parser = StructuredOutputParser.fromZodSchema(
    z.object({ url: z.string().describe("A link to the resource") })
  );

  expect(await parser.parse('```\n{"url": "value"}```')).toEqual({
    url: "value",
  });

  expect(parser.getFormatInstructions()).toMatchInlineSnapshot(`
    "You must format your output as a JSON value that adheres to a given "JSON Schema" instance.

    "JSON Schema" is a declarative language that allows you to annotate and validate JSON documents.

    For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}}
    would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings. Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.

    Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!

    Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock:
    \`\`\`json
    {"type":"object","properties":{"url":{"type":"string","description":"A link to the resource"}},"required":["url"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}
    \`\`\`
    "
  `);
});

// An enum value with the wrong case must be rejected with OutputParserException.
test("StructuredOutputParser.fromZodSchema", async () => {
  const parser = StructuredOutputParser.fromZodSchema(
    z.object({ answer: z.enum(["yes", "no"]).describe("yes or no") })
  );

  await expect(parser.parse('```\n{"answer": "YES"}```')).rejects.toThrow(
    OutputParserException
  );
});

// Multiple fields; fenced output with and without the "json" language tag,
// and with surrounding prose, must all parse.
test("StructuredOutputParser.fromZodSchema", async () => {
  const parser = StructuredOutputParser.fromZodSchema(
    z.object({
      answer: z.string().describe("answer to the user's question"),
      sources: z
        .array(z.string())
        .describe("sources used to answer the question, should be websites."),
    })
  );

  expect(
    await parser.parse(
      '```\n{"answer": "value", "sources": ["this-source"]}```'
    )
  ).toEqual({
    answer: "value",
    sources: ["this-source"],
  });

  expect(
    await parser.parse(
      '```json\n{"answer": "value", "sources": ["this-source"]}```'
    )
  ).toEqual({
    answer: "value",
    sources: ["this-source"],
  });

  expect(
    await parser.parse(
      'some other stuff```json\n{"answer": "value", "sources": ["this-source"]}```some other stuff at the end'
    )
  ).toEqual({
    answer: "value",
    sources: ["this-source"],
  });

  expect(parser.getFormatInstructions()).toMatchInlineSnapshot(`
    "You must format your output as a JSON value that adheres to a given "JSON Schema" instance.

    "JSON Schema" is a declarative language that allows you to annotate and validate JSON documents.

    For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}}
    would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings. Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.

    Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!

    Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock:
    \`\`\`json
    {"type":"object","properties":{"answer":{"type":"string","description":"answer to the user's question"},"sources":{"type":"array","items":{"type":"string"},"description":"sources used to answer the question, should be websites."}},"required":["answer","sources"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}
    \`\`\`
    "
  `);
});

// Deeply nested schema: datetime coercion, optional fields, zod enum,
// native enum, and an array of objects.
test("StructuredOutputParser.fromZodSchema", async () => {
  const parser = StructuredOutputParser.fromZodSchema(
    z
      .object({
        url: z.string().describe("A link to the resource"),
        title: z.string().describe("A title for the resource"),
        year: z.number().describe("The year the resource was created"),
        createdAt: z
          .string()
          .datetime()
          .describe("The date and time the resource was created"),
        createdAtDate: z.coerce
          .date()
          .describe("The date the resource was created")
          .optional(),
        authors: z.array(
          z.object({
            name: z.string().describe("The name of the author"),
            email: z.string().describe("The email of the author"),
            type: z.enum(["author", "editor"]).optional(),
            address: z
              .string()
              .optional()
              .describe("The address of the author"),
            stateProvince: z
              .nativeEnum(StateProvinceEnum)
              .optional()
              .describe("The state or province of the author"),
          })
        ),
      })
      .describe("Only One object")
  );

  expect(
    await parser.parse(
      '```\n{"url": "value", "title": "value", "year": 2011, "createdAt": "2023-03-29T16:07:09.600Z", "createdAtDate": "2023-03-29", "authors": [{"name": "value", "email": "value", "stateProvince": "AZ"}]}```'
    )
  ).toEqual({
    url: "value",
    title: "value",
    year: 2011,
    createdAt: "2023-03-29T16:07:09.600Z",
    // z.coerce.date() turns the "2023-03-29" string into a Date instance.
    createdAtDate: new Date("2023-03-29T00:00:00.000Z"),
    authors: [{ name: "value", email: "value", stateProvince: "AZ" }],
  });

  expect(parser.getFormatInstructions()).toMatchInlineSnapshot(`
    "You must format your output as a JSON value that adheres to a given "JSON Schema" instance.

    "JSON Schema" is a declarative language that allows you to annotate and validate JSON documents.

    For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}}
    would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings. Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.

    Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!

    Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock:
    \`\`\`json
    {"type":"object","properties":{"url":{"type":"string","description":"A link to the resource"},"title":{"type":"string","description":"A title for the resource"},"year":{"type":"number","description":"The year the resource was created"},"createdAt":{"type":"string","format":"date-time","description":"The date and time the resource was created"},"createdAtDate":{"type":"string","format":"date-time","description":"The date the resource was created"},"authors":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string","description":"The name of the author"},"email":{"type":"string","description":"The email of the author"},"type":{"type":"string","enum":["author","editor"]},"address":{"type":"string","description":"The address of the author"},"stateProvince":{"type":"string","enum":["AL","AK","AZ"],"description":"The state or province of the author"}},"required":["name","email"],"additionalProperties":false}}},"required":["url","title","year","createdAt","authors"],"additionalProperties":false,"description":"Only One object","$schema":"http://json-schema.org/draft-07/schema#"}
    \`\`\`
    "
  `);
});

// Literal newlines inside a JSON string value must survive parsing.
test("StructuredOutputParser.fromZodSchema parsing newlines", async () => {
  const parser = StructuredOutputParser.fromZodSchema(
    z
      .object({
        url: z.string().describe("A link to the resource"),
        summary: z.string().describe("A summary"),
      })
      .describe("Only One object")
  );

  expect(
    await parser.parse(
      '```\n{"url": "value", "summary": "line1,\nline2,\nline3"}```'
    )
  ).toEqual({
    url: "value",
    summary: "line1,\nline2,\nline3",
  });
});
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/tests/string.test.ts
import { describe, test, expect } from "@jest/globals"; import { StringOutputParser } from "../string.js"; import { AIMessage, BaseMessage, MessageContentComplex, } from "../../messages/index.js"; describe("StringOutputParser", () => { test("string input", async () => { const msg: string = "hello"; const parser = new StringOutputParser(); const result = await parser.invoke(msg); expect(result).toEqual("hello"); }); test("BaseMessage string content", async () => { const msg: BaseMessage = new AIMessage({ content: "hello" }); const parser = new StringOutputParser(); const result = await parser.invoke(msg); expect(result).toEqual("hello"); }); test("BaseMessage complex text type", async () => { const parser = new StringOutputParser(); const content: MessageContentComplex[] = [ { type: "text", text: "hello", }, ]; const msg: BaseMessage = new AIMessage({ content, }); const result = await parser.invoke(msg); expect(result).toEqual("hello"); }); test("BaseMessage multiple complex text type", async () => { const parser = new StringOutputParser(); const content: MessageContentComplex[] = [ { type: "text", text: "hello", }, { type: "text", text: "there", }, ]; const msg: BaseMessage = new AIMessage({ content, }); const result = await parser.invoke(msg); expect(result).toEqual("hellothere"); }); test("BaseMessage complex text and image type fails", async () => { const parser = new StringOutputParser(); const content: MessageContentComplex[] = [ { type: "text", text: "hello", }, { type: "image_url", image_url: "https://example.com/example.png", }, ]; const msg: BaseMessage = new AIMessage({ content, }); await expect(async () => { await parser.invoke(msg); }).rejects.toThrowError(); }); });
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/tests/xml.test.ts
import { test, expect } from "@jest/globals"; import { FakeStreamingLLM } from "../../utils/testing/index.js"; import { XMLOutputParser } from "../xml.js"; const XML_EXAMPLE = `<?xml version="1.0" encoding="UTF-8"?> <userProfile> <userID>12345</userID> <email>john.doe@example.com</email> <roles> <role>Admin</role> <role>User</role> </roles> <preferences> <theme>Dark</theme> <notifications> <email>true</email> </notifications> </preferences> </userProfile>`; const BACKTICK_WRAPPED_XML = `\`\`\`xml\n${XML_EXAMPLE}\n\`\`\``; const expectedResult = { userProfile: [ { userID: "12345", }, { email: "john.doe@example.com", }, { roles: [ { role: "Admin", }, { role: "User", }, ], }, { preferences: [ { theme: "Dark", }, { notifications: [ { email: "true", }, ], }, ], }, ], }; test("Can parse XML", async () => { const parser = new XMLOutputParser(); const result = await parser.invoke(XML_EXAMPLE); expect(result).toStrictEqual(expectedResult); }); test("Can parse backtick wrapped XML", async () => { const parser = new XMLOutputParser(); const result = await parser.invoke(BACKTICK_WRAPPED_XML); expect(result).toStrictEqual(expectedResult); }); test("Can format instructions with passed tags.", async () => { const tags = ["tag1", "tag2", "tag3"]; const parser = new XMLOutputParser({ tags }); const formatInstructions = parser.getFormatInstructions(); expect(formatInstructions).toContain("tag1, tag2, tag3"); }); test("Can parse streams", async () => { const parser = new XMLOutputParser(); const streamingLlm = new FakeStreamingLLM({ responses: [XML_EXAMPLE], }).pipe(parser); const result = await streamingLlm.stream(XML_EXAMPLE); let finalResult = {}; for await (const chunk of result) { console.log(chunk); finalResult = chunk; } expect(finalResult).toStrictEqual(expectedResult); });
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/openai_functions/json_output_functions_parsers.ts
import { JsonSchema7ObjectType } from "zod-to-json-schema"; import { Optional } from "../../types/type-utils.js"; import { Generation, ChatGeneration } from "../../outputs.js"; import { BaseLLMOutputParser } from "../base.js"; import { parsePartialJson } from "../json.js"; import { BaseCumulativeTransformOutputParser, BaseCumulativeTransformOutputParserInput, } from "../transform.js"; import { compare, type Operation as JSONPatchOperation, } from "../../utils/json_patch.js"; /** * Represents optional parameters for a function in a JSON Schema. */ export type FunctionParameters = Optional< JsonSchema7ObjectType, "additionalProperties" >; /** * Class for parsing the output of an LLM. Can be configured to return * only the arguments of the function call in the output. */ export class OutputFunctionsParser extends BaseLLMOutputParser<string> { static lc_name() { return "OutputFunctionsParser"; } lc_namespace = ["langchain", "output_parsers", "openai_functions"]; lc_serializable = true; argsOnly = true; constructor(config?: { argsOnly?: boolean }) { super(); this.argsOnly = config?.argsOnly ?? this.argsOnly; } /** * Parses the output and returns a string representation of the function * call or its arguments. * @param generations The output of the LLM to parse. * @returns A string representation of the function call or its arguments. 
*/ async parseResult( generations: Generation[] | ChatGeneration[] ): Promise<string> { if ("message" in generations[0]) { const gen = generations[0] as ChatGeneration; const functionCall = gen.message.additional_kwargs.function_call; if (!functionCall) { throw new Error( `No function_call in message ${JSON.stringify(generations)}` ); } if (!functionCall.arguments) { throw new Error( `No arguments in function_call ${JSON.stringify(generations)}` ); } if (this.argsOnly) { return functionCall.arguments; } return JSON.stringify(functionCall); } else { throw new Error( `No message in generations ${JSON.stringify(generations)}` ); } } } /** * Class for parsing the output of an LLM into a JSON object. Uses an * instance of `OutputFunctionsParser` to parse the output. */ export class JsonOutputFunctionsParser< // eslint-disable-next-line @typescript-eslint/no-explicit-any Output extends Record<string, any> = Record<string, any> > extends BaseCumulativeTransformOutputParser<Output> { static lc_name() { return "JsonOutputFunctionsParser"; } lc_namespace = ["langchain", "output_parsers", "openai_functions"]; lc_serializable = true; outputParser: OutputFunctionsParser; argsOnly = true; constructor( config?: { argsOnly?: boolean } & BaseCumulativeTransformOutputParserInput ) { super(config); this.argsOnly = config?.argsOnly ?? this.argsOnly; this.outputParser = new OutputFunctionsParser(config); } protected _diff( prev: unknown | undefined, next: unknown ): JSONPatchOperation[] | undefined { if (!next) { return undefined; } const ops = compare(prev ?? 
{}, next); return ops; } async parsePartialResult( generations: ChatGeneration[] ): Promise<Output | undefined> { const generation = generations[0]; if (!generation.message) { return undefined; } const { message } = generation; const functionCall = message.additional_kwargs.function_call; if (!functionCall) { return undefined; } if (this.argsOnly) { return parsePartialJson(functionCall.arguments); } return { ...functionCall, arguments: parsePartialJson(functionCall.arguments), // eslint-disable-next-line @typescript-eslint/no-explicit-any } as Record<string, any> as Output; } /** * Parses the output and returns a JSON object. If `argsOnly` is true, * only the arguments of the function call are returned. * @param generations The output of the LLM to parse. * @returns A JSON object representation of the function call or its arguments. */ async parseResult( generations: Generation[] | ChatGeneration[] ): Promise<Output> { const result = await this.outputParser.parseResult(generations); if (!result) { throw new Error( `No result from "OutputFunctionsParser" ${JSON.stringify(generations)}` ); } return this.parse(result); } async parse(text: string): Promise<Output> { const parsedResult = JSON.parse(text); if (this.argsOnly) { return parsedResult; } parsedResult.arguments = JSON.parse(parsedResult.arguments); return parsedResult; } getFormatInstructions(): string { return ""; } } /** * Class for parsing the output of an LLM into a JSON object and returning * a specific attribute. Uses an instance of `JsonOutputFunctionsParser` * to parse the output. 
*/ export class JsonKeyOutputFunctionsParser< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends Record<string, any> = Record<string, any> > extends BaseLLMOutputParser<T> { static lc_name() { return "JsonKeyOutputFunctionsParser"; } lc_namespace = ["langchain", "output_parsers", "openai_functions"]; lc_serializable = true; outputParser = new JsonOutputFunctionsParser(); attrName: string; get lc_aliases() { return { attrName: "key_name", }; } constructor(fields: { attrName: string }) { super(fields); this.attrName = fields.attrName; } /** * Parses the output and returns a specific attribute of the parsed JSON * object. * @param generations The output of the LLM to parse. * @returns The value of a specific attribute of the parsed JSON object. */ async parseResult(generations: Generation[] | ChatGeneration[]): Promise<T> { const result = await this.outputParser.parseResult(generations); return result[this.attrName as keyof typeof result] as T; } }
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/openai_functions/index.ts
export * from "./json_output_functions_parsers.js";
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/openai_tools/index.ts
export * from "./json_output_tools_parsers.js";
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers
lc_public_repos/langchainjs/langchain-core/src/output_parsers/openai_tools/json_output_tools_parsers.ts
import { z } from "zod";
import { ChatGeneration, ChatGenerationChunk } from "../../outputs.js";
import { OutputParserException } from "../base.js";
import { parsePartialJson } from "../json.js";
import { InvalidToolCall, ToolCall } from "../../messages/tool.js";
import {
  BaseCumulativeTransformOutputParser,
  BaseCumulativeTransformOutputParserInput,
} from "../transform.js";
import { isAIMessage } from "../../messages/ai.js";

/**
 * Backwards-compatible representation of a parsed tool call: `type` holds
 * the tool's name and `args` the parsed arguments object.
 */
export type ParsedToolCall = {
  id?: string;

  type: string;

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  args: Record<string, any>;
};

export type JsonOutputToolsParserParams = {
  /** Whether to return the tool call id. */
  returnId?: boolean;
} & BaseCumulativeTransformOutputParserInput;

/**
 * Converts an OpenAI-style raw tool call (`{ id, function: { name, arguments } }`)
 * into a LangChain `ToolCall` with its `arguments` string parsed into `args`.
 *
 * With `partial: true`, arguments that cannot be parsed yet yield `undefined`
 * (callers are expected to retry once more of the stream has arrived);
 * otherwise invalid JSON throws an `OutputParserException`.
 * @param rawToolCall Raw tool call; returns `undefined` if it has no
 *   `function` property.
 * @param options `returnId` copies `rawToolCall.id` onto the result;
 *   `partial` enables tolerant parsing of truncated JSON.
 */
export function parseToolCall(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  rawToolCall: Record<string, any>,
  options: { returnId?: boolean; partial: true }
): ToolCall | undefined;
export function parseToolCall(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  rawToolCall: Record<string, any>,
  options?: { returnId?: boolean; partial?: false }
): ToolCall;
export function parseToolCall(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  rawToolCall: Record<string, any>,
  options?: { returnId?: boolean; partial?: boolean }
): ToolCall | undefined;
export function parseToolCall(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  rawToolCall: Record<string, any>,
  options?: { returnId?: boolean; partial?: boolean }
): ToolCall | undefined {
  if (rawToolCall.function === undefined) {
    return undefined;
  }
  let functionArgs;
  if (options?.partial) {
    try {
      // Tolerant parse: streamed arguments may be cut off mid-token.
      functionArgs = parsePartialJson(rawToolCall.function.arguments ?? "{}");
    } catch (e) {
      // Not parseable yet; signal "incomplete" instead of failing.
      return undefined;
    }
  } else {
    try {
      functionArgs = JSON.parse(rawToolCall.function.arguments);
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (e: any) {
      throw new OutputParserException(
        [
          `Function "${rawToolCall.function.name}" arguments:`,
          ``,
          rawToolCall.function.arguments,
          ``,
          `are not valid JSON.`,
          `Error: ${e.message}`,
        ].join("\n")
      );
    }
  }
  const parsedToolCall: ToolCall = {
    name: rawToolCall.function.name,
    args: functionArgs,
    type: "tool_call",
  };
  if (options?.returnId) {
    parsedToolCall.id = rawToolCall.id;
  }
  return parsedToolCall;
}

/**
 * Converts a LangChain `ToolCall` back to the OpenAI wire format, with the
 * arguments re-serialized as a JSON string under a `function` key.
 * @throws Error when the tool call has no `id`.
 */
export function convertLangChainToolCallToOpenAI(toolCall: ToolCall) {
  if (toolCall.id === undefined) {
    throw new Error(`All OpenAI tool calls must have an "id" field.`);
  }
  return {
    id: toolCall.id,
    type: "function",
    function: {
      name: toolCall.name,
      arguments: JSON.stringify(toolCall.args),
    },
  };
}

/**
 * Wraps a raw tool call that could not be parsed into an `InvalidToolCall`
 * record, preserving the raw name/arguments and an optional error message.
 */
export function makeInvalidToolCall(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  rawToolCall: Record<string, any>,
  errorMsg?: string
): InvalidToolCall {
  return {
    name: rawToolCall.function?.name,
    args: rawToolCall.function?.arguments,
    id: rawToolCall.id,
    error: errorMsg,
    type: "invalid_tool_call",
  };
}

/**
 * Class for parsing the output of a tool-calling LLM into a JSON object.
 */
export class JsonOutputToolsParser<
  T
> extends BaseCumulativeTransformOutputParser<T> {
  static lc_name() {
    return "JsonOutputToolsParser";
  }

  /** Whether to include the tool call id on parsed results. */
  returnId = false;

  lc_namespace = ["langchain", "output_parsers", "openai_tools"];

  lc_serializable = true;

  constructor(fields?: JsonOutputToolsParserParams) {
    super(fields);
    this.returnId = fields?.returnId ?? this.returnId;
  }

  // Diffing of streamed partial results is not supported by this parser.
  protected _diff() {
    throw new Error("Not supported.");
  }

  // Plain-text parsing is not meaningful here; use parseResult instead.
  async parse(): Promise<T> {
    throw new Error("Not implemented.");
  }

  async parseResult(generations: ChatGeneration[]): Promise<T> {
    // Delegate with partial=false so malformed arguments throw.
    const result = await this.parsePartialResult(generations, false);
    return result;
  }

  /**
   * Extracts tool calls from the first generation's message and returns them
   * as `ParsedToolCall`s (`type` carries the tool name). Prefers the
   * structured `tool_calls` of an AIMessage and falls back to the raw
   * OpenAI-style `additional_kwargs.tool_calls`. Calls that cannot be parsed
   * yet are dropped; no tool calls at all yields `[]`.
   * @param generations The output of the LLM to parse.
   * @param partial When true (default), tolerate truncated argument JSON.
   */
  async parsePartialResult(
    generations: ChatGenerationChunk[] | ChatGeneration[],
    partial = true
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
  ): Promise<any> {
    const message = generations[0].message;
    let toolCalls;
    if (isAIMessage(message) && message.tool_calls?.length) {
      toolCalls = message.tool_calls.map((toolCall) => {
        const { id, ...rest } = toolCall;
        if (!this.returnId) {
          return rest;
        }
        return {
          id,
          ...rest,
        };
      });
    } else if (message.additional_kwargs.tool_calls !== undefined) {
      // Deep-copy so parsing never mutates the original message payload.
      const rawToolCalls = JSON.parse(
        JSON.stringify(message.additional_kwargs.tool_calls)
      );
      toolCalls = rawToolCalls.map((rawToolCall: Record<string, unknown>) => {
        return parseToolCall(rawToolCall, { returnId: this.returnId, partial });
      });
    }
    if (!toolCalls) {
      return [];
    }
    const parsedToolCalls = [];
    for (const toolCall of toolCalls) {
      if (toolCall !== undefined) {
        // Legacy output shape: expose the tool name under `type`.
        const backwardsCompatibleToolCall: ParsedToolCall = {
          type: toolCall.name,
          args: toolCall.args,
          id: toolCall.id,
        };
        parsedToolCalls.push(backwardsCompatibleToolCall);
      }
    }
    return parsedToolCalls;
  }
}

export type JsonOutputKeyToolsParserParams<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  T extends Record<string, any> = Record<string, any>
> = {
  /** The name of the tool calls to extract. */
  keyName: string;
  /** Whether to return only the first matching tool call. */
  returnSingle?: boolean;
  /** Optional zod schema used to validate each parsed result. */
  zodSchema?: z.ZodType<T>;
} & JsonOutputToolsParserParams;

/**
 * Class for parsing the output of a tool-calling LLM into a JSON object if you are
 * expecting only a single tool to be called.
*/ export class JsonOutputKeyToolsParser< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends Record<string, any> = Record<string, any> > extends JsonOutputToolsParser<T> { static lc_name() { return "JsonOutputKeyToolsParser"; } lc_namespace = ["langchain", "output_parsers", "openai_tools"]; lc_serializable = true; returnId = false; /** The type of tool calls to return. */ keyName: string; /** Whether to return only the first tool call. */ returnSingle = false; zodSchema?: z.ZodType<T>; constructor(params: JsonOutputKeyToolsParserParams<T>) { super(params); this.keyName = params.keyName; this.returnSingle = params.returnSingle ?? this.returnSingle; this.zodSchema = params.zodSchema; } protected async _validateResult(result: unknown): Promise<T> { if (this.zodSchema === undefined) { return result as T; } const zodParsedResult = await this.zodSchema.safeParseAsync(result); if (zodParsedResult.success) { return zodParsedResult.data; } else { throw new OutputParserException( `Failed to parse. Text: "${JSON.stringify( result, null, 2 )}". 
Error: ${JSON.stringify(zodParsedResult.error.errors)}`, JSON.stringify(result, null, 2) ); } } // eslint-disable-next-line @typescript-eslint/no-explicit-any async parsePartialResult(generations: ChatGeneration[]): Promise<any> { const results = await super.parsePartialResult(generations); const matchingResults = results.filter( (result: ParsedToolCall) => result.type === this.keyName ); // eslint-disable-next-line @typescript-eslint/no-explicit-any let returnedValues: ParsedToolCall[] | Record<string, any>[] = matchingResults; if (!matchingResults.length) { return undefined; } if (!this.returnId) { returnedValues = matchingResults.map( (result: ParsedToolCall) => result.args ); } if (this.returnSingle) { return returnedValues[0]; } return returnedValues; } // eslint-disable-next-line @typescript-eslint/no-explicit-any async parseResult(generations: ChatGeneration[]): Promise<any> { const results = await super.parsePartialResult(generations, false); const matchingResults = results.filter( (result: ParsedToolCall) => result.type === this.keyName ); // eslint-disable-next-line @typescript-eslint/no-explicit-any let returnedValues: ParsedToolCall[] | Record<string, any>[] = matchingResults; if (!matchingResults.length) { return undefined; } if (!this.returnId) { returnedValues = matchingResults.map( (result: ParsedToolCall) => result.args ); } if (this.returnSingle) { return this._validateResult(returnedValues[0]); } const toolCallResults = await Promise.all( returnedValues.map((value) => this._validateResult(value)) ); return toolCallResults; } }
0
lc_public_repos/langchainjs/langchain-core/src/output_parsers/openai_tools
lc_public_repos/langchainjs/langchain-core/src/output_parsers/openai_tools/tests/json_output_tools_parser.test.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect } from "@jest/globals"; import { z } from "zod"; import { JsonOutputKeyToolsParser } from "../json_output_tools_parsers.js"; import { OutputParserException } from "../../base.js"; import { AIMessage, AIMessageChunk } from "../../../messages/ai.js"; import { RunnableLambda } from "../../../runnables/base.js"; test("JSONOutputKeyToolsParser invoke", async () => { const outputParser = new JsonOutputKeyToolsParser({ keyName: "testing", returnSingle: true, }); const result = await outputParser.invoke( new AIMessage({ content: "", additional_kwargs: { tool_calls: [ { id: "test", type: "function", function: { name: "testing", arguments: JSON.stringify({ testKey: 9 }), }, }, ], }, }) ); expect(result).toEqual({ testKey: 9 }); }); test("JSONOutputKeyToolsParser with a passed schema throws", async () => { const outputParser = new JsonOutputKeyToolsParser({ keyName: "testing", returnSingle: true, zodSchema: z.object({ testKey: z.string(), }), }); try { await outputParser.invoke( new AIMessage({ content: "", additional_kwargs: { tool_calls: [ { id: "test", type: "function", function: { name: "testing", arguments: JSON.stringify({ testKey: 9 }), }, }, ], }, }) ); } catch (e) { expect(e).toBeInstanceOf(OutputParserException); } }); test("JSONOutputKeyToolsParser can validate a proper input", async () => { const outputParser = new JsonOutputKeyToolsParser({ keyName: "testing", returnSingle: true, zodSchema: z.object({ testKey: z.string(), }), }); const result = await outputParser.invoke( new AIMessage({ content: "", additional_kwargs: { tool_calls: [ { id: "test", type: "function", function: { name: "testing", arguments: JSON.stringify({ testKey: "testval" }), }, }, ], }, }) ); expect(result).toEqual({ testKey: "testval" }); }); test("JSONOutputKeyToolsParser invoke with a top-level tool call", async () => { const outputParser = new JsonOutputKeyToolsParser({ keyName: "testing", returnSingle: true, }); 
const result = await outputParser.invoke( new AIMessage({ content: "", tool_calls: [ { id: "test", name: "testing", args: { testKey: 9 }, }, ], }) ); expect(result).toEqual({ testKey: 9 }); }); test("JSONOutputKeyToolsParser with a top-level tool call and passed schema throws", async () => { const outputParser = new JsonOutputKeyToolsParser({ keyName: "testing", returnSingle: true, zodSchema: z.object({ testKey: z.string(), }), }); try { await outputParser.invoke( new AIMessage({ content: "", tool_calls: [ { id: "test", name: "testing", args: { testKey: 9 }, }, ], }) ); } catch (e) { expect(e).toBeInstanceOf(OutputParserException); } }); test("JSONOutputKeyToolsParser with a top-level tool call can validate a proper input", async () => { const outputParser = new JsonOutputKeyToolsParser({ keyName: "testing", returnSingle: true, zodSchema: z.object({ testKey: z.string(), }), }); const result = await outputParser.invoke( new AIMessage({ content: "", tool_calls: [ { id: "test", name: "testing", args: { testKey: "testval" }, }, ], }) ); expect(result).toEqual({ testKey: "testval" }); }); test("JSONOutputKeyToolsParser can handle streaming input", async () => { const outputParser = new JsonOutputKeyToolsParser({ keyName: "testing", returnSingle: true, zodSchema: z.object({ testKey: z.string(), }), }); const fakeModel = RunnableLambda.from(async function* () { yield new AIMessageChunk({ content: "", tool_call_chunks: [ { index: 0, id: "test", name: "testing", args: `{ "testKey":`, type: "tool_call_chunk", }, ], }); yield new AIMessageChunk({ content: "", tool_call_chunks: [], }); yield new AIMessageChunk({ content: "", tool_call_chunks: [ { index: 0, id: "test", args: ` "testv`, type: "tool_call_chunk", }, ], }); yield new AIMessageChunk({ content: "", tool_call_chunks: [ { index: 0, id: "test", args: `al" }`, type: "tool_call_chunk", }, ], }); }); const stream = await (fakeModel as any).pipe(outputParser).stream(); const chunks = []; for await (const chunk of stream) { 
chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); expect(chunks.at(-1)).toEqual({ testKey: "testval" }); // TODO: Fix typing issue const result = await (fakeModel as any).pipe(outputParser).invoke( new AIMessage({ content: "", tool_calls: [ { id: "test", name: "testing", args: { testKey: "testval" }, type: "tool_call", }, ], }) ); expect(result).toEqual({ testKey: "testval" }); });