index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/modifier.ts
import { BaseMessage, BaseMessageFields, MessageType } from "./base.js";

export interface RemoveMessageFields
  extends Omit<BaseMessageFields, "content"> {
  /**
   * The ID of the message to remove.
   */
  id: string;
}

/**
 * Message responsible for deleting other messages.
 */
export class RemoveMessage extends BaseMessage {
  /**
   * The ID of the message to remove.
   */
  id: string;

  constructor(fields: RemoveMessageFields) {
    // A RemoveMessage targets another message by ID and carries no
    // content of its own, so content is always the empty string.
    super({ ...fields, content: "" });
    this.id = fields.id;
  }

  _getType(): MessageType {
    return "remove";
  }

  override get _printableFields(): Record<string, unknown> {
    return { ...super._printableFields, id: this.id };
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/human.ts
import {
  BaseMessage,
  BaseMessageChunk,
  mergeContent,
  _mergeDicts,
  type MessageType,
} from "./base.js";

/**
 * Represents a human message in a conversation.
 */
export class HumanMessage extends BaseMessage {
  static lc_name() {
    return "HumanMessage";
  }

  _getType(): MessageType {
    return "human";
  }
}

/**
 * Represents a chunk of a human message, which can be concatenated with
 * other human message chunks.
 */
export class HumanMessageChunk extends BaseMessageChunk {
  static lc_name() {
    return "HumanMessageChunk";
  }

  _getType(): MessageType {
    return "human";
  }

  concat(chunk: HumanMessageChunk) {
    // Merge every field pairwise; this chunk's id wins when both are set.
    const content = mergeContent(this.content, chunk.content);
    const additional_kwargs = _mergeDicts(
      this.additional_kwargs,
      chunk.additional_kwargs
    );
    const response_metadata = _mergeDicts(
      this.response_metadata,
      chunk.response_metadata
    );
    return new HumanMessageChunk({
      content,
      additional_kwargs,
      response_metadata,
      id: this.id ?? chunk.id,
    });
  }
}

/** Type guard: true when `x` is a human message. */
export function isHumanMessage(x: BaseMessage): x is HumanMessage {
  return x.getType() === "human";
}

/** Type guard: true when `x` is a human message chunk. */
export function isHumanMessageChunk(
  x: BaseMessageChunk
): x is HumanMessageChunk {
  return x.getType() === "human";
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/tool.ts
import { BaseMessage, BaseMessageChunk, type BaseMessageFields, mergeContent, _mergeDicts, type MessageType, _mergeObj, _mergeStatus, } from "./base.js"; export interface ToolMessageFieldsWithToolCallId extends BaseMessageFields { /** * Artifact of the Tool execution which is not meant to be sent to the model. * * Should only be specified if it is different from the message content, e.g. if only * a subset of the full tool output is being passed as message content but the full * output is needed in other parts of the code. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any artifact?: any; tool_call_id: string; /** * Status of the tool invocation. * @version 0.2.19 */ status?: "success" | "error"; } /** * Represents a tool message in a conversation. */ export class ToolMessage extends BaseMessage { static lc_name() { return "ToolMessage"; } get lc_aliases(): Record<string, string> { // exclude snake case conversion to pascal case return { tool_call_id: "tool_call_id" }; } /** * Status of the tool invocation. * @version 0.2.19 */ status?: "success" | "error"; tool_call_id: string; /** * Artifact of the Tool execution which is not meant to be sent to the model. * * Should only be specified if it is different from the message content, e.g. if only * a subset of the full tool output is being passed as message content but the full * output is needed in other parts of the code. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any artifact?: any; constructor(fields: ToolMessageFieldsWithToolCallId); constructor( fields: string | BaseMessageFields, tool_call_id: string, name?: string ); constructor( fields: string | ToolMessageFieldsWithToolCallId, tool_call_id?: string, name?: string ) { if (typeof fields === "string") { // eslint-disable-next-line no-param-reassign, @typescript-eslint/no-non-null-assertion fields = { content: fields, name, tool_call_id: tool_call_id! 
}; } super(fields); this.tool_call_id = fields.tool_call_id; this.artifact = fields.artifact; this.status = fields.status; } _getType(): MessageType { return "tool"; } static isInstance(message: BaseMessage): message is ToolMessage { return message._getType() === "tool"; } override get _printableFields(): Record<string, unknown> { return { ...super._printableFields, tool_call_id: this.tool_call_id, artifact: this.artifact, }; } } /** * Represents a chunk of a tool message, which can be concatenated * with other tool message chunks. */ export class ToolMessageChunk extends BaseMessageChunk { tool_call_id: string; /** * Status of the tool invocation. * @version 0.2.19 */ status?: "success" | "error"; /** * Artifact of the Tool execution which is not meant to be sent to the model. * * Should only be specified if it is different from the message content, e.g. if only * a subset of the full tool output is being passed as message content but the full * output is needed in other parts of the code. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any artifact?: any; constructor(fields: ToolMessageFieldsWithToolCallId) { super(fields); this.tool_call_id = fields.tool_call_id; this.artifact = fields.artifact; this.status = fields.status; } static lc_name() { return "ToolMessageChunk"; } _getType(): MessageType { return "tool"; } concat(chunk: ToolMessageChunk) { return new ToolMessageChunk({ content: mergeContent(this.content, chunk.content), additional_kwargs: _mergeDicts( this.additional_kwargs, chunk.additional_kwargs ), response_metadata: _mergeDicts( this.response_metadata, chunk.response_metadata ), artifact: _mergeObj(this.artifact, chunk.artifact), tool_call_id: this.tool_call_id, id: this.id ?? chunk.id, status: _mergeStatus(this.status, chunk.status), }); } override get _printableFields(): Record<string, unknown> { return { ...super._printableFields, tool_call_id: this.tool_call_id, artifact: this.artifact, }; } } /** * A call to a tool. 
* @property {string} name - The name of the tool to be called * @property {Record<string, any>} args - The arguments to the tool call * @property {string} [id] - If provided, an identifier associated with the tool call */ export type ToolCall = { name: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any args: Record<string, any>; id?: string; type?: "tool_call"; }; /** * A chunk of a tool call (e.g., as part of a stream). * When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), * all string attributes are concatenated. Chunks are only merged if their * values of `index` are equal and not None. * * @example * ```ts * const leftChunks = [ * { * name: "foo", * args: '{"a":', * index: 0 * } * ]; * * const leftAIMessageChunk = new AIMessageChunk({ * content: "", * tool_call_chunks: leftChunks * }); * * const rightChunks = [ * { * name: undefined, * args: '1}', * index: 0 * } * ]; * * const rightAIMessageChunk = new AIMessageChunk({ * content: "", * tool_call_chunks: rightChunks * }); * * const result = leftAIMessageChunk.concat(rightAIMessageChunk); * // result.tool_call_chunks is equal to: * // [ * // { * // name: "foo", * // args: '{"a":1}' * // index: 0 * // } * // ] * ``` * * @property {string} [name] - If provided, a substring of the name of the tool to be called * @property {string} [args] - If provided, a JSON substring of the arguments to the tool call * @property {string} [id] - If provided, a substring of an identifier for the tool call * @property {number} [index] - If provided, the index of the tool call in a sequence */ export type ToolCallChunk = { name?: string; args?: string; id?: string; index?: number; type?: "tool_call_chunk"; }; export type InvalidToolCall = { name?: string; args?: string; id?: string; error?: string; type?: "invalid_tool_call"; }; export function defaultToolCallParser( // eslint-disable-next-line @typescript-eslint/no-explicit-any rawToolCalls: Record<string, any>[] ): [ToolCall[], InvalidToolCall[]] { 
const toolCalls: ToolCall[] = []; const invalidToolCalls: InvalidToolCall[] = []; for (const toolCall of rawToolCalls) { if (!toolCall.function) { continue; } else { const functionName = toolCall.function.name; try { const functionArgs = JSON.parse(toolCall.function.arguments); const parsed = { name: functionName || "", args: functionArgs || {}, id: toolCall.id, }; toolCalls.push(parsed); } catch (error) { invalidToolCalls.push({ name: functionName, args: toolCall.function.arguments, id: toolCall.id, error: "Malformed args.", }); } } } return [toolCalls, invalidToolCalls]; } export function isToolMessage(x: BaseMessage): x is ToolMessage { return x._getType() === "tool"; } export function isToolMessageChunk(x: BaseMessageChunk): x is ToolMessageChunk { return x._getType() === "tool"; }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/transformers.ts
import { BaseDocumentTransformer } from "../documents/transformers.js"; import { BaseLanguageModel } from "../language_models/base.js"; import { Runnable, RunnableLambda } from "../runnables/base.js"; import { AIMessage, AIMessageChunk, AIMessageChunkFields } from "./ai.js"; import { BaseMessage, MessageType, BaseMessageChunk, BaseMessageFields, } from "./base.js"; import { ChatMessage, ChatMessageChunk, ChatMessageFieldsWithRole, } from "./chat.js"; import { FunctionMessage, FunctionMessageChunk, FunctionMessageFieldsWithName, } from "./function.js"; import { HumanMessage, HumanMessageChunk } from "./human.js"; import { RemoveMessage } from "./modifier.js"; import { SystemMessage, SystemMessageChunk } from "./system.js"; import { ToolMessage, ToolMessageChunk, ToolMessageFieldsWithToolCallId, } from "./tool.js"; import { convertToChunk } from "./utils.js"; export type MessageUnion = | typeof HumanMessage | typeof AIMessage | typeof SystemMessage | typeof ChatMessage | typeof FunctionMessage | typeof ToolMessage | typeof RemoveMessage; export type MessageChunkUnion = | typeof HumanMessageChunk | typeof AIMessageChunk | typeof SystemMessageChunk | typeof FunctionMessageChunk | typeof ToolMessageChunk | typeof ChatMessageChunk | typeof RemoveMessage; // RemoveMessage does not have a chunk class. 
export type MessageTypeOrClass = MessageType | MessageUnion | MessageChunkUnion; const _isMessageType = (msg: BaseMessage, types: MessageTypeOrClass[]) => { const typesAsStrings = [ ...new Set<string>( types?.map((t) => { if (typeof t === "string") { return t; } // eslint-disable-next-line @typescript-eslint/no-explicit-any const instantiatedMsgClass = new (t as any)({}); if ( !("_getType" in instantiatedMsgClass) || typeof instantiatedMsgClass._getType !== "function" ) { throw new Error("Invalid type provided."); } return instantiatedMsgClass._getType(); }) ), ]; const msgType = msg._getType(); return typesAsStrings.some((t) => t === msgType); }; export interface FilterMessagesFields { /** * @param {string[] | undefined} includeNames Message names to include. */ includeNames?: string[]; /** * @param {string[] | undefined} excludeNames Messages names to exclude. */ excludeNames?: string[]; /** * @param {(MessageType | BaseMessage)[] | undefined} includeTypes Message types to include. Can be specified as string names (e.g. * "system", "human", "ai", ...) or as BaseMessage classes (e.g. * SystemMessage, HumanMessage, AIMessage, ...). */ includeTypes?: MessageTypeOrClass[]; /** * @param {(MessageType | BaseMessage)[] | undefined} excludeTypes Message types to exclude. Can be specified as string names (e.g. * "system", "human", "ai", ...) or as BaseMessage classes (e.g. * SystemMessage, HumanMessage, AIMessage, ...). */ excludeTypes?: MessageTypeOrClass[]; /** * @param {string[] | undefined} includeIds Message IDs to include. */ includeIds?: string[]; /** * @param {string[] | undefined} excludeIds Message IDs to exclude. */ excludeIds?: string[]; } /** * Filter messages based on name, type or id. * * @param {BaseMessage[] | FilterMessagesFields} messagesOrOptions - Either an array of BaseMessage objects to filter or the filtering options. If an array is provided, the `options` parameter should also be supplied. 
If filtering options are provided, a RunnableLambda is returned. * @param {FilterMessagesFields} [options] - Optional filtering options. Should only be provided if `messagesOrOptions` is an array of BaseMessage objects. * @returns A list of Messages that meets at least one of the include conditions and none * of the exclude conditions, or a RunnableLambda which does the same. If no include conditions are specified then * anything that is not explicitly excluded will be included. * @throws {Error} If two incompatible arguments are provided. * * @example * ```typescript * import { filterMessages, AIMessage, HumanMessage, SystemMessage } from "@langchain/core/messages"; * * const messages = [ * new SystemMessage("you're a good assistant."), * new HumanMessage({ content: "what's your name", id: "foo", name: "example_user" }), * new AIMessage({ content: "steve-o", id: "bar", name: "example_assistant" }), * new HumanMessage({ content: "what's your favorite color", id: "baz" }), * new AIMessage({ content: "silicon blue" , id: "blah" }), * ]; * * filterMessages(messages, { * includeNames: ["example_user", "example_assistant"], * includeTypes: ["system"], * excludeIds: ["bar"], * }); * ``` * * The above example would return: * ```typescript * [ * new SystemMessage("you're a good assistant."), * new HumanMessage({ content: "what's your name", id: "foo", name: "example_user" }), * ] * ``` */ export function filterMessages( options?: FilterMessagesFields ): Runnable<BaseMessage[], BaseMessage[]>; export function filterMessages( messages: BaseMessage[], options?: FilterMessagesFields ): BaseMessage[]; export function filterMessages( messagesOrOptions?: BaseMessage[] | FilterMessagesFields, options?: FilterMessagesFields ): BaseMessage[] | Runnable<BaseMessage[], BaseMessage[]> { if (Array.isArray(messagesOrOptions)) { return _filterMessages(messagesOrOptions, options); } return RunnableLambda.from((input: BaseMessage[]): BaseMessage[] => { return _filterMessages(input, 
messagesOrOptions); }); } function _filterMessages( messages: BaseMessage[], options: FilterMessagesFields = {} ): BaseMessage[] { const { includeNames, excludeNames, includeTypes, excludeTypes, includeIds, excludeIds, } = options; const filtered: BaseMessage[] = []; for (const msg of messages) { if (excludeNames && msg.name && excludeNames.includes(msg.name)) { continue; } else if (excludeTypes && _isMessageType(msg, excludeTypes)) { continue; } else if (excludeIds && msg.id && excludeIds.includes(msg.id)) { continue; } // default to inclusion when no inclusion criteria given. if (!(includeTypes || includeIds || includeNames)) { filtered.push(msg); } else if ( includeNames && msg.name && includeNames.some((iName) => iName === msg.name) ) { filtered.push(msg); } else if (includeTypes && _isMessageType(msg, includeTypes)) { filtered.push(msg); } else if (includeIds && msg.id && includeIds.some((id) => id === msg.id)) { filtered.push(msg); } } return filtered; } /** * Merge consecutive Messages of the same type. * * **NOTE**: ToolMessages are not merged, as each has a distinct tool call id that * can't be merged. * * @param {BaseMessage[] | undefined} messages Sequence of Message-like objects to merge. Optional. If not provided, a RunnableLambda is returned. * @returns List of BaseMessages with consecutive runs of message types merged into single * messages, or a RunnableLambda which returns a list of BaseMessages If two messages being merged both have string contents, the merged * content is a concatenation of the two strings with a new-line separator. If at * least one of the messages has a list of content blocks, the merged content is a * list of content blocks. 
* * @example * ```typescript * import { mergeMessageRuns, AIMessage, HumanMessage, SystemMessage, ToolCall } from "@langchain/core/messages"; * * const messages = [ * new SystemMessage("you're a good assistant."), * new HumanMessage({ content: "what's your favorite color", id: "foo" }), * new HumanMessage({ content: "wait your favorite food", id: "bar" }), * new AIMessage({ * content: "my favorite colo", * tool_calls: [{ name: "blah_tool", args: { x: 2 }, id: "123" }], * id: "baz", * }), * new AIMessage({ * content: [{ type: "text", text: "my favorite dish is lasagna" }], * tool_calls: [{ name: "blah_tool", args: { x: -10 }, id: "456" }], * id: "blur", * }), * ]; * * mergeMessageRuns(messages); * ``` * * The above example would return: * ```typescript * [ * new SystemMessage("you're a good assistant."), * new HumanMessage({ * content: "what's your favorite colorwait your favorite food", * id: "foo", * }), * new AIMessage({ * content: [ * { type: "text", text: "my favorite colo" }, * { type: "text", text: "my favorite dish is lasagna" }, * ], * tool_calls: [ * { name: "blah_tool", args: { x: 2 }, id: "123" }, * { name: "blah_tool", args: { x: -10 }, id: "456" }, * ], * id: "baz", * }), * ] * ``` */ export function mergeMessageRuns(): Runnable<BaseMessage[], BaseMessage[]>; export function mergeMessageRuns(messages: BaseMessage[]): BaseMessage[]; export function mergeMessageRuns( messages?: BaseMessage[] ): BaseMessage[] | Runnable<BaseMessage[], BaseMessage[]> { if (Array.isArray(messages)) { return _mergeMessageRuns(messages); } return RunnableLambda.from(_mergeMessageRuns); } function _mergeMessageRuns(messages: BaseMessage[]): BaseMessage[] { if (!messages.length) { return []; } const merged: BaseMessage[] = []; for (const msg of messages) { const curr = msg; // Create a shallow copy of the message const last = merged.pop(); if (!last) { merged.push(curr); } else if ( curr._getType() === "tool" || !(curr._getType() === last._getType()) ) { merged.push(last, 
curr); } else { const lastChunk = convertToChunk(last) as BaseMessageChunk; const currChunk = convertToChunk(curr) as BaseMessageChunk; const mergedChunks = lastChunk.concat(currChunk); if ( typeof lastChunk.content === "string" && typeof currChunk.content === "string" ) { mergedChunks.content = `${lastChunk.content}\n${currChunk.content}`; } merged.push(_chunkToMsg(mergedChunks)); } } return merged; } // Since we can not import from `@langchain/textsplitters` we need // to reconstruct the interface here. interface _TextSplitterInterface extends BaseDocumentTransformer { splitText(text: string): Promise<string[]>; } export interface TrimMessagesFields { /** * @param {number} maxTokens Max token count of trimmed messages. */ maxTokens: number; /** * @param {((messages: BaseMessage[]) => number) | ((messages: BaseMessage[]) => Promise<number>) | BaseLanguageModel} tokenCounter * Function or LLM for counting tokens in an array of `BaseMessage`s. * If a `BaseLanguageModel` is passed in then `BaseLanguageModel.getNumTokens()` will be used. */ tokenCounter: | ((messages: BaseMessage[]) => number) | ((messages: BaseMessage[]) => Promise<number>) | BaseLanguageModel; /** * @param {"first" | "last"} [strategy="last"] Strategy for trimming. * - "first": Keep the first <= n_count tokens of the messages. * - "last": Keep the last <= n_count tokens of the messages. * @default "last" */ strategy?: "first" | "last"; /** * @param {boolean} [allowPartial=false] Whether to split a message if only part of the message can be included. * If `strategy: "last"` then the last partial contents of a message are included. * If `strategy: "first"` then the first partial contents of a message are included. * @default false */ allowPartial?: boolean; /** * @param {MessageTypeOrClass | MessageTypeOrClass[]} [endOn] The message type to end on. * If specified then every message after the last occurrence of this type is ignored. 
* If `strategy === "last"` then this is done before we attempt to get the last `maxTokens`. * If `strategy === "first"` then this is done after we get the first `maxTokens`. * Can be specified as string names (e.g. "system", "human", "ai", ...) or as `BaseMessage` classes * (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single type or an array of types. */ endOn?: MessageTypeOrClass | MessageTypeOrClass[]; /** * @param {MessageTypeOrClass | MessageTypeOrClass[]} [startOn] The message type to start on. * Should only be specified if `strategy: "last"`. If specified then every message before the first occurrence * of this type is ignored. This is done after we trim the initial messages to the last `maxTokens`. * Does not apply to a `SystemMessage` at index 0 if `includeSystem: true`. * Can be specified as string names (e.g. "system", "human", "ai", ...) or as `BaseMessage` classes * (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single type or an array of types. */ startOn?: MessageTypeOrClass | MessageTypeOrClass[]; /** * @param {boolean} [includeSystem=false] Whether to keep the `SystemMessage` if there is one at index 0. * Should only be specified if `strategy: "last"`. * @default false */ includeSystem?: boolean; /** * @param {((text: string) => string[]) | BaseDocumentTransformer} [textSplitter] Function or `BaseDocumentTransformer` for * splitting the string contents of a message. Only used if `allowPartial: true`. * If `strategy: "last"` then the last split tokens from a partial message will be included. * If `strategy: "first"` then the first split tokens from a partial message will be included. * Token splitter assumes that separators are kept, so that split contents can be directly concatenated * to recreate the original text. Defaults to splitting on newlines. 
*/ textSplitter?: | ((text: string) => string[]) | ((text: string) => Promise<string[]>) | _TextSplitterInterface; } /** * Trim messages to be below a token count. * * @param {BaseMessage[]} messages Array of `BaseMessage` instances to trim. * @param {TrimMessagesFields} options Trimming options. * @returns An array of trimmed `BaseMessage`s or a `Runnable` that takes a sequence of `BaseMessage`-like objects and returns * an array of trimmed `BaseMessage`s. * @throws {Error} If two incompatible arguments are specified or an unrecognized `strategy` is specified. * * @example * ```typescript * import { trimMessages, AIMessage, BaseMessage, HumanMessage, SystemMessage } from "@langchain/core/messages"; * * const messages = [ * new SystemMessage("This is a 4 token text. The full message is 10 tokens."), * new HumanMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "first", * }), * new AIMessage({ * content: [ * { type: "text", text: "This is the FIRST 4 token block." }, * { type: "text", text: "This is the SECOND 4 token block." }, * ], * id: "second", * }), * new HumanMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "third", * }), * new AIMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "fourth", * }), * ]; * * function dummyTokenCounter(messages: BaseMessage[]): number { * // treat each message like it adds 3 default tokens at the beginning * // of the message and at the end of the message. 3 + 4 + 3 = 10 tokens * // per message. 
* * const defaultContentLen = 4; * const defaultMsgPrefixLen = 3; * const defaultMsgSuffixLen = 3; * * let count = 0; * for (const msg of messages) { * if (typeof msg.content === "string") { * count += defaultMsgPrefixLen + defaultContentLen + defaultMsgSuffixLen; * } * if (Array.isArray(msg.content)) { * count += * defaultMsgPrefixLen + * msg.content.length * defaultContentLen + * defaultMsgSuffixLen; * } * } * return count; * } * ``` * * First 30 tokens, not allowing partial messages: * ```typescript * await trimMessages(messages, { * maxTokens: 30, * tokenCounter: dummyTokenCounter, * strategy: "first", * }); * ``` * * Output: * ```typescript * [ * new SystemMessage( * "This is a 4 token text. The full message is 10 tokens." * ), * new HumanMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "first", * }), * ] * ``` * * First 30 tokens, allowing partial messages: * ```typescript * await trimMessages(messages, { * maxTokens: 30, * tokenCounter: dummyTokenCounter, * strategy: "first", * allowPartial: true, * }); * ``` * * Output: * ```typescript * [ * new SystemMessage( * "This is a 4 token text. The full message is 10 tokens." * ), * new HumanMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "first", * }), * new AIMessage({ * content: [{ type: "text", text: "This is the FIRST 4 token block." }], * id: "second", * }), * ] * ``` * * First 30 tokens, allowing partial messages, have to end on HumanMessage: * ```typescript * await trimMessages(messages, { * maxTokens: 30, * tokenCounter: dummyTokenCounter, * strategy: "first", * allowPartial: true, * endOn: "human", * }); * ``` * * Output: * ```typescript * [ * new SystemMessage( * "This is a 4 token text. The full message is 10 tokens." * ), * new HumanMessage({ * content: "This is a 4 token text. 
The full message is 10 tokens.", * id: "first", * }), * ] * ``` * * Last 30 tokens, including system message, not allowing partial messages: * ```typescript * await trimMessages(messages, { * maxTokens: 30, * includeSystem: true, * tokenCounter: dummyTokenCounter, * strategy: "last", * }); * ``` * * Output: * ```typescript * [ * new SystemMessage( * "This is a 4 token text. The full message is 10 tokens." * ), * new HumanMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "third", * }), * new AIMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "fourth", * }), * ] * ``` * * Last 40 tokens, including system message, allowing partial messages: * ```typescript * await trimMessages(messages, { * maxTokens: 40, * tokenCounter: dummyTokenCounter, * strategy: "last", * allowPartial: true, * includeSystem: true, * }); * ``` * * Output: * ```typescript * [ * new SystemMessage( * "This is a 4 token text. The full message is 10 tokens." * ), * new AIMessage({ * content: [{ type: "text", text: "This is the FIRST 4 token block." }], * id: "second", * }), * new HumanMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "third", * }), * new AIMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "fourth", * }), * ] * ``` * * Last 30 tokens, including system message, allowing partial messages, end on HumanMessage: * ```typescript * await trimMessages(messages, { * maxTokens: 30, * tokenCounter: dummyTokenCounter, * strategy: "last", * endOn: "human", * includeSystem: true, * allowPartial: true, * }); * ``` * * Output: * ```typescript * [ * new SystemMessage( * "This is a 4 token text. The full message is 10 tokens." * ), * new AIMessage({ * content: [{ type: "text", text: "This is the FIRST 4 token block." }], * id: "second", * }), * new HumanMessage({ * content: "This is a 4 token text. 
The full message is 10 tokens.", * id: "third", * }), * ] * ``` * * Last 40 tokens, including system message, allowing partial messages, start on HumanMessage: * ```typescript * await trimMessages(messages, { * maxTokens: 40, * tokenCounter: dummyTokenCounter, * strategy: "last", * includeSystem: true, * allowPartial: true, * startOn: "human", * }); * ``` * * Output: * ```typescript * [ * new SystemMessage( * "This is a 4 token text. The full message is 10 tokens." * ), * new HumanMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "third", * }), * new AIMessage({ * content: "This is a 4 token text. The full message is 10 tokens.", * id: "fourth", * }), * ] * ``` */ export function trimMessages( options: TrimMessagesFields ): Runnable<BaseMessage[], BaseMessage[]>; export function trimMessages( messages: BaseMessage[], options: TrimMessagesFields ): Promise<BaseMessage[]>; export function trimMessages( messagesOrOptions: BaseMessage[] | TrimMessagesFields, options?: TrimMessagesFields ): Promise<BaseMessage[]> | Runnable<BaseMessage[], BaseMessage[]> { if (Array.isArray(messagesOrOptions)) { const messages = messagesOrOptions; if (!options) { throw new Error("Options parameter is required when providing messages."); } return _trimMessagesHelper(messages, options); } else { const trimmerOptions = messagesOrOptions; return RunnableLambda.from((input: BaseMessage[]) => _trimMessagesHelper(input, trimmerOptions) ); } } async function _trimMessagesHelper( messages: BaseMessage[], options: TrimMessagesFields ): Promise<Array<BaseMessage>> { const { maxTokens, tokenCounter, strategy = "last", allowPartial = false, endOn, startOn, includeSystem = false, textSplitter, } = options; if (startOn && strategy === "first") { throw new Error( "`startOn` should only be specified if `strategy` is 'last'." ); } if (includeSystem && strategy === "first") { throw new Error( "`includeSystem` should only be specified if `strategy` is 'last'." 
); } let listTokenCounter: (msgs: BaseMessage[]) => Promise<number>; if ("getNumTokens" in tokenCounter) { listTokenCounter = async (msgs: BaseMessage[]): Promise<number> => { const tokenCounts = await Promise.all( msgs.map((msg) => tokenCounter.getNumTokens(msg.content)) ); return tokenCounts.reduce((sum, count) => sum + count, 0); }; } else { listTokenCounter = async (msgs: BaseMessage[]): Promise<number> => tokenCounter(msgs); } let textSplitterFunc: (text: string) => Promise<string[]> = defaultTextSplitter; if (textSplitter) { if ("splitText" in textSplitter) { textSplitterFunc = textSplitter.splitText; } else { textSplitterFunc = async (text: string): Promise<string[]> => textSplitter(text); } } if (strategy === "first") { return _firstMaxTokens(messages, { maxTokens, tokenCounter: listTokenCounter, textSplitter: textSplitterFunc, partialStrategy: allowPartial ? "first" : undefined, endOn, }); } else if (strategy === "last") { return _lastMaxTokens(messages, { maxTokens, tokenCounter: listTokenCounter, textSplitter: textSplitterFunc, allowPartial, includeSystem, startOn, endOn, }); } else { throw new Error( `Unrecognized strategy: '${strategy}'. Must be one of 'first' or 'last'.` ); } } async function _firstMaxTokens( messages: BaseMessage[], options: { maxTokens: number; tokenCounter: (messages: BaseMessage[]) => Promise<number>; textSplitter: (text: string) => Promise<string[]>; partialStrategy?: "first" | "last"; endOn?: MessageTypeOrClass | MessageTypeOrClass[]; } ): Promise<BaseMessage[]> { const { maxTokens, tokenCounter, textSplitter, partialStrategy, endOn } = options; let messagesCopy = [...messages]; let idx = 0; for (let i = 0; i < messagesCopy.length; i += 1) { const remainingMessages = i > 0 ? 
// NOTE(review): this excerpt begins inside `_firstMaxTokens`; the function
// signature and the first half of its trimming loop sit above this view.
// Code tokens are unchanged — only comments were added.
messagesCopy.slice(0, -i) : messagesCopy;
    // Found the largest prefix that still fits within the token budget.
    if ((await tokenCounter(remainingMessages)) <= maxTokens) {
      idx = messagesCopy.length - i;
      break;
    }
  }

  if (idx < messagesCopy.length - 1 && partialStrategy) {
    // Try to also include a truncated version of the first excluded message.
    let includedPartial = false;
    if (Array.isArray(messagesCopy[idx].content)) {
      // Complex (array) content: re-add content blocks one at a time until
      // the budget is exceeded.
      const excluded = messagesCopy[idx];
      if (typeof excluded.content === "string") {
        throw new Error("Expected content to be an array.");
      }
      const numBlock = excluded.content.length;
      // For the "last" strategy, iterate blocks from the end of the message.
      const reversedContent =
        partialStrategy === "last"
          ? [...excluded.content].reverse()
          : excluded.content;
      for (let i = 1; i <= numBlock; i += 1) {
        const partialContent =
          partialStrategy === "first"
            ? reversedContent.slice(0, i)
            : reversedContent.slice(-i);
        // Copy every serializable field except the type tag and lc_* internals.
        const fields = Object.fromEntries(
          Object.entries(excluded).filter(
            ([k]) => k !== "type" && !k.startsWith("lc_")
          )
        ) as BaseMessageFields;
        const updatedMessage = _switchTypeToMessage(excluded._getType(), {
          ...fields,
          content: partialContent,
        });
        const slicedMessages = [...messagesCopy.slice(0, idx), updatedMessage];
        if ((await tokenCounter(slicedMessages)) <= maxTokens) {
          messagesCopy = slicedMessages;
          idx += 1;
          includedPartial = true;
        } else {
          break;
        }
      }
      if (includedPartial && partialStrategy === "last") {
        // NOTE(review): this writes back to the original message object —
        // confirm the in-place mutation of the caller's message is intended.
        excluded.content = [...reversedContent].reverse();
      }
    }
    if (!includedPartial) {
      // String content (or no usable blocks): split the text and re-add
      // progressively smaller pieces until one fits.
      const excluded = messagesCopy[idx];
      let text: string | undefined;
      if (
        Array.isArray(excluded.content) &&
        excluded.content.some(
          (block) => typeof block === "string" || block.type === "text"
        )
      ) {
        const textBlock = excluded.content.find(
          (block) => block.type === "text" && block.text
        ) as { type: "text"; text: string } | undefined;
        text = textBlock?.text;
      } else if (typeof excluded.content === "string") {
        text = excluded.content;
      }
      if (text) {
        const splitTexts = await textSplitter(text);
        const numSplits = splitTexts.length;
        if (partialStrategy === "last") {
          splitTexts.reverse();
        }
        for (let _ = 0; _ < numSplits - 1; _ += 1) {
          splitTexts.pop();
          // NOTE(review): `excluded.content` is assigned before the token
          // check below, so the caller's message is mutated even when the
          // check fails on the final iteration — confirm this is intended.
          excluded.content = splitTexts.join("");
          if (
            (await tokenCounter([...messagesCopy.slice(0, idx), excluded])) <=
            maxTokens
          ) {
            if (partialStrategy === "last") {
              // Restore original piece order before keeping the message.
              excluded.content = [...splitTexts].reverse().join("");
            }
            messagesCopy = [...messagesCopy.slice(0, idx), excluded];
            idx += 1;
            break;
          }
        }
      }
    }
  }

  if (endOn) {
    // Drop trailing messages until the kept run ends on an allowed type.
    const endOnArr = Array.isArray(endOn) ? endOn : [endOn];
    while (idx > 0 && !_isMessageType(messagesCopy[idx - 1], endOnArr)) {
      idx -= 1;
    }
  }
  return messagesCopy.slice(0, idx);
}

/**
 * Keep as many messages as possible from the END of the list while staying
 * under `maxTokens`, by reversing the list and delegating to
 * `_firstMaxTokens`.
 *
 * NOTE(review): mutates `messages` in place (pop/reverse) — confirm callers
 * always pass a copy.
 *
 * @param messages Messages to trim.
 * @param options.maxTokens Token budget.
 * @param options.tokenCounter Async token counter over a message list.
 * @param options.textSplitter Async splitter used for partial inclusion.
 * @param options.allowPartial Whether a message may be truncated.
 * @param options.includeSystem Always keep a leading system message.
 * @param options.startOn Message type(s) the kept run must start on.
 * @param options.endOn Message type(s) the kept run must end on.
 */
async function _lastMaxTokens(
  messages: BaseMessage[],
  options: {
    maxTokens: number;
    tokenCounter: (messages: BaseMessage[]) => Promise<number>;
    textSplitter: (text: string) => Promise<string[]>;
    /**
     * @default {false}
     */
    allowPartial?: boolean;
    /**
     * @default {false}
     */
    includeSystem?: boolean;
    startOn?: MessageTypeOrClass | MessageTypeOrClass[];
    endOn?: MessageTypeOrClass | MessageTypeOrClass[];
  }
): Promise<BaseMessage[]> {
  const {
    allowPartial = false,
    includeSystem = false,
    endOn,
    startOn,
    ...rest
  } = options;
  if (endOn) {
    const endOnArr = Array.isArray(endOn) ? endOn : [endOn];
    while (
      messages &&
      !_isMessageType(messages[messages.length - 1], endOnArr)
    ) {
      messages.pop();
    }
  }
  // If a leading system message must be preserved, exclude it from the
  // reversal so it cannot be trimmed away.
  const swappedSystem = includeSystem && messages[0]._getType() === "system";
  let reversed_ = swappedSystem
    ? messages.slice(0, 1).concat(messages.slice(1).reverse())
    : messages.reverse();

  reversed_ = await _firstMaxTokens(reversed_, {
    ...rest,
    // In reversed order, the caller's "keep the start of the message"
    // becomes "keep the end", hence "last".
    partialStrategy: allowPartial ?
"last" : undefined, endOn: startOn, }); if (swappedSystem) { return [reversed_[0], ...reversed_.slice(1).reverse()]; } else { return reversed_.reverse(); } } const _MSG_CHUNK_MAP: Record< MessageType, { message: MessageUnion; messageChunk: MessageChunkUnion; } > = { human: { message: HumanMessage, messageChunk: HumanMessageChunk, }, ai: { message: AIMessage, messageChunk: AIMessageChunk, }, system: { message: SystemMessage, messageChunk: SystemMessageChunk, }, tool: { message: ToolMessage, messageChunk: ToolMessageChunk, }, function: { message: FunctionMessage, messageChunk: FunctionMessageChunk, }, generic: { message: ChatMessage, messageChunk: ChatMessageChunk, }, remove: { message: RemoveMessage, messageChunk: RemoveMessage, // RemoveMessage does not have a chunk class. }, }; function _switchTypeToMessage( messageType: MessageType, fields: BaseMessageFields ): BaseMessage; function _switchTypeToMessage( messageType: MessageType, fields: BaseMessageFields, returnChunk: true ): BaseMessageChunk; function _switchTypeToMessage( messageType: MessageType, fields: BaseMessageFields, returnChunk?: boolean ): BaseMessageChunk | BaseMessage { let chunk: BaseMessageChunk | undefined; let msg: BaseMessage | undefined; switch (messageType) { case "human": if (returnChunk) { chunk = new HumanMessageChunk(fields); } else { msg = new HumanMessage(fields); } break; case "ai": if (returnChunk) { let aiChunkFields: AIMessageChunkFields = { ...fields, }; if ("tool_calls" in aiChunkFields) { aiChunkFields = { ...aiChunkFields, tool_call_chunks: aiChunkFields.tool_calls?.map((tc) => ({ ...tc, type: "tool_call_chunk", index: undefined, args: JSON.stringify(tc.args), })), }; } chunk = new AIMessageChunk(aiChunkFields); } else { msg = new AIMessage(fields); } break; case "system": if (returnChunk) { chunk = new SystemMessageChunk(fields); } else { msg = new SystemMessage(fields); } break; case "tool": if ("tool_call_id" in fields) { if (returnChunk) { chunk = new ToolMessageChunk( 
fields as ToolMessageFieldsWithToolCallId ); } else { msg = new ToolMessage(fields as ToolMessageFieldsWithToolCallId); } } else { throw new Error( "Can not convert ToolMessage to ToolMessageChunk if 'tool_call_id' field is not defined." ); } break; case "function": if (returnChunk) { chunk = new FunctionMessageChunk(fields); } else { if (!fields.name) { throw new Error("FunctionMessage must have a 'name' field"); } msg = new FunctionMessage(fields as FunctionMessageFieldsWithName); } break; case "generic": if ("role" in fields) { if (returnChunk) { chunk = new ChatMessageChunk(fields as ChatMessageFieldsWithRole); } else { msg = new ChatMessage(fields as ChatMessageFieldsWithRole); } } else { throw new Error( "Can not convert ChatMessage to ChatMessageChunk if 'role' field is not defined." ); } break; default: throw new Error(`Unrecognized message type ${messageType}`); } if (returnChunk && chunk) { return chunk; } if (msg) { return msg; } throw new Error(`Unrecognized message type ${messageType}`); } function _chunkToMsg(chunk: BaseMessageChunk): BaseMessage { const chunkType = chunk._getType(); let msg: BaseMessage | undefined; const fields = Object.fromEntries( Object.entries(chunk).filter( ([k]) => !["type", "tool_call_chunks"].includes(k) && !k.startsWith("lc_") ) ) as BaseMessageFields; if (chunkType in _MSG_CHUNK_MAP) { msg = _switchTypeToMessage(chunkType, fields); } if (!msg) { throw new Error( `Unrecognized message chunk class ${chunkType}. Supported classes are ${Object.keys( _MSG_CHUNK_MAP )}` ); } return msg; } /** * The default text splitter function that splits text by newlines. * * @param {string} text * @returns A promise that resolves to an array of strings split by newlines. */ export function defaultTextSplitter(text: string): Promise<string[]> { const splits = text.split("\n"); return Promise.resolve([ ...splits.slice(0, -1).map((s) => `${s}\n`), splits[splits.length - 1], ]); }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/chat.ts
import {
  BaseMessage,
  BaseMessageChunk,
  type BaseMessageFields,
  mergeContent,
  _mergeDicts,
  type MessageType,
} from "./base.js";

/** Fields for a ChatMessage: base message fields plus an arbitrary role. */
export interface ChatMessageFieldsWithRole extends BaseMessageFields {
  role: string;
}

/**
 * Represents a chat message in a conversation.
 */
export class ChatMessage
  extends BaseMessage
  implements ChatMessageFieldsWithRole
{
  static lc_name() {
    return "ChatMessage";
  }

  // Arbitrary speaker role string; not limited to the built-in message types.
  role: string;

  static _chatMessageClass(): typeof ChatMessage {
    return ChatMessage;
  }

  constructor(content: string, role: string);

  constructor(fields: ChatMessageFieldsWithRole);

  constructor(fields: string | ChatMessageFieldsWithRole, role?: string) {
    if (typeof fields === "string") {
      // Normalize the (content, role) overload into the fields form.
      // eslint-disable-next-line no-param-reassign, @typescript-eslint/no-non-null-assertion
      fields = { content: fields, role: role! };
    }
    super(fields);
    this.role = fields.role;
  }

  _getType(): MessageType {
    return "generic";
  }

  static isInstance(message: BaseMessage): message is ChatMessage {
    return message._getType() === "generic";
  }

  override get _printableFields(): Record<string, unknown> {
    return {
      ...super._printableFields,
      role: this.role,
    };
  }
}

/**
 * Represents a chunk of a chat message, which can be concatenated with
 * other chat message chunks.
 */
export class ChatMessageChunk extends BaseMessageChunk {
  static lc_name() {
    return "ChatMessageChunk";
  }

  role: string;

  constructor(content: string, role: string);

  constructor(fields: ChatMessageFieldsWithRole);

  constructor(fields: string | ChatMessageFieldsWithRole, role?: string) {
    if (typeof fields === "string") {
      // Normalize the (content, role) overload into the fields form.
      // eslint-disable-next-line no-param-reassign, @typescript-eslint/no-non-null-assertion
      fields = { content: fields, role: role! };
    }
    super(fields);
    this.role = fields.role;
  }

  _getType(): MessageType {
    return "generic";
  }

  /** Merge another chunk into this one; this chunk's role wins. */
  concat(chunk: ChatMessageChunk) {
    return new ChatMessageChunk({
      content: mergeContent(this.content, chunk.content),
      additional_kwargs: _mergeDicts(
        this.additional_kwargs,
        chunk.additional_kwargs
      ),
      response_metadata: _mergeDicts(
        this.response_metadata,
        chunk.response_metadata
      ),
      role: this.role,
      id: this.id ?? chunk.id,
    });
  }

  override get _printableFields(): Record<string, unknown> {
    return {
      ...super._printableFields,
      role: this.role,
    };
  }
}

/** Type guard: true when the message's runtime type is "generic". */
export function isChatMessage(x: BaseMessage): x is ChatMessage {
  return x._getType() === "generic";
}

/** Type guard: true when the chunk's runtime type is "generic". */
export function isChatMessageChunk(x: BaseMessageChunk): x is ChatMessageChunk {
  return x._getType() === "generic";
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/function.ts
import {
  BaseMessage,
  BaseMessageChunk,
  type BaseMessageFields,
  mergeContent,
  _mergeDicts,
  type MessageType,
} from "./base.js";

/** Fields for a FunctionMessage; `name` (the called function) is required. */
export interface FunctionMessageFieldsWithName extends BaseMessageFields {
  name: string;
}

/**
 * Represents a function message in a conversation.
 */
export class FunctionMessage extends BaseMessage {
  static lc_name() {
    return "FunctionMessage";
  }

  constructor(fields: FunctionMessageFieldsWithName);

  constructor(
    fields: string | BaseMessageFields,
    /** @deprecated */
    name: string
  );

  constructor(
    fields: string | FunctionMessageFieldsWithName,
    /** @deprecated */
    name?: string
  ) {
    if (typeof fields === "string") {
      // Normalize the deprecated (content, name) overload into fields form.
      // eslint-disable-next-line no-param-reassign, @typescript-eslint/no-non-null-assertion
      fields = { content: fields, name: name! };
    }
    super(fields);
  }

  _getType(): MessageType {
    return "function";
  }
}

/**
 * Represents a chunk of a function message, which can be concatenated
 * with other function message chunks.
 */
export class FunctionMessageChunk extends BaseMessageChunk {
  static lc_name() {
    return "FunctionMessageChunk";
  }

  _getType(): MessageType {
    return "function";
  }

  /** Merge another chunk into this one; this chunk's name wins (or ""). */
  concat(chunk: FunctionMessageChunk) {
    return new FunctionMessageChunk({
      content: mergeContent(this.content, chunk.content),
      additional_kwargs: _mergeDicts(
        this.additional_kwargs,
        chunk.additional_kwargs
      ),
      response_metadata: _mergeDicts(
        this.response_metadata,
        chunk.response_metadata
      ),
      name: this.name ?? "",
      id: this.id ?? chunk.id,
    });
  }
}

/** Type guard: true when the message's runtime type is "function". */
export function isFunctionMessage(x: BaseMessage): x is FunctionMessage {
  return x._getType() === "function";
}

/** Type guard: true when the chunk's runtime type is "function". */
export function isFunctionMessageChunk(
  x: BaseMessageChunk
): x is FunctionMessageChunk {
  return x._getType() === "function";
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/system.ts
import {
  BaseMessage,
  BaseMessageChunk,
  mergeContent,
  _mergeDicts,
  type MessageType,
} from "./base.js";

/**
 * Represents a system message in a conversation.
 */
export class SystemMessage extends BaseMessage {
  static lc_name() {
    return "SystemMessage";
  }

  _getType(): MessageType {
    return "system";
  }
}

/**
 * Represents a chunk of a system message, which can be concatenated with
 * other system message chunks.
 */
export class SystemMessageChunk extends BaseMessageChunk {
  static lc_name() {
    return "SystemMessageChunk";
  }

  _getType(): MessageType {
    return "system";
  }

  /** Merge another system chunk into this one. */
  concat(chunk: SystemMessageChunk) {
    return new SystemMessageChunk({
      content: mergeContent(this.content, chunk.content),
      additional_kwargs: _mergeDicts(
        this.additional_kwargs,
        chunk.additional_kwargs
      ),
      response_metadata: _mergeDicts(
        this.response_metadata,
        chunk.response_metadata
      ),
      id: this.id ?? chunk.id,
    });
  }
}

/** Type guard: true when the message's runtime type is "system". */
export function isSystemMessage(x: BaseMessage): x is SystemMessage {
  return x._getType() === "system";
}

/** Type guard: true when the chunk's runtime type is "system". */
export function isSystemMessageChunk(
  x: BaseMessageChunk
): x is SystemMessageChunk {
  return x._getType() === "system";
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/index.ts
// Barrel file re-exporting the message classes, guards, and helpers.
export * from "./ai.js";
export * from "./base.js";
export * from "./chat.js";
export * from "./function.js";
export * from "./human.js";
export * from "./system.js";
export * from "./utils.js";
export * from "./transformers.js";
export * from "./modifier.js";

// TODO: Use a star export when we deprecate the
// existing "ToolCall" type in "base.js".
export {
  type ToolMessageFieldsWithToolCallId,
  ToolMessage,
  ToolMessageChunk,
  type InvalidToolCall,
  isToolMessage,
  isToolMessageChunk,
} from "./tool.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/ai.ts
import { parsePartialJson } from "../utils/json.js";
import {
  BaseMessage,
  BaseMessageChunk,
  mergeContent,
  _mergeDicts,
  type MessageType,
  BaseMessageFields,
  _mergeLists,
} from "./base.js";
import {
  InvalidToolCall,
  ToolCall,
  ToolCallChunk,
  defaultToolCallParser,
} from "./tool.js";

/** Fields for an AIMessage: base fields plus tool calls and usage info. */
export type AIMessageFields = BaseMessageFields & {
  tool_calls?: ToolCall[];
  invalid_tool_calls?: InvalidToolCall[];
  usage_metadata?: UsageMetadata;
};

/**
 * Breakdown of input token counts.
 *
 * Does not *need* to sum to full input token count. Does *not* need to have all keys.
 */
export type InputTokenDetails = {
  /**
   * Audio input tokens.
   */
  audio?: number;

  /**
   * Input tokens that were cached and there was a cache hit.
   *
   * Since there was a cache hit, the tokens were read from the cache.
   * More precisely, the model state given these tokens was read from the cache.
   */
  cache_read?: number;

  /**
   * Input tokens that were cached and there was a cache miss.
   *
   * Since there was a cache miss, the cache was created from these tokens.
   */
  cache_creation?: number;
};

/**
 * Breakdown of output token counts.
 *
 * Does *not* need to sum to full output token count. Does *not* need to have all keys.
 */
export type OutputTokenDetails = {
  /**
   * Audio output tokens
   */
  audio?: number;

  /**
   * Reasoning output tokens.
   *
   * Tokens generated by the model in a chain of thought process (i.e. by
   * OpenAI's o1 models) that are not returned as part of model output.
   */
  reasoning?: number;
};

/**
 * Usage metadata for a message, such as token counts.
 */
export type UsageMetadata = {
  /**
   * Count of input (or prompt) tokens. Sum of all input token types.
   */
  input_tokens: number;

  /**
   * Count of output (or completion) tokens. Sum of all output token types.
   */
  output_tokens: number;

  /**
   * Total token count. Sum of input_tokens + output_tokens.
   */
  total_tokens: number;

  /**
   * Breakdown of input token counts.
   *
   * Does *not* need to sum to full input token count. Does *not* need to have all keys.
   */
  input_token_details?: InputTokenDetails;

  /**
   * Breakdown of output token counts.
   *
   * Does *not* need to sum to full output token count. Does *not* need to have all keys.
   */
  output_token_details?: OutputTokenDetails;
};

/**
 * Represents an AI message in a conversation.
 */
export class AIMessage extends BaseMessage {
  // These are typed as optional to avoid breaking changes and allow for casting
  // from BaseMessage.
  tool_calls?: ToolCall[] = [];

  invalid_tool_calls?: InvalidToolCall[] = [];

  /**
   * If provided, token usage information associated with the message.
   */
  usage_metadata?: UsageMetadata;

  get lc_aliases(): Record<string, string> {
    // exclude snake case conversion to pascal case
    return {
      ...super.lc_aliases,
      tool_calls: "tool_calls",
      invalid_tool_calls: "invalid_tool_calls",
    };
  }

  constructor(
    fields: string | AIMessageFields,
    /** @deprecated */
    kwargs?: Record<string, unknown>
  ) {
    let initParams: AIMessageFields;
    if (typeof fields === "string") {
      initParams = {
        content: fields,
        tool_calls: [],
        invalid_tool_calls: [],
        additional_kwargs: kwargs ?? {},
      };
    } else {
      initParams = fields;
      // Legacy providers put raw tool calls in additional_kwargs; warn when
      // structured tool_calls are absent but raw ones are present.
      const rawToolCalls = initParams.additional_kwargs?.tool_calls;
      const toolCalls = initParams.tool_calls;
      if (
        !(rawToolCalls == null) &&
        rawToolCalls.length > 0 &&
        (toolCalls === undefined || toolCalls.length === 0)
      ) {
        console.warn(
          [
            "New LangChain packages are available that more efficiently handle",
            "tool calling.\n\nPlease upgrade your packages to versions that set",
            "message tool calls. e.g., `yarn add @langchain/anthropic`,",
            "yarn add @langchain/openai`, etc.",
          ].join(" ")
        );
      }
      try {
        if (!(rawToolCalls == null) && toolCalls === undefined) {
          // Parse legacy raw tool calls into structured/invalid buckets.
          const [toolCalls, invalidToolCalls] =
            defaultToolCallParser(rawToolCalls);
          initParams.tool_calls = toolCalls ?? [];
          initParams.invalid_tool_calls = invalidToolCalls ?? [];
        } else {
          initParams.tool_calls = initParams.tool_calls ?? [];
          initParams.invalid_tool_calls = initParams.invalid_tool_calls ?? [];
        }
      } catch (e) {
        // Do nothing if parsing fails
        initParams.tool_calls = [];
        initParams.invalid_tool_calls = [];
      }
    }
    // Sadly, TypeScript only allows super() calls at root if the class has
    // properties with initializers, so we have to check types twice.
    super(initParams);
    if (typeof initParams !== "string") {
      this.tool_calls = initParams.tool_calls ?? this.tool_calls;
      this.invalid_tool_calls =
        initParams.invalid_tool_calls ?? this.invalid_tool_calls;
    }
    this.usage_metadata = initParams.usage_metadata;
  }

  static lc_name() {
    return "AIMessage";
  }

  _getType(): MessageType {
    return "ai";
  }

  override get _printableFields(): Record<string, unknown> {
    return {
      ...super._printableFields,
      tool_calls: this.tool_calls,
      invalid_tool_calls: this.invalid_tool_calls,
      usage_metadata: this.usage_metadata,
    };
  }
}

/** Type guard: true when the message's runtime type is "ai". */
export function isAIMessage(x: BaseMessage): x is AIMessage {
  return x._getType() === "ai";
}

/** Type guard: true when the chunk's runtime type is "ai". */
export function isAIMessageChunk(x: BaseMessageChunk): x is AIMessageChunk {
  return x._getType() === "ai";
}

export type AIMessageChunkFields = AIMessageFields & {
  tool_call_chunks?: ToolCallChunk[];
};

/**
 * Represents a chunk of an AI message, which can be concatenated with
 * other AI message chunks.
 */
export class AIMessageChunk extends BaseMessageChunk {
  // Must redeclare tool call fields since there is no multiple inheritance in JS.
  // These are typed as optional to avoid breaking changes and allow for casting
  // from BaseMessage.
  tool_calls?: ToolCall[] = [];

  invalid_tool_calls?: InvalidToolCall[] = [];

  tool_call_chunks?: ToolCallChunk[] = [];

  /**
   * If provided, token usage information associated with the message.
   */
  usage_metadata?: UsageMetadata;

  constructor(fields: string | AIMessageChunkFields) {
    let initParams: AIMessageChunkFields;
    if (typeof fields === "string") {
      initParams = {
        content: fields,
        tool_calls: [],
        invalid_tool_calls: [],
        tool_call_chunks: [],
      };
    } else if (fields.tool_call_chunks === undefined) {
      // NOTE(review): this branch resets invalid_tool_calls to [] even when
      // the caller supplied some — confirm that is intended.
      initParams = {
        ...fields,
        tool_calls: fields.tool_calls ?? [],
        invalid_tool_calls: [],
        tool_call_chunks: [],
        usage_metadata:
          fields.usage_metadata !== undefined
            ? fields.usage_metadata
            : undefined,
      };
    } else {
      // Derive complete/invalid tool calls from the partial chunks by
      // attempting to parse each chunk's (possibly partial) JSON args.
      const toolCalls: ToolCall[] = [];
      const invalidToolCalls: InvalidToolCall[] = [];
      for (const toolCallChunk of fields.tool_call_chunks) {
        let parsedArgs = {};
        try {
          parsedArgs = parsePartialJson(toolCallChunk.args || "{}");
          if (
            parsedArgs === null ||
            typeof parsedArgs !== "object" ||
            Array.isArray(parsedArgs)
          ) {
            throw new Error("Malformed tool call chunk args.");
          }
          toolCalls.push({
            name: toolCallChunk.name ?? "",
            args: parsedArgs,
            id: toolCallChunk.id,
            type: "tool_call",
          });
        } catch (e) {
          invalidToolCalls.push({
            name: toolCallChunk.name,
            args: toolCallChunk.args,
            id: toolCallChunk.id,
            error: "Malformed args.",
            type: "invalid_tool_call",
          });
        }
      }
      initParams = {
        ...fields,
        tool_calls: toolCalls,
        invalid_tool_calls: invalidToolCalls,
        usage_metadata:
          fields.usage_metadata !== undefined
            ? fields.usage_metadata
            : undefined,
      };
    }
    // Sadly, TypeScript only allows super() calls at root if the class has
    // properties with initializers, so we have to check types twice.
    super(initParams);
    this.tool_call_chunks =
      initParams.tool_call_chunks ?? this.tool_call_chunks;
    this.tool_calls = initParams.tool_calls ?? this.tool_calls;
    this.invalid_tool_calls =
      initParams.invalid_tool_calls ?? this.invalid_tool_calls;
    this.usage_metadata = initParams.usage_metadata;
  }

  get lc_aliases(): Record<string, string> {
    // exclude snake case conversion to pascal case
    return {
      ...super.lc_aliases,
      tool_calls: "tool_calls",
      invalid_tool_calls: "invalid_tool_calls",
      tool_call_chunks: "tool_call_chunks",
    };
  }

  static lc_name() {
    return "AIMessageChunk";
  }

  _getType(): MessageType {
    return "ai";
  }

  override get _printableFields(): Record<string, unknown> {
    return {
      ...super._printableFields,
      tool_calls: this.tool_calls,
      tool_call_chunks: this.tool_call_chunks,
      invalid_tool_calls: this.invalid_tool_calls,
      usage_metadata: this.usage_metadata,
    };
  }

  /**
   * Merge another AI chunk into this one, combining content and kwargs,
   * concatenating tool call chunks, and summing usage metadata.
   */
  concat(chunk: AIMessageChunk) {
    const combinedFields: AIMessageChunkFields = {
      content: mergeContent(this.content, chunk.content),
      additional_kwargs: _mergeDicts(
        this.additional_kwargs,
        chunk.additional_kwargs
      ),
      response_metadata: _mergeDicts(
        this.response_metadata,
        chunk.response_metadata
      ),
      tool_call_chunks: [],
      id: this.id ?? chunk.id,
    };
    if (
      this.tool_call_chunks !== undefined ||
      chunk.tool_call_chunks !== undefined
    ) {
      const rawToolCalls = _mergeLists(
        this.tool_call_chunks,
        chunk.tool_call_chunks
      );
      if (rawToolCalls !== undefined && rawToolCalls.length > 0) {
        combinedFields.tool_call_chunks = rawToolCalls;
      }
    }
    if (
      this.usage_metadata !== undefined ||
      chunk.usage_metadata !== undefined
    ) {
      // Sum each detail key, but only emit keys that were present on a side.
      const inputTokenDetails: InputTokenDetails = {
        ...((this.usage_metadata?.input_token_details?.audio !== undefined ||
          chunk.usage_metadata?.input_token_details?.audio !== undefined) && {
          audio:
            (this.usage_metadata?.input_token_details?.audio ?? 0) +
            (chunk.usage_metadata?.input_token_details?.audio ?? 0),
        }),
        ...((this.usage_metadata?.input_token_details?.cache_read !==
          undefined ||
          chunk.usage_metadata?.input_token_details?.cache_read !==
            undefined) && {
          cache_read:
            (this.usage_metadata?.input_token_details?.cache_read ?? 0) +
            (chunk.usage_metadata?.input_token_details?.cache_read ?? 0),
        }),
        ...((this.usage_metadata?.input_token_details?.cache_creation !==
          undefined ||
          chunk.usage_metadata?.input_token_details?.cache_creation !==
            undefined) && {
          cache_creation:
            (this.usage_metadata?.input_token_details?.cache_creation ?? 0) +
            (chunk.usage_metadata?.input_token_details?.cache_creation ?? 0),
        }),
      };
      const outputTokenDetails: OutputTokenDetails = {
        ...((this.usage_metadata?.output_token_details?.audio !== undefined ||
          chunk.usage_metadata?.output_token_details?.audio !== undefined) && {
          audio:
            (this.usage_metadata?.output_token_details?.audio ?? 0) +
            (chunk.usage_metadata?.output_token_details?.audio ?? 0),
        }),
        ...((this.usage_metadata?.output_token_details?.reasoning !==
          undefined ||
          chunk.usage_metadata?.output_token_details?.reasoning !==
            undefined) && {
          reasoning:
            (this.usage_metadata?.output_token_details?.reasoning ?? 0) +
            (chunk.usage_metadata?.output_token_details?.reasoning ?? 0),
        }),
      };
      const left: UsageMetadata = this.usage_metadata ?? {
        input_tokens: 0,
        output_tokens: 0,
        total_tokens: 0,
      };
      const right: UsageMetadata = chunk.usage_metadata ?? {
        input_tokens: 0,
        output_tokens: 0,
        total_tokens: 0,
      };
      const usage_metadata: UsageMetadata = {
        input_tokens: left.input_tokens + right.input_tokens,
        output_tokens: left.output_tokens + right.output_tokens,
        total_tokens: left.total_tokens + right.total_tokens,
        // Do not include `input_token_details` / `output_token_details` keys in combined fields
        // unless their values are defined.
        ...(Object.keys(inputTokenDetails).length > 0 && {
          input_token_details: inputTokenDetails,
        }),
        ...(Object.keys(outputTokenDetails).length > 0 && {
          output_token_details: outputTokenDetails,
        }),
      };
      combinedFields.usage_metadata = usage_metadata;
    }
    return new AIMessageChunk(combinedFields);
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/base.ts
import { Serializable, SerializedConstructor } from "../load/serializable.js"; import { StringWithAutocomplete } from "../utils/types/index.js"; export interface StoredMessageData { content: string; role: string | undefined; name: string | undefined; tool_call_id: string | undefined; // eslint-disable-next-line @typescript-eslint/no-explicit-any additional_kwargs?: Record<string, any>; /** Response metadata. For example: response headers, logprobs, token counts. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any response_metadata?: Record<string, any>; id?: string; } export interface StoredMessage { type: string; data: StoredMessageData; } export interface StoredGeneration { text: string; message?: StoredMessage; } export interface StoredMessageV1 { type: string; role: string | undefined; text: string; } export type MessageType = | "human" | "ai" | "generic" | "system" | "function" | "tool" | "remove"; export type ImageDetail = "auto" | "low" | "high"; export type MessageContentText = { type: "text"; text: string; }; export type MessageContentImageUrl = { type: "image_url"; image_url: string | { url: string; detail?: ImageDetail }; }; export type MessageContentComplex = | MessageContentText | MessageContentImageUrl // eslint-disable-next-line @typescript-eslint/no-explicit-any | (Record<string, any> & { type?: "text" | "image_url" | string }) // eslint-disable-next-line @typescript-eslint/no-explicit-any | (Record<string, any> & { type?: never }); export type MessageContent = string | MessageContentComplex[]; export interface FunctionCall { /** * The arguments to call the function with, as generated by the model in JSON * format. Note that the model does not always generate valid JSON, and may * hallucinate parameters not defined by your function schema. Validate the * arguments in your code before calling your function. */ arguments: string; /** * The name of the function to call. 
*/ name: string; } export type BaseMessageFields = { content: MessageContent; name?: string; additional_kwargs?: { /** * @deprecated Use "tool_calls" field on AIMessages instead */ function_call?: FunctionCall; /** * @deprecated Use "tool_calls" field on AIMessages instead */ tool_calls?: OpenAIToolCall[]; [key: string]: unknown; }; /** Response metadata. For example: response headers, logprobs, token counts. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any response_metadata?: Record<string, any>; /** * An optional unique identifier for the message. This should ideally be * provided by the provider/model which created the message. */ id?: string; }; export function mergeContent( firstContent: MessageContent, secondContent: MessageContent ): MessageContent { // If first content is a string if (typeof firstContent === "string") { if (typeof secondContent === "string") { return firstContent + secondContent; } else { return [{ type: "text", text: firstContent }, ...secondContent]; } // If both are arrays } else if (Array.isArray(secondContent)) { return ( _mergeLists(firstContent, secondContent) ?? [ ...firstContent, ...secondContent, ] ); } else { // Otherwise, add the second content as a new element of the list return [...firstContent, { type: "text", text: secondContent }]; } } /** * 'Merge' two statuses. If either value passed is 'error', it will return 'error'. Else * it will return 'success'. * * @param {"success" | "error" | undefined} left The existing value to 'merge' with the new value. * @param {"success" | "error" | undefined} right The new value to 'merge' with the existing value * @returns {"success" | "error"} The 'merged' value. 
*/ export function _mergeStatus( left?: "success" | "error", right?: "success" | "error" ): "success" | "error" | undefined { if (left === "error" || right === "error") { return "error"; } return "success"; } // eslint-disable-next-line @typescript-eslint/no-explicit-any function stringifyWithDepthLimit(obj: any, depthLimit: number): string { // eslint-disable-next-line @typescript-eslint/no-explicit-any function helper(obj: any, currentDepth: number): any { if (typeof obj !== "object" || obj === null || obj === undefined) { return obj; } if (currentDepth >= depthLimit) { if (Array.isArray(obj)) { return "[Array]"; } return "[Object]"; } if (Array.isArray(obj)) { return obj.map((item) => helper(item, currentDepth + 1)); } const result: Record<string, unknown> = {}; for (const key of Object.keys(obj)) { result[key] = helper(obj[key], currentDepth + 1); } return result; } return JSON.stringify(helper(obj, 0), null, 2); } /** * Base class for all types of messages in a conversation. It includes * properties like `content`, `name`, and `additional_kwargs`. It also * includes methods like `toDict()` and `_getType()`. */ export abstract class BaseMessage extends Serializable implements BaseMessageFields { lc_namespace = ["langchain_core", "messages"]; lc_serializable = true; get lc_aliases(): Record<string, string> { // exclude snake case conversion to pascal case return { additional_kwargs: "additional_kwargs", response_metadata: "response_metadata", }; } /** * @deprecated * Use {@link BaseMessage.content} instead. */ get text(): string { return typeof this.content === "string" ? this.content : ""; } /** The content of the message. */ content: MessageContent; /** The name of the message sender in a multi-user chat. */ name?: string; /** Additional keyword arguments */ additional_kwargs: NonNullable<BaseMessageFields["additional_kwargs"]>; /** Response metadata. For example: response headers, logprobs, token counts. 
*/ response_metadata: NonNullable<BaseMessageFields["response_metadata"]>; /** * An optional unique identifier for the message. This should ideally be * provided by the provider/model which created the message. */ id?: string; /** * @deprecated Use .getType() instead or import the proper typeguard. * For example: * * ```ts * import { isAIMessage } from "@langchain/core/messages"; * * const message = new AIMessage("Hello!"); * isAIMessage(message); // true * ``` */ abstract _getType(): MessageType; /** The type of the message. */ getType(): MessageType { return this._getType(); } constructor( fields: string | BaseMessageFields, /** @deprecated */ kwargs?: Record<string, unknown> ) { if (typeof fields === "string") { // eslint-disable-next-line no-param-reassign fields = { content: fields, additional_kwargs: kwargs, response_metadata: {}, }; } // Make sure the default value for additional_kwargs is passed into super() for serialization if (!fields.additional_kwargs) { // eslint-disable-next-line no-param-reassign fields.additional_kwargs = {}; } if (!fields.response_metadata) { // eslint-disable-next-line no-param-reassign fields.response_metadata = {}; } super(fields); this.name = fields.name; this.content = fields.content; this.additional_kwargs = fields.additional_kwargs; this.response_metadata = fields.response_metadata; this.id = fields.id; } toDict(): StoredMessage { return { type: this._getType(), data: (this.toJSON() as SerializedConstructor) .kwargs as StoredMessageData, }; } static lc_name() { return "BaseMessage"; } // Can't be protected for silly reasons get _printableFields(): Record<string, unknown> { return { id: this.id, content: this.content, name: this.name, additional_kwargs: this.additional_kwargs, response_metadata: this.response_metadata, }; } // this private method is used to update the ID for the runtime // value as well as in lc_kwargs for serialisation _updateId(value: string | undefined) { this.id = value; // lc_attributes wouldn't work 
here, because jest compares the // whole object this.lc_kwargs.id = value; } get [Symbol.toStringTag]() { // eslint-disable-next-line @typescript-eslint/no-explicit-any return (this.constructor as any).lc_name(); } // Override the default behavior of console.log [Symbol.for("nodejs.util.inspect.custom")](depth: number | null) { if (depth === null) { return this; } const printable = stringifyWithDepthLimit( this._printableFields, Math.max(4, depth) ); // eslint-disable-next-line @typescript-eslint/no-explicit-any return `${(this.constructor as any).lc_name()} ${printable}`; } } /** * @deprecated Use "tool_calls" field on AIMessages instead */ export type OpenAIToolCall = { /** * The ID of the tool call. */ id: string; /** * The function that the model called. */ function: FunctionCall; /** * The type of the tool. Currently, only `function` is supported. */ type: "function"; index?: number; }; export function isOpenAIToolCallArray( value?: unknown ): value is OpenAIToolCall[] { return ( Array.isArray(value) && value.every((v) => typeof (v as OpenAIToolCall).index === "number") ); } export function _mergeDicts( // eslint-disable-next-line @typescript-eslint/no-explicit-any left: Record<string, any>, // eslint-disable-next-line @typescript-eslint/no-explicit-any right: Record<string, any> // eslint-disable-next-line @typescript-eslint/no-explicit-any ): Record<string, any> { const merged = { ...left }; for (const [key, value] of Object.entries(right)) { if (merged[key] == null) { merged[key] = value; } else if (value == null) { continue; } else if ( typeof merged[key] !== typeof value || Array.isArray(merged[key]) !== Array.isArray(value) ) { throw new Error( `field[${key}] already exists in the message chunk, but with a different type.` ); } else if (typeof merged[key] === "string") { if (key === "type") { // Do not merge 'type' fields continue; } merged[key] += value; } else if (typeof merged[key] === "object" && !Array.isArray(merged[key])) { merged[key] = 
_mergeDicts(merged[key], value); } else if (Array.isArray(merged[key])) { merged[key] = _mergeLists(merged[key], value); } else if (merged[key] === value) { continue; } else { console.warn( `field[${key}] already exists in this message chunk and value has unsupported type.` ); } } return merged; } // eslint-disable-next-line @typescript-eslint/no-explicit-any export function _mergeLists(left?: any[], right?: any[]) { if (left === undefined && right === undefined) { return undefined; } else if (left === undefined || right === undefined) { return left || right; } else { const merged = [...left]; for (const item of right) { if ( typeof item === "object" && "index" in item && typeof item.index === "number" ) { const toMerge = merged.findIndex( (leftItem) => leftItem.index === item.index ); if (toMerge !== -1) { merged[toMerge] = _mergeDicts(merged[toMerge], item); } else { merged.push(item); } } else if ( typeof item === "object" && "text" in item && item.text === "" ) { // No-op - skip empty text blocks continue; } else { merged.push(item); } } return merged; } } // eslint-disable-next-line @typescript-eslint/no-explicit-any export function _mergeObj<T = any>( left: T | undefined, right: T | undefined ): T { if (!left && !right) { throw new Error("Cannot merge two undefined objects."); } if (!left || !right) { return left || (right as T); } else if (typeof left !== typeof right) { throw new Error( `Cannot merge objects of different types.\nLeft ${typeof left}\nRight ${typeof right}` ); } else if (typeof left === "string" && typeof right === "string") { return (left + right) as T; } else if (Array.isArray(left) && Array.isArray(right)) { return _mergeLists(left, right) as T; } else if (typeof left === "object" && typeof right === "object") { return _mergeDicts(left, right) as T; } else if (left === right) { return left; } else { throw new Error( `Can not merge objects of different types.\nLeft ${left}\nRight ${right}` ); } } /** * Represents a chunk of a message, which 
can be concatenated with other * message chunks. It includes a method `_merge_kwargs_dict()` for merging * additional keyword arguments from another `BaseMessageChunk` into this * one. It also overrides the `__add__()` method to support concatenation * of `BaseMessageChunk` instances. */ export abstract class BaseMessageChunk extends BaseMessage { abstract concat(chunk: BaseMessageChunk): BaseMessageChunk; } export type MessageFieldWithRole = { role: StringWithAutocomplete<"user" | "assistant" | MessageType>; content: MessageContent; name?: string; } & Record<string, unknown>; export function _isMessageFieldWithRole( x: BaseMessageLike ): x is MessageFieldWithRole { return typeof (x as MessageFieldWithRole).role === "string"; } export type BaseMessageLike = | BaseMessage | MessageFieldWithRole | [ StringWithAutocomplete< MessageType | "user" | "assistant" | "placeholder" >, MessageContent ] | string /** * @deprecated Specifying "type" is deprecated and will be removed in 0.4.0. */ | ({ type: MessageType | "user" | "assistant" | "placeholder"; } & BaseMessageFields & Record<string, unknown>) | SerializedConstructor; export function isBaseMessage( messageLike?: unknown ): messageLike is BaseMessage { return typeof (messageLike as BaseMessage)?._getType === "function"; } export function isBaseMessageChunk( messageLike?: unknown ): messageLike is BaseMessageChunk { return ( isBaseMessage(messageLike) && typeof (messageLike as BaseMessageChunk).concat === "function" ); }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/messages/utils.ts
import { addLangChainErrorFields } from "../errors/index.js"; import { SerializedConstructor } from "../load/serializable.js"; import { _isToolCall } from "../tools/utils.js"; import { AIMessage, AIMessageChunk, AIMessageChunkFields } from "./ai.js"; import { BaseMessageLike, BaseMessage, isBaseMessage, StoredMessage, StoredMessageV1, BaseMessageFields, _isMessageFieldWithRole, } from "./base.js"; import { ChatMessage, ChatMessageFieldsWithRole, ChatMessageChunk, } from "./chat.js"; import { FunctionMessage, FunctionMessageFieldsWithName, FunctionMessageChunk, } from "./function.js"; import { HumanMessage, HumanMessageChunk } from "./human.js"; import { SystemMessage, SystemMessageChunk } from "./system.js"; import { ToolCall, ToolMessage, ToolMessageFieldsWithToolCallId, } from "./tool.js"; function _coerceToolCall( toolCall: ToolCall | Record<string, unknown> ): ToolCall { if (_isToolCall(toolCall)) { return toolCall; } else if ( typeof toolCall.id === "string" && toolCall.type === "function" && typeof toolCall.function === "object" && toolCall.function !== null && "arguments" in toolCall.function && typeof toolCall.function.arguments === "string" && "name" in toolCall.function && typeof toolCall.function.name === "string" ) { // Handle OpenAI tool call format return { id: toolCall.id, args: JSON.parse(toolCall.function.arguments), name: toolCall.function.name, type: "tool_call", }; } else { // TODO: Throw an error? 
return toolCall as ToolCall; } } function isSerializedConstructor(x: unknown): x is SerializedConstructor { return ( typeof x === "object" && x != null && (x as SerializedConstructor).lc === 1 && Array.isArray((x as SerializedConstructor).id) && (x as SerializedConstructor).kwargs != null && typeof (x as SerializedConstructor).kwargs === "object" ); } function _constructMessageFromParams( params: | (BaseMessageFields & { type: string } & Record<string, unknown>) | SerializedConstructor ) { let type: string; let rest: BaseMessageFields & Record<string, unknown>; // Support serialized messages if (isSerializedConstructor(params)) { const className = params.id.at(-1); if (className === "HumanMessage" || className === "HumanMessageChunk") { type = "user"; } else if (className === "AIMessage" || className === "AIMessageChunk") { type = "assistant"; } else if ( className === "SystemMessage" || className === "SystemMessageChunk" ) { type = "system"; } else { type = "unknown"; } rest = params.kwargs as BaseMessageFields; } else { const { type: extractedType, ...otherParams } = params; type = extractedType; rest = otherParams; } if (type === "human" || type === "user") { return new HumanMessage(rest); } else if (type === "ai" || type === "assistant") { const { tool_calls: rawToolCalls, ...other } = rest; if (!Array.isArray(rawToolCalls)) { return new AIMessage(rest); } const tool_calls = rawToolCalls.map(_coerceToolCall); return new AIMessage({ ...other, tool_calls }); } else if (type === "system") { return new SystemMessage(rest); } else if (type === "tool" && "tool_call_id" in rest) { return new ToolMessage({ ...rest, content: rest.content, tool_call_id: rest.tool_call_id as string, name: rest.name, }); } else { const error = addLangChainErrorFields( new Error( `Unable to coerce message from array: only human, AI, system, or tool message coercion is currently supported.\n\nReceived: ${JSON.stringify( params, null, 2 )}` ), "MESSAGE_COERCION_FAILURE" ); throw error; } } 
export function coerceMessageLikeToMessage( messageLike: BaseMessageLike ): BaseMessage { if (typeof messageLike === "string") { return new HumanMessage(messageLike); } else if (isBaseMessage(messageLike)) { return messageLike; } if (Array.isArray(messageLike)) { const [type, content] = messageLike; return _constructMessageFromParams({ type, content }); } else if (_isMessageFieldWithRole(messageLike)) { const { role: type, ...rest } = messageLike; return _constructMessageFromParams({ ...rest, type }); } else { return _constructMessageFromParams(messageLike); } } /** * This function is used by memory classes to get a string representation * of the chat message history, based on the message content and role. */ export function getBufferString( messages: BaseMessage[], humanPrefix = "Human", aiPrefix = "AI" ): string { const string_messages: string[] = []; for (const m of messages) { let role: string; if (m._getType() === "human") { role = humanPrefix; } else if (m._getType() === "ai") { role = aiPrefix; } else if (m._getType() === "system") { role = "System"; } else if (m._getType() === "function") { role = "Function"; } else if (m._getType() === "tool") { role = "Tool"; } else if (m._getType() === "generic") { role = (m as ChatMessage).role; } else { throw new Error(`Got unsupported message type: ${m._getType()}`); } const nameStr = m.name ? `${m.name}, ` : ""; const readableContent = typeof m.content === "string" ? m.content : JSON.stringify(m.content, null, 2); string_messages.push(`${role}: ${nameStr}${readableContent}`); } return string_messages.join("\n"); } /** * Maps messages from an older format (V1) to the current `StoredMessage` * format. If the message is already in the `StoredMessage` format, it is * returned as is. Otherwise, it transforms the V1 message into a * `StoredMessage`. This function is important for maintaining * compatibility with older message formats. 
*/ function mapV1MessageToStoredMessage( message: StoredMessage | StoredMessageV1 ): StoredMessage { // TODO: Remove this mapper when we deprecate the old message format. if ((message as StoredMessage).data !== undefined) { return message as StoredMessage; } else { const v1Message = message as StoredMessageV1; return { type: v1Message.type, data: { content: v1Message.text, role: v1Message.role, name: undefined, tool_call_id: undefined, }, }; } } export function mapStoredMessageToChatMessage(message: StoredMessage) { const storedMessage = mapV1MessageToStoredMessage(message); switch (storedMessage.type) { case "human": return new HumanMessage(storedMessage.data); case "ai": return new AIMessage(storedMessage.data); case "system": return new SystemMessage(storedMessage.data); case "function": if (storedMessage.data.name === undefined) { throw new Error("Name must be defined for function messages"); } return new FunctionMessage( storedMessage.data as FunctionMessageFieldsWithName ); case "tool": if (storedMessage.data.tool_call_id === undefined) { throw new Error("Tool call ID must be defined for tool messages"); } return new ToolMessage( storedMessage.data as ToolMessageFieldsWithToolCallId ); case "generic": { if (storedMessage.data.role === undefined) { throw new Error("Role must be defined for chat messages"); } return new ChatMessage(storedMessage.data as ChatMessageFieldsWithRole); } default: throw new Error(`Got unexpected type: ${storedMessage.type}`); } } /** * Transforms an array of `StoredMessage` instances into an array of * `BaseMessage` instances. It uses the `mapV1MessageToStoredMessage` * function to ensure all messages are in the `StoredMessage` format, then * creates new instances of the appropriate `BaseMessage` subclass based * on the type of each message. This function is used to prepare stored * messages for use in a chat context. 
*/ export function mapStoredMessagesToChatMessages( messages: StoredMessage[] ): BaseMessage[] { return messages.map(mapStoredMessageToChatMessage); } /** * Transforms an array of `BaseMessage` instances into an array of * `StoredMessage` instances. It does this by calling the `toDict` method * on each `BaseMessage`, which returns a `StoredMessage`. This function * is used to prepare chat messages for storage. */ export function mapChatMessagesToStoredMessages( messages: BaseMessage[] ): StoredMessage[] { return messages.map((message) => message.toDict()); } export function convertToChunk(message: BaseMessage) { const type = message._getType(); if (type === "human") { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new HumanMessageChunk({ ...message }); } else if (type === "ai") { let aiChunkFields: AIMessageChunkFields = { ...message, }; if ("tool_calls" in aiChunkFields) { aiChunkFields = { ...aiChunkFields, tool_call_chunks: aiChunkFields.tool_calls?.map((tc) => ({ ...tc, type: "tool_call_chunk", index: undefined, args: JSON.stringify(tc.args), })), }; } // eslint-disable-next-line @typescript-eslint/no-use-before-define return new AIMessageChunk({ ...aiChunkFields }); } else if (type === "system") { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new SystemMessageChunk({ ...message }); } else if (type === "function") { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new FunctionMessageChunk({ ...message }); // eslint-disable-next-line @typescript-eslint/no-use-before-define } else if (ChatMessage.isInstance(message)) { // eslint-disable-next-line @typescript-eslint/no-use-before-define return new ChatMessageChunk({ ...message }); } else { throw new Error("Unknown message type."); } }
0
lc_public_repos/langchainjs/langchain-core/src/messages
lc_public_repos/langchainjs/langchain-core/src/messages/tests/message_utils.test.ts
import { it, describe, test, expect } from "@jest/globals"; import { filterMessages, mergeMessageRuns, trimMessages, } from "../transformers.js"; import { AIMessage } from "../ai.js"; import { ChatMessage } from "../chat.js"; import { HumanMessage } from "../human.js"; import { SystemMessage } from "../system.js"; import { BaseMessage } from "../base.js"; import { getBufferString, mapChatMessagesToStoredMessages, mapStoredMessagesToChatMessages, } from "../utils.js"; describe("filterMessage", () => { const getMessages = () => [ new SystemMessage("you're a good assistant."), new HumanMessage({ content: "what's your name", id: "foo", name: "example_user", }), new AIMessage({ content: "steve-o", id: "bar", name: "example_assistant" }), new HumanMessage({ content: "what's your favorite color", id: "baz" }), new AIMessage({ content: "silicon blue", id: "blah" }), ]; it("works", () => { const messages = getMessages(); const filteredMessages = filterMessages(messages, { includeNames: ["example_user", "example_assistant"], includeTypes: ["system"], excludeIds: ["bar"], }); expect(filteredMessages).toEqual([ new SystemMessage("you're a good assistant."), new HumanMessage({ content: "what's your name", id: "foo", name: "example_user", }), ]); }); it("can filter messages based on class types", () => { const messages = getMessages(); const filteredMessages = filterMessages(messages, { includeTypes: [HumanMessage, AIMessage], }); expect(filteredMessages).toHaveLength(4); expect(filteredMessages).toEqual([ new HumanMessage({ content: "what's your name", id: "foo", name: "example_user", }), new AIMessage({ content: "steve-o", id: "bar", name: "example_assistant", }), new HumanMessage({ content: "what's your favorite color", id: "baz" }), new AIMessage({ content: "silicon blue", id: "blah" }), ]); }); it("returns a runnable if no messages are passed", () => { const filteredMessagesRunnable = filterMessages(); expect(filteredMessagesRunnable).toBeDefined(); 
expect(filteredMessagesRunnable.lc_namespace).toEqual([ "langchain_core", "runnables", ]); expect("func" in filteredMessagesRunnable).toBeTruthy(); // `func` is protected, so we need to cast it to any to access it // eslint-disable-next-line @typescript-eslint/no-explicit-any expect(typeof (filteredMessagesRunnable as any).func).toBe("function"); }); }); describe("mergeMessageRuns", () => { const getMessages = () => [ new SystemMessage("you're a good assistant."), new HumanMessage({ content: "what's your favorite color", id: "foo" }), new HumanMessage({ content: "wait your favorite food", id: "bar" }), new AIMessage({ content: "my favorite colo", tool_calls: [{ name: "blah_tool", args: { x: 2 }, id: "123" }], id: "baz", }), new AIMessage({ content: [{ type: "text", text: "my favorite dish is lasagna" }], tool_calls: [{ name: "blah_tool", args: { x: -10 }, id: "456" }], id: "blur", }), ]; it("works", () => { const messages = getMessages(); const mergedMessages = mergeMessageRuns(messages); expect(mergedMessages).toHaveLength(3); expect(mergedMessages).toEqual([ new SystemMessage("you're a good assistant."), new HumanMessage({ content: "what's your favorite color\nwait your favorite food", id: "foo", }), new AIMessage({ content: [ { type: "text", text: "my favorite colo" }, { type: "text", text: "my favorite dish is lasagna" }, ], tool_calls: [ { name: "blah_tool", args: { x: 2 }, id: "123", type: "tool_call" }, { name: "blah_tool", args: { x: -10 }, id: "456", type: "tool_call" }, ], id: "baz", }), ]); }); it("returns a runnable if no messages are passed", () => { const mergedMessages = mergeMessageRuns(); expect(mergedMessages).toBeDefined(); expect(mergedMessages.lc_namespace).toEqual([ "langchain_core", "runnables", ]); expect("func" in mergedMessages).toBeTruthy(); // `func` is protected, so we need to cast it to any to access it // eslint-disable-next-line @typescript-eslint/no-explicit-any expect(typeof (mergedMessages as any).func).toBe("function"); }); }); 
describe("trimMessages can trim", () => { const messagesAndTokenCounterFactory = () => { const messages = [ new SystemMessage( "This is a 4 token text. The full message is 10 tokens." ), new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "first", }), new AIMessage({ content: [ { type: "text", text: "This is the FIRST 4 token block." }, { type: "text", text: "This is the SECOND 4 token block." }, ], id: "second", }), new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "third", }), new AIMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "fourth", }), ]; const dummyTokenCounter = (messages: BaseMessage[]): number => { // treat each message like it adds 3 default tokens at the beginning // of the message and at the end of the message. 3 + 4 + 3 = 10 tokens // per message. const defaultContentLen = 4; const defaultMsgPrefixLen = 3; const defaultMsgSuffixLen = 3; let count = 0; for (const msg of messages) { if (typeof msg.content === "string") { count += defaultMsgPrefixLen + defaultContentLen + defaultMsgSuffixLen; } if (Array.isArray(msg.content)) { count += defaultMsgPrefixLen + msg.content.length * defaultContentLen + defaultMsgSuffixLen; } } return count; }; return { messages, dummyTokenCounter, }; }; it("First 30 tokens, not allowing partial messages", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 30, tokenCounter: dummyTokenCounter, strategy: "first", }); expect(trimmedMessages).toHaveLength(2); expect(trimmedMessages).toEqual([ new SystemMessage( "This is a 4 token text. The full message is 10 tokens." ), new HumanMessage({ content: "This is a 4 token text. 
The full message is 10 tokens.", id: "first", }), ]); }); it("First 30 tokens, allowing partial messages", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 30, tokenCounter: dummyTokenCounter, strategy: "first", allowPartial: true, }); expect(trimmedMessages).toHaveLength(3); expect(trimmedMessages).toEqual([ new SystemMessage( "This is a 4 token text. The full message is 10 tokens." ), new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "first", }), new AIMessage({ content: [{ type: "text", text: "This is the FIRST 4 token block." }], id: "second", }), ]); }); it("First 30 tokens, allowing partial messages, have to end on HumanMessage", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 30, tokenCounter: dummyTokenCounter, strategy: "first", allowPartial: true, endOn: "human", }); expect(trimmedMessages).toHaveLength(2); expect(trimmedMessages).toEqual([ new SystemMessage( "This is a 4 token text. The full message is 10 tokens." ), new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "first", }), ]); }); it("Last 30 tokens, including system message, not allowing partial messages", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 30, includeSystem: true, tokenCounter: dummyTokenCounter, strategy: "last", }); expect(trimmedMessages).toHaveLength(3); expect(trimmedMessages).toEqual([ new SystemMessage( "This is a 4 token text. The full message is 10 tokens." ), new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "third", }), new AIMessage({ content: "This is a 4 token text. 
The full message is 10 tokens.", id: "fourth", }), ]); }); it("Last 40 tokens, including system message, allowing partial messages", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 40, tokenCounter: dummyTokenCounter, strategy: "last", allowPartial: true, includeSystem: true, }); expect(trimmedMessages).toHaveLength(4); expect(trimmedMessages).toEqual([ new SystemMessage( "This is a 4 token text. The full message is 10 tokens." ), new AIMessage({ content: [{ type: "text", text: "This is the FIRST 4 token block." }], id: "second", }), new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "third", }), new AIMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "fourth", }), ]); }); it("Last 30 tokens, including system message, allowing partial messages, end on HumanMessage", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 30, tokenCounter: dummyTokenCounter, strategy: "last", endOn: "human", includeSystem: true, allowPartial: true, }); expect(trimmedMessages).toHaveLength(3); expect(trimmedMessages).toEqual([ new SystemMessage( "This is a 4 token text. The full message is 10 tokens." ), new AIMessage({ content: [{ type: "text", text: "This is the FIRST 4 token block." }], id: "second", }), new HumanMessage({ content: "This is a 4 token text. 
The full message is 10 tokens.", id: "third", }), ]); }); it("Last 40 tokens, including system message, allowing partial messages, start on HumanMessage", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 40, tokenCounter: dummyTokenCounter, strategy: "last", includeSystem: true, allowPartial: true, startOn: "human", }); expect(trimmedMessages).toHaveLength(3); console.log(trimmedMessages); expect(trimmedMessages).toEqual([ new SystemMessage( "This is a 4 token text. The full message is 10 tokens." ), new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "third", }), new AIMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "fourth", }), ]); }); it("can filter (startOn) with message classes", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 40, tokenCounter: dummyTokenCounter, startOn: [HumanMessage], }); expect(trimmedMessages).toHaveLength(2); expect(trimmedMessages).toEqual([ new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "third", }), new AIMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "fourth", }), ]); }); it("can filter (endOn) with message classes", async () => { const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = await trimMessages(messages, { maxTokens: 40, tokenCounter: dummyTokenCounter, endOn: [HumanMessage], }); console.log(trimmedMessages); expect(trimmedMessages).toHaveLength(3); expect(trimmedMessages).toEqual([ new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "first", }), new AIMessage({ content: [ { type: "text", text: "This is the FIRST 4 token block." }, { type: "text", text: "This is the SECOND 4 token block." 
}, ], id: "second", }), new HumanMessage({ content: "This is a 4 token text. The full message is 10 tokens.", id: "third", }), ]); }); it("can return a runnable if empty array is passed", () => { const { dummyTokenCounter } = messagesAndTokenCounterFactory(); const trimmedMessages = trimMessages({ maxTokens: 40, tokenCounter: dummyTokenCounter, }); expect(trimmedMessages).toBeDefined(); expect(trimmedMessages.lc_namespace).toEqual([ "langchain_core", "runnables", ]); expect("func" in trimmedMessages).toBeTruthy(); // `func` is protected, so we need to cast it to any to access it // eslint-disable-next-line @typescript-eslint/no-explicit-any expect(typeof (trimmedMessages as any).func).toBe("function"); }); }); test("getBufferString can handle complex messages", () => { const messageArr1 = [new HumanMessage("Hello there!")]; const messageArr2 = [ new AIMessage({ content: [ { type: "text", text: "Hello there!", }, ], }), ]; const messageArr3 = [ new HumanMessage({ content: [ { type: "image_url", image_url: { url: "https://example.com/image.jpg", }, }, { type: "image_url", image_url: "https://example.com/image.jpg", }, ], }), ]; const bufferString1 = getBufferString(messageArr1); expect(bufferString1).toBe("Human: Hello there!"); const bufferString2 = getBufferString(messageArr2); expect(bufferString2).toBe( `AI: ${JSON.stringify( [ { type: "text", text: "Hello there!", }, ], null, 2 )}` ); const bufferString3 = getBufferString(messageArr3); expect(bufferString3).toBe( `Human: ${JSON.stringify( [ { type: "image_url", image_url: { url: "https://example.com/image.jpg", }, }, { type: "image_url", image_url: "https://example.com/image.jpg", }, ], null, 2 )}` ); }); describe("chat message conversions", () => { it("can convert a chat message to a stored message and back", () => { const originalMessages = [ new ChatMessage("I'm a generic message!", "human"), new HumanMessage("I'm a human message!"), ]; const storedMessages = mapChatMessagesToStoredMessages(originalMessages); 
const convertedBackMessages = mapStoredMessagesToChatMessages(storedMessages); expect(convertedBackMessages).toEqual(originalMessages); }); });
0
lc_public_repos/langchainjs/langchain-core/src/messages
lc_public_repos/langchainjs/langchain-core/src/messages/tests/base_message.test.ts
import { test, describe, it, expect } from "@jest/globals"; import { ChatPromptTemplate } from "../../prompts/chat.js"; import { HumanMessage, AIMessage, ToolMessage, ToolMessageChunk, AIMessageChunk, coerceMessageLikeToMessage, SystemMessage, } from "../index.js"; import { load } from "../../load/index.js"; import { concat } from "../../utils/stream.js"; test("Test ChatPromptTemplate can format OpenAI content image messages", async () => { const message = new HumanMessage({ content: [ { type: "image_url", image_url: { url: `data:image/jpeg;base64,{image_string}`, }, }, ], }); const prompt = ChatPromptTemplate.fromMessages([ message, ["ai", "Will this format with multiple messages?: {yes_or_no}"], ]); const formatted = await prompt.invoke({ image_string: "base_64_encoded_string", yes_or_no: "YES!", }); expect(formatted.messages[0].content[0]).toEqual({ type: "image_url", image_url: { url: "data:image/jpeg;base64,base_64_encoded_string", }, }); expect(formatted.messages[1].content).toEqual( "Will this format with multiple messages?: YES!" ); }); test("Test ChatPromptTemplate can format OpenAI content image messages", async () => { const message = new HumanMessage({ content: [ { type: "image_url", image_url: { url: `data:image/jpeg;base64,{image_string}`, }, }, ], }); const prompt = ChatPromptTemplate.fromMessages([ message, ["ai", "Will this format with multiple messages?: {yes_or_no}"], ]); const formatted = await prompt.invoke({ image_string: "base_64_encoded_string", yes_or_no: "YES!", }); expect(formatted.messages[0].content[0]).toEqual({ type: "image_url", image_url: { url: "data:image/jpeg;base64,base_64_encoded_string", }, }); expect(formatted.messages[1].content).toEqual( "Will this format with multiple messages?: YES!" 
); }); test("Deserialisation and serialisation of additional_kwargs and tool_call_id", async () => { const config = { importMap: { messages: { AIMessage } }, optionalImportEntrypoints: [], optionalImportsMap: {}, secretsMap: {}, }; const message = new AIMessage({ content: "", additional_kwargs: { tool_calls: [ { id: "call_tXJNP1S6LHT5tLfaNHCbYCtH", type: "function" as const, function: { name: "Weather", arguments: '{\n "location": "Prague"\n}', }, }, ], }, }); const deserialized: AIMessage = await load(JSON.stringify(message), config); expect(deserialized).toEqual(message); }); test("Deserialisation and serialisation of tool_call_id", async () => { const config = { importMap: { messages: { ToolMessage } }, optionalImportEntrypoints: [], optionalImportsMap: {}, secretsMap: {}, }; const message = new ToolMessage({ content: '{"value": 32}', tool_call_id: "call_tXJNP1S6LHT5tLfaNHCbYCtH", }); const deserialized: ToolMessage = await load(JSON.stringify(message), config); expect(deserialized).toEqual(message); }); test("Deserialisation and serialisation of messages with ID", async () => { const config = { importMap: { messages: { AIMessage } }, optionalImportEntrypoints: [], optionalImportsMap: {}, secretsMap: {}, }; const messageId = "uuid-1234"; const message = new AIMessage({ content: "The sky is blue because...", id: messageId, }); const deserialized: AIMessage = await load(JSON.stringify(message), config); expect(deserialized).toEqual(message); expect(deserialized.id).toBe(messageId); }); test("Can concat artifact (string) of ToolMessageChunk", () => { const rawOutputOne = "Hello"; const rawOutputTwo = " world"; const chunk1 = new ToolMessageChunk({ content: "Hello", tool_call_id: "1", artifact: rawOutputOne, }); const chunk2 = new ToolMessageChunk({ content: " world", tool_call_id: "1", artifact: rawOutputTwo, }); const concated = chunk1.concat(chunk2); expect(concated.artifact).toBe(`${rawOutputOne}${rawOutputTwo}`); }); test("Can concat artifact (array) of 
ToolMessageChunk", () => { const rawOutputOne = ["Hello", " world"]; const rawOutputTwo = ["!!"]; const chunk1 = new ToolMessageChunk({ content: "Hello", tool_call_id: "1", artifact: rawOutputOne, }); const chunk2 = new ToolMessageChunk({ content: " world", tool_call_id: "1", artifact: rawOutputTwo, }); const concated = chunk1.concat(chunk2); expect(concated.artifact).toEqual(["Hello", " world", "!!"]); }); test("Can concat artifact (object) of ToolMessageChunk", () => { const rawOutputOne = { foo: "bar", }; const rawOutputTwo = { bar: "baz", }; const chunk1 = new ToolMessageChunk({ content: "Hello", tool_call_id: "1", artifact: rawOutputOne, }); const chunk2 = new ToolMessageChunk({ content: " world", tool_call_id: "1", artifact: rawOutputTwo, }); const concated = chunk1.concat(chunk2); expect(concated.artifact).toEqual({ foo: "bar", bar: "baz", }); }); describe("Complex AIMessageChunk concat", () => { it("concatenates content arrays of strings", () => { expect( new AIMessageChunk({ content: [{ type: "text", text: "I am" }], id: "ai4", }).concat( new AIMessageChunk({ content: [{ type: "text", text: " indeed." }] }) ) ).toEqual( new AIMessageChunk({ id: "ai4", content: [ { type: "text", text: "I am" }, { type: "text", text: " indeed." }, ], }) ); }); it("concatenates mixed content arrays", () => { expect( new AIMessageChunk({ content: [{ index: 0, type: "text", text: "I am" }], }).concat( new AIMessageChunk({ content: [{ type: "text", text: " indeed." }] }) ) ).toEqual( new AIMessageChunk({ content: [ { index: 0, type: "text", text: "I am" }, { type: "text", text: " indeed." }, ], }) ); }); it("merges content arrays with same index", () => { expect( new AIMessageChunk({ content: [{ index: 0, text: "I am" }] }).concat( new AIMessageChunk({ content: [{ index: 0, text: " indeed." }] }) ) ).toEqual( new AIMessageChunk({ content: [{ index: 0, text: "I am indeed." 
}] }) ); }); it("does not merge when one chunk is missing an index", () => { expect( new AIMessageChunk({ content: [{ index: 0, text: "I am" }] }).concat( new AIMessageChunk({ content: [{ text: " indeed." }] }) ) ).toEqual( new AIMessageChunk({ content: [{ index: 0, text: "I am" }, { text: " indeed." }], }) ); }); it("does not create a holey array when there's a gap between indexes", () => { expect( new AIMessageChunk({ content: [{ index: 0, text: "I am" }] }).concat( new AIMessageChunk({ content: [{ index: 2, text: " indeed." }] }) ) ).toEqual( new AIMessageChunk({ content: [ { index: 0, text: "I am" }, { index: 2, text: " indeed." }, ], }) ); }); it("does not merge content arrays with separate indexes", () => { expect( new AIMessageChunk({ content: [{ index: 0, text: "I am" }] }).concat( new AIMessageChunk({ content: [{ index: 1, text: " indeed." }] }) ) ).toEqual( new AIMessageChunk({ content: [ { index: 0, text: "I am" }, { index: 1, text: " indeed." }, ], }) ); }); it("merges content arrays with same index and type", () => { expect( new AIMessageChunk({ content: [{ index: 0, text: "I am", type: "text_block" }], }).concat( new AIMessageChunk({ content: [{ index: 0, text: " indeed.", type: "text_block" }], }) ) ).toEqual( new AIMessageChunk({ content: [{ index: 0, text: "I am indeed.", type: "text_block" }], }) ); }); it("merges content arrays with same index and different types without updating type", () => { expect( new AIMessageChunk({ content: [{ index: 0, text: "I am", type: "text_block" }], }).concat( new AIMessageChunk({ content: [{ index: 0, text: " indeed.", type: "text_block_delta" }], }) ) ).toEqual( new AIMessageChunk({ content: [{ index: 0, text: "I am indeed.", type: "text_block" }], }) ); }); it("concatenates empty string content and merges other fields", () => { expect( new AIMessageChunk({ content: [{ index: 0, type: "text", text: "I am" }], }).concat( new AIMessageChunk({ content: [{ type: "text", text: "" }], response_metadata: { extra: 
"value" }, }) ) ).toEqual( new AIMessageChunk({ content: [{ index: 0, type: "text", text: "I am" }], response_metadata: { extra: "value" }, }) ); }); }); describe("Message like coercion", () => { it("Should convert OpenAI format messages", async () => { const messages = [ { id: "foobar", role: "system", content: "6", }, { role: "user", content: [{ type: "image_url", image_url: { url: "7.1" } }], }, { role: "assistant", content: [{ type: "text", text: "8.1" }], tool_calls: [ { id: "8.5", function: { name: "8.4", arguments: JSON.stringify({ "8.2": "8.3" }), }, type: "function", }, ], }, { role: "tool", content: "10.2", tool_call_id: "10.2", }, ].map(coerceMessageLikeToMessage); expect(messages).toEqual([ new SystemMessage({ id: "foobar", content: "6", }), new HumanMessage({ content: [{ type: "image_url", image_url: { url: "7.1" } }], }), new AIMessage({ content: [{ type: "text", text: "8.1" }], tool_calls: [ { id: "8.5", name: "8.4", args: { "8.2": "8.3" }, type: "tool_call", }, ], }), new ToolMessage({ name: undefined, content: "10.2", tool_call_id: "10.2", }), ]); }); }); describe("usage_metadata serialized", () => { test("usage_metadata is serialized when included in constructor", async () => { const aiMsg = new AIMessage({ content: "hello", usage_metadata: { input_tokens: 1, output_tokens: 1, total_tokens: 2, }, }); const jsonAIMessage = JSON.stringify(aiMsg); expect(jsonAIMessage).toContain("usage_metadata"); expect(jsonAIMessage).toContain("input_tokens"); expect(jsonAIMessage).toContain("output_tokens"); expect(jsonAIMessage).toContain("total_tokens"); }); test("usage_metadata is serialized when included in constructor", async () => { const aiMsg = new AIMessageChunk({ content: "hello", usage_metadata: { input_tokens: 1, output_tokens: 1, total_tokens: 2, }, }); const jsonAIMessage = JSON.stringify(aiMsg); expect(jsonAIMessage).toContain("usage_metadata"); expect(jsonAIMessage).toContain("input_tokens"); expect(jsonAIMessage).toContain("output_tokens"); 
expect(jsonAIMessage).toContain("total_tokens"); }); test("usage_metadata is serialized even when not included in constructor", async () => { const aiMsg = new AIMessageChunk("hello"); const concatenatedAIMessageChunk = concat( aiMsg, new AIMessageChunk({ content: "", usage_metadata: { input_tokens: 1, output_tokens: 1, total_tokens: 2, }, }) ); const jsonConcatenatedAIMessageChunk = JSON.stringify( concatenatedAIMessageChunk ); expect(jsonConcatenatedAIMessageChunk).toContain("usage_metadata"); expect(jsonConcatenatedAIMessageChunk).toContain("input_tokens"); expect(jsonConcatenatedAIMessageChunk).toContain("output_tokens"); expect(jsonConcatenatedAIMessageChunk).toContain("total_tokens"); }); });
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tests/caches.test.ts
import { test, expect } from "@jest/globals"; import { InMemoryCache } from "../caches/base.js"; test("InMemoryCache", async () => { const cache = new InMemoryCache(); await cache.update("foo", "bar", [{ text: "baz" }]); expect(await cache.lookup("foo", "bar")).toEqual([{ text: "baz" }]); });
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tests/document.test.ts
import { test, expect } from "@jest/globals"; import { Document } from "../documents/document.js"; test("Document should handle empty pageContent", () => { const doc = new Document({ pageContent: "" }); expect(doc.pageContent).toEqual(""); });
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tests/context.test.ts
import { test, expect } from "@jest/globals"; import { RunnableLambda } from "../runnables/base.js"; import { getContextVariable, setContextVariable } from "../context.js"; test("Getting and setting context variables within nested runnables", async () => { const nested = RunnableLambda.from(() => { expect(getContextVariable("foo")).toEqual("bar"); expect(getContextVariable("toplevel")).toEqual(9); setContextVariable("foo", "baz"); return getContextVariable("foo"); }); const runnable = RunnableLambda.from(async () => { setContextVariable("foo", "bar"); expect(getContextVariable("foo")).toEqual("bar"); expect(getContextVariable("toplevel")).toEqual(9); const res = await nested.invoke({}); expect(getContextVariable("foo")).toEqual("bar"); return res; }); expect(getContextVariable("foo")).toEqual(undefined); setContextVariable("toplevel", 9); expect(getContextVariable("toplevel")).toEqual(9); const result = await runnable.invoke({}); expect(getContextVariable("toplevel")).toEqual(9); expect(result).toEqual("baz"); });
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/language_models/llms.ts
import { AIMessage, type BaseMessage, getBufferString, } from "../messages/index.js"; import type { BasePromptValueInterface } from "../prompt_values.js"; import { type LLMResult, RUN_KEY, type Generation, GenerationChunk, } from "../outputs.js"; import { type BaseCallbackConfig, CallbackManager, type CallbackManagerForLLMRun, type Callbacks, } from "../callbacks/manager.js"; import { BaseLanguageModel, type BaseLanguageModelCallOptions, type BaseLanguageModelInput, type BaseLanguageModelParams, } from "./base.js"; import type { RunnableConfig } from "../runnables/config.js"; import type { BaseCache } from "../caches/base.js"; import { isStreamEventsHandler } from "../tracers/event_stream.js"; import { isLogStreamHandler } from "../tracers/log_stream.js"; import { concat } from "../utils/stream.js"; export type SerializedLLM = { _model: string; _type: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any } & Record<string, any>; export interface BaseLLMParams extends BaseLanguageModelParams { /** * @deprecated Use `maxConcurrency` instead */ concurrency?: number; } export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {} /** * LLM Wrapper. Takes in a prompt (or prompts) and returns a string. */ export abstract class BaseLLM< CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions > extends BaseLanguageModel<string, CallOptions> { // Backwards compatibility since fields have been moved to RunnableConfig declare ParsedCallOptions: Omit< CallOptions, Exclude<keyof RunnableConfig, "signal" | "timeout" | "maxConcurrency"> >; // Only ever instantiated in main LangChain lc_namespace = ["langchain", "llms", this._llmType()]; constructor({ concurrency, ...rest }: BaseLLMParams) { super(concurrency ? { maxConcurrency: concurrency, ...rest } : rest); } /** * This method takes an input and options, and returns a string. It * converts the input to a prompt value and generates a result based on * the prompt. 
* @param input Input for the LLM. * @param options Options for the LLM call. * @returns A string result based on the prompt. */ async invoke( input: BaseLanguageModelInput, options?: CallOptions ): Promise<string> { const promptValue = BaseLLM._convertInputToPromptValue(input); const result = await this.generatePrompt( [promptValue], options, options?.callbacks ); return result.generations[0][0].text; } // eslint-disable-next-line require-yield async *_streamResponseChunks( _input: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun ): AsyncGenerator<GenerationChunk> { throw new Error("Not implemented."); } protected _separateRunnableConfigFromCallOptionsCompat( options?: Partial<CallOptions> ): [RunnableConfig, this["ParsedCallOptions"]] { // For backwards compat, keep `signal` in both runnableConfig and callOptions const [runnableConfig, callOptions] = super._separateRunnableConfigFromCallOptions(options); (callOptions as this["ParsedCallOptions"]).signal = runnableConfig.signal; return [runnableConfig, callOptions as this["ParsedCallOptions"]]; } async *_streamIterator( input: BaseLanguageModelInput, options?: CallOptions ): AsyncGenerator<string> { // Subclass check required to avoid double callbacks with default implementation if ( this._streamResponseChunks === BaseLLM.prototype._streamResponseChunks ) { yield this.invoke(input, options); } else { const prompt = BaseLLM._convertInputToPromptValue(input); const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptionsCompat(options); const callbackManager_ = await CallbackManager.configure( runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose } ); const extra = { options: callOptions, invocation_params: this?.invocationParams(callOptions), batch_size: 1, }; const runManagers = await callbackManager_?.handleLLMStart( this.toJSON(), [prompt.toString()], runnableConfig.runId, 
undefined, extra, undefined, undefined, runnableConfig.runName ); let generation = new GenerationChunk({ text: "", }); try { for await (const chunk of this._streamResponseChunks( prompt.toString(), callOptions, runManagers?.[0] )) { if (!generation) { generation = chunk; } else { generation = generation.concat(chunk); } if (typeof chunk.text === "string") { yield chunk.text; } } } catch (err) { await Promise.all( (runManagers ?? []).map((runManager) => runManager?.handleLLMError(err) ) ); throw err; } await Promise.all( (runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({ generations: [[generation]], }) ) ); } } /** * This method takes prompt values, options, and callbacks, and generates * a result based on the prompts. * @param promptValues Prompt values for the LLM. * @param options Options for the LLM call. * @param callbacks Callbacks for the LLM call. * @returns An LLMResult based on the prompts. */ async generatePrompt( promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks ): Promise<LLMResult> { const prompts: string[] = promptValues.map((promptValue) => promptValue.toString() ); return this.generate(prompts, options, callbacks); } /** * Run the LLM on the given prompts and input. */ abstract _generate( prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<LLMResult>; /** * Get the parameters used to invoke the model */ // eslint-disable-next-line @typescript-eslint/no-explicit-any invocationParams(_options?: this["ParsedCallOptions"]): any { return {}; } _flattenLLMResult(llmResult: LLMResult): LLMResult[] { const llmResults: LLMResult[] = []; for (let i = 0; i < llmResult.generations.length; i += 1) { const genList = llmResult.generations[i]; if (i === 0) { llmResults.push({ generations: [genList], llmOutput: llmResult.llmOutput, }); } else { const llmOutput = llmResult.llmOutput ? 
{ ...llmResult.llmOutput, tokenUsage: {} } : undefined; llmResults.push({ generations: [genList], llmOutput, }); } } return llmResults; } /** @ignore */ async _generateUncached( prompts: string[], parsedOptions: this["ParsedCallOptions"], handledOptions: BaseCallbackConfig ): Promise<LLMResult> { const callbackManager_ = await CallbackManager.configure( handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose } ); const extra = { options: parsedOptions, invocation_params: this?.invocationParams(parsedOptions), batch_size: prompts.length, }; const runManagers = await callbackManager_?.handleLLMStart( this.toJSON(), prompts, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions?.runName ); // Even if stream is not explicitly called, check if model is implicitly // called from streamEvents() or streamLog() to get all streamed events. // Bail out if _streamResponseChunks not overridden const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => { return isStreamEventsHandler(handler) || isLogStreamHandler(handler); }); let output: LLMResult; if ( hasStreamingHandler && prompts.length === 1 && this._streamResponseChunks !== BaseLLM.prototype._streamResponseChunks ) { try { const stream = await this._streamResponseChunks( prompts[0], parsedOptions, runManagers?.[0] ); let aggregated; for await (const chunk of stream) { if (aggregated === undefined) { aggregated = chunk; } else { aggregated = concat(aggregated, chunk); } } if (aggregated === undefined) { throw new Error("Received empty response from chat model call."); } output = { generations: [[aggregated]], llmOutput: {} }; await runManagers?.[0].handleLLMEnd(output); } catch (e) { await runManagers?.[0].handleLLMError(e); throw e; } } else { try { output = await this._generate(prompts, parsedOptions, runManagers?.[0]); } catch (err) { await Promise.all( (runManagers ?? 
[]).map((runManager) => runManager?.handleLLMError(err) ) ); throw err; } const flattenedOutputs: LLMResult[] = this._flattenLLMResult(output); await Promise.all( (runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i]) ) ); } const runIds = runManagers?.map((manager) => manager.runId) || undefined; // This defines RUN_KEY as a non-enumerable property on the output object // so that it is not serialized when the output is stringified, and so that // it isnt included when listing the keys of the output object. Object.defineProperty(output, RUN_KEY, { value: runIds ? { runIds } : undefined, configurable: true, }); return output; } async _generateCached({ prompts, cache, llmStringKey, parsedOptions, handledOptions, runId, }: { prompts: string[]; cache: BaseCache<Generation[]>; llmStringKey: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any parsedOptions: any; handledOptions: RunnableConfig; runId?: string; }): Promise<LLMResult & { missingPromptIndices: number[] }> { const callbackManager_ = await CallbackManager.configure( handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose } ); const extra = { options: parsedOptions, invocation_params: this?.invocationParams(parsedOptions), batch_size: prompts.length, cached: true, }; const runManagers = await callbackManager_?.handleLLMStart( this.toJSON(), prompts, runId, undefined, extra, undefined, undefined, handledOptions?.runName ); // generate results const missingPromptIndices: number[] = []; const results = await Promise.allSettled( prompts.map(async (prompt, index) => { const result = await cache.lookup(prompt, llmStringKey); if (result == null) { missingPromptIndices.push(index); } return result; }) ); // Map run managers to the results before filtering out null results // Null results are just absent from the cache. 
const cachedResults = results .map((result, index) => ({ result, runManager: runManagers?.[index] })) .filter( ({ result }) => (result.status === "fulfilled" && result.value != null) || result.status === "rejected" ); // Handle results and call run managers const generations: Generation[][] = []; await Promise.all( cachedResults.map(async ({ result: promiseResult, runManager }, i) => { if (promiseResult.status === "fulfilled") { const result = promiseResult.value as Generation[]; generations[i] = result; if (result.length) { await runManager?.handleLLMNewToken(result[0].text); } return runManager?.handleLLMEnd({ generations: [result], }); } else { // status === "rejected" await runManager?.handleLLMError(promiseResult.reason); return Promise.reject(promiseResult.reason); } }) ); const output = { generations, missingPromptIndices, }; // This defines RUN_KEY as a non-enumerable property on the output object // so that it is not serialized when the output is stringified, and so that // it isnt included when listing the keys of the output object. Object.defineProperty(output, RUN_KEY, { value: runManagers ? { runIds: runManagers?.map((manager) => manager.runId) } : undefined, configurable: true, }); return output; } /** * Run the LLM on the given prompts and input, handling caching. */ async generate( prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks ): Promise<LLMResult> { if (!Array.isArray(prompts)) { throw new Error("Argument 'prompts' is expected to be a string[]"); } let parsedOptions: CallOptions | undefined; if (Array.isArray(options)) { parsedOptions = { stop: options } as CallOptions; } else { parsedOptions = options; } const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptionsCompat(parsedOptions); runnableConfig.callbacks = runnableConfig.callbacks ?? 
callbacks; if (!this.cache) { return this._generateUncached(prompts, callOptions, runnableConfig); } const { cache } = this; const llmStringKey = this._getSerializedCacheKeyParametersForCall( callOptions as CallOptions ); const { generations, missingPromptIndices } = await this._generateCached({ prompts, cache, llmStringKey, parsedOptions: callOptions, handledOptions: runnableConfig, runId: runnableConfig.runId, }); let llmOutput = {}; if (missingPromptIndices.length > 0) { const results = await this._generateUncached( missingPromptIndices.map((i) => prompts[i]), callOptions, runnableConfig ); await Promise.all( results.generations.map(async (generation, index) => { const promptIndex = missingPromptIndices[index]; generations[promptIndex] = generation; return cache.update(prompts[promptIndex], llmStringKey, generation); }) ); llmOutput = results.llmOutput ?? {}; } return { generations, llmOutput } as LLMResult; } /** * @deprecated Use .invoke() instead. Will be removed in 0.2.0. * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output. */ async call( prompt: string, options?: string[] | CallOptions, callbacks?: Callbacks ): Promise<string> { const { generations } = await this.generate([prompt], options, callbacks); return generations[0][0].text; } /** * @deprecated Use .invoke() instead. Will be removed in 0.2.0. * * This method is similar to `call`, but it's used for making predictions * based on the input text. * @param text Input text for the prediction. * @param options Options for the LLM call. * @param callbacks Callbacks for the LLM call. * @returns A prediction based on the input text. */ async predict( text: string, options?: string[] | CallOptions, callbacks?: Callbacks ): Promise<string> { return this.call(text, options, callbacks); } /** * @deprecated Use .invoke() instead. Will be removed in 0.2.0. 
* * This method takes a list of messages, options, and callbacks, and * returns a predicted message. * @param messages A list of messages for the prediction. * @param options Options for the LLM call. * @param callbacks Callbacks for the LLM call. * @returns A predicted message based on the list of messages. */ async predictMessages( messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks ): Promise<BaseMessage> { const text = getBufferString(messages); const prediction = await this.call(text, options, callbacks); return new AIMessage(prediction); } /** * Get the identifying parameters of the LLM. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any _identifyingParams(): Record<string, any> { return {}; } /** * Return the string type key uniquely identifying this class of LLM. */ abstract _llmType(): string; /** * @deprecated * Return a json-like object representing this LLM. */ serialize(): SerializedLLM { return { ...this._identifyingParams(), _type: this._llmType(), _model: this._modelType(), }; } _modelType(): string { return "base_llm" as const; } } /** * LLM class that provides a simpler interface to subclass than {@link BaseLLM}. * * Requires only implementing a simpler {@link _call} method instead of {@link _generate}. * * @augments BaseLLM */ export abstract class LLM< CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions > extends BaseLLM<CallOptions> { /** * Run the LLM on the given prompt and input. */ abstract _call( prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<string>; async _generate( prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<LLMResult> { const generations: Generation[][] = await Promise.all( prompts.map((prompt, promptIndex) => this._call(prompt, { ...options, promptIndex }, runManager).then( (text) => [{ text }] ) ) ); return { generations }; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/language_models/chat_models.ts
import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { AIMessage, type BaseMessage, BaseMessageChunk, type BaseMessageLike, HumanMessage, coerceMessageLikeToMessage, AIMessageChunk, isAIMessageChunk, } from "../messages/index.js"; import type { BasePromptValueInterface } from "../prompt_values.js"; import { LLMResult, RUN_KEY, type ChatGeneration, ChatGenerationChunk, type ChatResult, type Generation, } from "../outputs.js"; import { BaseLanguageModel, type StructuredOutputMethodOptions, type ToolDefinition, type BaseLanguageModelCallOptions, type BaseLanguageModelInput, type BaseLanguageModelParams, } from "./base.js"; import { CallbackManager, type CallbackManagerForLLMRun, type Callbacks, } from "../callbacks/manager.js"; import type { RunnableConfig } from "../runnables/config.js"; import type { BaseCache } from "../caches/base.js"; import { StructuredToolInterface, StructuredToolParams, } from "../tools/index.js"; import { Runnable, RunnableLambda, RunnableSequence, RunnableToolLike, } from "../runnables/base.js"; import { isStreamEventsHandler } from "../tracers/event_stream.js"; import { isLogStreamHandler } from "../tracers/log_stream.js"; import { concat } from "../utils/stream.js"; import { RunnablePassthrough } from "../runnables/passthrough.js"; import { isZodSchema } from "../utils/types/is_zod_schema.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type ToolChoice = string | Record<string, any> | "auto" | "any"; /** * Represents a serialized chat model. */ export type SerializedChatModel = { _model: string; _type: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any } & Record<string, any>; // todo? /** * Represents a serialized large language model. */ export type SerializedLLM = { _model: string; _type: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any } & Record<string, any>; /** * Represents the parameters for a base chat model. 
 */
export type BaseChatModelParams = BaseLanguageModelParams;

/**
 * Represents the call options for a base chat model.
 */
export type BaseChatModelCallOptions = BaseLanguageModelCallOptions & {
  /**
   * Specifies how the chat model should use tools.
   * @default undefined
   *
   * Possible values:
   * - "auto": The model may choose to use any of the provided tools, or none.
   * - "any": The model must use one of the provided tools.
   * - "none": The model must not use any tools.
   * - A string (not "auto", "any", or "none"): The name of a specific tool the model must use.
   * - An object: A custom schema specifying tool choice parameters. Specific to the provider.
   *
   * Note: Not all providers support tool_choice. An error will be thrown
   * if used with an unsupported model.
   */
  tool_choice?: ToolChoice;
};

/**
 * Creates a transform stream for encoding chat message chunks.
 * String content is encoded directly; non-string (multi-part) content is
 * JSON-stringified before encoding.
 * @deprecated Use {@link BytesOutputParser} instead
 * @returns A TransformStream instance that encodes chat message chunks.
 */
export function createChatMessageChunkEncoderStream() {
  const textEncoder = new TextEncoder();
  return new TransformStream<BaseMessageChunk>({
    transform(chunk: BaseMessageChunk, controller) {
      controller.enqueue(
        textEncoder.encode(
          typeof chunk.content === "string"
            ? chunk.content
            : JSON.stringify(chunk.content)
        )
      );
    },
  });
}

/**
 * Metadata fields reported to LangSmith tracing for a chat-model run
 * (provider, model name, sampling parameters). All optional except the
 * model type discriminator.
 */
export type LangSmithParams = {
  ls_provider?: string;
  ls_model_name?: string;
  ls_model_type: "chat";
  ls_temperature?: number;
  ls_max_tokens?: number;
  ls_stop?: Array<string>;
};

/**
 * The set of tool shapes accepted by {@link BaseChatModel.bindTools}:
 * LangChain structured tools, provider-specific raw objects, OpenAI-style
 * tool definitions, runnable-backed tools, or bare tool params.
 */
export type BindToolsInput =
  | StructuredToolInterface
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  | Record<string, any>
  | ToolDefinition
  | RunnableToolLike
  | StructuredToolParams;

/**
 * Base class for chat models. It extends the BaseLanguageModel class and
 * provides methods for generating chat based on input messages.
 */
export abstract class BaseChatModel<
  CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions,
  // TODO: Fix the parameter order on the next minor version.
  OutputMessageType extends BaseMessageChunk = AIMessageChunk
> extends BaseLanguageModel<OutputMessageType, CallOptions> {
  // Backwards compatibility since fields have been moved to RunnableConfig:
  // config-level keys are stripped from the parsed call options, except the
  // three that callers historically passed through call options.
  declare ParsedCallOptions: Omit<
    CallOptions,
    Exclude<keyof RunnableConfig, "signal" | "timeout" | "maxConcurrency">
  >;

  // Only ever instantiated in main LangChain
  lc_namespace = ["langchain", "chat_models", this._llmType()];

  constructor(fields: BaseChatModelParams) {
    super(fields);
  }

  // Optional hook: providers that report per-call output metadata implement
  // this to merge the metadata of a batched run into a single object.
  _combineLLMOutput?(
    ...llmOutputs: LLMResult["llmOutput"][]
  ): LLMResult["llmOutput"];

  /**
   * Splits an options bag into [RunnableConfig, call options], then copies
   * `signal` back onto the call options.
   */
  protected _separateRunnableConfigFromCallOptionsCompat(
    options?: Partial<CallOptions>
  ): [RunnableConfig, this["ParsedCallOptions"]] {
    // For backwards compat, keep `signal` in both runnableConfig and callOptions
    const [runnableConfig, callOptions] =
      super._separateRunnableConfigFromCallOptions(options);
    (callOptions as this["ParsedCallOptions"]).signal = runnableConfig.signal;
    return [runnableConfig, callOptions as this["ParsedCallOptions"]];
  }

  /**
   * Bind tool-like objects to this chat model.
   * Optional: only models that support tool calling implement it.
   *
   * @param tools A list of tool definitions to bind to this chat model.
   * Can be a structured tool, an OpenAI formatted tool, or an object
   * matching the provider's specific tool schema.
   * @param kwargs Any additional parameters to bind.
   */
  bindTools?(
    tools: BindToolsInput[],
    kwargs?: Partial<CallOptions>
  ): Runnable<BaseLanguageModelInput, OutputMessageType, CallOptions>;

  /**
   * Invokes the chat model with a single input.
   * @param input The input for the language model.
   * @param options The call options.
   * @returns A Promise that resolves to a BaseMessageChunk.
   */
  async invoke(
    input: BaseLanguageModelInput,
    options?: CallOptions
  ): Promise<OutputMessageType> {
    const promptValue = BaseChatModel._convertInputToPromptValue(input);
    const result = await this.generatePrompt(
      [promptValue],
      options,
      options?.callbacks
    );
    // A single input yields a single generation; unwrap it for the caller.
    const chatGeneration = result.generations[0][0] as ChatGeneration;
    // TODO: Remove cast after figuring out inheritance
    return chatGeneration.message as OutputMessageType;
  }

  // Default (non-)implementation: subclasses that support streaming override
  // this. The identity of this method is compared against the prototype below
  // to detect whether a subclass actually streams.
  // eslint-disable-next-line require-yield
  async *_streamResponseChunks(
    _messages: BaseMessage[],
    _options: this["ParsedCallOptions"],
    _runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    throw new Error("Not implemented.");
  }

  /**
   * Streams output chunks for a single input, wiring up callbacks manually
   * (handleChatModelStart / handleLLMEnd / handleLLMError) around the
   * subclass's `_streamResponseChunks`.
   */
  async *_streamIterator(
    input: BaseLanguageModelInput,
    options?: CallOptions
  ): AsyncGenerator<OutputMessageType> {
    // Subclass check required to avoid double callbacks with default implementation
    if (
      this._streamResponseChunks ===
      BaseChatModel.prototype._streamResponseChunks
    ) {
      // No real streaming available: fall back to a single invoke() result.
      yield this.invoke(input, options);
    } else {
      const prompt = BaseChatModel._convertInputToPromptValue(input);
      const messages = prompt.toChatMessages();
      const [runnableConfig, callOptions] =
        this._separateRunnableConfigFromCallOptionsCompat(options);

      // LangSmith params ride along as inheritable run metadata.
      const inheritableMetadata = {
        ...runnableConfig.metadata,
        ...this.getLsParams(callOptions),
      };
      const callbackManager_ = await CallbackManager.configure(
        runnableConfig.callbacks,
        this.callbacks,
        runnableConfig.tags,
        this.tags,
        inheritableMetadata,
        this.metadata,
        { verbose: this.verbose }
      );
      const extra = {
        options: callOptions,
        invocation_params: this?.invocationParams(callOptions),
        batch_size: 1,
      };
      const runManagers = await callbackManager_?.handleChatModelStart(
        this.toJSON(),
        [messages],
        runnableConfig.runId,
        undefined,
        extra,
        undefined,
        undefined,
        runnableConfig.runName
      );
      // Running aggregate of all chunks, reported to handleLLMEnd at the end.
      let generationChunk: ChatGenerationChunk | undefined;
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      let llmOutput: Record<string, any> | undefined;
      try {
        for await (const chunk of this._streamResponseChunks(
          messages,
          callOptions,
          runManagers?.[0]
        )) {
          // Backfill a stable message id from the run id if the provider
          // didn't assign one.
          if (chunk.message.id == null) {
            const runId = runManagers?.at(0)?.runId;
            if (runId != null) chunk.message._updateId(`run-${runId}`);
          }
          // Existing response_metadata wins over per-chunk generationInfo.
          chunk.message.response_metadata = {
            ...chunk.generationInfo,
            ...chunk.message.response_metadata,
          };
          yield chunk.message as OutputMessageType;
          if (!generationChunk) {
            generationChunk = chunk;
          } else {
            generationChunk = generationChunk.concat(chunk);
          }
          // Keep the latest usage metadata seen; later chunks overwrite
          // earlier ones.
          if (
            isAIMessageChunk(chunk.message) &&
            chunk.message.usage_metadata !== undefined
          ) {
            llmOutput = {
              tokenUsage: {
                promptTokens: chunk.message.usage_metadata.input_tokens,
                completionTokens: chunk.message.usage_metadata.output_tokens,
                totalTokens: chunk.message.usage_metadata.total_tokens,
              },
            };
          }
        }
      } catch (err) {
        await Promise.all(
          (runManagers ?? []).map((runManager) =>
            runManager?.handleLLMError(err)
          )
        );
        throw err;
      }
      await Promise.all(
        (runManagers ?? []).map((runManager) =>
          runManager?.handleLLMEnd({
            // TODO: Remove cast after figuring out inheritance
            generations: [[generationChunk as ChatGeneration]],
            llmOutput,
          })
        )
      );
    }
  }

  /**
   * Build LangSmith tracing params for a call. Strips a leading "Chat"
   * from the class name to derive the provider name (e.g. ChatOpenAI -> OpenAI).
   */
  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
    const providerName = this.getName().startsWith("Chat")
      ? this.getName().replace("Chat", "")
      : this.getName();
    return {
      ls_model_type: "chat",
      ls_stop: options.stop,
      ls_provider: providerName,
    };
  }

  /** @ignore */
  async _generateUncached(
    messages: BaseMessageLike[][],
    parsedOptions: this["ParsedCallOptions"],
    handledOptions: RunnableConfig
  ): Promise<LLMResult> {
    const baseMessages = messages.map((messageList) =>
      messageList.map(coerceMessageLikeToMessage)
    );

    const inheritableMetadata = {
      ...handledOptions.metadata,
      ...this.getLsParams(parsedOptions),
    };
    // create callback manager and start run
    const callbackManager_ = await CallbackManager.configure(
      handledOptions.callbacks,
      this.callbacks,
      handledOptions.tags,
      this.tags,
      inheritableMetadata,
      this.metadata,
      { verbose: this.verbose }
    );
    const extra = {
      options: parsedOptions,
      invocation_params: this?.invocationParams(parsedOptions),
      batch_size: 1,
    };
    const runManagers = await callbackManager_?.handleChatModelStart(
      this.toJSON(),
      baseMessages,
      handledOptions.runId,
      undefined,
      extra,
      undefined,
      undefined,
      handledOptions.runName
    );
    const generations: ChatGeneration[][] = [];
    const llmOutputs: LLMResult["llmOutput"][] = [];
    // Even if stream is not explicitly called, check if model is implicitly
    // called from streamEvents() or streamLog() to get all streamed events.
    // Bail out if _streamResponseChunks not overridden
    const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
      return isStreamEventsHandler(handler) || isLogStreamHandler(handler);
    });
    if (
      hasStreamingHandler &&
      baseMessages.length === 1 &&
      this._streamResponseChunks !==
        BaseChatModel.prototype._streamResponseChunks
    ) {
      // Streaming path: only taken for a single-prompt batch with a streaming
      // handler attached and a real _streamResponseChunks override.
      try {
        const stream = await this._streamResponseChunks(
          baseMessages[0],
          parsedOptions,
          runManagers?.[0]
        );
        let aggregated;
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        let llmOutput: Record<string, any> | undefined;
        for await (const chunk of stream) {
          if (chunk.message.id == null) {
            const runId = runManagers?.at(0)?.runId;
            if (runId != null) chunk.message._updateId(`run-${runId}`);
          }
          if (aggregated === undefined) {
            aggregated = chunk;
          } else {
            aggregated = concat(aggregated, chunk);
          }
          if (
            isAIMessageChunk(chunk.message) &&
            chunk.message.usage_metadata !== undefined
          ) {
            llmOutput = {
              tokenUsage: {
                promptTokens: chunk.message.usage_metadata.input_tokens,
                completionTokens: chunk.message.usage_metadata.output_tokens,
                totalTokens: chunk.message.usage_metadata.total_tokens,
              },
            };
          }
        }
        if (aggregated === undefined) {
          throw new Error("Received empty response from chat model call.");
        }
        generations.push([aggregated]);
        await runManagers?.[0].handleLLMEnd({
          generations,
          llmOutput,
        });
      } catch (e) {
        await runManagers?.[0].handleLLMError(e);
        throw e;
      }
    } else {
      // generate results
      // Non-streaming path: run all prompts concurrently; allSettled so one
      // failure does not cancel the other prompts' callbacks.
      const results = await Promise.allSettled(
        baseMessages.map((messageList, i) =>
          this._generate(
            messageList,
            { ...parsedOptions, promptIndex: i },
            runManagers?.[i]
          )
        )
      );
      // handle results
      await Promise.all(
        results.map(async (pResult, i) => {
          if (pResult.status === "fulfilled") {
            const result = pResult.value;
            for (const generation of result.generations) {
              if (generation.message.id == null) {
                const runId = runManagers?.at(0)?.runId;
                if (runId != null) generation.message._updateId(`run-${runId}`);
              }
              generation.message.response_metadata = {
                ...generation.generationInfo,
                ...generation.message.response_metadata,
              };
            }
            // For single-generation results, also surface llmOutput in the
            // message's response_metadata.
            if (result.generations.length === 1) {
              result.generations[0].message.response_metadata = {
                ...result.llmOutput,
                ...result.generations[0].message.response_metadata,
              };
            }
            generations[i] = result.generations;
            llmOutputs[i] = result.llmOutput;
            return runManagers?.[i]?.handleLLMEnd({
              generations: [result.generations],
              llmOutput: result.llmOutput,
            });
          } else {
            // status === "rejected"
            await runManagers?.[i]?.handleLLMError(pResult.reason);
            return Promise.reject(pResult.reason);
          }
        })
      );
    }
    // create combined output
    const output: LLMResult = {
      generations,
      llmOutput: llmOutputs.length
        ? this._combineLLMOutput?.(...llmOutputs)
        : undefined,
    };
    // Non-enumerable so serialization does not include run ids.
    Object.defineProperty(output, RUN_KEY, {
      value: runManagers
        ? { runIds: runManagers?.map((manager) => manager.runId) }
        : undefined,
      configurable: true,
    });
    return output;
  }

  /**
   * Looks up each prompt in the cache; indices with no cached entry are
   * reported back in `missingPromptIndices` for the caller to generate.
   */
  async _generateCached({
    messages,
    cache,
    llmStringKey,
    parsedOptions,
    handledOptions,
  }: {
    messages: BaseMessageLike[][];
    cache: BaseCache<Generation[]>;
    llmStringKey: string;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    parsedOptions: any;
    handledOptions: RunnableConfig;
  }): Promise<LLMResult & { missingPromptIndices: number[] }> {
    const baseMessages = messages.map((messageList) =>
      messageList.map(coerceMessageLikeToMessage)
    );

    const inheritableMetadata = {
      ...handledOptions.metadata,
      ...this.getLsParams(parsedOptions),
    };
    // create callback manager and start run
    const callbackManager_ = await CallbackManager.configure(
      handledOptions.callbacks,
      this.callbacks,
      handledOptions.tags,
      this.tags,
      inheritableMetadata,
      this.metadata,
      { verbose: this.verbose }
    );
    const extra = {
      options: parsedOptions,
      invocation_params: this?.invocationParams(parsedOptions),
      batch_size: 1,
      cached: true,
    };
    const runManagers = await callbackManager_?.handleChatModelStart(
      this.toJSON(),
      baseMessages,
      handledOptions.runId,
      undefined,
      extra,
      undefined,
      undefined,
      handledOptions.runName
    );
    // generate results
    const missingPromptIndices: number[] = [];
    const results = await Promise.allSettled(
      baseMessages.map(async (baseMessage, index) => {
        // Join all content into one string for the prompt index
        const prompt =
          BaseChatModel._convertInputToPromptValue(baseMessage).toString();
        const result = await cache.lookup(prompt, llmStringKey);
        if (result == null) {
          missingPromptIndices.push(index);
        }
        return result;
      })
    );
    // Map run managers to the results before filtering out null results
    // Null results are just absent from the cache.
    const cachedResults = results
      .map((result, index) => ({ result, runManager: runManagers?.[index] }))
      .filter(
        ({ result }) =>
          (result.status === "fulfilled" && result.value != null) ||
          result.status === "rejected"
      );
    // Handle results and call run managers
    const generations: Generation[][] = [];
    await Promise.all(
      cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
        if (promiseResult.status === "fulfilled") {
          const result = promiseResult.value as Generation[];
          generations[i] = result;
          if (result.length) {
            // Replay the cached text through the token callback so streaming
            // consumers still observe output.
            await runManager?.handleLLMNewToken(result[0].text);
          }
          return runManager?.handleLLMEnd({
            generations: [result],
          });
        } else {
          // status === "rejected"
          await runManager?.handleLLMError(promiseResult.reason);
          return Promise.reject(promiseResult.reason);
        }
      })
    );
    const output = {
      generations,
      missingPromptIndices,
    };
    // This defines RUN_KEY as a non-enumerable property on the output object
    // so that it is not serialized when the output is stringified, and so that
    // it isnt included when listing the keys of the output object.
    Object.defineProperty(output, RUN_KEY, {
      value: runManagers
        ? { runIds: runManagers?.map((manager) => manager.runId) }
        : undefined,
      configurable: true,
    });
    return output;
  }

  /**
   * Generates chat based on the input messages.
   * @param messages An array of arrays of BaseMessage instances.
   * @param options The call options or an array of stop sequences.
   * @param callbacks The callbacks for the language model.
   * @returns A Promise that resolves to an LLMResult.
   */
  async generate(
    messages: BaseMessageLike[][],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<LLMResult> {
    // parse call options: a bare string array is shorthand for stop sequences
    let parsedOptions: CallOptions | undefined;
    if (Array.isArray(options)) {
      parsedOptions = { stop: options } as CallOptions;
    } else {
      parsedOptions = options;
    }
    const baseMessages = messages.map((messageList) =>
      messageList.map(coerceMessageLikeToMessage)
    );
    const [runnableConfig, callOptions] =
      this._separateRunnableConfigFromCallOptionsCompat(parsedOptions);
    // Positional callbacks only apply when none were set via config.
    runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;

    // No cache configured: generate everything directly.
    if (!this.cache) {
      return this._generateUncached(baseMessages, callOptions, runnableConfig);
    }

    const { cache } = this;
    const llmStringKey = this._getSerializedCacheKeyParametersForCall(
      callOptions as CallOptions
    );

    // First serve what the cache already has...
    const { generations, missingPromptIndices } = await this._generateCached({
      messages: baseMessages,
      cache,
      llmStringKey,
      parsedOptions: callOptions,
      handledOptions: runnableConfig,
    });

    let llmOutput = {};
    // ...then generate only the cache misses and write them back.
    if (missingPromptIndices.length > 0) {
      const results = await this._generateUncached(
        missingPromptIndices.map((i) => baseMessages[i]),
        callOptions,
        runnableConfig
      );
      await Promise.all(
        results.generations.map(async (generation, index) => {
          const promptIndex = missingPromptIndices[index];
          generations[promptIndex] = generation;
          // Join all content into one string for the prompt index
          const prompt = BaseChatModel._convertInputToPromptValue(
            baseMessages[promptIndex]
          ).toString();
          return cache.update(prompt, llmStringKey, generation);
        })
      );
      llmOutput = results.llmOutput ??
        {};
    }

    return { generations, llmOutput } as LLMResult;
  }

  /**
   * Get the parameters used to invoke the model.
   * The base implementation exposes none; providers override this.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  invocationParams(_options?: this["ParsedCallOptions"]): any {
    return {};
  }

  _modelType(): string {
    return "base_chat_model" as const;
  }

  /** Return the string type key uniquely identifying this class of model. */
  abstract _llmType(): string;

  /**
   * @deprecated
   * Return a json-like object representing this LLM.
   */
  serialize(): SerializedLLM {
    return {
      ...this.invocationParams(),
      _type: this._llmType(),
      _model: this._modelType(),
    };
  }

  /**
   * Generates a prompt based on the input prompt values.
   * Each prompt value is converted to chat messages before generation.
   * @param promptValues An array of BasePromptValue instances.
   * @param options The call options or an array of stop sequences.
   * @param callbacks The callbacks for the language model.
   * @returns A Promise that resolves to an LLMResult.
   */
  async generatePrompt(
    promptValues: BasePromptValueInterface[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<LLMResult> {
    const promptMessages: BaseMessage[][] = promptValues.map((promptValue) =>
      promptValue.toChatMessages()
    );
    return this.generate(promptMessages, options, callbacks);
  }

  /** Provider-specific generation; implemented by each concrete model. */
  abstract _generate(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<ChatResult>;

  /**
   * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
   *
   * Makes a single call to the chat model.
   * @param messages An array of BaseMessage instances.
   * @param options The call options or an array of stop sequences.
   * @param callbacks The callbacks for the language model.
   * @returns A Promise that resolves to a BaseMessage.
   */
  async call(
    messages: BaseMessageLike[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<BaseMessage> {
    const result = await this.generate(
      [messages.map(coerceMessageLikeToMessage)],
      options,
      callbacks
    );
    const generations = result.generations as ChatGeneration[][];
    // Single input -> single generation; unwrap the message.
    return generations[0][0].message;
  }

  /**
   * @deprecated Use .invoke() instead.
   * Will be removed in 0.2.0.
   *
   * Makes a single call to the chat model with a prompt value.
   * @param promptValue The value of the prompt.
   * @param options The call options or an array of stop sequences.
   * @param callbacks The callbacks for the language model.
   * @returns A Promise that resolves to a BaseMessage.
   */
  async callPrompt(
    promptValue: BasePromptValueInterface,
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<BaseMessage> {
    const promptMessages: BaseMessage[] = promptValue.toChatMessages();
    return this.call(promptMessages, options, callbacks);
  }

  /**
   * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
   *
   * Predicts the next message based on the input messages.
   * Thin alias over {@link call}.
   * @param messages An array of BaseMessage instances.
   * @param options The call options or an array of stop sequences.
   * @param callbacks The callbacks for the language model.
   * @returns A Promise that resolves to a BaseMessage.
   */
  async predictMessages(
    messages: BaseMessage[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<BaseMessage> {
    return this.call(messages, options, callbacks);
  }

  /**
   * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
   *
   * Predicts the next message based on a text input.
   * @param text The text input.
   * @param options The call options or an array of stop sequences.
   * @param callbacks The callbacks for the language model.
   * @returns A Promise that resolves to a string.
   */
  async predict(
    text: string,
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<string> {
    const message = new HumanMessage(text);
    const result = await this.call([message], options, callbacks);
    // Multi-part (non-string) content cannot be returned from this string API.
    if (typeof result.content !== "string") {
      throw new Error("Cannot use predict when output is not a string.");
    }
    return result.content;
  }

  // Overload: includeRaw=false (default) -> parsed output only.
  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<false>
  ): Runnable<BaseLanguageModelInput, RunOutput>;

  // Overload: includeRaw=true -> { raw message, parsed output }.
  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<true>
  ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;

  /**
   * Default tool-calling based structured output: binds the schema as a
   * single "function" tool and parses the model's tool call arguments.
   * Requires the subclass to implement {@link bindTools}.
   */
  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<boolean>
  ):
    | Runnable<BaseLanguageModelInput, RunOutput>
    | Runnable<
        BaseLanguageModelInput,
        {
          raw: BaseMessage;
          parsed: RunOutput;
        }
      > {
    if (typeof this.bindTools !== "function") {
      throw new Error(
        `Chat model must implement ".bindTools()" to use withStructuredOutput.`
      );
    }
    if (config?.strict) {
      throw new Error(
        `"strict" mode is not supported for this model by default.`
      );
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const schema: z.ZodType<RunOutput> | Record<string, any> = outputSchema;
    const name = config?.name;
    const description =
      schema.description ?? "A function available to call.";
    const method = config?.method;
    const includeRaw = config?.includeRaw;
    // Base implementation only supports function calling; jsonMode is
    // provider-specific.
    if (method === "jsonMode") {
      throw new Error(
        `Base withStructuredOutput implementation only supports "functionCalling" as a method.`
      );
    }

    let functionName = name ?? "extract";
    let tools: ToolDefinition[];
    if (isZodSchema(schema)) {
      // Zod schemas are converted to JSON Schema for the tool parameters.
      tools = [
        {
          type: "function",
          function: {
            name: functionName,
            description,
            parameters: zodToJsonSchema(schema),
          },
        },
      ];
    } else {
      // Plain JSON-schema-like objects are passed through; a `name` field on
      // the schema overrides the configured function name.
      if ("name" in schema) {
        functionName = schema.name;
      }
      tools = [
        {
          type: "function",
          function: {
            name: functionName,
            description,
            parameters: schema,
          },
        },
      ];
    }

    const llm = this.bindTools(tools);
    // Parser: extract the matching tool call's arguments as the structured
    // output; throws if the model made no (matching) tool call.
    const outputParser = RunnableLambda.from<AIMessageChunk, RunOutput>(
      (input: AIMessageChunk): RunOutput => {
        if (!input.tool_calls || input.tool_calls.length === 0) {
          throw new Error("No tool calls found in the response.");
        }
        const toolCall = input.tool_calls.find(
          (tc) => tc.name === functionName
        );
        if (!toolCall) {
          throw new Error(`No tool call found with name ${functionName}.`);
        }
        return toolCall.args as RunOutput;
      }
    );

    if (!includeRaw) {
      return llm.pipe(outputParser).withConfig({
        runName: "StructuredOutput",
      }) as Runnable<BaseLanguageModelInput, RunOutput>;
    }

    // includeRaw: keep the raw message; parsing failures fall back to
    // parsed=null instead of throwing.
    const parserAssign = RunnablePassthrough.assign({
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      parsed: (input: any, config) => outputParser.invoke(input.raw, config),
    });
    const parserNone = RunnablePassthrough.assign({
      parsed: () => null,
    });
    const parsedWithFallback = parserAssign.withFallbacks({
      fallbacks: [parserNone],
    });
    return RunnableSequence.from<
      BaseLanguageModelInput,
      { raw: BaseMessage; parsed: RunOutput }
    >([
      {
        raw: llm,
      },
      parsedWithFallback,
    ]).withConfig({
      runName: "StructuredOutputRunnable",
    });
  }
}

/**
 * An abstract class that extends BaseChatModel and provides a simple
 * implementation of _generate.
*/ export abstract class SimpleChatModel< CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions > extends BaseChatModel<CallOptions> { abstract _call( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<string>; async _generate( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { const text = await this._call(messages, options, runManager); const message = new AIMessage(text); if (typeof message.content !== "string") { throw new Error( "Cannot generate with a simple chat model when output is not a string." ); } return { generations: [ { text: message.content, message, }, ], }; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/language_models/base.ts
import type { Tiktoken, TiktokenModel } from "js-tiktoken/lite"; import { z } from "zod"; import { type BaseCache, InMemoryCache } from "../caches/base.js"; import { type BasePromptValueInterface, StringPromptValue, ChatPromptValue, } from "../prompt_values.js"; import { type BaseMessage, type BaseMessageLike, type MessageContent, } from "../messages/base.js"; import { coerceMessageLikeToMessage } from "../messages/utils.js"; import { type LLMResult } from "../outputs.js"; import { CallbackManager, Callbacks } from "../callbacks/manager.js"; import { AsyncCaller, AsyncCallerParams } from "../utils/async_caller.js"; import { encodingForModel } from "../utils/tiktoken.js"; import { Runnable, type RunnableInterface } from "../runnables/base.js"; import { RunnableConfig } from "../runnables/config.js"; // https://www.npmjs.com/package/js-tiktoken export const getModelNameForTiktoken = (modelName: string): TiktokenModel => { if (modelName.startsWith("gpt-3.5-turbo-16k")) { return "gpt-3.5-turbo-16k"; } if (modelName.startsWith("gpt-3.5-turbo-")) { return "gpt-3.5-turbo"; } if (modelName.startsWith("gpt-4-32k")) { return "gpt-4-32k"; } if (modelName.startsWith("gpt-4-")) { return "gpt-4"; } if (modelName.startsWith("gpt-4o")) { return "gpt-4o"; } return modelName as TiktokenModel; }; export const getEmbeddingContextSize = (modelName?: string): number => { switch (modelName) { case "text-embedding-ada-002": return 8191; default: return 2046; } }; export const getModelContextSize = (modelName: string): number => { switch (getModelNameForTiktoken(modelName)) { case "gpt-3.5-turbo-16k": return 16384; case "gpt-3.5-turbo": return 4096; case "gpt-4-32k": return 32768; case "gpt-4": return 8192; case "text-davinci-003": return 4097; case "text-curie-001": return 2048; case "text-babbage-001": return 2048; case "text-ada-001": return 2048; case "code-davinci-002": return 8000; case "code-cushman-001": return 2048; default: return 4097; } }; /** * Whether or not the input matches 
 * the OpenAI tool definition.
 * @param {unknown} tool The input to check.
 * @returns {boolean} Whether the input is an OpenAI tool definition.
 */
export function isOpenAITool(tool: unknown): tool is ToolDefinition {
  if (typeof tool !== "object" || !tool) return false;
  if (
    "type" in tool &&
    tool.type === "function" &&
    "function" in tool &&
    typeof tool.function === "object" &&
    tool.function &&
    "name" in tool.function &&
    "parameters" in tool.function
  ) {
    return true;
  }
  return false;
}

interface CalculateMaxTokenProps {
  prompt: string;
  modelName: TiktokenModel;
}

/**
 * Returns the number of tokens left in the model's context window after
 * accounting for `prompt`: context size minus the prompt's token count.
 * May be negative if the prompt exceeds the context window.
 */
export const calculateMaxTokens = async ({
  prompt,
  modelName,
}: CalculateMaxTokenProps) => {
  let numTokens;
  try {
    numTokens = (
      await encodingForModel(getModelNameForTiktoken(modelName))
    ).encode(prompt).length;
  } catch (error) {
    console.warn(
      "Failed to calculate number of tokens, falling back to approximate count"
    );
    // fallback to approximate calculation if tiktoken is not available
    // each token is ~4 characters: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them#
    numTokens = Math.ceil(prompt.length / 4);
  }
  const maxTokens = getModelContextSize(modelName);
  return maxTokens - numTokens;
};

// Default verbosity for models/chains when the caller does not specify one.
const getVerbosity = () => false;

// Serialized form of a language model: `_model`/`_type` discriminators plus
// the model's identifying parameters.
export type SerializedLLM = {
  _model: string;
  _type: string;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
} & Record<string, any>;

export interface BaseLangChainParams {
  // Whether to print out response text.
  verbose?: boolean;
  // Callback handlers invoked during runs.
  callbacks?: Callbacks;
  // Tags attached to traced runs.
  tags?: string[];
  // Arbitrary metadata attached to traced runs.
  metadata?: Record<string, unknown>;
}

/**
 * Base class for language models, chains, tools.
 */
export abstract class BaseLangChain<
    RunInput,
    RunOutput,
    CallOptions extends RunnableConfig = RunnableConfig
  >
  extends Runnable<RunInput, RunOutput, CallOptions>
  implements BaseLangChainParams
{
  /**
   * Whether to print out response text.
   */
  verbose: boolean;

  callbacks?: Callbacks;

  tags?: string[];

  metadata?: Record<string, unknown>;

  // NOTE(review): `callbacks` and `verbose` are mapped to `undefined` here —
  // this appears intended to keep them out of serialized attributes; confirm
  // against the serialization logic in the Serializable base class.
  get lc_attributes(): { [key: string]: undefined } | undefined {
    return {
      callbacks: undefined,
      verbose: undefined,
    };
  }

  constructor(params: BaseLangChainParams) {
    super(params);
    // Fall back to the global default verbosity (false) when unspecified.
    this.verbose = params.verbose ?? getVerbosity();
    this.callbacks = params.callbacks;
    this.tags = params.tags ?? [];
    this.metadata = params.metadata ?? {};
  }
}

/**
 * Base interface for language model parameters.
 * A subclass of {@link BaseLanguageModel} should have a constructor that
 * takes in a parameter that extends this interface.
 */
export interface BaseLanguageModelParams
  extends AsyncCallerParams,
    BaseLangChainParams {
  /**
   * @deprecated Use `callbacks` instead
   */
  callbackManager?: CallbackManager;

  // Response cache: a concrete BaseCache instance, or `true` to use the
  // shared global in-memory cache (see BaseLanguageModel's constructor).
  cache?: BaseCache | boolean;
}

export interface BaseLanguageModelCallOptions extends RunnableConfig {
  /**
   * Stop tokens to use for this call.
   * If not provided, the default stop tokens for the model will be used.
   */
  stop?: string[];
}

export interface FunctionDefinition {
  /**
   * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
   * underscores and dashes, with a maximum length of 64.
   */
  name: string;

  /**
   * The parameters the functions accepts, described as a JSON Schema object. See the
   * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
   * examples, and the
   * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
   * documentation about the format.
   *
   * To describe a function that accepts no parameters, provide the value
   * `{"type": "object", "properties": {}}`.
   */
  parameters: Record<string, unknown>;

  /**
   * A description of what the function does, used by the model to choose when and
   * how to call the function.
   */
  description?: string;
}

// OpenAI-style tool wrapper around a function definition.
export interface ToolDefinition {
  type: "function";
  function: FunctionDefinition;
}

export type FunctionCallOption = {
  name: string;
};

export interface BaseFunctionCallOptions extends BaseLanguageModelCallOptions {
  function_call?: FunctionCallOption;
  functions?: FunctionDefinition[];
}

// Anything a language model can be invoked with: a prompt value, a raw
// string, or a list of message-likes.
export type BaseLanguageModelInput =
  | BasePromptValueInterface
  | string
  | BaseMessageLike[];

// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type StructuredOutputType = z.infer<z.ZodObject<any, any, any, any>>;

export type StructuredOutputMethodOptions<IncludeRaw extends boolean = false> =
  {
    name?: string;
    method?: "functionCalling" | "jsonMode" | "jsonSchema" | string;
    includeRaw?: IncludeRaw;
    /** Whether to use strict mode. Currently only supported by OpenAI models. */
    strict?: boolean;
  };

/** @deprecated Use StructuredOutputMethodOptions instead */
export type StructuredOutputMethodParams<
  RunOutput,
  IncludeRaw extends boolean = false
> = {
  /** @deprecated Pass schema in as the first argument */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  schema: z.ZodType<RunOutput> | Record<string, any>;
  name?: string;
  method?: "functionCalling" | "jsonMode";
  includeRaw?: IncludeRaw;
};

export interface BaseLanguageModelInterface<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunOutput = any,
  CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions
> extends RunnableInterface<BaseLanguageModelInput, RunOutput, CallOptions> {
  get callKeys(): string[];

  generatePrompt(
    promptValues: BasePromptValueInterface[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<LLMResult>;

  /**
   * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
   */
  predict(
    text: string,
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<string>;

  /**
   * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
   */
  predictMessages(
    messages: BaseMessage[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<BaseMessage>;

  _modelType(): string;

  _llmType(): string;

  getNumTokens(content: MessageContent): Promise<number>;

  /**
   * Get the identifying parameters of the LLM.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _identifyingParams(): Record<string, any>;

  serialize(): SerializedLLM;
}

export type LanguageModelOutput = BaseMessage | string;

export type LanguageModelLike = Runnable<
  BaseLanguageModelInput,
  LanguageModelOutput
>;

/**
 * Base class for language models.
 */
export abstract class BaseLanguageModel<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput = any,
    CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions
  >
  extends BaseLangChain<BaseLanguageModelInput, RunOutput, CallOptions>
  implements
    BaseLanguageModelParams,
    BaseLanguageModelInterface<RunOutput, CallOptions>
{
  /**
   * Keys that the language model accepts as call options.
   */
  get callKeys(): string[] {
    return ["stop", "timeout", "signal", "tags", "metadata", "callbacks"];
  }

  /**
   * The async caller should be used by subclasses to make any async calls,
   * which will thus benefit from the concurrency and retry logic.
   */
  caller: AsyncCaller;

  // Resolved response cache; `undefined` means caching is disabled.
  cache?: BaseCache;

  constructor({
    callbacks,
    callbackManager,
    ...params
  }: BaseLanguageModelParams) {
    const { cache, ...rest } = params;
    super({
      // Prefer `callbacks`; fall back to the deprecated `callbackManager`.
      callbacks: callbacks ?? callbackManager,
      ...rest,
    });
    // Coerce the cache param: an object is used as-is, `true` selects the
    // shared global in-memory cache, anything falsy disables caching.
    if (typeof cache === "object") {
      this.cache = cache;
    } else if (cache) {
      this.cache = InMemoryCache.global();
    } else {
      this.cache = undefined;
    }
    // `params` is a rest object and is always defined here; `?? {}` is
    // defensive only.
    this.caller = new AsyncCaller(params ?? {});
  }

  abstract generatePrompt(
    promptValues: BasePromptValueInterface[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<LLMResult>;

  /**
   * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
   */
  abstract predict(
    text: string,
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<string>;

  /**
   * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
   */
  abstract predictMessages(
    messages: BaseMessage[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<BaseMessage>;

  abstract _modelType(): string;

  abstract _llmType(): string;

  // Lazily-initialized tiktoken encoder, cached after the first successful
  // fetch in getNumTokens.
  private _encoding?: Tiktoken;

  /**
   * Counts tokens for string content using the model's tiktoken encoding,
   * falling back to a ~4-chars-per-token estimate when the encoder cannot
   * be loaded. Non-string (multimodal) content currently returns 0.
   */
  async getNumTokens(content: MessageContent) {
    // TODO: Figure out correct value.
    if (typeof content !== "string") {
      return 0;
    }
    // fallback to approximate calculation if tiktoken is not available
    let numTokens = Math.ceil(content.length / 4);

    if (!this._encoding) {
      try {
        // Subclasses exposing a `modelName` property get a model-specific
        // encoding; otherwise gpt2 is used.
        this._encoding = await encodingForModel(
          "modelName" in this
            ? getModelNameForTiktoken(this.modelName as string)
            : "gpt2"
        );
      } catch (error) {
        console.warn(
          "Failed to calculate number of tokens, falling back to approximate count",
          error
        );
      }
    }

    if (this._encoding) {
      try {
        numTokens = this._encoding.encode(content).length;
      } catch (error) {
        console.warn(
          "Failed to calculate number of tokens, falling back to approximate count",
          error
        );
      }
    }

    return numTokens;
  }

  // Normalizes any accepted input form into a prompt value: strings become
  // StringPromptValue, message-like arrays become ChatPromptValue, and
  // existing prompt values pass through.
  protected static _convertInputToPromptValue(
    input: BaseLanguageModelInput
  ): BasePromptValueInterface {
    if (typeof input === "string") {
      return new StringPromptValue(input);
    } else if (Array.isArray(input)) {
      return new ChatPromptValue(input.map(coerceMessageLikeToMessage));
    } else {
      return input;
    }
  }

  /**
   * Get the identifying parameters of the LLM.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _identifyingParams(): Record<string, any> {
    return {};
  }

  /**
   * Create a unique cache key for a specific call to a specific language model.
   * @param callOptions Call options for the model
   * @returns A unique cache key.
   */
  _getSerializedCacheKeyParametersForCall(
    // TODO: Fix when we remove the RunnableLambda backwards compatibility shim.
    // `config` is destructured out and discarded so runnable-config plumbing
    // does not influence the cache key.
    { config, ...callOptions }: CallOptions & { config?: RunnableConfig }
  ): string {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const params: Record<string, any> = {
      ...this._identifyingParams(),
      ...callOptions,
      _type: this._llmType(),
      _model: this._modelType(),
    };
    // Drop undefined values, then serialize as sorted "key:json" pairs so
    // the key is deterministic regardless of property insertion order.
    const filteredEntries = Object.entries(params).filter(
      ([_, value]) => value !== undefined
    );
    const serializedEntries = filteredEntries
      .map(([key, value]) => `${key}:${JSON.stringify(value)}`)
      .sort()
      .join(",");
    return serializedEntries;
  }

  /**
   * @deprecated
   * Return a json-like object representing this LLM.
   */
  serialize(): SerializedLLM {
    return {
      ...this._identifyingParams(),
      _type: this._llmType(),
      _model: this._modelType(),
    };
  }

  /**
   * @deprecated
   * Load an LLM from a json-like object describing it.
   */
  static async deserialize(_data: SerializedLLM): Promise<BaseLanguageModel> {
    throw new Error("Use .toJSON() instead");
  }

  withStructuredOutput?<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    schema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<false>
  ): Runnable<BaseLanguageModelInput, RunOutput>;

  withStructuredOutput?<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    schema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<true>
  ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;

  /**
   * Model wrapper that returns outputs formatted to match the given schema.
   *
   * @template {BaseLanguageModelInput} RunInput The input type for the Runnable, expected to be the same input for the LLM.
   * @template {Record<string, any>} RunOutput The output type for the Runnable, expected to be a Zod schema object for structured output validation.
   *
   * @param {z.ZodEffects<RunOutput>} schema The schema for the structured output. Either as a Zod schema or a valid JSON schema object.
   * If a Zod schema is passed, the returned attributes will be validated, whereas with JSON schema they will not be.
   * @param {string} name The name of the function to call.
   * @param {"functionCalling" | "jsonMode"} [method=functionCalling] The method to use for getting the structured output. Defaults to "functionCalling".
   * @param {boolean | undefined} [includeRaw=false] Whether to include the raw output in the result. Defaults to false.
   * @returns {Runnable<RunInput, RunOutput> | Runnable<RunInput, { raw: BaseMessage; parsed: RunOutput }>} A new runnable that calls the LLM with structured output.
   */
  withStructuredOutput?<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    schema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<boolean>
  ):
    | Runnable<BaseLanguageModelInput, RunOutput>
    | Runnable<
        BaseLanguageModelInput,
        {
          raw: BaseMessage;
          parsed: RunOutput;
        }
      >;
}

/**
 * Shared interface for token usage
 * return type from LLM calls.
 */
export interface TokenUsage {
  completionTokens?: number;

  promptTokens?: number;

  totalTokens?: number;
}
0
lc_public_repos/langchainjs/langchain-core/src/language_models
lc_public_repos/langchainjs/langchain-core/src/language_models/tests/llms.test.ts
/* eslint-disable no-promise-executor-return */
// Tests for the base LLM abstraction, exercised through the fake LLM
// implementations from the testing utilities.
import { test, expect } from "@jest/globals";
import { FakeLLM, FakeStreamingLLM } from "../../utils/testing/index.js";
import { HumanMessagePromptTemplate } from "../../prompts/chat.js";

test("Test FakeLLM uses callbacks", async () => {
  const model = new FakeLLM({});
  let acc = "";
  // Accumulate streamed tokens via the callback and compare against the
  // final returned string.
  const response = await model.invoke("Hello there!", {
    callbacks: [
      {
        handleLLMNewToken: (token: string) => {
          console.log(token);
          acc += token;
        },
      },
    ],
  });
  expect(response).toEqual(acc);
});

test("Test FakeLLM uses callbacks with a cache", async () => {
  const model = new FakeLLM({
    cache: true,
  });
  let acc = "";
  // First call populates the cache; second call should still fire token
  // callbacks even when served from cache.
  const response = await model.invoke("Hello there!");
  const response2 = await model.invoke("Hello there!", {
    callbacks: [
      {
        handleLLMNewToken: (token: string) => {
          console.log(token);
          acc += token;
        },
      },
    ],
  });
  // If callbacks are backgrounded
  await new Promise((resolve) => setTimeout(resolve, 1000));
  expect(response).toEqual(response2);
  expect(response2).toEqual(acc);
});

test("Test FakeStreamingLLM works when streaming through a prompt", async () => {
  const prompt = HumanMessagePromptTemplate.fromTemplate("hello there {name}");
  const model = new FakeStreamingLLM({});
  const chain = prompt.pipe(model);
  const stream = await chain.stream({ name: "test" });
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  // Multiple chunks prove real streaming; joined chunks must equal the
  // formatted prompt echoed back by the fake model.
  expect(chunks.length).toBeGreaterThan(1);
  expect(chunks.join("")).toEqual("Human: hello there test");
});
0
lc_public_repos/langchainjs/langchain-core/src/language_models
lc_public_repos/langchainjs/langchain-core/src/language_models/tests/count_tokens.test.ts
// Tests for token budgeting helpers in language_models/base.ts.
import { test, expect } from "@jest/globals";
import { calculateMaxTokens, getModelContextSize } from "../base.js";

test("properly calculates correct max tokens", async () => {
  // With an empty prompt, the remaining token budget equals the model's
  // full context window.
  expect(
    await calculateMaxTokens({ prompt: "", modelName: "gpt-3.5-turbo-16k" })
  ).toBe(16384);
  expect(
    await calculateMaxTokens({
      prompt: "",
      modelName: "gpt-3.5-turbo-16k-0613",
    })
  ).toBe(16384);
  expect(
    await calculateMaxTokens({ prompt: "", modelName: "gpt-3.5-turbo" })
  ).toBe(4096);
  expect(await calculateMaxTokens({ prompt: "", modelName: "gpt-4" })).toBe(
    8192
  );
  expect(await calculateMaxTokens({ prompt: "", modelName: "gpt-4-32k" })).toBe(
    32768
  );
});

test("properly gets model context size", () => {
  // getModelContextSize is synchronous — the previous `await`s (and async
  // wrapper) were misleading and have been removed.
  expect(getModelContextSize("gpt-3.5-turbo-16k")).toBe(16384);
  expect(getModelContextSize("gpt-3.5-turbo-16k-0613")).toBe(16384);
  expect(getModelContextSize("gpt-3.5-turbo")).toBe(4096);
  expect(getModelContextSize("gpt-4")).toBe(8192);
  expect(getModelContextSize("gpt-4-32k")).toBe(32768);
});
0
lc_public_repos/langchainjs/langchain-core/src/language_models
lc_public_repos/langchainjs/langchain-core/src/language_models/tests/chat_models.test.ts
/* eslint-disable no-promise-executor-return */
// Tests for the base chat model abstraction: message coercion, callbacks,
// caching, withStructuredOutput variants, custom events, and serialization.
import { test, expect } from "@jest/globals";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { FakeChatModel, FakeListChatModel } from "../../utils/testing/index.js";
import { HumanMessage } from "../../messages/human.js";
import { getBufferString } from "../../messages/utils.js";
import { AIMessage } from "../../messages/ai.js";

test("Test ChatModel accepts array shorthand for messages", async () => {
  const model = new FakeChatModel({});
  const response = await model.invoke([["human", "Hello there!"]]);
  expect(response.content).toEqual("Hello there!");
});

test("Test ChatModel accepts object shorthand for messages", async () => {
  const model = new FakeChatModel({});
  const response = await model.invoke([
    {
      type: "human",
      content: "Hello there!",
      additional_kwargs: {},
      example: true,
    },
  ]);
  expect(response.content).toEqual("Hello there!");
});

test("Test ChatModel accepts object with role for messages", async () => {
  const model = new FakeChatModel({});
  const response = await model.invoke([
    {
      role: "human",
      content: "Hello there!!",
      example: true,
    },
  ]);
  expect(response.content).toEqual("Hello there!!");
});

test("Test ChatModel accepts several messages as objects with role", async () => {
  const model = new FakeChatModel({});
  // Mixed roles including an assistant tool call and a matching tool result.
  const response = await model.invoke([
    {
      role: "system",
      content: "You are an assistant.",
    },
    {
      role: "human",
      content: [{ type: "text", text: "What is the weather in SF?" }],
      example: true,
    },
    {
      role: "assistant",
      content: "",
      tool_calls: [
        {
          id: "call_123",
          function: {
            name: "get_weather",
            arguments: JSON.stringify({ location: "sf" }),
          },
          type: "function",
        },
      ],
    },
    {
      role: "tool",
      content: "Pretty nice right now!",
      tool_call_id: "call_123",
    },
  ]);
  // FakeChatModel echoes the message contents back joined by newlines;
  // complex content is JSON-stringified with 2-space indentation.
  expect(response.content).toEqual(
    [
      "You are an assistant.",
      JSON.stringify(
        [{ type: "text", text: "What is the weather in SF?" }],
        null,
        2
      ),
      "",
      "Pretty nice right now!",
    ].join("\n")
  );
});

test("Test ChatModel uses callbacks", async () => {
  const model = new FakeChatModel({});
  let acc = "";
  const response = await model.invoke("Hello there!", {
    callbacks: [
      {
        handleLLMNewToken: (token: string) => {
          console.log(token);
          acc += token;
        },
      },
    ],
  });
  expect(response.content).toEqual(acc);
});

test("Test ChatModel uses callbacks with a cache", async () => {
  const model = new FakeChatModel({
    cache: true,
  });
  let acc = "";
  const response = await model.invoke("Hello there!");
  // Second, cache-served call should still emit token callbacks.
  const response2 = await model.invoke("Hello there!", {
    callbacks: [
      {
        handleLLMNewToken: (token: string) => {
          console.log(token);
          acc += token;
        },
      },
    ],
  });
  // If callbacks are backgrounded
  await new Promise((resolve) => setTimeout(resolve, 1000));
  expect(response.content).toEqual(response2.content);
  expect(response2.content).toEqual(acc);
});

test("Test ChatModel legacy params withStructuredOutput", async () => {
  const model = new FakeListChatModel({
    responses: [`{ "test": true, "nested": { "somethingelse": "somevalue" } }`],
  }).withStructuredOutput({
    includeRaw: false,
    schema: z.object({
      test: z.boolean(),
      nested: z.object({
        somethingelse: z.string(),
      }),
    }),
  });
  const response = await model.invoke("Hello there!");
  // @ts-expect-error not in run output type
  console.log(response.notthere);
  console.log(response.nested.somethingelse);
  expect(response).toEqual({
    test: true,
    nested: { somethingelse: "somevalue" },
  });
});

// test("Test ChatModel legacy params includeRaw withStructuredOutput", async () => {
//   const model = new FakeListChatModel({
//     responses: [`{ "test": true, "nested": { "somethingelse": "somevalue" } }`],
//   }).withStructuredOutput({
//     includeRaw: true,
//     schema: z.object({
//       test: z.boolean(),
//       nested: z.object({
//         somethingelse: z.string(),
//       }),
//     }),
//   });
//   const response = await model.invoke("Hello there!");
//   // @ts-expect-error legacy
//   console.log(response.nested);
//   console.log(response.parsed.nested);
// });

test("Test ChatModel withStructuredOutput with supplied type arg", async () => {
  const model = new FakeListChatModel({
    responses: [`{ "test": true, "nested": { "somethingelse": "somevalue" } }`],
  }).withStructuredOutput<{ forcedArg: number }>({
    includeRaw: false,
    schema: z.object({
      test: z.boolean(),
      nested: z.object({
        somethingelse: z.string(),
      }),
    }),
  });
  const response = await model.invoke("Hello there!");
  // @ts-expect-error run output type forced to something else
  console.log(response.nested.somethingelse);
  // No error here
  console.log(response.forcedArg);
  expect(response).toEqual({
    test: true,
    nested: { somethingelse: "somevalue" },
  });
});

test("Test ChatModel withStructuredOutput new syntax", async () => {
  const model = new FakeListChatModel({
    responses: [`{ "test": true, "nested": { "somethingelse": "somevalue" } }`],
  }).withStructuredOutput<{ forcedArg: number }>(
    z.object({
      test: z.boolean(),
      nested: z.object({
        somethingelse: z.string(),
      }),
    })
  );
  const response = await model.invoke("Hello there!");
  // @ts-expect-error run output type forced to something else
  console.log(response.nested.somethingelse);
  // No error here
  console.log(response.forcedArg);
  expect(response).toEqual({
    test: true,
    nested: { somethingelse: "somevalue" },
  });
});

test("Test ChatModel withStructuredOutput new syntax and JSON schema", async () => {
  const model = new FakeListChatModel({
    responses: [`{ "test": true, "nested": { "somethingelse": "somevalue" } }`],
  }).withStructuredOutput(
    zodToJsonSchema(
      z.object({
        test: z.boolean(),
        nested: z.object({
          somethingelse: z.string(),
        }),
      })
    )
  );
  const response = await model.invoke("Hello there!");
  // No error here
  console.log(response.nested.somethingelse);
  // Also no error here
  console.log(response.forcedArg);
  expect(response).toEqual({
    test: true,
    nested: { somethingelse: "somevalue" },
  });
});

test("Test ChatModel withStructuredOutput new syntax and includeRaw", async () => {
  const model = new FakeListChatModel({
    responses: [`{ "test": true, "nested": { "somethingelse": "somevalue" } }`],
  }).withStructuredOutput(
    z.object({
      test: z.boolean(),
      nested: z.object({
        somethingelse: z.string(),
      }),
    }),
    { includeRaw: true }
  );
  const response = await model.invoke("Hello there!");
  // @ts-expect-error run output includes raw
  console.log(response.nested.somethingelse);
  // No error
  console.log(response.parsed);
});

test("Test ChatModel can cache complex messages", async () => {
  const model = new FakeChatModel({
    cache: true,
  });
  if (!model.cache) {
    throw new Error("Cache not enabled");
  }
  const contentToCache = [
    {
      type: "text",
      text: "Hello there!",
    },
  ];
  const humanMessage = new HumanMessage({
    content: contentToCache,
  });
  const prompt = getBufferString([humanMessage]);
  const llmKey = model._getSerializedCacheKeyParametersForCall({});

  // Invoke model to trigger cache update
  await model.invoke([humanMessage]);

  const value = await model.cache.lookup(prompt, llmKey);
  expect(value).toBeDefined();
  if (!value) return;

  expect(value[0].text).toEqual(JSON.stringify(contentToCache, null, 2));
  expect("message" in value[0]).toBeTruthy();
  if (!("message" in value[0])) return;
  const cachedMsg = value[0].message as AIMessage;
  expect(cachedMsg.content).toEqual(JSON.stringify(contentToCache, null, 2));
});

test("Test ChatModel can emit a custom event", async () => {
  const model = new FakeListChatModel({
    responses: ["hi"],
    emitCustomEvent: true,
  });
  let customEvent;
  const response = await model.invoke([["human", "Hello there!"]], {
    callbacks: [
      {
        handleCustomEvent(_, data) {
          customEvent = data;
        },
      },
    ],
  });
  // Give possibly-backgrounded callbacks time to run.
  await new Promise((resolve) => setTimeout(resolve, 100));
  expect(response.content).toEqual("hi");
  expect(customEvent).toBeDefined();
});

test("Test ChatModel can stream back a custom event", async () => {
  const model = new FakeListChatModel({
    responses: ["hi"],
    emitCustomEvent: true,
  });
  let customEvent;
  const eventStream = await model.streamEvents([["human", "Hello there!"]], {
    version: "v2",
  });
  for await (const event of eventStream) {
    if (event.event === "on_custom_event") {
      customEvent = event;
    }
  }
  expect(customEvent).toBeDefined();
});

test(`Test ChatModel should not serialize a passed "cache" parameter`, async () => {
  const model = new FakeListChatModel({
    responses: ["hi"],
    emitCustomEvent: true,
    cache: true,
  });
  console.log(JSON.stringify(model));
  // The serialized kwargs must not contain "cache".
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","fake-list","FakeListChatModel"],"kwargs":{"responses":["hi"],"emit_custom_event":true}}`
  );
});
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/retrievers/index.ts
import {
  BaseCallbackConfig,
  CallbackManager,
  CallbackManagerForRetrieverRun,
  Callbacks,
  parseCallbackConfigArg,
} from "../callbacks/manager.js";
import type { DocumentInterface } from "../documents/document.js";
import { Runnable, type RunnableInterface } from "../runnables/base.js";
import { RunnableConfig, ensureConfig } from "../runnables/config.js";

/**
 * Input configuration options for initializing a retriever that extends
 * the `BaseRetriever` class. This interface provides base properties
 * common to all retrievers, allowing customization of callback functions,
 * tagging, metadata, and logging verbosity.
 *
 * Fields:
 * - `callbacks` (optional): An array of callback functions that handle various
 *   events during retrieval, such as logging, error handling, or progress updates.
 *
 * - `tags` (optional): An array of strings used to add contextual tags to
 *   retrieval operations, allowing for easier categorization and tracking.
 *
 * - `metadata` (optional): A record of key-value pairs to store additional
 *   contextual information for retrieval operations, which can be useful
 *   for logging or auditing purposes.
 *
 * - `verbose` (optional): A boolean flag that, if set to `true`, enables
 *   detailed logging and output during the retrieval process. Defaults to `false`.
 */
export interface BaseRetrieverInput {
  callbacks?: Callbacks;
  tags?: string[];
  metadata?: Record<string, unknown>;
  verbose?: boolean;
}

/**
 * Interface for a base retriever that defines core functionality for
 * retrieving relevant documents from a source based on a query.
 *
 * The `BaseRetrieverInterface` standardizes the `getRelevantDocuments` method,
 * enabling retrieval of documents that match the query criteria.
 *
 * @template Metadata - The type of metadata associated with each document,
 * defaulting to `Record<string, any>`.
 */
export interface BaseRetrieverInterface<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Metadata extends Record<string, any> = Record<string, any>
> extends RunnableInterface<string, DocumentInterface<Metadata>[]> {
  /**
   * Retrieves documents relevant to a given query, allowing optional
   * configurations for customization.
   *
   * @param query - A string representing the query to search for relevant documents.
   * @param config - (optional) Configuration options for the retrieval process,
   * which may include callbacks and additional context settings.
   * @returns A promise that resolves to an array of `DocumentInterface` instances,
   * each containing metadata specified by the `Metadata` type parameter.
   */
  getRelevantDocuments(
    query: string,
    config?: Callbacks | BaseCallbackConfig
  ): Promise<DocumentInterface<Metadata>[]>;
}

/**
 * Abstract base class for a document retrieval system, designed to
 * process string queries and return the most relevant documents from a source.
 *
 * `BaseRetriever` provides common properties and methods for derived retrievers,
 * such as callbacks, tagging, and verbose logging. Custom retrieval systems
 * should extend this class and implement `_getRelevantDocuments` to define
 * the specific retrieval logic.
 *
 * @template Metadata - The type of metadata associated with each document,
 * defaulting to `Record<string, any>`.
 */
export abstract class BaseRetriever<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    Metadata extends Record<string, any> = Record<string, any>
  >
  extends Runnable<string, DocumentInterface<Metadata>[]>
  implements BaseRetrieverInterface
{
  /**
   * Optional callbacks to handle various events in the retrieval process.
   */
  callbacks?: Callbacks;

  /**
   * Tags to label or categorize the retrieval operation.
   */
  tags?: string[];

  /**
   * Metadata to provide additional context or information about the retrieval
   * operation.
   */
  metadata?: Record<string, unknown>;

  /**
   * If set to `true`, enables verbose logging for the retrieval process.
   */
  verbose?: boolean;

  /**
   * Constructs a new `BaseRetriever` instance with optional configuration fields.
   *
   * @param fields - Optional input configuration that can include `callbacks`,
   * `tags`, `metadata`, and `verbose` settings for custom retriever behavior.
   */
  constructor(fields?: BaseRetrieverInput) {
    super(fields);
    this.callbacks = fields?.callbacks;
    this.tags = fields?.tags ?? [];
    this.metadata = fields?.metadata ?? {};
    this.verbose = fields?.verbose ?? false;
  }

  /**
   * TODO: This should be an abstract method, but we'd like to avoid breaking
   * changes to people currently using subclassed custom retrievers.
   * Change it on next major release.
   */
  /**
   * Placeholder method for retrieving relevant documents based on a query.
   *
   * This method is intended to be implemented by subclasses and will be
   * converted to an abstract method in the next major release. Currently, it
   * throws an error if not implemented, ensuring that custom retrievers define
   * the specific retrieval logic.
   *
   * @param _query - The query string used to search for relevant documents.
   * @param _callbacks - (optional) Callback manager for managing callbacks
   * during retrieval.
   * @returns A promise resolving to an array of `DocumentInterface` instances relevant to the query.
   * @throws {Error} Throws an error indicating the method is not implemented.
   */
  _getRelevantDocuments(
    _query: string,
    _callbacks?: CallbackManagerForRetrieverRun
  ): Promise<DocumentInterface<Metadata>[]> {
    throw new Error("Not implemented!");
  }

  /**
   * Executes a retrieval operation.
   *
   * @param input - The query string used to search for relevant documents.
   * @param options - (optional) Configuration options for the retrieval run,
   * which may include callbacks, tags, and metadata.
   * @returns A promise that resolves to an array of `DocumentInterface` instances
   * representing the most relevant documents to the query.
   */
  async invoke(
    input: string,
    options?: RunnableConfig
  ): Promise<DocumentInterface<Metadata>[]> {
    return this.getRelevantDocuments(input, ensureConfig(options));
  }

  /**
   * @deprecated Use .invoke() instead. Will be removed in 0.3.0.
   *
   * Main method used to retrieve relevant documents. It takes a query
   * string and an optional configuration object, and returns a promise that
   * resolves to an array of `Document` objects. This method handles the
   * retrieval process, including starting and ending callbacks, and error
   * handling.
   * @param query The query string to retrieve relevant documents for.
   * @param config Optional configuration object for the retrieval process.
   * @returns A promise that resolves to an array of `Document` objects.
   */
  async getRelevantDocuments(
    query: string,
    config?: Callbacks | BaseCallbackConfig
  ): Promise<DocumentInterface<Metadata>[]> {
    const parsedConfig = ensureConfig(parseCallbackConfigArg(config));
    // Merge per-call callbacks/tags/metadata with the ones configured on
    // this retriever instance.
    const callbackManager_ = await CallbackManager.configure(
      parsedConfig.callbacks,
      this.callbacks,
      parsedConfig.tags,
      this.tags,
      parsedConfig.metadata,
      this.metadata,
      { verbose: this.verbose }
    );
    const runManager = await callbackManager_?.handleRetrieverStart(
      this.toJSON(),
      query,
      parsedConfig.runId,
      undefined,
      undefined,
      undefined,
      parsedConfig.runName
    );
    try {
      const results = await this._getRelevantDocuments(query, runManager);
      await runManager?.handleRetrieverEnd(results);
      return results;
    } catch (error) {
      // Report the failure to the run manager before rethrowing so tracing
      // records the error.
      await runManager?.handleRetrieverError(error);
      throw error;
    }
  }
}
0
lc_public_repos/langchainjs/langchain-core/src/retrievers
lc_public_repos/langchainjs/langchain-core/src/retrievers/document_compressors/base.ts
import { Callbacks } from "../../callbacks/manager.js"; import { DocumentInterface } from "../../documents/document.js"; /** * Base Document Compression class. All compressors should extend this class. */ export abstract class BaseDocumentCompressor { /** * Abstract method that must be implemented by any class that extends * `BaseDocumentCompressor`. This method takes an array of `Document` * objects and a query string as parameters and returns a Promise that * resolves with an array of compressed `Document` objects. * @param documents An array of `Document` objects to be compressed. * @param query A query string. * @returns A Promise that resolves with an array of compressed `Document` objects. */ abstract compressDocuments( documents: DocumentInterface[], query: string, callbacks?: Callbacks ): Promise<DocumentInterface[]>; // eslint-disable-next-line @typescript-eslint/no-explicit-any static isBaseDocumentCompressor(x: any): x is BaseDocumentCompressor { return x?.compressDocuments !== undefined; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/chunk_array.ts
export const chunkArray = <T>(arr: T[], chunkSize: number) => arr.reduce((chunks, elem, index) => { const chunkIndex = Math.floor(index / chunkSize); const chunk = chunks[chunkIndex] || []; // eslint-disable-next-line no-param-reassign chunks[chunkIndex] = chunk.concat([elem]); return chunks; }, [] as T[][]);
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/tiktoken.ts
import {
  Tiktoken,
  TiktokenEncoding,
  TiktokenModel,
  getEncodingNameForModel,
} from "js-tiktoken/lite";
import { AsyncCaller } from "./async_caller.js";

// Module-level cache of encoding name -> (pending or resolved) Tiktoken
// instance. Caching the *promise* rather than the resolved value
// deduplicates concurrent requests for the same encoding while the
// download is still in flight.
const cache: Record<string, Promise<Tiktoken>> = {};

// Shared caller providing retry/concurrency handling for the rank-file
// download; the /* #__PURE__ */ annotation lets bundlers drop it when
// this module is unused.
const caller = /* #__PURE__ */ new AsyncCaller({});

/**
 * Fetch (and cache) the BPE rank data for a tiktoken encoding and wrap it
 * in a `Tiktoken` tokenizer.
 *
 * Performs a network request to tiktoken.pages.dev on first use per
 * encoding name.
 *
 * @param encoding - Name of the tiktoken encoding, e.g. "cl100k_base".
 * @returns A promise resolving to a ready-to-use `Tiktoken` instance.
 */
export async function getEncoding(encoding: TiktokenEncoding) {
  if (!(encoding in cache)) {
    cache[encoding] = caller
      .fetch(`https://tiktoken.pages.dev/js/${encoding}.json`)
      .then((res) => res.json())
      .then((data) => new Tiktoken(data))
      .catch((e) => {
        // Evict the failed promise so a later call can retry the download
        // instead of being stuck with a permanently rejected cache entry.
        delete cache[encoding];
        throw e;
      });
  }

  return await cache[encoding];
}

/**
 * Resolve the encoding used by the given OpenAI model name and return its
 * tokenizer via {@link getEncoding}.
 *
 * @param model - Model name, e.g. "gpt-4".
 */
export async function encodingForModel(model: TiktokenModel) {
  return getEncoding(getEncodingNameForModel(model));
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/async_caller.ts
import pRetry from "p-retry";
import PQueueMod from "p-queue";

// HTTP status codes for which retrying is pointless: the request itself is
// malformed, unauthorized, or otherwise deterministic, so repeating it would
// produce the same failure.
const STATUS_NO_RETRY = [
  400, // Bad Request
  401, // Unauthorized
  402, // Payment Required
  403, // Forbidden
  404, // Not Found
  405, // Method Not Allowed
  406, // Not Acceptable
  407, // Proxy Authentication Required
  409, // Conflict
];

// Default `onFailedAttempt` handler passed to `pRetry`. Throwing from this
// handler aborts the retry loop immediately; returning normally lets
// `pRetry` schedule another attempt.
// NOTE(review): this assumes `error.message` is always a string — an error
// object without a `message` would itself throw a TypeError here; confirm
// callers only surface Error instances.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const defaultFailedAttemptHandler = (error: any) => {
  // User-initiated cancellations/aborts should never be retried.
  if (
    error.message.startsWith("Cancel") ||
    error.message.startsWith("AbortError") ||
    error.name === "AbortError"
  ) {
    throw error;
  }
  // Connection aborted by the client library (e.g. axios timeout).
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  if ((error as any)?.code === "ECONNABORTED") {
    throw error;
  }
  // Pull a status code off either an axios-style `response` or a fetch-style
  // error object, and stop retrying for non-retryable statuses.
  const status =
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    (error as any)?.response?.status ?? (error as any)?.status;
  if (status && STATUS_NO_RETRY.includes(+status)) {
    throw error;
  }
  // OpenAI-style quota exhaustion: re-wrap with a recognizable name so
  // callers can special-case it.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  if ((error as any)?.error?.code === "insufficient_quota") {
    const err = new Error(error?.message);
    err.name = "InsufficientQuotaError";
    throw err;
  }
};

// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type FailedAttemptHandler = (error: any) => any;

export interface AsyncCallerParams {
  /**
   * The maximum number of concurrent calls that can be made.
   * Defaults to `Infinity`, which means no limit.
   */
  maxConcurrency?: number;
  /**
   * The maximum number of retries that can be made for a single call,
   * with an exponential backoff between each attempt. Defaults to 6.
   */
  maxRetries?: number;
  /**
   * Custom handler to handle failed attempts. Takes the originally thrown
   * error object as input, and should itself throw an error if the input
   * error is not retryable.
   */
  onFailedAttempt?: FailedAttemptHandler;
}

export interface AsyncCallerCallOptions {
  /** Optional signal used to abort waiting on the call (see `callWithOptions`). */
  signal?: AbortSignal;
}

/**
 * A class that can be used to make async calls with concurrency and retry logic.
 *
 * This is useful for making calls to any kind of "expensive" external resource,
 * be it because it's rate-limited, subject to network issues, etc.
 *
 * Concurrent calls are limited by the `maxConcurrency` parameter, which defaults
 * to `Infinity`. This means that by default, all calls will be made in parallel.
 *
 * Retries are limited by the `maxRetries` parameter, which defaults to 6. This
 * means that by default, each call will be retried up to 6 times, with an
 * exponential backoff between each attempt.
 */
export class AsyncCaller {
  protected maxConcurrency: AsyncCallerParams["maxConcurrency"];

  protected maxRetries: AsyncCallerParams["maxRetries"];

  protected onFailedAttempt: AsyncCallerParams["onFailedAttempt"];

  // Queue instance enforcing `maxConcurrency` across all calls.
  private queue: typeof import("p-queue")["default"]["prototype"];

  constructor(params: AsyncCallerParams) {
    this.maxConcurrency = params.maxConcurrency ?? Infinity;
    this.maxRetries = params.maxRetries ?? 6;
    this.onFailedAttempt =
      params.onFailedAttempt ?? defaultFailedAttemptHandler;
    // CJS/ESM interop: p-queue may be exposed either directly or under a
    // `default` property depending on how this package was bundled.
    const PQueue = "default" in PQueueMod ? PQueueMod.default : PQueueMod;
    this.queue = new PQueue({ concurrency: this.maxConcurrency });
  }

  /**
   * Run `callable(...args)` through the concurrency queue with retry logic.
   * Non-Error rejections are wrapped in `Error` so pRetry handles them
   * uniformly.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  call<A extends any[], T extends (...args: A) => Promise<any>>(
    callable: T,
    ...args: Parameters<T>
  ): Promise<Awaited<ReturnType<T>>> {
    return this.queue.add(
      () =>
        pRetry(
          () =>
            callable(...args).catch((error) => {
              // eslint-disable-next-line no-instanceof/no-instanceof
              if (error instanceof Error) {
                throw error;
              } else {
                throw new Error(error);
              }
            }),
          {
            onFailedAttempt: this.onFailedAttempt,
            retries: this.maxRetries,
            randomize: true,
            // If needed we can change some of the defaults here,
            // but they're quite sensible.
          }
        ),
      { throwOnTimeout: true }
    );
  }

  /**
   * Like `call`, but when `options.signal` is provided, races the queued call
   * against the signal and rejects with "AbortError" if the signal fires
   * first.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  callWithOptions<A extends any[], T extends (...args: A) => Promise<any>>(
    options: AsyncCallerCallOptions,
    callable: T,
    ...args: Parameters<T>
  ): Promise<Awaited<ReturnType<T>>> {
    // Note this doesn't cancel the underlying request,
    // when available prefer to use the signal option of the underlying call
    if (options.signal) {
      return Promise.race([
        this.call<A, T>(callable, ...args),
        new Promise<never>((_, reject) => {
          options.signal?.addEventListener("abort", () => {
            reject(new Error("AbortError"));
          });
        }),
      ]);
    }
    return this.call<A, T>(callable, ...args);
  }

  /**
   * `fetch` wrapper routed through the queue/retry machinery. Non-2xx
   * responses are converted into rejections so they trigger the retry logic.
   */
  fetch(...args: Parameters<typeof fetch>): ReturnType<typeof fetch> {
    return this.call(() =>
      fetch(...args).then((res) => (res.ok ? res : Promise.reject(res)))
    );
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/event_source_parse.ts
/* eslint-disable prefer-template */
/* eslint-disable default-case */
/* eslint-disable no-plusplus */
// Adapted from https://github.com/gfortaine/fetch-event-source/blob/main/src/parse.ts
// due to a packaging issue in the original.
// MIT License
import { IterableReadableStream } from "./stream.js";

export const EventStreamContentType = "text/event-stream";

/**
 * Represents a message sent in an event stream
 * https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format
 */
export interface EventSourceMessage {
  /** The event ID to set the EventSource object's last event ID value. */
  id: string;
  /** A string identifying the type of event described. */
  event: string;
  /** The event data */
  data: string;
  /** The reconnection interval (in milliseconds) to wait before retrying the connection */
  retry?: number;
}

/**
 * Converts a ReadableStream into a callback pattern.
 * @param stream The input ReadableStream.
 * @param onChunk A function that will be called on each new byte chunk in the stream.
 *   The final invocation passes an empty array with `flush === true`.
 * @returns {Promise<void>} A promise that will be resolved when the stream closes.
 */
export async function getBytes(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  stream: ReadableStream<Uint8Array> | AsyncIterable<any>,
  onChunk: (arr: Uint8Array, flush?: boolean) => void
) {
  // TODO: Use Async iteration for both cases?
  // eslint-disable-next-line no-instanceof/no-instanceof
  if (stream instanceof ReadableStream) {
    const reader = stream.getReader();
    // CHANGED: Introduced a "flush" mechanism to process potential pending messages when the stream ends.
    // This change is essential to ensure that we capture every last piece of information from streams,
    // such as those from Azure OpenAI, which may not terminate with a blank line. Without this
    // mechanism, we risk ignoring a possibly significant last message.
    // See https://github.com/langchain-ai/langchainjs/issues/1299 for details.
    // eslint-disable-next-line no-constant-condition
    while (true) {
      const result = await reader.read();
      if (result.done) {
        onChunk(new Uint8Array(), true);
        break;
      }
      onChunk(result.value);
    }
  } else {
    try {
      // Handle Node.js Readable streams with async iteration
      for await (const chunk of stream) {
        onChunk(new Uint8Array(chunk));
      }
      onChunk(new Uint8Array(), true);
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (e: any) {
      throw new Error(
        [
          "Parsing event source stream failed.",
          "Ensure your implementation of fetch returns a web or Node readable stream.",
          `Error: ${e.message}`,
        ].join("\n")
      );
    }
  }
}

// Byte values of the control characters the line parser cares about.
const enum ControlChars {
  NewLine = 10,
  CarriageReturn = 13,
  Space = 32,
  Colon = 58,
}

/**
 * Parses arbitary byte chunks into EventSource line buffers.
 * Each line should be of the format "field: value" and ends with \r, \n, or \r\n.
 * @param onLine A function that will be called on each new EventSource line.
 * @returns A function that should be called for each incoming byte chunk.
 */
export function getLines(
  onLine: (line: Uint8Array, fieldLength: number, flush?: boolean) => void
) {
  let buffer: Uint8Array | undefined;
  let position: number; // current read position
  let fieldLength: number; // length of the `field` portion of the line
  let discardTrailingNewline = false;

  // return a function that can process each incoming byte chunk:
  return function onChunk(arr: Uint8Array, flush?: boolean) {
    if (flush) {
      // propagate the end-of-stream flush straight through to the
      // message layer so a final unterminated message is not lost:
      onLine(arr, 0, true);
      return;
    }
    if (buffer === undefined) {
      buffer = arr;
      position = 0;
      fieldLength = -1;
    } else {
      // we're still parsing the old line. Append the new bytes into buffer:
      buffer = concat(buffer, arr);
    }

    const bufLength = buffer.length;
    let lineStart = 0; // index where the current line starts
    while (position < bufLength) {
      if (discardTrailingNewline) {
        // a \r ended the previous line; swallow the \n of a \r\n pair:
        if (buffer[position] === ControlChars.NewLine) {
          lineStart = ++position; // skip to next char
        }
        discardTrailingNewline = false;
      }

      // start looking forward till the end of line:
      let lineEnd = -1; // index of the \r or \n char
      for (; position < bufLength && lineEnd === -1; ++position) {
        switch (buffer[position]) {
          case ControlChars.Colon:
            if (fieldLength === -1) {
              // first colon in line
              fieldLength = position - lineStart;
            }
            break;
          // eslint-disable-next-line @typescript-eslint/ban-ts-comment
          // @ts-ignore:7029 \r case below should fallthrough to \n:
          case ControlChars.CarriageReturn:
            discardTrailingNewline = true;
          // eslint-disable-next-line no-fallthrough
          case ControlChars.NewLine:
            lineEnd = position;
            break;
        }
      }

      if (lineEnd === -1) {
        // We reached the end of the buffer but the line hasn't ended.
        // Wait for the next arr and then continue parsing:
        break;
      }

      // we've reached the line end, send it out:
      onLine(buffer.subarray(lineStart, lineEnd), fieldLength);
      lineStart = position; // we're now on the next line
      fieldLength = -1;
    }

    if (lineStart === bufLength) {
      buffer = undefined; // we've finished reading it
    } else if (lineStart !== 0) {
      // Create a new view into buffer beginning at lineStart so we don't
      // need to copy over the previous lines when we get the new arr:
      buffer = buffer.subarray(lineStart);
      position -= lineStart;
    }
  };
}

/**
 * Parses line buffers into EventSourceMessages.
 * @param onMessage A function that will be called on each complete message.
 * @param onId A function that will be called on each `id` field.
 * @param onRetry A function that will be called on each `retry` field.
 * @returns A function that should be called for each incoming line buffer.
 */
export function getMessages(
  onMessage?: (msg: EventSourceMessage) => void,
  onId?: (id: string) => void,
  onRetry?: (retry: number) => void
) {
  let message = newMessage();
  const decoder = new TextDecoder();

  // return a function that can process each incoming line buffer:
  return function onLine(
    line: Uint8Array,
    fieldLength: number,
    flush?: boolean
  ) {
    if (flush) {
      // end of stream: emit any partially-accumulated message:
      if (!isEmpty(message)) {
        onMessage?.(message);
        message = newMessage();
      }
      return;
    }
    if (line.length === 0) {
      // empty line denotes end of message. Trigger the callback and start a new message:
      onMessage?.(message);
      message = newMessage();
    } else if (fieldLength > 0) {
      // exclude comments and lines with no values
      // line is of format "<field>:<value>" or "<field>: <value>"
      // https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation
      const field = decoder.decode(line.subarray(0, fieldLength));
      const valueOffset =
        fieldLength + (line[fieldLength + 1] === ControlChars.Space ? 2 : 1);
      const value = decoder.decode(line.subarray(valueOffset));

      switch (field) {
        case "data":
          // if this message already has data, append the new value to the old.
          // otherwise, just set to the new value:
          message.data = message.data ? message.data + "\n" + value : value;
          break;
        case "event":
          message.event = value;
          break;
        case "id":
          onId?.((message.id = value));
          break;
        case "retry": {
          const retry = parseInt(value, 10);
          if (!Number.isNaN(retry)) {
            // per spec, ignore non-integers
            onRetry?.((message.retry = retry));
          }
          break;
        }
      }
    }
  };
}

// Concatenate two byte arrays into a newly-allocated one.
function concat(a: Uint8Array, b: Uint8Array) {
  const res = new Uint8Array(a.length + b.length);
  res.set(a);
  res.set(b, a.length);
  return res;
}

function newMessage(): EventSourceMessage {
  // data, event, and id must be initialized to empty strings:
  // https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation
  // retry should be initialized to undefined so we return a consistent shape
  // to the js engine all the time: https://mathiasbynens.be/notes/shapes-ics#takeaways
  return {
    data: "",
    event: "",
    id: "",
    retry: undefined,
  };
}

/**
 * Wrap an SSE byte stream as an iterable stream of the `data` payloads of its
 * messages. `error` events are turned into thrown errors; `metadata` events
 * are routed to the optional `onMetadataEvent` callback instead of the output.
 */
export function convertEventStreamToIterableReadableDataStream(
  stream: ReadableStream,
  onMetadataEvent?: (e: unknown) => unknown
) {
  const dataStream = new ReadableStream({
    async start(controller) {
      const enqueueLine = getMessages((msg) => {
        if (msg.event === "error") {
          throw new Error(msg.data ?? "Unspecified event streaming error.");
        } else if (msg.event === "metadata") {
          onMetadataEvent?.(msg);
        } else {
          if (msg.data) controller.enqueue(msg.data);
        }
      });
      const onLine = (
        line: Uint8Array,
        fieldLength: number,
        flush?: boolean
      ) => {
        enqueueLine(line, fieldLength, flush);
        if (flush) controller.close();
      };
      await getBytes(stream, getLines(onLine));
    },
  });
  return IterableReadableStream.fromReadableStream(dataStream);
}

// True when no field of the message has been populated yet.
function isEmpty(message: EventSourceMessage): boolean {
  return (
    message.data === "" &&
    message.event === "" &&
    message.id === "" &&
    message.retry === undefined
  );
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/signal.ts
/**
 * Race a promise against an optional `AbortSignal`.
 *
 * Resolves/rejects with the promise's own outcome, unless the signal fires
 * first, in which case the returned promise rejects with `Error("Aborted")`.
 * The abort listener is always removed afterwards so the signal does not
 * accumulate listeners across repeated calls.
 *
 * @param promise - The operation to await.
 * @param signal - Optional abort signal; when omitted, the promise is
 *   returned as-is with no extra machinery.
 */
export async function raceWithSignal<T>(
  promise: Promise<T>,
  signal?: AbortSignal
): Promise<T> {
  if (signal === undefined) {
    return promise;
  }
  // Assigned synchronously inside the Promise executor below, so it is
  // always defined by the time the `.finally` cleanup runs.
  let listener: () => void;
  return Promise.race([
    promise.catch<T>((err) => {
      if (!signal?.aborted) {
        throw err;
      } else {
        // The race has already been lost to the abort rejection; swallow the
        // original error to avoid an unhandled rejection.
        return undefined as T;
      }
    }),
    new Promise<never>((_, reject) => {
      listener = () => {
        reject(new Error("Aborted"));
      };
      signal.addEventListener("abort", listener);
      // Must be here inside the promise to avoid a race condition
      // (the signal may have aborted before the listener was attached).
      if (signal.aborted) {
        reject(new Error("Aborted"));
      }
    }),
  ]).finally(() => signal.removeEventListener("abort", listener));
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/env.ts
// Inlined from https://github.com/flexdinesh/browser-or-node declare global { const Deno: | { version: { deno: string; }; env: { get: (name: string) => string | undefined; }; } | undefined; } export const isBrowser = () => typeof window !== "undefined" && typeof window.document !== "undefined"; export const isWebWorker = () => typeof globalThis === "object" && globalThis.constructor && globalThis.constructor.name === "DedicatedWorkerGlobalScope"; export const isJsDom = () => (typeof window !== "undefined" && window.name === "nodejs") || (typeof navigator !== "undefined" && (navigator.userAgent.includes("Node.js") || navigator.userAgent.includes("jsdom"))); // Supabase Edge Function provides a `Deno` global object // without `version` property export const isDeno = () => typeof Deno !== "undefined"; // Mark not-as-node if in Supabase Edge Function export const isNode = () => typeof process !== "undefined" && typeof process.versions !== "undefined" && typeof process.versions.node !== "undefined" && !isDeno(); export const getEnv = () => { let env: string; if (isBrowser()) { env = "browser"; } else if (isNode()) { env = "node"; } else if (isWebWorker()) { env = "webworker"; } else if (isJsDom()) { env = "jsdom"; } else if (isDeno()) { env = "deno"; } else { env = "other"; } return env; }; export type RuntimeEnvironment = { library: string; libraryVersion?: string; runtime: string; runtimeVersion?: string; }; let runtimeEnvironment: RuntimeEnvironment | undefined; export async function getRuntimeEnvironment(): Promise<RuntimeEnvironment> { if (runtimeEnvironment === undefined) { const env = getEnv(); runtimeEnvironment = { library: "langchain-js", runtime: env, }; } return runtimeEnvironment; } export function getEnvironmentVariable(name: string): string | undefined { // Certain Deno setups will throw an error if you try to access environment variables // https://github.com/langchain-ai/langchainjs/issues/1412 try { if (typeof process !== "undefined") { // 
eslint-disable-next-line no-process-env return process.env?.[name]; } else if (isDeno()) { return Deno?.env.get(name); } else { return undefined; } } catch (e) { return undefined; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/json_patch.ts
export { compare, type Operation, applyPatch, } from "./fast-json-patch/index.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/hash.ts
export { insecureHash } from "./js-sha1/hash.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/json_schema.ts
export { deepCompareStrict, Validator } from "./@cfworker/json-schema/index.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/math.ts
import { cosine } from "./ml-distance/similarities.js";
import { innerProduct as innerProductDistance } from "./ml-distance/distances.js";
import { euclidean } from "./ml-distance-euclidean/euclidean.js";

// A pairwise metric over two equal-length vectors (similarity or distance).
type VectorFunction = (xVector: number[], yVector: number[]) => number;

/**
 * Apply a row-wise function between two matrices with the same number of columns.
 *
 * @param {number[][]} X - The first matrix.
 * @param {number[][]} Y - The second matrix.
 * @param {VectorFunction} func - The function to apply.
 *
 * @throws {Error} If the number of columns in X and Y are not the same.
 *
 * @returns {number[][] | [[]]} A matrix where each row represents the result of applying the function between the corresponding rows of X and Y. NaN results are replaced with 0.
 */
export function matrixFunc(
  X: number[][],
  Y: number[][],
  func: VectorFunction
): number[][] {
  if (
    X.length === 0 ||
    X[0].length === 0 ||
    Y.length === 0 ||
    Y[0].length === 0
  ) {
    return [[]];
  }
  if (X[0].length !== Y[0].length) {
    throw new Error(
      `Number of columns in X and Y must be the same. X has shape ${[
        X.length,
        X[0].length,
      ]} and Y has shape ${[Y.length, Y[0].length]}.`
    );
  }
  return X.map((xVector) =>
    Y.map((yVector) => func(xVector, yVector)).map((similarity) =>
      // NaN (e.g. from a zero vector) is coerced to 0 so downstream
      // comparisons behave sanely.
      Number.isNaN(similarity) ? 0 : similarity
    )
  );
}

/**
 * Scale every entry of `M` by the matrix-wide maximum value. When
 * `similarity` is true, the scale is flipped (`1 - val/max`) so distance
 * values become similarity-like scores.
 * NOTE(review): if the maximum is 0 (empty or all-non-positive matrix, see
 * matrixMaxVal) this divides by zero and produces NaN/Infinity entries —
 * confirm callers only pass matrices with a positive maximum.
 */
export function normalize(M: number[][], similarity = false): number[][] {
  const max = matrixMaxVal(M);
  return M.map((row) =>
    row.map((val) => (similarity ? 1 - val / max : val / max))
  );
}

/**
 * This function calculates the row-wise cosine similarity between two matrices with the same number of columns.
 *
 * @param {number[][]} X - The first matrix.
 * @param {number[][]} Y - The second matrix.
 *
 * @throws {Error} If the number of columns in X and Y are not the same.
 *
 * @returns {number[][] | [[]]} A matrix where each row represents the cosine similarity values between the corresponding rows of X and Y.
 */
export function cosineSimilarity(X: number[][], Y: number[][]): number[][] {
  return matrixFunc(X, Y, cosine);
}

// Row-wise inner product between the rows of X and the rows of Y.
export function innerProduct(X: number[][], Y: number[][]): number[][] {
  return matrixFunc(X, Y, innerProductDistance);
}

// Row-wise Euclidean distance between the rows of X and the rows of Y.
export function euclideanDistance(X: number[][], Y: number[][]): number[][] {
  return matrixFunc(X, Y, euclidean);
}

/**
 * This function implements the Maximal Marginal Relevance algorithm
 * to select a set of embeddings that maximizes the diversity and relevance to a query embedding.
 *
 * @param {number[]|number[][]} queryEmbedding - The query embedding.
 * @param {number[][]} embeddingList - The list of embeddings to select from.
 * @param {number} [lambda=0.5] - The trade-off parameter between relevance and diversity.
 * @param {number} [k=4] - The maximum number of embeddings to select.
 *
 * @returns {number[]} The indexes of the selected embeddings in the embeddingList.
 */
export function maximalMarginalRelevance(
  queryEmbedding: number[] | number[][],
  embeddingList: number[][],
  lambda = 0.5,
  k = 4
): number[] {
  if (Math.min(k, embeddingList.length) <= 0) {
    return [];
  }

  // Accept either a single query vector or a one-row matrix.
  const queryEmbeddingExpanded = (
    Array.isArray(queryEmbedding[0]) ? queryEmbedding : [queryEmbedding]
  ) as number[][];

  const similarityToQuery = cosineSimilarity(
    queryEmbeddingExpanded,
    embeddingList
  )[0];
  const mostSimilarEmbeddingIndex = argMax(similarityToQuery).maxIndex;

  // Seed the selection with the single most query-relevant embedding.
  const selectedEmbeddings = [embeddingList[mostSimilarEmbeddingIndex]];
  const selectedEmbeddingsIndexes = [mostSimilarEmbeddingIndex];

  while (selectedEmbeddingsIndexes.length < Math.min(k, embeddingList.length)) {
    let bestScore = -Infinity;
    let bestIndex = -1;

    const similarityToSelected = cosineSimilarity(
      embeddingList,
      selectedEmbeddings
    );

    similarityToQuery.forEach((queryScore, queryScoreIndex) => {
      if (selectedEmbeddingsIndexes.includes(queryScoreIndex)) {
        return;
      }
      const maxSimilarityToSelected = Math.max(
        ...similarityToSelected[queryScoreIndex]
      );
      // MMR score: trade off relevance to the query against redundancy with
      // embeddings already selected.
      const score =
        lambda * queryScore - (1 - lambda) * maxSimilarityToSelected;

      if (score > bestScore) {
        bestScore = score;
        bestIndex = queryScoreIndex;
      }
    });
    selectedEmbeddings.push(embeddingList[bestIndex]);
    selectedEmbeddingsIndexes.push(bestIndex);
  }

  return selectedEmbeddingsIndexes;
}

type MaxInfo = {
  maxIndex: number;
  maxValue: number;
};

/**
 * Finds the index and value of the maximum entry in the given array.
 * @param {number[]} array - The input array.
 *
 * @returns {MaxInfo} The index and value of the maximum. If the array is
 *   empty, returns index -1 and value NaN.
 */
function argMax(array: number[]): MaxInfo {
  if (array.length === 0) {
    return {
      maxIndex: -1,
      maxValue: NaN,
    };
  }
  let maxValue = array[0];
  let maxIndex = 0;
  for (let i = 1; i < array.length; i += 1) {
    if (array[i] > maxValue) {
      maxIndex = i;
      maxValue = array[i];
    }
  }
  return { maxIndex, maxValue };
}

// Maximum entry across all rows of the matrix.
// NOTE(review): the reduce is seeded with 0, so an all-negative matrix
// reports 0 rather than its true (negative) maximum — confirm this floor is
// intended by normalize's callers.
function matrixMaxVal(arrays: number[][]): number {
  return arrays.reduce(
    (acc, array) => Math.max(acc, argMax(array).maxValue),
    0
  );
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/callbacks.ts
import { getEnvironmentVariable } from "./env.js";

/**
 * Decide whether LangSmith tracing is turned on.
 *
 * An explicit `tracingEnabled` argument always wins. Otherwise, tracing is
 * considered enabled when any of the supported environment variables is set
 * to the literal string "true".
 */
export const isTracingEnabled = (tracingEnabled?: boolean): boolean => {
  if (tracingEnabled !== undefined) {
    return tracingEnabled;
  }
  const flagNames = [
    "LANGSMITH_TRACING_V2",
    "LANGCHAIN_TRACING_V2",
    "LANGSMITH_TRACING",
    "LANGCHAIN_TRACING",
  ];
  return flagNames.some((name) => getEnvironmentVariable(name) === "true");
};
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/stream.ts
import { pickRunnableConfigKeys } from "../runnables/config.js";
import { AsyncLocalStorageProviderSingleton } from "../singletons/index.js";
import type { IterableReadableStreamInterface } from "../types/stream.js";
import { raceWithSignal } from "./signal.js";

// Re-exported for backwards compatibility
// Do NOT import this type from this file inside the project. Instead, always import from `types/stream.js`
export type { IterableReadableStreamInterface };

/*
 * Support async iterator syntax for ReadableStreams in all environments.
 * Source: https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490
 */
export class IterableReadableStream<T>
  extends ReadableStream<T>
  implements IterableReadableStreamInterface<T>
{
  // Lazily-acquired reader; once acquired it locks the stream.
  public reader: ReadableStreamDefaultReader<T>;

  // Acquire the reader on first use so construction doesn't lock the stream.
  ensureReader() {
    if (!this.reader) {
      this.reader = this.getReader();
    }
  }

  // Async-iterator protocol: read the next chunk, releasing the reader lock
  // when the stream closes or errors.
  async next(): Promise<IteratorResult<T>> {
    this.ensureReader();
    try {
      const result = await this.reader.read();
      if (result.done) {
        this.reader.releaseLock(); // release lock when stream becomes closed
        return {
          done: true,
          value: undefined,
        };
      } else {
        return {
          done: false,
          value: result.value,
        };
      }
    } catch (e) {
      this.reader.releaseLock(); // release lock when stream becomes errored
      throw e;
    }
  }

  // Early termination (e.g. `break` out of a for-await loop): cancel the
  // underlying stream and release the lock.
  async return(): Promise<IteratorResult<T>> {
    this.ensureReader();
    // If wrapped in a Node stream, cancel is already called.
    if (this.locked) {
      const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet
      this.reader.releaseLock(); // release lock first
      await cancelPromise; // now await it
    }
    return { done: true, value: undefined };
  }

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async throw(e: any): Promise<IteratorResult<T>> {
    this.ensureReader();
    if (this.locked) {
      const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet
      this.reader.releaseLock(); // release lock first
      await cancelPromise; // now await it
    }
    throw e;
  }

  [Symbol.asyncIterator]() {
    return this;
  }

  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-ignore Not present in Node 18 types, required in latest Node 22
  async [Symbol.asyncDispose]() {
    await this.return();
  }

  // Wrap an arbitrary ReadableStream, pumping it chunk-by-chunk into a new
  // iterable stream.
  static fromReadableStream<T>(stream: ReadableStream<T>) {
    // From https://developer.mozilla.org/en-US/docs/Web/API/Streams_API/Using_readable_streams#reading_the_stream
    const reader = stream.getReader();
    return new IterableReadableStream<T>({
      start(controller) {
        return pump();
        function pump(): Promise<T | undefined> {
          return reader.read().then(({ done, value }) => {
            // When no more data needs to be consumed, close the stream
            if (done) {
              controller.close();
              return;
            }
            // Enqueue the next data chunk into our target stream
            controller.enqueue(value);
            return pump();
          });
        }
      },
      cancel() {
        reader.releaseLock();
      },
    });
  }

  // Expose an async generator as an iterable stream, forwarding cancellation
  // to the generator's `return`.
  static fromAsyncGenerator<T>(generator: AsyncGenerator<T>) {
    return new IterableReadableStream<T>({
      async pull(controller) {
        const { value, done } = await generator.next();
        // When no more data needs to be consumed, close the stream
        if (done) {
          controller.close();
        }
        // Fix: `else if (value)` will hang the streaming when nullish value (e.g. empty string) is pulled
        controller.enqueue(value);
      },
      async cancel(reason) {
        await generator.return(reason);
      },
    });
  }
}

/**
 * Tee an async generator into `length` independent generators, each
 * receiving every result. Results are buffered per consumer so consumers
 * may advance at different rates.
 */
export function atee<T>(
  iter: AsyncGenerator<T>,
  length = 2
): AsyncGenerator<T>[] {
  const buffers = Array.from(
    { length },
    () => [] as Array<IteratorResult<T> | IteratorReturnResult<T>>
  );
  return buffers.map(async function* makeIter(buffer) {
    while (true) {
      if (buffer.length === 0) {
        // This consumer has drained its buffer: pull from the source and
        // fan the result out to every consumer's buffer (including ours).
        const result = await iter.next();
        for (const buffer of buffers) {
          buffer.push(result);
        }
      } else if (buffer[0].done) {
        return;
      } else {
        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
        yield buffer.shift()!.value;
      }
    }
  });
}

/**
 * Generic chunk-merging used when aggregating streamed output: concatenates
 * arrays/strings, adds numbers, delegates to a `concat` method when present,
 * and recursively merges plain objects key-by-key.
 */
export function concat<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  T extends Array<any> | string | number | Record<string, any> | any
>(first: T, second: T): T {
  if (Array.isArray(first) && Array.isArray(second)) {
    return first.concat(second) as T;
  } else if (typeof first === "string" && typeof second === "string") {
    return (first + second) as T;
  } else if (typeof first === "number" && typeof second === "number") {
    return (first + second) as T;
  } else if (
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    "concat" in (first as any) &&
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    typeof (first as any).concat === "function"
  ) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return (first as any).concat(second) as T;
  } else if (typeof first === "object" && typeof second === "object") {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const chunk = { ...first } as Record<string, any>;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    for (const [key, value] of Object.entries(second as Record<string, any>)) {
      if (key in chunk && !Array.isArray(chunk[key])) {
        chunk[key] = concat(chunk[key], value);
      } else {
        // Arrays and new keys are replaced wholesale rather than merged.
        chunk[key] = value;
      }
    }
    return chunk as T;
  } else {
    throw new Error(`Cannot concat ${typeof first} and ${typeof second}`);
  }
}

/**
 * Wraps an async generator, eagerly pulling its first value so that a
 * `startSetup` side effect can be sequenced after input becomes available,
 * and running every pull inside the stored runnable-config async-local
 * context. Supports abort via an `AbortSignal`.
 */
export class AsyncGeneratorWithSetup<
  S = unknown,
  T = unknown,
  TReturn = unknown,
  TNext = unknown
> implements AsyncGenerator<T, TReturn, TNext>
{
  private generator: AsyncGenerator<T>;

  public setup: Promise<S>;

  public config?: unknown;

  public signal?: AbortSignal;

  // The eagerly-pulled first value, handed out on the first `next()` call.
  private firstResult: Promise<IteratorResult<T>>;

  private firstResultUsed = false;

  constructor(params: {
    generator: AsyncGenerator<T>;
    startSetup?: () => Promise<S>;
    config?: unknown;
    signal?: AbortSignal;
  }) {
    this.generator = params.generator;
    this.config = params.config;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    this.signal = params.signal ?? (this.config as any)?.signal;
    // setup is a promise that resolves only after the first iterator value
    // is available. this is useful when setup of several piped generators
    // needs to happen in logical order, ie. in the order in which input to
    // to each generator is available.
    this.setup = new Promise((resolve, reject) => {
      void AsyncLocalStorageProviderSingleton.runWithConfig(
        pickRunnableConfigKeys(
          params.config as Record<string, unknown> | undefined
        ),
        async () => {
          this.firstResult = params.generator.next();
          if (params.startSetup) {
            this.firstResult.then(params.startSetup).then(resolve, reject);
          } else {
            this.firstResult.then((_result) => resolve(undefined as S), reject);
          }
        },
        true
      );
    });
  }

  async next(...args: [] | [TNext]): Promise<IteratorResult<T>> {
    this.signal?.throwIfAborted();
    if (!this.firstResultUsed) {
      this.firstResultUsed = true;
      return this.firstResult;
    }
    return AsyncLocalStorageProviderSingleton.runWithConfig(
      pickRunnableConfigKeys(
        this.config as Record<string, unknown> | undefined
      ),
      this.signal
        ? async () => {
            // Race the pull against the signal so an abort interrupts a
            // pending `next()` instead of waiting for the generator.
            return raceWithSignal(this.generator.next(...args), this.signal);
          }
        : async () => {
            return this.generator.next(...args);
          },
      true
    );
  }

  async return(
    value?: TReturn | PromiseLike<TReturn>
  ): Promise<IteratorResult<T>> {
    return this.generator.return(value);
  }

  async throw(e: Error): Promise<IteratorResult<T>> {
    return this.generator.throw(e);
  }

  [Symbol.asyncIterator]() {
    return this;
  }

  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-ignore Not present in Node 18 types, required in latest Node 22
  async [Symbol.asyncDispose]() {
    await this.return();
  }
}

/**
 * Pipe `generator` into the generator-producing function `to`, wrapping the
 * source with {@link AsyncGeneratorWithSetup} so `startSetup` runs only once
 * the first input value is available. Returns both the piped generator and
 * the resolved setup value.
 */
export async function pipeGeneratorWithSetup<
  S,
  A extends unknown[],
  T,
  TReturn,
  TNext,
  U,
  UReturn,
  UNext
>(
  to: (
    g: AsyncGenerator<T, TReturn, TNext>,
    s: S,
    ...args: A
  ) => AsyncGenerator<U, UReturn, UNext>,
  generator: AsyncGenerator<T, TReturn, TNext>,
  startSetup: () => Promise<S>,
  signal: AbortSignal | undefined,
  ...args: A
) {
  const gen = new AsyncGeneratorWithSetup({
    generator,
    startSetup,
    signal,
  });
  const setup = await gen.setup;
  return { output: to(gen, setup, ...args), setup };
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/json.ts
export function parseJsonMarkdown(s: string, parser = parsePartialJson) { // eslint-disable-next-line no-param-reassign s = s.trim(); const match = /```(json)?(.*)```/s.exec(s); if (!match) { return parser(s); } else { return parser(match[2]); } } // Adapted from https://github.com/KillianLucas/open-interpreter/blob/main/interpreter/core/llm/utils/parse_partial_json.py // MIT License export function parsePartialJson(s: string) { // If the input is undefined, return null to indicate failure. if (typeof s === "undefined") { return null; } // Attempt to parse the string as-is. try { return JSON.parse(s); } catch (error) { // Pass } // Initialize variables. let new_s = ""; const stack = []; let isInsideString = false; let escaped = false; // Process each character in the string one at a time. for (let char of s) { if (isInsideString) { if (char === '"' && !escaped) { isInsideString = false; } else if (char === "\n" && !escaped) { char = "\\n"; // Replace the newline character with the escape sequence. } else if (char === "\\") { escaped = !escaped; } else { escaped = false; } } else { if (char === '"') { isInsideString = true; escaped = false; } else if (char === "{") { stack.push("}"); } else if (char === "[") { stack.push("]"); } else if (char === "}" || char === "]") { if (stack && stack[stack.length - 1] === char) { stack.pop(); } else { // Mismatched closing character; the input is malformed. return null; } } } // Append the processed character to the new string. new_s += char; } // If we're still inside a string at the end of processing, // we need to close the string. if (isInsideString) { new_s += '"'; } // Close any remaining open structures in the reverse order that they were opened. for (let i = stack.length - 1; i >= 0; i -= 1) { new_s += stack[i]; } // Attempt to parse the modified string as JSON. try { return JSON.parse(new_s); } catch (error) { // If we still can't parse the string as JSON, return null to indicate failure. return null; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/utils/function_calling.ts
import { zodToJsonSchema } from "zod-to-json-schema"; import { StructuredToolInterface, StructuredToolParams, } from "../tools/index.js"; import { FunctionDefinition, ToolDefinition } from "../language_models/base.js"; import { Runnable, RunnableToolLike } from "../runnables/base.js"; import { isZodSchema } from "./types/is_zod_schema.js"; /** * Formats a `StructuredTool` or `RunnableToolLike` instance into a format * that is compatible with OpenAI function calling. It uses the `zodToJsonSchema` * function to convert the schema of the `StructuredTool` or `RunnableToolLike` * into a JSON schema, which is then used as the parameters for the OpenAI function. * * @param {StructuredToolInterface | RunnableToolLike} tool The tool to convert to an OpenAI function. * @returns {FunctionDefinition} The inputted tool in OpenAI function format. */ export function convertToOpenAIFunction( tool: StructuredToolInterface | RunnableToolLike | StructuredToolParams, fields?: | { /** * If `true`, model output is guaranteed to exactly match the JSON Schema * provided in the function definition. */ strict?: boolean; } | number ): FunctionDefinition { // @TODO 0.3.0 Remove the `number` typing const fieldsCopy = typeof fields === "number" ? undefined : fields; return { name: tool.name, description: tool.description, parameters: zodToJsonSchema(tool.schema), // Do not include the `strict` field if it is `undefined`. ...(fieldsCopy?.strict !== undefined ? { strict: fieldsCopy.strict } : {}), }; } /** * Formats a `StructuredTool` or `RunnableToolLike` instance into a * format that is compatible with OpenAI tool calling. It uses the * `zodToJsonSchema` function to convert the schema of the `StructuredTool` * or `RunnableToolLike` into a JSON schema, which is then used as the * parameters for the OpenAI tool. * * @param {StructuredToolInterface | Record<string, any> | RunnableToolLike} tool The tool to convert to an OpenAI tool. 
* @returns {ToolDefinition} The inputted tool in OpenAI tool format. */ export function convertToOpenAITool( // eslint-disable-next-line @typescript-eslint/no-explicit-any tool: StructuredToolInterface | Record<string, any> | RunnableToolLike, fields?: | { /** * If `true`, model output is guaranteed to exactly match the JSON Schema * provided in the function definition. */ strict?: boolean; } | number ): ToolDefinition { // @TODO 0.3.0 Remove the `number` typing const fieldsCopy = typeof fields === "number" ? undefined : fields; let toolDef: ToolDefinition | undefined; if (isLangChainTool(tool)) { toolDef = { type: "function", function: convertToOpenAIFunction(tool), }; } else { toolDef = tool as ToolDefinition; } if (fieldsCopy?.strict !== undefined) { // eslint-disable-next-line @typescript-eslint/no-explicit-any (toolDef.function as any).strict = fieldsCopy.strict; } return toolDef; } /** * Confirm whether the inputted tool is an instance of `StructuredToolInterface`. * * @param {StructuredToolInterface | Record<string, any> | undefined} tool The tool to check if it is an instance of `StructuredToolInterface`. * @returns {tool is StructuredToolInterface} Whether the inputted tool is an instance of `StructuredToolInterface`. */ export function isStructuredTool( // eslint-disable-next-line @typescript-eslint/no-explicit-any tool?: StructuredToolInterface | Record<string, any> ): tool is StructuredToolInterface { return ( tool !== undefined && Array.isArray((tool as StructuredToolInterface).lc_namespace) ); } /** * Confirm whether the inputted tool is an instance of `RunnableToolLike`. * * @param {unknown | undefined} tool The tool to check if it is an instance of `RunnableToolLike`. * @returns {tool is RunnableToolLike} Whether the inputted tool is an instance of `RunnableToolLike`. 
*/ export function isRunnableToolLike(tool?: unknown): tool is RunnableToolLike { return ( tool !== undefined && Runnable.isRunnable(tool) && "lc_name" in tool.constructor && typeof tool.constructor.lc_name === "function" && tool.constructor.lc_name() === "RunnableToolLike" ); } /** * Confirm whether or not the tool contains the necessary properties to be considered a `StructuredToolParams`. * * @param {unknown | undefined} tool The object to check if it is a `StructuredToolParams`. * @returns {tool is StructuredToolParams} Whether the inputted object is a `StructuredToolParams`. */ export function isStructuredToolParams( tool?: unknown ): tool is StructuredToolParams { return ( !!tool && typeof tool === "object" && "name" in tool && "schema" in tool && // eslint-disable-next-line @typescript-eslint/no-explicit-any isZodSchema(tool.schema as Record<string, any>) ); } /** * Whether or not the tool is one of StructuredTool, RunnableTool or StructuredToolParams. * It returns `is StructuredToolParams` since that is the most minimal interface of the three, * while still containing the necessary properties to be passed to a LLM for tool calling. * * @param {unknown | undefined} tool The tool to check if it is a LangChain tool. * @returns {tool is StructuredToolParams} Whether the inputted tool is a LangChain tool. */ export function isLangChainTool(tool?: unknown): tool is StructuredToolParams { return ( isStructuredToolParams(tool) || isRunnableToolLike(tool) || // eslint-disable-next-line @typescript-eslint/no-explicit-any isStructuredTool(tool as any) ); }
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/types/is_zod_schema.ts
import { type z } from "zod"; /** * Given either a Zod schema, or plain object, determine if the input is a Zod schema. * * @param {z.ZodType<RunOutput> | Record<string, any>} input * @returns {boolean} Whether or not the provided input is a Zod schema. */ export function isZodSchema< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( // eslint-disable-next-line @typescript-eslint/no-explicit-any input: z.ZodType<RunOutput> | Record<string, any> ): input is z.ZodType<RunOutput> { // Check for a characteristic method of Zod schemas return typeof (input as z.ZodType<RunOutput>)?.parse === "function"; }
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/types/index.ts
export * from "./is_zod_schema.js";

/**
 * Represents a string value with autocompleted, but not required, suggestions.
 */
export type StringWithAutocomplete<T> = T | (string & Record<never, never>);

/**
 * A string-keyed record of input values; values may be of any type.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type InputValues<K extends string = string> = Record<K, any>;

/**
 * A record of partially-applied values: each entry is either a string or a
 * (possibly async) zero-argument factory that produces one.
 */
export type PartialValues<K extends string = string> = Record<
  K,
  string | (() => Promise<string>) | (() => string)
>;

/**
 * A string-keyed record of arbitrary values.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type ChainValues = Record<string, any>;
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/LICENSE.md
MIT License Copyright (c) 2020 Jeremy Danyow Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/README.md
# @cfworker/json-schema ![](https://badgen.net/bundlephobia/minzip/@cfworker/json-schema) ![](https://badgen.net/bundlephobia/min/@cfworker/json-schema) ![](https://badgen.net/bundlephobia/dependency-count/@cfworker/json-schema) ![](https://badgen.net/bundlephobia/tree-shaking/@cfworker/json-schema) ![](https://badgen.net/npm/types/@cfworker/json-schema?icon=typescript) A JSON schema validator that will run on Cloudflare workers. Supports drafts 4, 7, 2019-09, and 2020-12. This library is validated against the [json-schema-test-suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite), a series of approximately 4,500 assertions maintained along with the json-schema specification. A small set of test cases are intentionally not supported due to performance constraints or lack of feature use. The list of unsupported features is maintained in [test/unsupported.ts](./test/unsupported.ts). While this library is not the fastest due to lack of code generation, it's consistently among the [most spec compliant](https://json-schema.org/implementations.html#benchmarks). ## Background _Why another JSON schema validator?_ Cloudflare workers do not have APIs required by [Ajv](https://ajv.js.org/) schema compilation (`eval` or `new Function(code)`). If possible use Ajv in a build step to precompile your schema. Otherwise this library could work for you. ## Basic usage ```js import { Validator } from "@cfworker/json-schema"; const validator = new Validator({ type: "number" }); const result = validator.validate(7); ``` ## Specify meta schema draft ```js const validator = new Validator({ type: "number" }, "4"); // draft-4 ``` ## Add schemas ```js const validator = new Validator({ $id: "https://foo.bar/baz", $ref: "/beep", }); validator.addSchema({ $id: "https://foo.bar/beep", type: "boolean" }); ``` ## Include all errors By default the validator stops processing after the first error. Set the `shortCircuit` parameter to `false` to emit all errors. 
```js const shortCircuit = false; const draft = '2019-09'; const schema = { type: 'object', required: ['name', 'email', 'number', 'bool'], properties: { name: { type: 'string' }, email: { type: 'string', format: 'email' }, number: { type: 'number' }, bool: { type: 'boolean' } } }; const validator = new Validator(schema, draft, shortCircuit); const result = validator.validate({ name: 'hello', email: 5, // invalid type number: 'Hello' // invalid type bool: 'false' // invalid type }); ```
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/index.ts
// Barrel file: re-exports the public API of the vendored
// @cfworker/json-schema package from its `src` directory.
export * from "./src/index.js";
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/package.json
{ "name": "@cfworker/json-schema", "type": "module", "version": "1.12.5", "description": "A JSON schema validator that will run on Cloudflare workers. Supports drafts 4, 7, 2019-09, and 2020-12.", "keywords": [ "json-schema", "jsonschema", "json", "schema", "cloudflare", "worker", "workers", "service-worker" ], "sideEffects": false, "main": "dist/index.js", "module": "dist/index.js", "types": "dist/index.d.ts", "files": [ "dist/**/*", "src/**/*", "tsconfig.json", "README.md", "package.json" ], "repository": "https://github.com/cfworker/cfworker", "author": "Jeremy Danyow <jdanyow@gmail.com>", "homepage": "https://github.com/cfworker/cfworker/tree/master/packages/json-schema/README.md", "license": "MIT", "publishConfig": { "access": "public" }, "scripts": { "build": "tsc --build", "clean": "tsc --build --clean", "pretest": "node --experimental-import-meta-resolve suite-gen.js", "test": "cfworker test test/**/*.spec.ts --nocheck" }, "devDependencies": { "@cfworker/dev": "^1.14.2", "@types/chai": "^4.3.3", "@types/mocha": "^10.0.0", "chai": "^4.3.6", "json-schema-test-suite": "git+https://github.com/json-schema-org/JSON-Schema-Test-Suite#76b529f", "mocha": "^10.0.0", "typescript": "^4.8.4" } }
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/ucs2-length.ts
/** * Get UCS-2 length of a string * https://mathiasbynens.be/notes/javascript-encoding * https://github.com/bestiejs/punycode.js - punycode.ucs2.decode */ export function ucs2length(s: string) { let result = 0; let length = s.length; let index = 0; let charCode: number; while (index < length) { result++; charCode = s.charCodeAt(index++); if (charCode >= 0xd800 && charCode <= 0xdbff && index < length) { // high surrogate, and there is a next character charCode = s.charCodeAt(index); if ((charCode & 0xfc00) == 0xdc00) { // low surrogate index++; } } } return result; }
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/types.ts
/** JSON Schema specification drafts supported by this validator. */
export type SchemaDraft = "4" | "7" | "2019-09" | "2020-12";

/** Output format options, encoded as combinable bit flags. */
export const enum OutputFormat {
  Flag = 1 << 0,
  Basic = 1 << 1,
  Detailed = 1 << 2,
}

/** The primitive instance types defined by JSON Schema. */
export type InstanceType =
  | "array"
  | "boolean"
  | "integer"
  | "null"
  | "number"
  | "object"
  | "string";

/**
 * A JSON Schema document. All keywords are optional; the index signature
 * permits unknown/extension keywords.
 */
export interface Schema {
  $id?: string;
  $anchor?: string;
  $recursiveAnchor?: boolean;
  $ref?: string;
  $recursiveRef?: "#";
  $schema?: string;
  $comment?: string;
  $defs?: any;
  $vocabulary?: Record<string, boolean>;
  type?: InstanceType | InstanceType[];
  const?: any;
  enum?: any[];
  required?: string[];
  not?: Schema;
  anyOf?: Schema[];
  allOf?: Schema[];
  oneOf?: Schema[];
  if?: Schema;
  then?: Schema;
  else?: Schema;
  format?: string;
  properties?: Record<string | number, Schema | boolean>;
  patternProperties?: Record<string, Schema | boolean>;
  additionalProperties?: Schema | boolean;
  unevaluatedProperties?: Schema | boolean;
  minProperties?: number;
  maxProperties?: number;
  propertyNames?: Schema;
  dependentRequired?: Record<string, string[]>;
  dependentSchemas?: Record<string, Schema>;
  dependencies?: Record<string, Schema | string[]>;
  prefixItems?: Array<Schema | boolean>[];
  items?: Schema | boolean | Array<Schema | boolean>;
  additionalItems?: Schema | boolean;
  unevaluatedItems?: Schema | boolean;
  contains?: Schema | boolean;
  minContains?: number;
  maxContains?: number;
  minItems?: number;
  maxItems?: number;
  uniqueItems?: boolean;
  minimum?: number;
  maximum?: number;
  exclusiveMinimum?: number | boolean;
  exclusiveMaximum?: number | boolean;
  multipleOf?: number;
  minLength?: number;
  maxLength?: number;
  pattern?: string;
  // Internal bookkeeping: absolute URIs precomputed by `dereference()`
  // (attached as non-enumerable properties, not part of the schema itself).
  __absolute_ref__?: string;
  __absolute_recursive_ref__?: string;
  __absolute_uri__?: string;
  // Permit unknown/extension keywords.
  [key: string]: any;
}

/** A single validation error entry. */
export interface OutputUnit {
  keyword: string;
  keywordLocation: string;
  instanceLocation: string;
  error: string;
}

/** Overall result of validating an instance against a schema. */
export interface ValidationResult {
  valid: boolean;
  errors: OutputUnit[];
}
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/dereference.ts
import { encodePointer } from "./pointer.js";
import { Schema } from "./types.js";

// Keywords whose value is a single subschema.
export const schemaKeyword: Record<string, boolean> = {
  additionalItems: true,
  unevaluatedItems: true,
  items: true,
  contains: true,
  additionalProperties: true,
  unevaluatedProperties: true,
  propertyNames: true,
  not: true,
  if: true,
  then: true,
  else: true,
};

// Keywords whose value is an array of subschemas.
export const schemaArrayKeyword: Record<string, boolean> = {
  prefixItems: true,
  items: true,
  allOf: true,
  anyOf: true,
  oneOf: true,
};

// Keywords whose value is a map of name -> subschema.
export const schemaMapKeyword: Record<string, boolean> = {
  $defs: true,
  definitions: true,
  properties: true,
  patternProperties: true,
  dependentSchemas: true,
};

// Keywords whose values never contain subschemas and are skipped during traversal.
export const ignoredKeyword: Record<string, boolean> = {
  id: true,
  $id: true,
  $ref: true,
  $schema: true,
  $anchor: true,
  $vocabulary: true,
  $comment: true,
  default: true,
  enum: true,
  const: true,
  required: true,
  type: true,
  maximum: true,
  minimum: true,
  exclusiveMaximum: true,
  exclusiveMinimum: true,
  multipleOf: true,
  maxLength: true,
  minLength: true,
  pattern: true,
  format: true,
  maxItems: true,
  minItems: true,
  uniqueItems: true,
  maxProperties: true,
  minProperties: true,
};

/**
 * Default base URI for schemas without an $id.
 * https://json-schema.org/draft/2019-09/json-schema-core.html#initial-base
 * https://tools.ietf.org/html/rfc3986#section-5.1
 */
export let initialBaseURI =
  // @ts-ignore
  typeof self !== "undefined" &&
  self.location &&
  self.location.origin !== "null"
    ? //@ts-ignore
      /* #__PURE__ */ new URL(
        self.location.origin + self.location.pathname + location.search
      )
    : /* #__PURE__ */ new URL("https://github.com/cfworker");

/**
 * Recursively walks a schema and builds a flat lookup table mapping every
 * reachable schema URI (base URIs, JSON-pointer locations, and $anchor URIs)
 * to its (sub)schema. Also caches absolute `$ref`/`$recursiveRef` targets on
 * each schema as non-enumerable `__absolute_*__` properties.
 *
 * @param schema The schema (or boolean schema) to index.
 * @param lookup Accumulator mapping URI -> schema; also the return value.
 * @param baseURI Base URI used to resolve relative $id/$ref values.
 * @param basePointer JSON pointer of `schema` relative to `baseURI`.
 * @returns The populated lookup table.
 */
export function dereference(
  schema: Schema | boolean,
  lookup: Record<string, Schema | boolean> = Object.create(null),
  baseURI = initialBaseURI,
  basePointer = ""
) {
  if (schema && typeof schema === "object" && !Array.isArray(schema)) {
    // Support both draft-4 `id` and later drafts' `$id`.
    const id: string = schema.$id || schema.id;
    if (id) {
      const url = new URL(id, baseURI.href);
      if (url.hash.length > 1) {
        // $id is a plain fragment (anchor-style); register it directly.
        lookup[url.href] = schema;
      } else {
        url.hash = ""; // normalize hash https://url.spec.whatwg.org/#dom-url-hash
        if (basePointer === "") {
          baseURI = url;
        } else {
          // Schema with its own $id embedded at a nested location: index it
          // again under its new base URI.
          dereference(schema, lookup, baseURI);
        }
      }
    }
  } else if (schema !== true && schema !== false) {
    // Not an object and not a boolean schema: nothing to index.
    return lookup;
  }

  // compute the schema's URI and add it to the mapping.
  const schemaURI = baseURI.href + (basePointer ? "#" + basePointer : "");
  if (lookup[schemaURI] !== undefined) {
    throw new Error(`Duplicate schema URI "${schemaURI}".`);
  }
  lookup[schemaURI] = schema;

  // exit early if this is a boolean schema.
  if (schema === true || schema === false) {
    return lookup;
  }

  // set the schema's absolute URI.
  if (schema.__absolute_uri__ === undefined) {
    Object.defineProperty(schema, "__absolute_uri__", {
      enumerable: false,
      value: schemaURI,
    });
  }

  // if a $ref is found, resolve it's absolute URI.
  if (schema.$ref && schema.__absolute_ref__ === undefined) {
    const url = new URL(schema.$ref, baseURI.href);
    url.hash = url.hash; // normalize hash https://url.spec.whatwg.org/#dom-url-hash
    Object.defineProperty(schema, "__absolute_ref__", {
      enumerable: false,
      value: url.href,
    });
  }

  // if a $recursiveRef is found, resolve it's absolute URI.
  if (schema.$recursiveRef && schema.__absolute_recursive_ref__ === undefined) {
    const url = new URL(schema.$recursiveRef, baseURI.href);
    url.hash = url.hash; // normalize hash https://url.spec.whatwg.org/#dom-url-hash
    Object.defineProperty(schema, "__absolute_recursive_ref__", {
      enumerable: false,
      value: url.href,
    });
  }

  // if an $anchor is found, compute it's URI and add it to the mapping.
  if (schema.$anchor) {
    const url = new URL("#" + schema.$anchor, baseURI.href);
    lookup[url.href] = schema;
  }

  // process subschemas.
  for (let key in schema) {
    if (ignoredKeyword[key]) {
      continue;
    }
    const keyBase = `${basePointer}/${encodePointer(key)}`;
    const subSchema = schema[key];
    if (Array.isArray(subSchema)) {
      if (schemaArrayKeyword[key]) {
        const length = subSchema.length;
        for (let i = 0; i < length; i++) {
          dereference(subSchema[i], lookup, baseURI, `${keyBase}/${i}`);
        }
      }
    } else if (schemaMapKeyword[key]) {
      for (let subKey in subSchema) {
        dereference(
          subSchema[subKey],
          lookup,
          baseURI,
          `${keyBase}/${encodePointer(subKey)}`
        );
      }
    } else {
      dereference(subSchema, lookup, baseURI, keyBase);
    }
  }

  return lookup;
}

// schema identification examples
// https://json-schema.org/draft/2019-09/json-schema-core.html#rfc.appendix.A

// $ref delegation
// https://github.com/json-schema-org/json-schema-spec/issues/514

// output format
// https://json-schema.org/draft/2019-09/json-schema-core.html#output

// JSON pointer
// https://tools.ietf.org/html/rfc6901

// JSON relative pointer
// https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/validate.ts
import { deepCompareStrict } from "./deep-compare-strict.js"; import { dereference } from "./dereference.js"; import { fastFormat } from "./format.js"; import { encodePointer } from "./pointer.js"; import { InstanceType, OutputUnit, Schema, SchemaDraft, ValidationResult, } from "./types.js"; import { ucs2length } from "./ucs2-length.js"; export type Evaluated = Record<string | number, boolean>; export function validate( instance: any, schema: Schema | boolean, draft: SchemaDraft = "2019-09", lookup = dereference(schema), shortCircuit = true, recursiveAnchor: Schema | null = null, instanceLocation = "#", schemaLocation = "#", evaluated: Evaluated = Object.create(null) ): ValidationResult { if (schema === true) { return { valid: true, errors: [] }; } if (schema === false) { return { valid: false, errors: [ { instanceLocation, keyword: "false", keywordLocation: instanceLocation, error: "False boolean schema.", }, ], }; } const rawInstanceType = typeof instance; let instanceType: Exclude<InstanceType, "integer">; switch (rawInstanceType) { case "boolean": case "number": case "string": instanceType = rawInstanceType; break; case "object": if (instance === null) { instanceType = "null"; } else if (Array.isArray(instance)) { instanceType = "array"; } else { instanceType = "object"; } break; default: // undefined, bigint, function, symbol throw new Error( `Instances of "${rawInstanceType}" type are not supported.` ); } const { $ref, $recursiveRef, $recursiveAnchor, type: $type, const: $const, enum: $enum, required: $required, not: $not, anyOf: $anyOf, allOf: $allOf, oneOf: $oneOf, if: $if, then: $then, else: $else, format: $format, properties: $properties, patternProperties: $patternProperties, additionalProperties: $additionalProperties, unevaluatedProperties: $unevaluatedProperties, minProperties: $minProperties, maxProperties: $maxProperties, propertyNames: $propertyNames, dependentRequired: $dependentRequired, dependentSchemas: $dependentSchemas, dependencies: 
$dependencies, prefixItems: $prefixItems, items: $items, additionalItems: $additionalItems, unevaluatedItems: $unevaluatedItems, contains: $contains, minContains: $minContains, maxContains: $maxContains, minItems: $minItems, maxItems: $maxItems, uniqueItems: $uniqueItems, minimum: $minimum, maximum: $maximum, exclusiveMinimum: $exclusiveMinimum, exclusiveMaximum: $exclusiveMaximum, multipleOf: $multipleOf, minLength: $minLength, maxLength: $maxLength, pattern: $pattern, __absolute_ref__, __absolute_recursive_ref__, } = schema; const errors: OutputUnit[] = []; if ($recursiveAnchor === true && recursiveAnchor === null) { recursiveAnchor = schema; } if ($recursiveRef === "#") { const refSchema = recursiveAnchor === null ? (lookup[__absolute_recursive_ref__!] as Schema) : recursiveAnchor; const keywordLocation = `${schemaLocation}/$recursiveRef`; const result = validate( instance, recursiveAnchor === null ? schema : recursiveAnchor, draft, lookup, shortCircuit, refSchema, instanceLocation, keywordLocation, evaluated ); if (!result.valid) { errors.push( { instanceLocation, keyword: "$recursiveRef", keywordLocation, error: "A subschema had errors.", }, ...result.errors ); } } if ($ref !== undefined) { const uri = __absolute_ref__ || $ref; const refSchema = lookup[uri]; if (refSchema === undefined) { let message = `Unresolved $ref "${$ref}".`; if (__absolute_ref__ && __absolute_ref__ !== $ref) { message += ` Absolute URI "${__absolute_ref__}".`; } message += `\nKnown schemas:\n- ${Object.keys(lookup).join("\n- ")}`; throw new Error(message); } const keywordLocation = `${schemaLocation}/$ref`; const result = validate( instance, refSchema, draft, lookup, shortCircuit, recursiveAnchor, instanceLocation, keywordLocation, evaluated ); if (!result.valid) { errors.push( { instanceLocation, keyword: "$ref", keywordLocation, error: "A subschema had errors.", }, ...result.errors ); } if (draft === "4" || draft === "7") { return { valid: errors.length === 0, errors }; } } if 
(Array.isArray($type)) { let length = $type.length; let valid = false; for (let i = 0; i < length; i++) { if ( instanceType === $type[i] || ($type[i] === "integer" && instanceType === "number" && instance % 1 === 0 && instance === instance) ) { valid = true; break; } } if (!valid) { errors.push({ instanceLocation, keyword: "type", keywordLocation: `${schemaLocation}/type`, error: `Instance type "${instanceType}" is invalid. Expected "${$type.join( '", "' )}".`, }); } } else if ($type === "integer") { if (instanceType !== "number" || instance % 1 || instance !== instance) { errors.push({ instanceLocation, keyword: "type", keywordLocation: `${schemaLocation}/type`, error: `Instance type "${instanceType}" is invalid. Expected "${$type}".`, }); } } else if ($type !== undefined && instanceType !== $type) { errors.push({ instanceLocation, keyword: "type", keywordLocation: `${schemaLocation}/type`, error: `Instance type "${instanceType}" is invalid. Expected "${$type}".`, }); } if ($const !== undefined) { if (instanceType === "object" || instanceType === "array") { if (!deepCompareStrict(instance, $const)) { errors.push({ instanceLocation, keyword: "const", keywordLocation: `${schemaLocation}/const`, error: `Instance does not match ${JSON.stringify($const)}.`, }); } } else if (instance !== $const) { errors.push({ instanceLocation, keyword: "const", keywordLocation: `${schemaLocation}/const`, error: `Instance does not match ${JSON.stringify($const)}.`, }); } } if ($enum !== undefined) { if (instanceType === "object" || instanceType === "array") { if (!$enum.some((value) => deepCompareStrict(instance, value))) { errors.push({ instanceLocation, keyword: "enum", keywordLocation: `${schemaLocation}/enum`, error: `Instance does not match any of ${JSON.stringify($enum)}.`, }); } } else if (!$enum.some((value) => instance === value)) { errors.push({ instanceLocation, keyword: "enum", keywordLocation: `${schemaLocation}/enum`, error: `Instance does not match any of 
${JSON.stringify($enum)}.`, }); } } if ($not !== undefined) { const keywordLocation = `${schemaLocation}/not`; const result = validate( instance, $not, draft, lookup, shortCircuit, recursiveAnchor, instanceLocation, keywordLocation /*, evaluated*/ ); if (result.valid) { errors.push({ instanceLocation, keyword: "not", keywordLocation, error: 'Instance matched "not" schema.', }); } } let subEvaluateds: Array<Evaluated> = []; if ($anyOf !== undefined) { const keywordLocation = `${schemaLocation}/anyOf`; const errorsLength = errors.length; let anyValid = false; for (let i = 0; i < $anyOf.length; i++) { const subSchema = $anyOf[i]; const subEvaluated: Evaluated = Object.create(evaluated); const result = validate( instance, subSchema, draft, lookup, shortCircuit, $recursiveAnchor === true ? recursiveAnchor : null, instanceLocation, `${keywordLocation}/${i}`, subEvaluated ); errors.push(...result.errors); anyValid = anyValid || result.valid; if (result.valid) { subEvaluateds.push(subEvaluated); } } if (anyValid) { errors.length = errorsLength; } else { errors.splice(errorsLength, 0, { instanceLocation, keyword: "anyOf", keywordLocation, error: "Instance does not match any subschemas.", }); } } if ($allOf !== undefined) { const keywordLocation = `${schemaLocation}/allOf`; const errorsLength = errors.length; let allValid = true; for (let i = 0; i < $allOf.length; i++) { const subSchema = $allOf[i]; const subEvaluated: Evaluated = Object.create(evaluated); const result = validate( instance, subSchema, draft, lookup, shortCircuit, $recursiveAnchor === true ? 
recursiveAnchor : null, instanceLocation, `${keywordLocation}/${i}`, subEvaluated ); errors.push(...result.errors); allValid = allValid && result.valid; if (result.valid) { subEvaluateds.push(subEvaluated); } } if (allValid) { errors.length = errorsLength; } else { errors.splice(errorsLength, 0, { instanceLocation, keyword: "allOf", keywordLocation, error: `Instance does not match every subschema.`, }); } } if ($oneOf !== undefined) { const keywordLocation = `${schemaLocation}/oneOf`; const errorsLength = errors.length; const matches = $oneOf.filter((subSchema, i) => { const subEvaluated: Evaluated = Object.create(evaluated); const result = validate( instance, subSchema, draft, lookup, shortCircuit, $recursiveAnchor === true ? recursiveAnchor : null, instanceLocation, `${keywordLocation}/${i}`, subEvaluated ); errors.push(...result.errors); if (result.valid) { subEvaluateds.push(subEvaluated); } return result.valid; }).length; if (matches === 1) { errors.length = errorsLength; } else { errors.splice(errorsLength, 0, { instanceLocation, keyword: "oneOf", keywordLocation, error: `Instance does not match exactly one subschema (${matches} matches).`, }); } } if (instanceType === "object" || instanceType === "array") { Object.assign(evaluated, ...subEvaluateds); } if ($if !== undefined) { const keywordLocation = `${schemaLocation}/if`; const conditionResult = validate( instance, $if, draft, lookup, shortCircuit, recursiveAnchor, instanceLocation, keywordLocation, evaluated ).valid; if (conditionResult) { if ($then !== undefined) { const thenResult = validate( instance, $then, draft, lookup, shortCircuit, recursiveAnchor, instanceLocation, `${schemaLocation}/then`, evaluated ); if (!thenResult.valid) { errors.push( { instanceLocation, keyword: "if", keywordLocation, error: `Instance does not match "then" schema.`, }, ...thenResult.errors ); } } } else if ($else !== undefined) { const elseResult = validate( instance, $else, draft, lookup, shortCircuit, recursiveAnchor, 
instanceLocation, `${schemaLocation}/else`, evaluated ); if (!elseResult.valid) { errors.push( { instanceLocation, keyword: "if", keywordLocation, error: `Instance does not match "else" schema.`, }, ...elseResult.errors ); } } } if (instanceType === "object") { if ($required !== undefined) { for (const key of $required) { if (!(key in instance)) { errors.push({ instanceLocation, keyword: "required", keywordLocation: `${schemaLocation}/required`, error: `Instance does not have required property "${key}".`, }); } } } const keys = Object.keys(instance); if ($minProperties !== undefined && keys.length < $minProperties) { errors.push({ instanceLocation, keyword: "minProperties", keywordLocation: `${schemaLocation}/minProperties`, error: `Instance does not have at least ${$minProperties} properties.`, }); } if ($maxProperties !== undefined && keys.length > $maxProperties) { errors.push({ instanceLocation, keyword: "maxProperties", keywordLocation: `${schemaLocation}/maxProperties`, error: `Instance does not have at least ${$maxProperties} properties.`, }); } if ($propertyNames !== undefined) { const keywordLocation = `${schemaLocation}/propertyNames`; for (const key in instance) { const subInstancePointer = `${instanceLocation}/${encodePointer(key)}`; const result = validate( key, $propertyNames, draft, lookup, shortCircuit, recursiveAnchor, subInstancePointer, keywordLocation ); if (!result.valid) { errors.push( { instanceLocation, keyword: "propertyNames", keywordLocation, error: `Property name "${key}" does not match schema.`, }, ...result.errors ); } } } if ($dependentRequired !== undefined) { const keywordLocation = `${schemaLocation}/dependantRequired`; for (const key in $dependentRequired) { if (key in instance) { const required = $dependentRequired[key] as string[]; for (const dependantKey of required) { if (!(dependantKey in instance)) { errors.push({ instanceLocation, keyword: "dependentRequired", keywordLocation, error: `Instance has "${key}" but does not have 
"${dependantKey}".`, }); } } } } } if ($dependentSchemas !== undefined) { for (const key in $dependentSchemas) { const keywordLocation = `${schemaLocation}/dependentSchemas`; if (key in instance) { const result = validate( instance, $dependentSchemas[key], draft, lookup, shortCircuit, recursiveAnchor, instanceLocation, `${keywordLocation}/${encodePointer(key)}`, evaluated ); if (!result.valid) { errors.push( { instanceLocation, keyword: "dependentSchemas", keywordLocation, error: `Instance has "${key}" but does not match dependant schema.`, }, ...result.errors ); } } } } if ($dependencies !== undefined) { const keywordLocation = `${schemaLocation}/dependencies`; for (const key in $dependencies) { if (key in instance) { const propsOrSchema = $dependencies[key] as Schema | string[]; if (Array.isArray(propsOrSchema)) { for (const dependantKey of propsOrSchema) { if (!(dependantKey in instance)) { errors.push({ instanceLocation, keyword: "dependencies", keywordLocation, error: `Instance has "${key}" but does not have "${dependantKey}".`, }); } } } else { const result = validate( instance, propsOrSchema, draft, lookup, shortCircuit, recursiveAnchor, instanceLocation, `${keywordLocation}/${encodePointer(key)}` ); if (!result.valid) { errors.push( { instanceLocation, keyword: "dependencies", keywordLocation, error: `Instance has "${key}" but does not match dependant schema.`, }, ...result.errors ); } } } } } const thisEvaluated = Object.create(null); let stop = false; if ($properties !== undefined) { const keywordLocation = `${schemaLocation}/properties`; for (const key in $properties) { if (!(key in instance)) { continue; } const subInstancePointer = `${instanceLocation}/${encodePointer(key)}`; const result = validate( instance[key], $properties[key], draft, lookup, shortCircuit, recursiveAnchor, subInstancePointer, `${keywordLocation}/${encodePointer(key)}` ); if (result.valid) { evaluated[key] = thisEvaluated[key] = true; } else { stop = shortCircuit; errors.push( { 
instanceLocation, keyword: "properties", keywordLocation, error: `Property "${key}" does not match schema.`, }, ...result.errors ); if (stop) break; } } } if (!stop && $patternProperties !== undefined) { const keywordLocation = `${schemaLocation}/patternProperties`; for (const pattern in $patternProperties) { const regex = new RegExp(pattern); const subSchema = $patternProperties[pattern]; for (const key in instance) { if (!regex.test(key)) { continue; } const subInstancePointer = `${instanceLocation}/${encodePointer( key )}`; const result = validate( instance[key], subSchema, draft, lookup, shortCircuit, recursiveAnchor, subInstancePointer, `${keywordLocation}/${encodePointer(pattern)}` ); if (result.valid) { evaluated[key] = thisEvaluated[key] = true; } else { stop = shortCircuit; errors.push( { instanceLocation, keyword: "patternProperties", keywordLocation, error: `Property "${key}" matches pattern "${pattern}" but does not match associated schema.`, }, ...result.errors ); } } } } if (!stop && $additionalProperties !== undefined) { const keywordLocation = `${schemaLocation}/additionalProperties`; for (const key in instance) { if (thisEvaluated[key]) { continue; } const subInstancePointer = `${instanceLocation}/${encodePointer(key)}`; const result = validate( instance[key], $additionalProperties, draft, lookup, shortCircuit, recursiveAnchor, subInstancePointer, keywordLocation ); if (result.valid) { evaluated[key] = true; } else { stop = shortCircuit; errors.push( { instanceLocation, keyword: "additionalProperties", keywordLocation, error: `Property "${key}" does not match additional properties schema.`, }, ...result.errors ); } } } else if (!stop && $unevaluatedProperties !== undefined) { const keywordLocation = `${schemaLocation}/unevaluatedProperties`; for (const key in instance) { if (!evaluated[key]) { const subInstancePointer = `${instanceLocation}/${encodePointer( key )}`; const result = validate( instance[key], $unevaluatedProperties, draft, lookup, 
shortCircuit, recursiveAnchor, subInstancePointer, keywordLocation ); if (result.valid) { evaluated[key] = true; } else { errors.push( { instanceLocation, keyword: "unevaluatedProperties", keywordLocation, error: `Property "${key}" does not match unevaluated properties schema.`, }, ...result.errors ); } } } } } else if (instanceType === "array") { if ($maxItems !== undefined && instance.length > $maxItems) { errors.push({ instanceLocation, keyword: "maxItems", keywordLocation: `${schemaLocation}/maxItems`, error: `Array has too many items (${instance.length} > ${$maxItems}).`, }); } if ($minItems !== undefined && instance.length < $minItems) { errors.push({ instanceLocation, keyword: "minItems", keywordLocation: `${schemaLocation}/minItems`, error: `Array has too few items (${instance.length} < ${$minItems}).`, }); } const length: number = instance.length; let i = 0; let stop = false; if ($prefixItems !== undefined) { const keywordLocation = `${schemaLocation}/prefixItems`; const length2 = Math.min($prefixItems.length, length); for (; i < length2; i++) { const result = validate( instance[i], $prefixItems[i], draft, lookup, shortCircuit, recursiveAnchor, `${instanceLocation}/${i}`, `${keywordLocation}/${i}` ); evaluated[i] = true; if (!result.valid) { stop = shortCircuit; errors.push( { instanceLocation, keyword: "prefixItems", keywordLocation, error: `Items did not match schema.`, }, ...result.errors ); if (stop) break; } } } if ($items !== undefined) { const keywordLocation = `${schemaLocation}/items`; if (Array.isArray($items)) { const length2 = Math.min($items.length, length); for (; i < length2; i++) { const result = validate( instance[i], $items[i], draft, lookup, shortCircuit, recursiveAnchor, `${instanceLocation}/${i}`, `${keywordLocation}/${i}` ); evaluated[i] = true; if (!result.valid) { stop = shortCircuit; errors.push( { instanceLocation, keyword: "items", keywordLocation, error: `Items did not match schema.`, }, ...result.errors ); if (stop) break; } } 
} else { for (; i < length; i++) { const result = validate( instance[i], $items, draft, lookup, shortCircuit, recursiveAnchor, `${instanceLocation}/${i}`, keywordLocation ); evaluated[i] = true; if (!result.valid) { stop = shortCircuit; errors.push( { instanceLocation, keyword: "items", keywordLocation, error: `Items did not match schema.`, }, ...result.errors ); if (stop) break; } } } if (!stop && $additionalItems !== undefined) { const keywordLocation = `${schemaLocation}/additionalItems`; for (; i < length; i++) { const result = validate( instance[i], $additionalItems, draft, lookup, shortCircuit, recursiveAnchor, `${instanceLocation}/${i}`, keywordLocation ); evaluated[i] = true; if (!result.valid) { stop = shortCircuit; errors.push( { instanceLocation, keyword: "additionalItems", keywordLocation, error: `Items did not match additional items schema.`, }, ...result.errors ); } } } } if ($contains !== undefined) { if (length === 0 && $minContains === undefined) { errors.push({ instanceLocation, keyword: "contains", keywordLocation: `${schemaLocation}/contains`, error: `Array is empty. 
It must contain at least one item matching the schema.`, }); } else if ($minContains !== undefined && length < $minContains) { errors.push({ instanceLocation, keyword: "minContains", keywordLocation: `${schemaLocation}/minContains`, error: `Array has less items (${length}) than minContains (${$minContains}).`, }); } else { const keywordLocation = `${schemaLocation}/contains`; const errorsLength = errors.length; let contained = 0; for (let j = 0; j < length; j++) { const result = validate( instance[j], $contains, draft, lookup, shortCircuit, recursiveAnchor, `${instanceLocation}/${j}`, keywordLocation ); if (result.valid) { evaluated[j] = true; contained++; } else { errors.push(...result.errors); } } if (contained >= ($minContains || 0)) { errors.length = errorsLength; } if ( $minContains === undefined && $maxContains === undefined && contained === 0 ) { errors.splice(errorsLength, 0, { instanceLocation, keyword: "contains", keywordLocation, error: `Array does not contain item matching schema.`, }); } else if ($minContains !== undefined && contained < $minContains) { errors.push({ instanceLocation, keyword: "minContains", keywordLocation: `${schemaLocation}/minContains`, error: `Array must contain at least ${$minContains} items matching schema. Only ${contained} items were found.`, }); } else if ($maxContains !== undefined && contained > $maxContains) { errors.push({ instanceLocation, keyword: "maxContains", keywordLocation: `${schemaLocation}/maxContains`, error: `Array may contain at most ${$maxContains} items matching schema. 
${contained} items were found.`, }); } } } if (!stop && $unevaluatedItems !== undefined) { const keywordLocation = `${schemaLocation}/unevaluatedItems`; for (i; i < length; i++) { if (evaluated[i]) { continue; } const result = validate( instance[i], $unevaluatedItems, draft, lookup, shortCircuit, recursiveAnchor, `${instanceLocation}/${i}`, keywordLocation ); evaluated[i] = true; if (!result.valid) { errors.push( { instanceLocation, keyword: "unevaluatedItems", keywordLocation, error: `Items did not match unevaluated items schema.`, }, ...result.errors ); } } } if ($uniqueItems) { for (let j = 0; j < length; j++) { const a = instance[j]; const ao = typeof a === "object" && a !== null; for (let k = 0; k < length; k++) { if (j === k) { continue; } const b = instance[k]; const bo = typeof b === "object" && b !== null; if (a === b || (ao && bo && deepCompareStrict(a, b))) { errors.push({ instanceLocation, keyword: "uniqueItems", keywordLocation: `${schemaLocation}/uniqueItems`, error: `Duplicate items at indexes ${j} and ${k}.`, }); j = Number.MAX_SAFE_INTEGER; k = Number.MAX_SAFE_INTEGER; } } } } } else if (instanceType === "number") { if (draft === "4") { if ( $minimum !== undefined && (($exclusiveMinimum === true && instance <= $minimum) || instance < $minimum) ) { errors.push({ instanceLocation, keyword: "minimum", keywordLocation: `${schemaLocation}/minimum`, error: `${instance} is less than ${ $exclusiveMinimum ? "or equal to " : "" } ${$minimum}.`, }); } if ( $maximum !== undefined && (($exclusiveMaximum === true && instance >= $maximum) || instance > $maximum) ) { errors.push({ instanceLocation, keyword: "maximum", keywordLocation: `${schemaLocation}/maximum`, error: `${instance} is greater than ${ $exclusiveMaximum ? 
"or equal to " : "" } ${$maximum}.`, }); } } else { if ($minimum !== undefined && instance < $minimum) { errors.push({ instanceLocation, keyword: "minimum", keywordLocation: `${schemaLocation}/minimum`, error: `${instance} is less than ${$minimum}.`, }); } if ($maximum !== undefined && instance > $maximum) { errors.push({ instanceLocation, keyword: "maximum", keywordLocation: `${schemaLocation}/maximum`, error: `${instance} is greater than ${$maximum}.`, }); } if ($exclusiveMinimum !== undefined && instance <= $exclusiveMinimum) { errors.push({ instanceLocation, keyword: "exclusiveMinimum", keywordLocation: `${schemaLocation}/exclusiveMinimum`, error: `${instance} is less than ${$exclusiveMinimum}.`, }); } if ($exclusiveMaximum !== undefined && instance >= $exclusiveMaximum) { errors.push({ instanceLocation, keyword: "exclusiveMaximum", keywordLocation: `${schemaLocation}/exclusiveMaximum`, error: `${instance} is greater than or equal to ${$exclusiveMaximum}.`, }); } } if ($multipleOf !== undefined) { const remainder = instance % $multipleOf; if ( Math.abs(0 - remainder) >= 1.1920929e-7 && Math.abs($multipleOf - remainder) >= 1.1920929e-7 ) { errors.push({ instanceLocation, keyword: "multipleOf", keywordLocation: `${schemaLocation}/multipleOf`, error: `${instance} is not a multiple of ${$multipleOf}.`, }); } } } else if (instanceType === "string") { const length = $minLength === undefined && $maxLength === undefined ? 
0 : ucs2length(instance); if ($minLength !== undefined && length < $minLength) { errors.push({ instanceLocation, keyword: "minLength", keywordLocation: `${schemaLocation}/minLength`, error: `String is too short (${length} < ${$minLength}).`, }); } if ($maxLength !== undefined && length > $maxLength) { errors.push({ instanceLocation, keyword: "maxLength", keywordLocation: `${schemaLocation}/maxLength`, error: `String is too long (${length} > ${$maxLength}).`, }); } if ($pattern !== undefined && !new RegExp($pattern).test(instance)) { errors.push({ instanceLocation, keyword: "pattern", keywordLocation: `${schemaLocation}/pattern`, error: `String does not match pattern.`, }); } if ( $format !== undefined && fastFormat[$format] && !fastFormat[$format](instance) ) { errors.push({ instanceLocation, keyword: "format", keywordLocation: `${schemaLocation}/format`, error: `String does not match format "${$format}".`, }); } } return { valid: errors.length === 0, errors }; }
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/deep-compare-strict.ts
export function deepCompareStrict(a: any, b: any): boolean { const typeofa = typeof a; if (typeofa !== typeof b) { return false; } if (Array.isArray(a)) { if (!Array.isArray(b)) { return false; } const length = a.length; if (length !== b.length) { return false; } for (let i = 0; i < length; i++) { if (!deepCompareStrict(a[i], b[i])) { return false; } } return true; } if (typeofa === "object") { if (!a || !b) { return a === b; } const aKeys = Object.keys(a); const bKeys = Object.keys(b); const length = aKeys.length; if (length !== bKeys.length) { return false; } for (const k of aKeys) { if (!deepCompareStrict(a[k], b[k])) { return false; } } return true; } return a === b; }
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/format.ts
// String-format validators for the JSON-schema `format` keyword.
// `fullFormat` holds the strict checkers; `fastFormat` overrides a few of
// them with cheaper regex-only approximations.
// based on https://github.com/epoberezkin/ajv/blob/master/lib/compile/formats.js

// RFC 3339 full-date shape (digits only; range checks happen in `date`).
const DATE = /^(\d\d\d\d)-(\d\d)-(\d\d)$/;
// Days per month, 1-indexed; February handled separately for leap years.
const DAYS = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
// RFC 3339 time shape with optional fraction and timezone offset.
const TIME = /^(\d\d):(\d\d):(\d\d)(\.\d+)?(z|[+-]\d\d(?::?\d\d)?)?$/i;
const HOSTNAME =
  /^(?=.{1,253}\.?$)[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9](?:[-0-9a-z]{0,61}[0-9a-z])?)*\.?$/i;
// const URI = /^(?:[a-z][a-z0-9+\-.]*:)(?:\/?\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9a-f]{2})*@)?(?:\[(?:(?:(?:(?:[0-9a-f]{1,4}:){6}|::(?:[0-9a-f]{1,4}:){5}|(?:[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){4}|(?:(?:[0-9a-f]{1,4}:){0,1}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){3}|(?:(?:[0-9a-f]{1,4}:){0,2}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){2}|(?:(?:[0-9a-f]{1,4}:){0,3}[0-9a-f]{1,4})?::[0-9a-f]{1,4}:|(?:(?:[0-9a-f]{1,4}:){0,4}[0-9a-f]{1,4})?::)(?:[0-9a-f]{1,4}:[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|(?:(?:[0-9a-f]{1,4}:){0,5}[0-9a-f]{1,4})?::[0-9a-f]{1,4}|(?:(?:[0-9a-f]{1,4}:){0,6}[0-9a-f]{1,4})?::)|[Vv][0-9a-f]+\.[a-z0-9\-._~!$&'()*+,;=:]+)\]|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9a-f]{2})*)(?::\d*)?(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*|\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*)?|(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*)(?:\?(?:[a-z0-9\-._~!$&'()*+,;=:@/?]|%[0-9a-f]{2})*)?(?:#(?:[a-z0-9\-._~!$&'()*+,;=:@/?]|%[0-9a-f]{2})*)?$/i;
// URI-reference (relative references allowed; the scheme group is optional).
const URIREF =
  /^(?:[a-z][a-z0-9+\-.]*:)?(?:\/?\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9a-f]{2})*@)?(?:\[(?:(?:(?:(?:[0-9a-f]{1,4}:){6}|::(?:[0-9a-f]{1,4}:){5}|(?:[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){4}|(?:(?:[0-9a-f]{1,4}:){0,1}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){3}|(?:(?:[0-9a-f]{1,4}:){0,2}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){2}|(?:(?:[0-9a-f]{1,4}:){0,3}[0-9a-f]{1,4})?::[0-9a-f]{1,4}:|(?:(?:[0-9a-f]{1,4}:){0,4}[0-9a-f]{1,4})?::)(?:[0-9a-f]{1,4}:[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|(?:(?:[0-9a-f]{1,4}:){0,5}[0-9a-f]{1,4})?::[0-9a-f]{1,4}|(?:(?:[0-9a-f]{1,4}:){0,6}[0-9a-f]{1,4})?::)|[Vv][0-9a-f]+\.[a-z0-9\-._~!$&'()*+,;=:]+)\]|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)|(?:[a-z0-9\-._~!$&'"()*+,;=]|%[0-9a-f]{2})*)(?::\d*)?(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*|\/(?:(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*)?|(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*)?(?:\?(?:[a-z0-9\-._~!$&'"()*+,;=:@/?]|%[0-9a-f]{2})*)?(?:#(?:[a-z0-9\-._~!$&'"()*+,;=:@/?]|%[0-9a-f]{2})*)?$/i;
// uri-template: https://tools.ietf.org/html/rfc6570
const URITEMPLATE =
  /^(?:(?:[^\x00-\x20"'<>%\\^`{|}]|%[0-9a-f]{2})|\{[+#./;?&=,!@|]?(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?(?:,(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?)*\})*$/i;
// For the source: https://gist.github.com/dperini/729294
// For test cases: https://mathiasbynens.be/demo/url-regex
const URL_ =
  /^(?:(?:https?|ftp):\/\/)(?:\S+(?::\S*)?@)?(?:(?!10(?:\.\d{1,3}){3})(?!127(?:\.\d{1,3}){3})(?!169\.254(?:\.\d{1,3}){2})(?!192\.168(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z\u{00a1}-\u{ffff}0-9]+-?)*[a-z\u{00a1}-\u{ffff}0-9]+)(?:\.(?:[a-z\u{00a1}-\u{ffff}0-9]+-?)*[a-z\u{00a1}-\u{ffff}0-9]+)*(?:\.(?:[a-z\u{00a1}-\u{ffff}]{2,})))(?::\d{2,5})?(?:\/[^\s]*)?$/iu;
// Accepts both bare UUIDs and `urn:uuid:`-prefixed ones.
const UUID = /^(?:urn:uuid:)?[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}$/i;
const JSON_POINTER = /^(?:\/(?:[^~/]|~0|~1)*)*$/;
const JSON_POINTER_URI_FRAGMENT =
  /^#(?:\/(?:[a-z0-9_\-.!$&'()*+,;:=@]|%[0-9a-f]{2}|~0|~1)*)*$/i;
const RELATIVE_JSON_POINTER = /^(?:0|[1-9][0-9]*)(?:#|(?:\/(?:[^~/]|~0|~1)*)*)$/;
// date: http://tools.ietf.org/html/rfc3339#section-5.6
const FASTDATE = /^\d\d\d\d-[0-1]\d-[0-3]\d$/;
// date-time: http://tools.ietf.org/html/rfc3339#section-5.6
const FASTTIME =
  /^(?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d(?::?\d\d)?)?$/i;
const FASTDATETIME =
  /^\d\d\d\d-[0-1]\d-[0-3]\d[t\s](?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d(?::?\d\d)?)$/i;
// uri: https://github.com/mafintosh/is-my-json-valid/blob/master/formats.js
// const FASTURI = /^(?:[a-z][a-z0-9+-.]*:)(?:\/?\/)?[^\s]*$/i;
const FASTURIREFERENCE =
  /^(?:(?:[a-z][a-z0-9+-.]*:)?\/?\/)?(?:[^\\\s#][^\s#]*)?(?:#[^\\\s]*)?$/i;
// Email checker: rejects quoted local parts, enforces local part <= 64 chars
// and host <= 253 chars, then validates each hostname label separately.
// https://github.com/ExodusMovement/schemasafe/blob/master/src/formats.js
const EMAIL = (input: string) => {
  if (input[0] === '"') return false;
  const [name, host, ...rest] = input.split("@");
  if (
    !name ||
    !host ||
    rest.length !== 0 ||
    name.length > 64 ||
    host.length > 253
  )
    return false;
  // No leading/trailing/doubled dots in the local part.
  if (name[0] === "." || name.endsWith(".") || name.includes("..")) return false;
  if (
    !/^[a-z0-9.-]+$/i.test(host) ||
    !/^[a-z0-9.!#$%&'*+/=?^_`{|}~-]+$/i.test(name)
  )
    return false;
  // Every dot-separated host label must be a valid hostname label.
  return host
    .split(".")
    .every((part) => /^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$/i.test(part));
};
// optimized https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9780596802837/ch07s16.html
const IPV4 =
  /^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/;
// optimized http://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses
const IPV6 =
  /^((([0-9a-f]{1,4}:){7}([0-9a-f]{1,4}|:))|(([0-9a-f]{1,4}:){6}(:[0-9a-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9a-f]{1,4}:){5}(((:[0-9a-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9a-f]{1,4}:){4}(((:[0-9a-f]{1,4}){1,3})|((:[0-9a-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9a-f]{1,4}:){3}(((:[0-9a-f]{1,4}){1,4})|((:[0-9a-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9a-f]{1,4}:){2}(((:[0-9a-f]{1,4}){1,5})|((:[0-9a-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9a-f]{1,4}:){1}(((:[0-9a-f]{1,4}){1,6})|((:[0-9a-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9a-f]{1,4}){1,7})|((:[0-9a-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))$/i;
// ISO 8601 duration: either a week form (PnW) or combined Y/M/D + T H/M/S
// form; length bounds guard against pathological inputs.
// https://github.com/ExodusMovement/schemasafe/blob/master/src/formats.js
const DURATION = (input: string) =>
  input.length > 1 &&
  input.length < 80 &&
  (/^P\d+([.,]\d+)?W$/.test(input) ||
    (/^P[\dYMDTHS]*(\d[.,]\d+)?[YMDHS]$/.test(input) &&
      /^P([.,\d]+Y)?([.,\d]+M)?([.,\d]+D)?(T([.,\d]+H)?([.,\d]+M)?([.,\d]+S)?)?$/.test(
        input
      )));

// Turns a RegExp into a standalone predicate (keeps `this` bound).
function bind(r: RegExp) {
  return r.test.bind(r);
}

// Strict format checkers, keyed by the JSON-schema `format` name.
export const fullFormat: Record<string, (s: string) => boolean> = {
  date,
  time: /* #__PURE__ */ time.bind(undefined, false),
  "date-time": date_time,
  duration: DURATION,
  uri,
  "uri-reference": /* #__PURE__ */ bind(URIREF),
  "uri-template": /* #__PURE__ */ bind(URITEMPLATE),
  url: /* #__PURE__ */ bind(URL_),
  email: EMAIL,
  hostname: /* #__PURE__ */ bind(HOSTNAME),
  ipv4: /* #__PURE__ */ bind(IPV4),
  ipv6: /* #__PURE__ */ bind(IPV6),
  regex: regex,
  uuid: /* #__PURE__ */ bind(UUID),
  "json-pointer": /* #__PURE__ */ bind(JSON_POINTER),
  "json-pointer-uri-fragment": /* #__PURE__ */ bind(JSON_POINTER_URI_FRAGMENT),
  "relative-json-pointer": /* #__PURE__ */ bind(RELATIVE_JSON_POINTER),
};

// Same table, but date/time/date-time/uri-reference use the cheaper
// regex-only approximations above.
export const fastFormat: Record<string, (s: string) => boolean> = {
  ...fullFormat,
  date: /* #__PURE__ */ bind(FASTDATE),
  time: /* #__PURE__ */ bind(FASTTIME),
  "date-time": /* #__PURE__ */ bind(FASTDATETIME),
  "uri-reference": /* #__PURE__ */ bind(FASTURIREFERENCE),
};

function isLeapYear(year: number) {
  // https://tools.ietf.org/html/rfc3339#appendix-C
  return year % 4 === 0 && (year % 100 !== 0 || year % 400 === 0);
}

// Validates an RFC 3339 full-date, including month/day range checks and
// February 29 on leap years.
function date(str: string) {
  // full-date from http://tools.ietf.org/html/rfc3339#section-5.6
  const matches = str.match(DATE);
  if (!matches) return false;
  const year = +matches[1];
  const month = +matches[2];
  const day = +matches[3];
  return (
    month >= 1 &&
    month <= 12 &&
    day >= 1 &&
    day <= (month == 2 && isLeapYear(year) ? 29 : DAYS[month])
  );
}

// Validates an RFC 3339 time; allows the 23:59:60 leap second. When `full`
// is true a timezone offset (capture group 5) is required.
function time(full: boolean, str: string) {
  const matches = str.match(TIME);
  if (!matches) return false;
  const hour = +matches[1];
  const minute = +matches[2];
  const second = +matches[3];
  const timeZone = !!matches[5];
  return (
    ((hour <= 23 && minute <= 59 && second <= 59) ||
      (hour == 23 && minute == 59 && second == 60)) &&
    (!full || timeZone)
  );
}

// RFC 3339 permits 'T', 't', or (by appendix A relaxation) a space.
const DATE_TIME_SEPARATOR = /t|\s/i;

// Validates a full date-time: date part, separator, time with mandatory
// timezone offset.
function date_time(str: string) {
  // http://tools.ietf.org/html/rfc3339#section-5.6
  const dateTime = str.split(DATE_TIME_SEPARATOR);
  return dateTime.length == 2 && date(dateTime[0]) && time(true, dateTime[1]);
}

const NOT_URI_FRAGMENT = /\/|:/;
const URI_PATTERN =
  /^(?:[a-z][a-z0-9+\-.]*:)(?:\/?\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9a-f]{2})*@)?(?:\[(?:(?:(?:(?:[0-9a-f]{1,4}:){6}|::(?:[0-9a-f]{1,4}:){5}|(?:[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){4}|(?:(?:[0-9a-f]{1,4}:){0,1}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){3}|(?:(?:[0-9a-f]{1,4}:){0,2}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){2}|(?:(?:[0-9a-f]{1,4}:){0,3}[0-9a-f]{1,4})?::[0-9a-f]{1,4}:|(?:(?:[0-9a-f]{1,4}:){0,4}[0-9a-f]{1,4})?::)(?:[0-9a-f]{1,4}:[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|(?:(?:[0-9a-f]{1,4}:){0,5}[0-9a-f]{1,4})?::[0-9a-f]{1,4}|(?:(?:[0-9a-f]{1,4}:){0,6}[0-9a-f]{1,4})?::)|[Vv][0-9a-f]+\.[a-z0-9\-._~!$&'()*+,;=:]+)\]|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9a-f]{2})*)(?::\d*)?(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*|\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*)?|(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*)(?:\?(?:[a-z0-9\-._~!$&'()*+,;=:@/?]|%[0-9a-f]{2})*)?(?:#(?:[a-z0-9\-._~!$&'()*+,;=:@/?]|%[0-9a-f]{2})*)?$/i;

// Absolute-URI check: requires at least one '/' or ':' before running the
// full grammar regex.
function uri(str: string): boolean {
  // http://jmrware.com/articles/2009/uri_regexp/URI_regex.html + optional protocol + required "."
  return NOT_URI_FRAGMENT.test(str) && URI_PATTERN.test(str);
}

// Rejects patterns containing an unescaped \Z anchor (not valid in JS).
const Z_ANCHOR = /[^\\]\\Z/;

// A string is a valid "regex" format if it compiles as a JS RegExp and does
// not use \Z.
function regex(str: string) {
  if (Z_ANCHOR.test(str)) return false;
  try {
    new RegExp(str);
    return true;
  } catch (e) {
    return false;
  }
}
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/pointer.ts
/** `~` and `/` are the two characters RFC 6901 requires escaping. */
const TILDE = /~/g;
const SLASH = /\//g;

/**
 * Escapes a JSON-pointer segment and URI-encodes the result, making it
 * safe to embed in a `$ref` URI fragment.
 */
export function encodePointer(p: string): string {
  return encodeURI(escapePointer(p));
}

/**
 * Applies the RFC 6901 escaping rules: `~` becomes `~0`, `/` becomes `~1`.
 * (`~` must be replaced first so the `~1` sequences it produces are not
 * re-escaped.)
 */
export function escapePointer(p: string): string {
  return p.replace(TILDE, "~0").replace(SLASH, "~1");
}
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/index.ts
export * from "./deep-compare-strict.js"; export * from "./dereference.js"; export * from "./format.js"; export * from "./pointer.js"; export * from "./types.js"; export * from "./ucs2-length.js"; export * from "./validate.js"; export * from "./validator.js";
0
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema
lc_public_repos/langchainjs/langchain-core/src/utils/@cfworker/json-schema/src/validator.ts
import { dereference } from "./dereference.js";
import { Schema, SchemaDraft } from "./types.js";
import { validate } from "./validate.js";

/**
 * Object-oriented wrapper around `validate`: dereferences the root schema
 * once at construction time and reuses the resulting `$id`/`$ref` lookup
 * table for every subsequent validation.
 */
export class Validator {
  /** Reference lookup table built from the root schema. */
  private readonly lookup: ReturnType<typeof dereference>;

  constructor(
    private readonly schema: Schema | boolean,
    private readonly draft: SchemaDraft = "2019-09",
    private readonly shortCircuit = true
  ) {
    this.lookup = dereference(schema);
  }

  /** Validates `instance` against the schema supplied at construction. */
  public validate(instance: any) {
    return validate(
      instance,
      this.schema,
      this.draft,
      this.lookup,
      this.shortCircuit
    );
  }

  /**
   * Registers an additional schema so `$ref`s to it can resolve. When `id`
   * is supplied it overrides the schema's own `$id`.
   */
  public addSchema(schema: Schema, id?: string) {
    const toRegister = id ? { ...schema, $id: id } : schema;
    dereference(toRegister, this.lookup);
  }
}
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/fast-json-patch/LICENSE.md
(The MIT License) Copyright (c) 2013, 2014, 2020 Joachim Wester Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/fast-json-patch/index.ts
// Barrel module for the vendored fast-json-patch library: re-exports the
// core/duplex APIs under their public names and also provides a default
// export object matching the upstream package's default export shape.
export * from "./src/core.js";
export * from "./src/duplex.js";
export {
  PatchError as JsonPatchError,
  _deepClone as deepClone,
  escapePathComponent,
  unescapePathComponent,
} from "./src/helpers.js";
/**
 * Default export for backwards compat
 */
import * as core from "./src/core.js";
import {
  PatchError as JsonPatchError,
  _deepClone as deepClone,
  escapePathComponent,
  unescapePathComponent,
} from "./src/helpers.js";
export default {
  ...core,
  // ...duplex,
  JsonPatchError,
  deepClone,
  escapePathComponent,
  unescapePathComponent,
};
0
lc_public_repos/langchainjs/langchain-core/src/utils/fast-json-patch
lc_public_repos/langchainjs/langchain-core/src/utils/fast-json-patch/src/helpers.ts
// @ts-nocheck
// Inlined because of ESM import issues
// Shared low-level utilities for the vendored fast-json-patch code:
// key enumeration, deep cloning, JSON-pointer escaping, and the PatchError
// type thrown by core/duplex.
/*!
 * https://github.com/Starcounter-Jack/JSON-Patch
 * (c) 2017-2022 Joachim Wester
 * MIT licensed
 */

// Cached so it works on objects created with Object.create(null).
const _hasOwnProperty = Object.prototype.hasOwnProperty;

// Safe own-property check (does not rely on obj's prototype).
export function hasOwnProperty(obj, key) {
  return _hasOwnProperty.call(obj, key);
}

// Returns own enumerable keys; for arrays, returns the stringified indices.
export function _objectKeys(obj) {
  if (Array.isArray(obj)) {
    const keys = new Array(obj.length);
    for (let k = 0; k < keys.length; k++) {
      keys[k] = "" + k;
    }
    return keys;
  }
  if (Object.keys) {
    return Object.keys(obj);
  }
  // Legacy fallback for environments without Object.keys.
  let keys = [];
  for (let i in obj) {
    if (hasOwnProperty(obj, i)) {
      keys.push(i);
    }
  }
  return keys;
}

/**
 * Deeply clone the object.
 * Uses JSON round-tripping, so non-JSON values (functions, Dates, undefined
 * properties) follow JSON.stringify semantics.
 * https://jsperf.com/deep-copy-vs-json-stringify-json-parse/25 (recursiveDeepCopy)
 * @param {any} obj value to clone
 * @return {any} cloned obj
 */
export function _deepClone(obj) {
  switch (typeof obj) {
    case "object":
      return JSON.parse(JSON.stringify(obj)); //Faster than ES5 clone - http://jsperf.com/deep-cloning-of-objects/5
    case "undefined":
      return null; //this is how JSON.stringify behaves for array items
    default:
      return obj; //no need to clone primitives
  }
}

//3x faster than cached /^\d+$/.test(str)
// Checks every character is an ASCII digit. Note: returns true for the
// empty string (the loop body never runs) — unlike /^\d+$/.
export function isInteger(str: string): boolean {
  let i = 0;
  const len = str.length;
  let charCode;
  while (i < len) {
    charCode = str.charCodeAt(i);
    if (charCode >= 48 && charCode <= 57) {
      i++;
      continue;
    }
    return false;
  }
  return true;
}

/**
 * Escapes a json pointer path (RFC 6901: `~` -> `~0`, `/` -> `~1`).
 * @param path The raw pointer
 * @return the Escaped path
 */
export function escapePathComponent(path: string): string {
  // Fast path: nothing to escape.
  if (path.indexOf("/") === -1 && path.indexOf("~") === -1) return path;
  return path.replace(/~/g, "~0").replace(/\//g, "~1");
}

/**
 * Unescapes a json pointer path (`~1` -> `/` first, then `~0` -> `~`).
 * @param path The escaped pointer
 * @return The unescaped path
 */
export function unescapePathComponent(path: string): string {
  return path.replace(/~1/g, "/").replace(/~0/g, "~");
}

// Depth-first search for `obj` inside `root`; returns the pointer segments
// found so far, each followed by "/", or "" when not found.
export function _getPathRecursive(root: Object, obj: Object): string {
  let found;
  for (let key in root) {
    if (hasOwnProperty(root, key)) {
      if (root[key] === obj) {
        return escapePathComponent(key) + "/";
      } else if (typeof root[key] === "object") {
        found = _getPathRecursive(root[key], obj);
        if (found != "") {
          return escapePathComponent(key) + "/" + found;
        }
      }
    }
  }
  return "";
}

// Returns the JSON pointer of `obj` within `root` (identity comparison).
// Throws when `obj` is not reachable from `root`. Note the returned pointer
// carries a trailing "/" from _getPathRecursive.
export function getPath(root: Object, obj: Object): string {
  if (root === obj) {
    return "/";
  }
  const path = _getPathRecursive(root, obj);
  if (path === "") {
    throw new Error("Object not found in root");
  }
  return `/${path}`;
}

/**
 * Recursively checks whether an object has any undefined values inside.
 */
export function hasUndefined(obj: any): boolean {
  if (obj === undefined) {
    return true;
  }
  if (obj) {
    if (Array.isArray(obj)) {
      for (let i = 0, len = obj.length; i < len; i++) {
        if (hasUndefined(obj[i])) {
          return true;
        }
      }
    } else if (typeof obj === "object") {
      const objKeys = _objectKeys(obj);
      const objKeysLength = objKeys.length;
      for (var i = 0; i < objKeysLength; i++) {
        if (hasUndefined(obj[objKeys[i]])) {
          return true;
        }
      }
    }
  }
  return false;
}

// Machine-readable error codes carried by PatchError.name.
export type JsonPatchErrorName =
  | "SEQUENCE_NOT_AN_ARRAY"
  | "OPERATION_NOT_AN_OBJECT"
  | "OPERATION_OP_INVALID"
  | "OPERATION_PATH_INVALID"
  | "OPERATION_FROM_REQUIRED"
  | "OPERATION_VALUE_REQUIRED"
  | "OPERATION_VALUE_CANNOT_CONTAIN_UNDEFINED"
  | "OPERATION_PATH_CANNOT_ADD"
  | "OPERATION_PATH_UNRESOLVABLE"
  | "OPERATION_FROM_UNRESOLVABLE"
  | "OPERATION_PATH_ILLEGAL_ARRAY_INDEX"
  | "OPERATION_VALUE_OUT_OF_BOUNDS"
  | "TEST_OPERATION_FAILED";

// Builds a multi-line message: the base message plus one "key: value" line
// per defined context field.
function patchErrorMessageFormatter(message: String, args: Object): string {
  const messageParts = [message];
  for (const key in args) {
    const value =
      typeof args[key] === "object"
        ? JSON.stringify(args[key], null, 2)
        : args[key]; // pretty print
    if (typeof value !== "undefined") {
      messageParts.push(`${key}: ${value}`);
    }
  }
  return messageParts.join("\n");
}

// Error thrown by patch application/validation; carries the failing
// operation, its index in the patch sequence, and the document tree.
export class PatchError extends Error {
  constructor(
    message: string,
    public name: JsonPatchErrorName,
    public index?: number,
    public operation?: any,
    public tree?: any
  ) {
    super(
      patchErrorMessageFormatter(message, { name, index, operation, tree })
    );
    Object.setPrototypeOf(this, new.target.prototype); // restore prototype chain, see https://stackoverflow.com/a/48342359
    this.message = patchErrorMessageFormatter(message, {
      name,
      index,
      operation,
      tree,
    });
  }
}
0
lc_public_repos/langchainjs/langchain-core/src/utils/fast-json-patch
lc_public_repos/langchainjs/langchain-core/src/utils/fast-json-patch/src/duplex.ts
// @ts-nocheck
// Inlined because of ESM import issues
// Duplex half of fast-json-patch: observe an object, then later `generate`
// the patch operations describing how it changed (dirty checking against a
// deep-cloned mirror), or `compare` two trees directly.
/*!
 * https://github.com/Starcounter-Jack/JSON-Patch
 * (c) 2013-2021 Joachim Wester
 * MIT license
 */
import {
  _deepClone,
  _objectKeys,
  escapePathComponent,
  hasOwnProperty,
} from "./helpers.js";
import { applyPatch, Operation } from "./core.js";

export interface Observer<T> {
  object: T;
  patches: Operation[];
  unobserve: () => void;
  callback: (patches: Operation[]) => void;
}

// Maps each observed object to its Mirror; WeakMap so observed objects can
// still be garbage collected.
var beforeDict = new WeakMap();

// Holds the deep-cloned snapshot (`value`) that changes are diffed against,
// plus the observers registered per callback.
class Mirror {
  obj: any;

  observers: Map<Function, ObserverInfo> = new Map();

  value: Object | Array<any>;

  constructor(obj: Object) {
    this.obj = obj;
  }
}

// Pairs a callback with its observer so repeated observe() calls with the
// same callback return the existing observer.
class ObserverInfo {
  callback: Function;

  observer: ObserverInfo;

  constructor(callback: Function, observer: ObserverInfo) {
    this.callback = callback;
    this.observer = observer;
  }
}

function getMirror(obj: Object): Mirror {
  return beforeDict.get(obj);
}

function getObserverFromMirror(mirror: Mirror, callback): ObserverInfo {
  return mirror.observers.get(callback);
}

function removeObserverFromMirror(mirror: Mirror, observer): void {
  mirror.observers.delete(observer.callback);
}

/**
 * Detach an observer from an object
 */
export function unobserve<T>(root: T, observer: Observer<T>) {
  observer.unobserve();
}

/**
 * Observes changes made to an object, which can then be retrieved using generate
 */
export function observe<T>(
  obj: Object | Array<T>,
  callback?: (patches: Operation[]) => void
): Observer<T> {
  var patches = [];
  var observer;
  var mirror = getMirror(obj);
  if (!mirror) {
    mirror = new Mirror(obj);
    beforeDict.set(obj, mirror);
  } else {
    // Reuse the existing observer when the same callback is registered again.
    const observerInfo = getObserverFromMirror(mirror, callback);
    observer = observerInfo && observerInfo.observer;
  }
  if (observer) {
    return observer;
  }
  observer = {};
  // Snapshot the object so later mutations can be diffed against it.
  mirror.value = _deepClone(obj);
  if (callback) {
    observer.callback = callback;
    observer.next = null;
    var dirtyCheck = () => {
      generate(observer);
    };
    // Debounced via setTimeout: many rapid UI events collapse into one
    // dirty check on the next tick.
    var fastCheck = () => {
      clearTimeout(observer.next);
      observer.next = setTimeout(dirtyCheck);
    };
    if (typeof window !== "undefined") {
      //not Node
      window.addEventListener("mouseup", fastCheck);
      window.addEventListener("keyup", fastCheck);
      window.addEventListener("mousedown", fastCheck);
      window.addEventListener("keydown", fastCheck);
      window.addEventListener("change", fastCheck);
    }
  }
  observer.patches = patches;
  observer.object = obj;
  observer.unobserve = () => {
    // Flush any pending changes before tearing down.
    generate(observer);
    clearTimeout(observer.next);
    removeObserverFromMirror(mirror, observer);
    if (typeof window !== "undefined") {
      window.removeEventListener("mouseup", fastCheck);
      window.removeEventListener("keyup", fastCheck);
      window.removeEventListener("mousedown", fastCheck);
      window.removeEventListener("keydown", fastCheck);
      window.removeEventListener("change", fastCheck);
    }
  };
  mirror.observers.set(callback, new ObserverInfo(callback, observer));
  return observer;
}

/**
 * Generate an array of patches from an observer
 */
export function generate<T>(
  observer: Observer<Object>,
  invertible = false
): Operation[] {
  var mirror = beforeDict.get(observer.object);
  _generate(mirror.value, observer.object, observer.patches, "", invertible);
  if (observer.patches.length) {
    // Bring the mirror up to date so the next generate() starts clean.
    applyPatch(mirror.value, observer.patches);
  }
  var temp = observer.patches;
  if (temp.length > 0) {
    observer.patches = [];
    if (observer.callback) {
      observer.callback(temp);
    }
  }
  return temp;
}

// Dirty check if obj is different from mirror, generate patches and update mirror
function _generate(mirror, obj, patches, path, invertible) {
  if (obj === mirror) {
    return;
  }
  if (typeof obj.toJSON === "function") {
    obj = obj.toJSON();
  }
  var newKeys = _objectKeys(obj);
  var oldKeys = _objectKeys(mirror);
  var changed = false;
  var deleted = false;
  //if ever "move" operation is implemented here, make sure this test runs OK: "should not generate the same patch twice (move)"
  // First pass: walk the OLD keys (in reverse) to emit replace/remove ops.
  for (var t = oldKeys.length - 1; t >= 0; t--) {
    var key = oldKeys[t];
    var oldVal = mirror[key];
    if (
      hasOwnProperty(obj, key) &&
      !(
        obj[key] === undefined &&
        oldVal !== undefined &&
        Array.isArray(obj) === false
      )
    ) {
      var newVal = obj[key];
      if (
        typeof oldVal == "object" &&
        oldVal != null &&
        typeof newVal == "object" &&
        newVal != null &&
        Array.isArray(oldVal) === Array.isArray(newVal)
      ) {
        // Both sides are the same container kind: recurse for a finer diff.
        _generate(
          oldVal,
          newVal,
          patches,
          path + "/" + escapePathComponent(key),
          invertible
        );
      } else {
        if (oldVal !== newVal) {
          changed = true;
          if (invertible) {
            // "test" against the old value makes the patch reversible.
            patches.push({
              op: "test",
              path: path + "/" + escapePathComponent(key),
              value: _deepClone(oldVal),
            });
          }
          patches.push({
            op: "replace",
            path: path + "/" + escapePathComponent(key),
            value: _deepClone(newVal),
          });
        }
      }
    } else if (Array.isArray(mirror) === Array.isArray(obj)) {
      if (invertible) {
        patches.push({
          op: "test",
          path: path + "/" + escapePathComponent(key),
          value: _deepClone(oldVal),
        });
      }
      patches.push({
        op: "remove",
        path: path + "/" + escapePathComponent(key),
      });
      deleted = true; // property has been deleted
    } else {
      // Container kind changed (array <-> object): replace the whole node.
      if (invertible) {
        patches.push({ op: "test", path, value: mirror });
      }
      patches.push({ op: "replace", path, value: obj });
      changed = true;
    }
  }
  // If nothing was deleted and the key counts match, no keys were added.
  if (!deleted && newKeys.length == oldKeys.length) {
    return;
  }
  // Second pass: emit "add" ops for keys present only in the new object.
  for (var t = 0; t < newKeys.length; t++) {
    var key = newKeys[t];
    if (!hasOwnProperty(mirror, key) && obj[key] !== undefined) {
      patches.push({
        op: "add",
        path: path + "/" + escapePathComponent(key),
        value: _deepClone(obj[key]),
      });
    }
  }
}

/**
 * Create an array of patches from the differences in two objects
 */
export function compare(
  tree1: Object | Array<any>,
  tree2: Object | Array<any>,
  invertible = false
): Operation[] {
  var patches = [];
  _generate(tree1, tree2, patches, "", invertible);
  return patches;
}
0
lc_public_repos/langchainjs/langchain-core/src/utils/fast-json-patch
lc_public_repos/langchainjs/langchain-core/src/utils/fast-json-patch/src/core.ts
// @ts-nocheck
// Inlined because of ESM import issues
/*!
 * https://github.com/Starcounter-Jack/JSON-Patch
 * (c) 2013-2021 Joachim Wester
 * MIT license
 */
declare var require: any;

import {
  PatchError,
  _deepClone,
  isInteger,
  unescapePathComponent,
  hasUndefined,
} from "./helpers.js";

// Public aliases for the helper error class and deep-clone utility.
export const JsonPatchError = PatchError;
export const deepClone = _deepClone;

/**
 * Union of every JSON Patch operation shape defined by RFC 6902,
 * plus the library-internal `_get` pseudo-operation used for pointer reads.
 */
export type Operation =
  | AddOperation<any>
  | RemoveOperation
  | ReplaceOperation<any>
  | MoveOperation
  | CopyOperation
  | TestOperation<any>
  | GetOperation<any>;

/**
 * Callback used to validate a single operation against a document.
 * Implementations signal failure by throwing (typically `JsonPatchError`).
 */
export interface Validator<T> {
  (
    operation: Operation,
    index: number,
    document: T,
    existingPathFragment: string
  ): void;
}

/**
 * Result of applying one operation: the (possibly replaced) document,
 * plus the removed value (for remove/replace/move) and the `test` outcome.
 */
export interface OperationResult<T> {
  removed?: any;
  test?: boolean;
  newDocument: T;
}

/** Common base: every operation targets a JSON Pointer `path` (RFC 6901). */
export interface BaseOperation {
  path: string;
}

export interface AddOperation<T> extends BaseOperation {
  op: "add";
  value: T;
}

export interface RemoveOperation extends BaseOperation {
  op: "remove";
}

export interface ReplaceOperation<T> extends BaseOperation {
  op: "replace";
  value: T;
}

export interface MoveOperation extends BaseOperation {
  op: "move";
  from: string;
}

export interface CopyOperation extends BaseOperation {
  op: "copy";
  from: string;
}

export interface TestOperation<T> extends BaseOperation {
  op: "test";
  value: T;
}

// Internal pseudo-operation: `getValueByPointer` applies it to read a value
// out of the document; the read value is written back into `value`.
export interface GetOperation<T> extends BaseOperation {
  op: "_get";
  value: T;
}

/**
 * Array of per-operation results returned by `applyPatch`;
 * `newDocument` carries the final document after all operations.
 */
export interface PatchResult<T> extends Array<OperationResult<T>> {
  newDocument: T;
}

/* We use a Javascript hash to store each
 function. Each hash entry (property) uses
 the operation identifiers specified in rfc6902.
 In this way, we can map each patch operation
 to its dedicated function in efficient way.
 */

/* The operations applicable to an object */
// NOTE: every handler is invoked via `.call(operation, obj, key, document)`,
// so inside a handler `this` is the operation being applied (hence `this.value`,
// `this.from`, `this.path`). All handlers mutate `document` in place.
const objOps = {
  add: function (obj, key, document) {
    obj[key] = this.value;
    return { newDocument: document };
  },
  remove: function (obj, key, document) {
    var removed = obj[key];
    delete obj[key];
    return { newDocument: document, removed };
  },
  replace: function (obj, key, document) {
    var removed = obj[key];
    obj[key] = this.value;
    return { newDocument: document, removed };
  },
  move: function (obj, key, document) {
    /* in case move target overwrites an existing value,
       return the removed value, this can be taxing performance-wise,
       and is potentially unneeded */
    let removed = getValueByPointer(document, this.path);
    if (removed) {
      removed = _deepClone(removed);
    }
    // A move is implemented as remove-from-`from` followed by add-at-`path`.
    const originalValue = applyOperation(document, {
      op: "remove",
      path: this.from,
    }).removed;
    applyOperation(document, {
      op: "add",
      path: this.path,
      value: originalValue,
    });
    return { newDocument: document, removed };
  },
  copy: function (obj, key, document) {
    const valueToCopy = getValueByPointer(document, this.from);
    // enforce copy by value so further operations don't affect source (see issue #177)
    applyOperation(document, {
      op: "add",
      path: this.path,
      value: _deepClone(valueToCopy),
    });
    return { newDocument: document };
  },
  test: function (obj, key, document) {
    // Deep structural comparison; result is reported, the caller decides to throw.
    return { newDocument: document, test: _areEquals(obj[key], this.value) };
  },
  _get: function (obj, key, document) {
    // Internal read: stash the addressed value on the operation itself.
    this.value = obj[key];
    return { newDocument: document };
  },
};

/* The operations applicable to an array.
   Many are the same as for the object */
// Same calling convention as `objOps` (`this` is the operation).
var arrOps = {
  add: function (arr, i, document) {
    if (isInteger(i)) {
      arr.splice(i, 0, this.value);
    } else {
      // array props
      arr[i] = this.value;
    }
    // this may be needed when using '-' in an array
    return { newDocument: document, index: i };
  },
  remove: function (arr, i, document) {
    var removedList = arr.splice(i, 1);
    return { newDocument: document, removed: removedList[0] };
  },
  replace: function (arr, i, document) {
    var removed = arr[i];
    arr[i] = this.value;
    return { newDocument: document, removed };
  },
  // Pointer-based operations are index-agnostic; reuse the object handlers.
  move: objOps.move,
  copy: objOps.copy,
  test: objOps.test,
  _get: objOps._get,
};

/**
 * Retrieves a value from a JSON document by a JSON pointer.
 * Returns the value.
 *
 * Implemented by applying the internal `_get` pseudo-operation and reading
 * the value it stashes on the operation object.
 *
 * @param document The document to get the value from
 * @param pointer an escaped JSON pointer
 * @return The retrieved value
 */
export function getValueByPointer(document: any, pointer: string): any {
  if (pointer == "") {
    return document;
  }
  var getOriginalDestination = <GetOperation<any>>{ op: "_get", path: pointer };
  applyOperation(document, getOriginalDestination);
  return getOriginalDestination.value;
}

/**
 * Apply a single JSON Patch Operation on a JSON document.
 * Returns the {newDocument, result} of the operation.
 * It modifies the `document` and `operation` objects - it gets the values by reference.
 * If you would like to avoid touching your values, clone them:
 * `jsonpatch.applyOperation(document, jsonpatch._deepClone(operation))`.
 *
 * @param document The document to patch
 * @param operation The operation to apply
 * @param validateOperation `false` is without validation, `true` to use default jsonpatch's validation, or you can pass a `validateOperation` callback to be used for validation.
 * @param mutateDocument Whether to mutate the original document or clone it before applying
 * @param banPrototypeModifications Whether to ban modifications to `__proto__`, defaults to `true`.
 * @return `{newDocument, result}` after the operation */
export function applyOperation<T>(
  document: T,
  operation: Operation,
  validateOperation: boolean | Validator<T> = false,
  mutateDocument: boolean = true,
  banPrototypeModifications: boolean = true,
  index: number = 0
): OperationResult<T> {
  if (validateOperation) {
    if (typeof validateOperation == "function") {
      validateOperation(operation, 0, document, operation.path);
    } else {
      validator(operation, 0);
    }
  }
  /* ROOT OPERATIONS */
  // An empty path addresses the whole document, so these operations replace
  // or inspect the document itself rather than walking into it.
  if (operation.path === "") {
    let returnValue: OperationResult<T> = { newDocument: document };
    if (operation.op === "add") {
      returnValue.newDocument = operation.value;
      return returnValue;
    } else if (operation.op === "replace") {
      returnValue.newDocument = operation.value;
      returnValue.removed = document; //document we removed
      return returnValue;
    } else if (operation.op === "move" || operation.op === "copy") {
      // it's a move or copy to root
      returnValue.newDocument = getValueByPointer(document, operation.from); // get the value by json-pointer in `from` field
      if (operation.op === "move") {
        // report removed item
        returnValue.removed = document;
      }
      return returnValue;
    } else if (operation.op === "test") {
      returnValue.test = _areEquals(document, operation.value);
      if (returnValue.test === false) {
        throw new JsonPatchError(
          "Test operation failed",
          "TEST_OPERATION_FAILED",
          index,
          operation,
          document
        );
      }
      returnValue.newDocument = document;
      return returnValue;
    } else if (operation.op === "remove") {
      // a remove on root
      returnValue.removed = document;
      returnValue.newDocument = null;
      return returnValue;
    } else if (operation.op === "_get") {
      operation.value = document;
      return returnValue;
    } else {
      /* bad operation */
      if (validateOperation) {
        throw new JsonPatchError(
          "Operation `op` property is not one of operations defined in RFC-6902",
          "OPERATION_OP_INVALID",
          index,
          operation,
          document
        );
      } else {
        // Without validation, unknown ops are silently ignored.
        return returnValue;
      }
    }
  } /* END ROOT OPERATIONS */
  else {
    if (!mutateDocument) {
      document = _deepClone(document);
    }
    // Walk the JSON Pointer one segment at a time; the final segment is
    // handed to the objOps/arrOps handler for the containing object/array.
    const path = operation.path || "";
    const keys = path.split("/");
    let obj = document;
    let t = 1; //skip empty element - http://jsperf.com/to-shift-or-not-to-shift
    let len = keys.length;
    let existingPathFragment = undefined;
    let key: string | number;
    let validateFunction;
    if (typeof validateOperation == "function") {
      validateFunction = validateOperation;
    } else {
      validateFunction = validator;
    }
    while (true) {
      key = keys[t];
      if (key && key.indexOf("~") != -1) {
        // Unescape "~1" -> "/" and "~0" -> "~" (RFC 6901) lazily, only when present.
        key = unescapePathComponent(key);
      }
      if (
        banPrototypeModifications &&
        (key == "__proto__" ||
          (key == "prototype" && t > 0 && keys[t - 1] == "constructor"))
      ) {
        throw new TypeError(
          "JSON-Patch: modifying `__proto__` or `constructor/prototype` prop is banned for security reasons, if this was on purpose, please set `banPrototypeModifications` flag false and pass it to this function. More info in fast-json-patch README"
        );
      }
      if (validateOperation) {
        if (existingPathFragment === undefined) {
          if (obj[key] === undefined) {
            existingPathFragment = keys.slice(0, t).join("/");
          } else if (t == len - 1) {
            existingPathFragment = operation.path;
          }
          if (existingPathFragment !== undefined) {
            validateFunction(operation, 0, document, existingPathFragment);
          }
        }
      }
      t++;
      if (Array.isArray(obj)) {
        if (key === "-") {
          // "-" addresses the position after the last array element (append).
          key = obj.length;
        } else {
          if (validateOperation && !isInteger(key)) {
            throw new JsonPatchError(
              "Expected an unsigned base-10 integer value, making the new referenced value the array element with the zero-based index",
              "OPERATION_PATH_ILLEGAL_ARRAY_INDEX",
              index,
              operation,
              document
            );
          } // only parse key when it's an integer for `arr.prop` to work
          else if (isInteger(key)) {
            key = ~~key;
          }
        }
        if (t >= len) {
          if (validateOperation && operation.op === "add" && key > obj.length) {
            throw new JsonPatchError(
              "The specified index MUST NOT be greater than the number of elements in the array",
              "OPERATION_VALUE_OUT_OF_BOUNDS",
              index,
              operation,
              document
            );
          }
          const returnValue = arrOps[operation.op].call(
            operation,
            obj,
            key,
            document
          ); // Apply patch
          if (returnValue.test === false) {
            throw new JsonPatchError(
              "Test operation failed",
              "TEST_OPERATION_FAILED",
              index,
              operation,
              document
            );
          }
          return returnValue;
        }
      } else {
        if (t >= len) {
          const returnValue = objOps[operation.op].call(
            operation,
            obj,
            key,
            document
          ); // Apply patch
          if (returnValue.test === false) {
            throw new JsonPatchError(
              "Test operation failed",
              "TEST_OPERATION_FAILED",
              index,
              operation,
              document
            );
          }
          return returnValue;
        }
      }
      obj = obj[key];
      // If we have more keys in the path, but the next value isn't a non-null object,
      // throw an OPERATION_PATH_UNRESOLVABLE error instead of iterating again.
      if (validateOperation && t < len && (!obj || typeof obj !== "object")) {
        throw new JsonPatchError(
          "Cannot perform operation at the desired path",
          "OPERATION_PATH_UNRESOLVABLE",
          index,
          operation,
          document
        );
      }
    }
  }
}

/**
 * Apply a full JSON Patch array on a JSON document.
 * Returns the {newDocument, result} of the patch.
 * It modifies the `document` object and `patch` - it gets the values by reference.
 * If you would like to avoid touching your values, clone them:
 * `jsonpatch.applyPatch(document, jsonpatch._deepClone(patch))`.
 *
 * @param document The document to patch
 * @param patch The patch to apply
 * @param validateOperation `false` is without validation, `true` to use default jsonpatch's validation, or you can pass a `validateOperation` callback to be used for validation.
 * @param mutateDocument Whether to mutate the original document or clone it before applying
 * @param banPrototypeModifications Whether to ban modifications to `__proto__`, defaults to `true`.
 * @return An array of `{newDocument, result}` after the patch */
export function applyPatch<T>(
  document: T,
  patch: ReadonlyArray<Operation>,
  validateOperation?: boolean | Validator<T>,
  mutateDocument: boolean = true,
  banPrototypeModifications: boolean = true
): PatchResult<T> {
  if (validateOperation) {
    if (!Array.isArray(patch)) {
      throw new JsonPatchError(
        "Patch sequence must be an array",
        "SEQUENCE_NOT_AN_ARRAY"
      );
    }
  }
  if (!mutateDocument) {
    document = _deepClone(document);
  }
  const results = new Array(patch.length) as PatchResult<T>;
  // Apply operations sequentially, re-threading `document` between them so
  // a root-level replace in one operation is visible to the next.
  for (let i = 0, length = patch.length; i < length; i++) {
    // we don't need to pass mutateDocument argument because if it was true, we already deep cloned the object, we'll just pass `true`
    results[i] = applyOperation(
      document,
      patch[i],
      validateOperation,
      true,
      banPrototypeModifications,
      i
    );
    document = results[i].newDocument; // in case root was replaced
  }
  results.newDocument = document;
  return results;
}

/**
 * Apply a single JSON Patch Operation on a JSON document.
 * Returns the updated document.
 * Suitable as a reducer.
 *
 * @param document The document to patch
 * @param operation The operation to apply
 * @return The updated document
 */
export function applyReducer<T>(
  document: T,
  operation: Operation,
  index: number
): T {
  const operationResult: OperationResult<T> = applyOperation(
    document,
    operation
  );
  if (operationResult.test === false) {
    // failed test
    throw new JsonPatchError(
      "Test operation failed",
      "TEST_OPERATION_FAILED",
      index,
      operation,
      document
    );
  }
  return operationResult.newDocument;
}

/**
 * Validates a single operation. Called from `jsonpatch.validate`. Throws `JsonPatchError` in case of an error.
 * @param {object} operation - operation object (patch)
 * @param {number} index - index of operation in the sequence
 * @param {object} [document] - object where the operation is supposed to be applied
 * @param {string} [existingPathFragment] - comes along with `document`
 */
// NOTE: checks run in a fixed order (shape, op, path, from, value, then
// document-relative checks); the first failing check determines the error code.
export function validator(
  operation: Operation,
  index: number,
  document?: any,
  existingPathFragment?: string
): void {
  if (
    typeof operation !== "object" ||
    operation === null ||
    Array.isArray(operation)
  ) {
    throw new JsonPatchError(
      "Operation is not an object",
      "OPERATION_NOT_AN_OBJECT",
      index,
      operation,
      document
    );
  } else if (!objOps[operation.op]) {
    throw new JsonPatchError(
      "Operation `op` property is not one of operations defined in RFC-6902",
      "OPERATION_OP_INVALID",
      index,
      operation,
      document
    );
  } else if (typeof operation.path !== "string") {
    throw new JsonPatchError(
      "Operation `path` property is not a string",
      "OPERATION_PATH_INVALID",
      index,
      operation,
      document
    );
  } else if (operation.path.indexOf("/") !== 0 && operation.path.length > 0) {
    // paths that aren't empty string should start with "/"
    throw new JsonPatchError(
      'Operation `path` property must start with "/"',
      "OPERATION_PATH_INVALID",
      index,
      operation,
      document
    );
  } else if (
    (operation.op === "move" || operation.op === "copy") &&
    typeof operation.from !== "string"
  ) {
    throw new JsonPatchError(
      "Operation `from` property is not present (applicable in `move` and `copy` operations)",
      "OPERATION_FROM_REQUIRED",
      index,
      operation,
      document
    );
  } else if (
    (operation.op === "add" ||
      operation.op === "replace" ||
      operation.op === "test") &&
    operation.value === undefined
  ) {
    throw new JsonPatchError(
      "Operation `value` property is not present (applicable in `add`, `replace` and `test` operations)",
      "OPERATION_VALUE_REQUIRED",
      index,
      operation,
      document
    );
  } else if (
    (operation.op === "add" ||
      operation.op === "replace" ||
      operation.op === "test") &&
    hasUndefined(operation.value)
  ) {
    // Same message as above but a distinct error code: the value exists
    // yet contains `undefined` somewhere inside, which is not valid JSON.
    throw new JsonPatchError(
      "Operation `value` property is not present (applicable in `add`, `replace` and `test` operations)",
      "OPERATION_VALUE_CANNOT_CONTAIN_UNDEFINED",
      index,
      operation,
      document
    );
  } else if (document) {
    if (operation.op == "add") {
      var pathLen = operation.path.split("/").length;
      var existingPathLen = existingPathFragment.split("/").length;
      if (pathLen !== existingPathLen + 1 && pathLen !== existingPathLen) {
        throw new JsonPatchError(
          "Cannot perform an `add` operation at the desired path",
          "OPERATION_PATH_CANNOT_ADD",
          index,
          operation,
          document
        );
      }
    } else if (
      operation.op === "replace" ||
      operation.op === "remove" ||
      <any>operation.op === "_get"
    ) {
      if (operation.path !== existingPathFragment) {
        throw new JsonPatchError(
          "Cannot perform the operation at a path that does not exist",
          "OPERATION_PATH_UNRESOLVABLE",
          index,
          operation,
          document
        );
      }
    } else if (operation.op === "move" || operation.op === "copy") {
      // Probe the `from` pointer with an internal `_get` to confirm it resolves.
      var existingValue: any = {
        op: "_get",
        path: operation.from,
        value: undefined,
      };
      var error = validate([existingValue], document);
      if (error && error.name === "OPERATION_PATH_UNRESOLVABLE") {
        throw new JsonPatchError(
          "Cannot perform the operation from a path that does not exist",
          "OPERATION_FROM_UNRESOLVABLE",
          index,
          operation,
          document
        );
      }
    }
  }
}

/**
 * Validates a sequence of operations. If `document` parameter is provided, the sequence is additionally validated against the object document.
 * If error is encountered, returns a JsonPatchError object
 * @param sequence
 * @param document
 * @returns {JsonPatchError|undefined}
 */
// Returns (rather than throws) the first JsonPatchError found; any other
// exception type is considered a programming error and is re-thrown.
export function validate<T>(
  sequence: ReadonlyArray<Operation>,
  document?: T,
  externalValidator?: Validator<T>
): PatchError {
  try {
    if (!Array.isArray(sequence)) {
      throw new JsonPatchError(
        "Patch sequence must be an array",
        "SEQUENCE_NOT_AN_ARRAY"
      );
    }
    if (document) {
      //clone document and sequence so that we can safely try applying operations
      applyPatch(
        _deepClone(document),
        _deepClone(sequence),
        externalValidator || true
      );
    } else {
      // No document: run shape-only validation on each operation.
      externalValidator = externalValidator || validator;
      for (var i = 0; i < sequence.length; i++) {
        externalValidator(sequence[i], i, document, undefined);
      }
    }
  } catch (e) {
    if (e instanceof JsonPatchError) {
      return e;
    } else {
      throw e;
    }
  }
}

// based on https://github.com/epoberezkin/fast-deep-equal
// MIT License
// Copyright (c) 2017 Evgeny Poberezkin
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
/**
 * Structural deep equality for JSON-like values.
 *
 * Semantics (matching fast-deep-equal): identical references/primitives are
 * equal; NaN equals NaN; arrays compare element-wise; plain objects compare
 * by own enumerable keys; an array never equals a non-array.
 */
export function _areEquals(a: any, b: any): boolean {
  // Fast path: identical references or identical primitives.
  if (a === b) {
    return true;
  }

  const comparableObjects =
    a && b && typeof a === "object" && typeof b === "object";
  if (!comparableObjects) {
    // The only non-identical "equal" primitives are NaN vs NaN
    // (NaN is the sole value for which x !== x).
    return a !== a && b !== b;
  }

  const aIsArray = Array.isArray(a);
  const bIsArray = Array.isArray(b);
  if (aIsArray !== bIsArray) {
    return false;
  }

  if (aIsArray) {
    if (a.length !== b.length) {
      return false;
    }
    for (let i = 0; i < a.length; i++) {
      if (!_areEquals(a[i], b[i])) {
        return false;
      }
    }
    return true;
  }

  // Plain objects: same number of own keys, same key set, equal values.
  const aKeys = Object.keys(a);
  if (aKeys.length !== Object.keys(b).length) {
    return false;
  }
  for (const key of aKeys) {
    if (!Object.prototype.hasOwnProperty.call(b, key)) {
      return false;
    }
  }
  for (const key of aKeys) {
    if (!_areEquals(a[key], b[key])) {
      return false;
    }
  }
  return true;
}
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/js-sha1/LICENSE.md
Copyright 2014-2017 Chen, Yi-Cyuan Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/js-sha1/hash.ts
// @ts-nocheck // Inlined to deal with portability issues with importing crypto module /* * [js-sha1]{@link https://github.com/emn178/js-sha1} * * @version 0.6.0 * @author Chen, Yi-Cyuan [emn178@gmail.com] * @copyright Chen, Yi-Cyuan 2014-2017 * @license MIT */ /*jslint bitwise: true */ "use strict"; var root = typeof window === "object" ? window : {}; var HEX_CHARS = "0123456789abcdef".split(""); var EXTRA = [-2147483648, 8388608, 32768, 128]; var SHIFT = [24, 16, 8, 0]; var OUTPUT_TYPES = ["hex", "array", "digest", "arrayBuffer"]; var blocks = []; function Sha1(sharedMemory) { if (sharedMemory) { blocks[0] = blocks[16] = blocks[1] = blocks[2] = blocks[3] = blocks[4] = blocks[5] = blocks[6] = blocks[7] = blocks[8] = blocks[9] = blocks[10] = blocks[11] = blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0; this.blocks = blocks; } else { this.blocks = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; } this.h0 = 0x67452301; this.h1 = 0xefcdab89; this.h2 = 0x98badcfe; this.h3 = 0x10325476; this.h4 = 0xc3d2e1f0; this.block = this.start = this.bytes = this.hBytes = 0; this.finalized = this.hashed = false; this.first = true; } Sha1.prototype.update = function (message) { if (this.finalized) { return; } var notString = typeof message !== "string"; if (notString && message.constructor === root.ArrayBuffer) { message = new Uint8Array(message); } var code, index = 0, i, length = message.length || 0, blocks = this.blocks; while (index < length) { if (this.hashed) { this.hashed = false; blocks[0] = this.block; blocks[16] = blocks[1] = blocks[2] = blocks[3] = blocks[4] = blocks[5] = blocks[6] = blocks[7] = blocks[8] = blocks[9] = blocks[10] = blocks[11] = blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0; } if (notString) { for (i = this.start; index < length && i < 64; ++index) { blocks[i >> 2] |= message[index] << SHIFT[i++ & 3]; } } else { for (i = this.start; index < length && i < 64; ++index) { code = message.charCodeAt(index); if (code < 0x80) { blocks[i >> 2] 
|= code << SHIFT[i++ & 3]; } else if (code < 0x800) { blocks[i >> 2] |= (0xc0 | (code >> 6)) << SHIFT[i++ & 3]; blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3]; } else if (code < 0xd800 || code >= 0xe000) { blocks[i >> 2] |= (0xe0 | (code >> 12)) << SHIFT[i++ & 3]; blocks[i >> 2] |= (0x80 | ((code >> 6) & 0x3f)) << SHIFT[i++ & 3]; blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3]; } else { code = 0x10000 + (((code & 0x3ff) << 10) | (message.charCodeAt(++index) & 0x3ff)); blocks[i >> 2] |= (0xf0 | (code >> 18)) << SHIFT[i++ & 3]; blocks[i >> 2] |= (0x80 | ((code >> 12) & 0x3f)) << SHIFT[i++ & 3]; blocks[i >> 2] |= (0x80 | ((code >> 6) & 0x3f)) << SHIFT[i++ & 3]; blocks[i >> 2] |= (0x80 | (code & 0x3f)) << SHIFT[i++ & 3]; } } } this.lastByteIndex = i; this.bytes += i - this.start; if (i >= 64) { this.block = blocks[16]; this.start = i - 64; this.hash(); this.hashed = true; } else { this.start = i; } } if (this.bytes > 4294967295) { this.hBytes += (this.bytes / 4294967296) << 0; this.bytes = this.bytes % 4294967296; } return this; }; Sha1.prototype.finalize = function () { if (this.finalized) { return; } this.finalized = true; var blocks = this.blocks, i = this.lastByteIndex; blocks[16] = this.block; blocks[i >> 2] |= EXTRA[i & 3]; this.block = blocks[16]; if (i >= 56) { if (!this.hashed) { this.hash(); } blocks[0] = this.block; blocks[16] = blocks[1] = blocks[2] = blocks[3] = blocks[4] = blocks[5] = blocks[6] = blocks[7] = blocks[8] = blocks[9] = blocks[10] = blocks[11] = blocks[12] = blocks[13] = blocks[14] = blocks[15] = 0; } blocks[14] = (this.hBytes << 3) | (this.bytes >>> 29); blocks[15] = this.bytes << 3; this.hash(); }; Sha1.prototype.hash = function () { var a = this.h0, b = this.h1, c = this.h2, d = this.h3, e = this.h4; var f, j, t, blocks = this.blocks; for (j = 16; j < 80; ++j) { t = blocks[j - 3] ^ blocks[j - 8] ^ blocks[j - 14] ^ blocks[j - 16]; blocks[j] = (t << 1) | (t >>> 31); } for (j = 0; j < 20; j += 5) { f = (b & c) | (~b & 
d); t = (a << 5) | (a >>> 27); e = (t + f + e + 1518500249 + blocks[j]) << 0; b = (b << 30) | (b >>> 2); f = (a & b) | (~a & c); t = (e << 5) | (e >>> 27); d = (t + f + d + 1518500249 + blocks[j + 1]) << 0; a = (a << 30) | (a >>> 2); f = (e & a) | (~e & b); t = (d << 5) | (d >>> 27); c = (t + f + c + 1518500249 + blocks[j + 2]) << 0; e = (e << 30) | (e >>> 2); f = (d & e) | (~d & a); t = (c << 5) | (c >>> 27); b = (t + f + b + 1518500249 + blocks[j + 3]) << 0; d = (d << 30) | (d >>> 2); f = (c & d) | (~c & e); t = (b << 5) | (b >>> 27); a = (t + f + a + 1518500249 + blocks[j + 4]) << 0; c = (c << 30) | (c >>> 2); } for (; j < 40; j += 5) { f = b ^ c ^ d; t = (a << 5) | (a >>> 27); e = (t + f + e + 1859775393 + blocks[j]) << 0; b = (b << 30) | (b >>> 2); f = a ^ b ^ c; t = (e << 5) | (e >>> 27); d = (t + f + d + 1859775393 + blocks[j + 1]) << 0; a = (a << 30) | (a >>> 2); f = e ^ a ^ b; t = (d << 5) | (d >>> 27); c = (t + f + c + 1859775393 + blocks[j + 2]) << 0; e = (e << 30) | (e >>> 2); f = d ^ e ^ a; t = (c << 5) | (c >>> 27); b = (t + f + b + 1859775393 + blocks[j + 3]) << 0; d = (d << 30) | (d >>> 2); f = c ^ d ^ e; t = (b << 5) | (b >>> 27); a = (t + f + a + 1859775393 + blocks[j + 4]) << 0; c = (c << 30) | (c >>> 2); } for (; j < 60; j += 5) { f = (b & c) | (b & d) | (c & d); t = (a << 5) | (a >>> 27); e = (t + f + e - 1894007588 + blocks[j]) << 0; b = (b << 30) | (b >>> 2); f = (a & b) | (a & c) | (b & c); t = (e << 5) | (e >>> 27); d = (t + f + d - 1894007588 + blocks[j + 1]) << 0; a = (a << 30) | (a >>> 2); f = (e & a) | (e & b) | (a & b); t = (d << 5) | (d >>> 27); c = (t + f + c - 1894007588 + blocks[j + 2]) << 0; e = (e << 30) | (e >>> 2); f = (d & e) | (d & a) | (e & a); t = (c << 5) | (c >>> 27); b = (t + f + b - 1894007588 + blocks[j + 3]) << 0; d = (d << 30) | (d >>> 2); f = (c & d) | (c & e) | (d & e); t = (b << 5) | (b >>> 27); a = (t + f + a - 1894007588 + blocks[j + 4]) << 0; c = (c << 30) | (c >>> 2); } for (; j < 80; j += 5) { f = b ^ c ^ d; 
t = (a << 5) | (a >>> 27); e = (t + f + e - 899497514 + blocks[j]) << 0; b = (b << 30) | (b >>> 2); f = a ^ b ^ c; t = (e << 5) | (e >>> 27); d = (t + f + d - 899497514 + blocks[j + 1]) << 0; a = (a << 30) | (a >>> 2); f = e ^ a ^ b; t = (d << 5) | (d >>> 27); c = (t + f + c - 899497514 + blocks[j + 2]) << 0; e = (e << 30) | (e >>> 2); f = d ^ e ^ a; t = (c << 5) | (c >>> 27); b = (t + f + b - 899497514 + blocks[j + 3]) << 0; d = (d << 30) | (d >>> 2); f = c ^ d ^ e; t = (b << 5) | (b >>> 27); a = (t + f + a - 899497514 + blocks[j + 4]) << 0; c = (c << 30) | (c >>> 2); } this.h0 = (this.h0 + a) << 0; this.h1 = (this.h1 + b) << 0; this.h2 = (this.h2 + c) << 0; this.h3 = (this.h3 + d) << 0; this.h4 = (this.h4 + e) << 0; }; Sha1.prototype.hex = function () { this.finalize(); var h0 = this.h0, h1 = this.h1, h2 = this.h2, h3 = this.h3, h4 = this.h4; return ( HEX_CHARS[(h0 >> 28) & 0x0f] + HEX_CHARS[(h0 >> 24) & 0x0f] + HEX_CHARS[(h0 >> 20) & 0x0f] + HEX_CHARS[(h0 >> 16) & 0x0f] + HEX_CHARS[(h0 >> 12) & 0x0f] + HEX_CHARS[(h0 >> 8) & 0x0f] + HEX_CHARS[(h0 >> 4) & 0x0f] + HEX_CHARS[h0 & 0x0f] + HEX_CHARS[(h1 >> 28) & 0x0f] + HEX_CHARS[(h1 >> 24) & 0x0f] + HEX_CHARS[(h1 >> 20) & 0x0f] + HEX_CHARS[(h1 >> 16) & 0x0f] + HEX_CHARS[(h1 >> 12) & 0x0f] + HEX_CHARS[(h1 >> 8) & 0x0f] + HEX_CHARS[(h1 >> 4) & 0x0f] + HEX_CHARS[h1 & 0x0f] + HEX_CHARS[(h2 >> 28) & 0x0f] + HEX_CHARS[(h2 >> 24) & 0x0f] + HEX_CHARS[(h2 >> 20) & 0x0f] + HEX_CHARS[(h2 >> 16) & 0x0f] + HEX_CHARS[(h2 >> 12) & 0x0f] + HEX_CHARS[(h2 >> 8) & 0x0f] + HEX_CHARS[(h2 >> 4) & 0x0f] + HEX_CHARS[h2 & 0x0f] + HEX_CHARS[(h3 >> 28) & 0x0f] + HEX_CHARS[(h3 >> 24) & 0x0f] + HEX_CHARS[(h3 >> 20) & 0x0f] + HEX_CHARS[(h3 >> 16) & 0x0f] + HEX_CHARS[(h3 >> 12) & 0x0f] + HEX_CHARS[(h3 >> 8) & 0x0f] + HEX_CHARS[(h3 >> 4) & 0x0f] + HEX_CHARS[h3 & 0x0f] + HEX_CHARS[(h4 >> 28) & 0x0f] + HEX_CHARS[(h4 >> 24) & 0x0f] + HEX_CHARS[(h4 >> 20) & 0x0f] + HEX_CHARS[(h4 >> 16) & 0x0f] + HEX_CHARS[(h4 >> 12) & 0x0f] + HEX_CHARS[(h4 >> 8) & 
0x0f] + HEX_CHARS[(h4 >> 4) & 0x0f] + HEX_CHARS[h4 & 0x0f] ); }; Sha1.prototype.toString = Sha1.prototype.hex; Sha1.prototype.digest = function () { this.finalize(); var h0 = this.h0, h1 = this.h1, h2 = this.h2, h3 = this.h3, h4 = this.h4; return [ (h0 >> 24) & 0xff, (h0 >> 16) & 0xff, (h0 >> 8) & 0xff, h0 & 0xff, (h1 >> 24) & 0xff, (h1 >> 16) & 0xff, (h1 >> 8) & 0xff, h1 & 0xff, (h2 >> 24) & 0xff, (h2 >> 16) & 0xff, (h2 >> 8) & 0xff, h2 & 0xff, (h3 >> 24) & 0xff, (h3 >> 16) & 0xff, (h3 >> 8) & 0xff, h3 & 0xff, (h4 >> 24) & 0xff, (h4 >> 16) & 0xff, (h4 >> 8) & 0xff, h4 & 0xff, ]; }; Sha1.prototype.array = Sha1.prototype.digest; Sha1.prototype.arrayBuffer = function () { this.finalize(); var buffer = new ArrayBuffer(20); var dataView = new DataView(buffer); dataView.setUint32(0, this.h0); dataView.setUint32(4, this.h1); dataView.setUint32(8, this.h2); dataView.setUint32(12, this.h3); dataView.setUint32(16, this.h4); return buffer; }; export const insecureHash = (message) => { return new Sha1(true).update(message)["hex"](); };
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/sax-js/LICENSE.md
The ISC License Copyright (c) 2010-2023 Isaac Z. Schlueter and Contributors Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ==== `String.fromCodePoint` by Mathias Bynens used according to terms of MIT License, as follows: Copyright (c) 2010-2023 Mathias Bynens <https://mathiasbynens.be/> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/sax-js/sax.ts
// @ts-nocheck // Inlined to deal with portability issues // Originally from: https://github.com/isaacs/sax-js const initializeSax = function () { const sax: any = {}; sax.parser = function (strict, opt) { return new SAXParser(strict, opt); }; sax.SAXParser = SAXParser; sax.SAXStream = SAXStream; sax.createStream = createStream; // When we pass the MAX_BUFFER_LENGTH position, start checking for buffer overruns. // When we check, schedule the next check for MAX_BUFFER_LENGTH - (max(buffer lengths)), // since that's the earliest that a buffer overrun could occur. This way, checks are // as rare as required, but as often as necessary to ensure never crossing this bound. // Furthermore, buffers are only tested at most once per write(), so passing a very // large string into write() might have undesirable effects, but this is manageable by // the caller, so it is assumed to be safe. Thus, a call to write() may, in the extreme // edge case, result in creating at most one complete copy of the string passed in. // Set to Infinity to have unlimited buffers. sax.MAX_BUFFER_LENGTH = 64 * 1024; const buffers = [ "comment", "sgmlDecl", "textNode", "tagName", "doctype", "procInstName", "procInstBody", "entity", "attribName", "attribValue", "cdata", "script", ]; sax.EVENTS = [ "text", "processinginstruction", "sgmldeclaration", "doctype", "comment", "opentagstart", "attribute", "opentag", "closetag", "opencdata", "cdata", "closecdata", "error", "end", "ready", "script", "opennamespace", "closenamespace", ]; function SAXParser(strict, opt) { if (!(this instanceof SAXParser)) { return new SAXParser(strict, opt); } var parser = this; clearBuffers(parser); parser.q = parser.c = ""; parser.bufferCheckPosition = sax.MAX_BUFFER_LENGTH; parser.opt = opt || {}; parser.opt.lowercase = parser.opt.lowercase || parser.opt.lowercasetags; parser.looseCase = parser.opt.lowercase ? 
"toLowerCase" : "toUpperCase"; parser.tags = []; parser.closed = parser.closedRoot = parser.sawRoot = false; parser.tag = parser.error = null; parser.strict = !!strict; parser.noscript = !!(strict || parser.opt.noscript); parser.state = S.BEGIN; parser.strictEntities = parser.opt.strictEntities; parser.ENTITIES = parser.strictEntities ? Object.create(sax.XML_ENTITIES) : Object.create(sax.ENTITIES); parser.attribList = []; // namespaces form a prototype chain. // it always points at the current tag, // which protos to its parent tag. if (parser.opt.xmlns) { parser.ns = Object.create(rootNS); } // mostly just for error reporting parser.trackPosition = parser.opt.position !== false; if (parser.trackPosition) { parser.position = parser.line = parser.column = 0; } emit(parser, "onready"); } if (!Object.create) { Object.create = function (o) { function F() {} F.prototype = o; var newf = new F(); return newf; }; } if (!Object.keys) { Object.keys = function (o) { var a = []; for (var i in o) if (o.hasOwnProperty(i)) a.push(i); return a; }; } function checkBufferLength(parser) { var maxAllowed = Math.max(sax.MAX_BUFFER_LENGTH, 10); var maxActual = 0; for (var i = 0, l = buffers.length; i < l; i++) { var len = parser[buffers[i]].length; if (len > maxAllowed) { // Text/cdata nodes can get big, and since they're buffered, // we can get here under normal conditions. // Avoid issues by emitting the text node now, // so at least it won't get any bigger. switch (buffers[i]) { case "textNode": closeText(parser); break; case "cdata": emitNode(parser, "oncdata", parser.cdata); parser.cdata = ""; break; case "script": emitNode(parser, "onscript", parser.script); parser.script = ""; break; default: error(parser, "Max buffer length exceeded: " + buffers[i]); } } maxActual = Math.max(maxActual, len); } // schedule the next check for the earliest possible buffer overrun. 
var m = sax.MAX_BUFFER_LENGTH - maxActual; parser.bufferCheckPosition = m + parser.position; } function clearBuffers(parser) { for (var i = 0, l = buffers.length; i < l; i++) { parser[buffers[i]] = ""; } } function flushBuffers(parser) { closeText(parser); if (parser.cdata !== "") { emitNode(parser, "oncdata", parser.cdata); parser.cdata = ""; } if (parser.script !== "") { emitNode(parser, "onscript", parser.script); parser.script = ""; } } SAXParser.prototype = { end: function () { end(this); }, write: write, resume: function () { this.error = null; return this; }, close: function () { return this.write(null); }, flush: function () { flushBuffers(this); }, }; var Stream = ReadableStream; if (!Stream) Stream = function () {}; var streamWraps = sax.EVENTS.filter(function (ev) { return ev !== "error" && ev !== "end"; }); function createStream(strict, opt) { return new SAXStream(strict, opt); } function SAXStream(strict, opt) { if (!(this instanceof SAXStream)) { return new SAXStream(strict, opt); } Stream.apply(this); this._parser = new SAXParser(strict, opt); this.writable = true; this.readable = true; var me = this; this._parser.onend = function () { me.emit("end"); }; this._parser.onerror = function (er) { me.emit("error", er); // if didn't throw, then means error was handled. // go ahead and clear error, so we can write again. 
me._parser.error = null; }; this._decoder = null; streamWraps.forEach(function (ev) { Object.defineProperty(me, "on" + ev, { get: function () { return me._parser["on" + ev]; }, set: function (h) { if (!h) { me.removeAllListeners(ev); me._parser["on" + ev] = h; return h; } me.on(ev, h); }, enumerable: true, configurable: false, }); }); } SAXStream.prototype = Object.create(Stream.prototype, { constructor: { value: SAXStream, }, }); SAXStream.prototype.write = function (data) { this._parser.write(data.toString()); this.emit("data", data); return true; }; SAXStream.prototype.end = function (chunk) { if (chunk && chunk.length) { this.write(chunk); } this._parser.end(); return true; }; SAXStream.prototype.on = function (ev, handler) { var me = this; if (!me._parser["on" + ev] && streamWraps.indexOf(ev) !== -1) { me._parser["on" + ev] = function () { var args = arguments.length === 1 ? [arguments[0]] : Array.apply(null, arguments); args.splice(0, 0, ev); me.emit.apply(me, args); }; } return Stream.prototype.on.call(me, ev, handler); }; // this really needs to be replaced with character classes. // XML allows all manner of ridiculous numbers and digits. var CDATA = "[CDATA["; var DOCTYPE = "DOCTYPE"; var XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"; var XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"; var rootNS = { xml: XML_NAMESPACE, xmlns: XMLNS_NAMESPACE }; // http://www.w3.org/TR/REC-xml/#NT-NameStartChar // This implementation works on strings, a single character at a time // as such, it cannot ever support astral-plane characters (10000-EFFFF) // without a significant breaking change to either this parser, or the // JavaScript language. Implementation of an emoji-capable xml parser // is left as an exercise for the reader. 
var nameStart = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/; var nameBody = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/; var entityStart = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/; var entityBody = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/; function isWhitespace(c) { return c === " " || c === "\n" || c === "\r" || c === "\t"; } function isQuote(c) { return c === '"' || c === "'"; } function isAttribEnd(c) { return c === ">" || isWhitespace(c); } function isMatch(regex, c) { return regex.test(c); } function notMatch(regex, c) { return !isMatch(regex, c); } var S = 0; sax.STATE = { BEGIN: S++, // leading byte order mark or whitespace BEGIN_WHITESPACE: S++, // leading whitespace TEXT: S++, // general stuff TEXT_ENTITY: S++, // &amp and such. OPEN_WAKA: S++, // < SGML_DECL: S++, // <!BLARG SGML_DECL_QUOTED: S++, // <!BLARG foo "bar DOCTYPE: S++, // <!DOCTYPE DOCTYPE_QUOTED: S++, // <!DOCTYPE "//blah DOCTYPE_DTD: S++, // <!DOCTYPE "//blah" [ ... DOCTYPE_DTD_QUOTED: S++, // <!DOCTYPE "//blah" [ "foo COMMENT_STARTING: S++, // <!- COMMENT: S++, // <!-- COMMENT_ENDING: S++, // <!-- blah - COMMENT_ENDED: S++, // <!-- blah -- CDATA: S++, // <![CDATA[ something CDATA_ENDING: S++, // ] CDATA_ENDING_2: S++, // ]] PROC_INST: S++, // <?hi PROC_INST_BODY: S++, // <?hi there PROC_INST_ENDING: S++, // <?hi "there" ? 
OPEN_TAG: S++, // <strong OPEN_TAG_SLASH: S++, // <strong / ATTRIB: S++, // <a ATTRIB_NAME: S++, // <a foo ATTRIB_NAME_SAW_WHITE: S++, // <a foo _ ATTRIB_VALUE: S++, // <a foo= ATTRIB_VALUE_QUOTED: S++, // <a foo="bar ATTRIB_VALUE_CLOSED: S++, // <a foo="bar" ATTRIB_VALUE_UNQUOTED: S++, // <a foo=bar ATTRIB_VALUE_ENTITY_Q: S++, // <foo bar="&quot;" ATTRIB_VALUE_ENTITY_U: S++, // <foo bar=&quot CLOSE_TAG: S++, // </a CLOSE_TAG_SAW_WHITE: S++, // </a > SCRIPT: S++, // <script> ... SCRIPT_ENDING: S++, // <script> ... < }; sax.XML_ENTITIES = { amp: "&", gt: ">", lt: "<", quot: '"', apos: "'", }; sax.ENTITIES = { amp: "&", gt: ">", lt: "<", quot: '"', apos: "'", AElig: 198, Aacute: 193, Acirc: 194, Agrave: 192, Aring: 197, Atilde: 195, Auml: 196, Ccedil: 199, ETH: 208, Eacute: 201, Ecirc: 202, Egrave: 200, Euml: 203, Iacute: 205, Icirc: 206, Igrave: 204, Iuml: 207, Ntilde: 209, Oacute: 211, Ocirc: 212, Ograve: 210, Oslash: 216, Otilde: 213, Ouml: 214, THORN: 222, Uacute: 218, Ucirc: 219, Ugrave: 217, Uuml: 220, Yacute: 221, aacute: 225, acirc: 226, aelig: 230, agrave: 224, aring: 229, atilde: 227, auml: 228, ccedil: 231, eacute: 233, ecirc: 234, egrave: 232, eth: 240, euml: 235, iacute: 237, icirc: 238, igrave: 236, iuml: 239, ntilde: 241, oacute: 243, ocirc: 244, ograve: 242, oslash: 248, otilde: 245, ouml: 246, szlig: 223, thorn: 254, uacute: 250, ucirc: 251, ugrave: 249, uuml: 252, yacute: 253, yuml: 255, copy: 169, reg: 174, nbsp: 160, iexcl: 161, cent: 162, pound: 163, curren: 164, yen: 165, brvbar: 166, sect: 167, uml: 168, ordf: 170, laquo: 171, not: 172, shy: 173, macr: 175, deg: 176, plusmn: 177, sup1: 185, sup2: 178, sup3: 179, acute: 180, micro: 181, para: 182, middot: 183, cedil: 184, ordm: 186, raquo: 187, frac14: 188, frac12: 189, frac34: 190, iquest: 191, times: 215, divide: 247, OElig: 338, oelig: 339, Scaron: 352, scaron: 353, Yuml: 376, fnof: 402, circ: 710, tilde: 732, Alpha: 913, Beta: 914, Gamma: 915, Delta: 916, Epsilon: 917, Zeta: 918, Eta: 919, 
Theta: 920, Iota: 921, Kappa: 922, Lambda: 923, Mu: 924, Nu: 925, Xi: 926, Omicron: 927, Pi: 928, Rho: 929, Sigma: 931, Tau: 932, Upsilon: 933, Phi: 934, Chi: 935, Psi: 936, Omega: 937, alpha: 945, beta: 946, gamma: 947, delta: 948, epsilon: 949, zeta: 950, eta: 951, theta: 952, iota: 953, kappa: 954, lambda: 955, mu: 956, nu: 957, xi: 958, omicron: 959, pi: 960, rho: 961, sigmaf: 962, sigma: 963, tau: 964, upsilon: 965, phi: 966, chi: 967, psi: 968, omega: 969, thetasym: 977, upsih: 978, piv: 982, ensp: 8194, emsp: 8195, thinsp: 8201, zwnj: 8204, zwj: 8205, lrm: 8206, rlm: 8207, ndash: 8211, mdash: 8212, lsquo: 8216, rsquo: 8217, sbquo: 8218, ldquo: 8220, rdquo: 8221, bdquo: 8222, dagger: 8224, Dagger: 8225, bull: 8226, hellip: 8230, permil: 8240, prime: 8242, Prime: 8243, lsaquo: 8249, rsaquo: 8250, oline: 8254, frasl: 8260, euro: 8364, image: 8465, weierp: 8472, real: 8476, trade: 8482, alefsym: 8501, larr: 8592, uarr: 8593, rarr: 8594, darr: 8595, harr: 8596, crarr: 8629, lArr: 8656, uArr: 8657, rArr: 8658, dArr: 8659, hArr: 8660, forall: 8704, part: 8706, exist: 8707, empty: 8709, nabla: 8711, isin: 8712, notin: 8713, ni: 8715, prod: 8719, sum: 8721, minus: 8722, lowast: 8727, radic: 8730, prop: 8733, infin: 8734, ang: 8736, and: 8743, or: 8744, cap: 8745, cup: 8746, int: 8747, there4: 8756, sim: 8764, cong: 8773, asymp: 8776, ne: 8800, equiv: 8801, le: 8804, ge: 8805, sub: 8834, sup: 8835, nsub: 8836, sube: 8838, supe: 8839, oplus: 8853, otimes: 8855, perp: 8869, sdot: 8901, lceil: 8968, rceil: 8969, lfloor: 8970, rfloor: 8971, lang: 9001, rang: 9002, loz: 9674, spades: 9824, clubs: 9827, hearts: 9829, diams: 9830, }; Object.keys(sax.ENTITIES).forEach(function (key) { var e = sax.ENTITIES[key]; var s = typeof e === "number" ? 
String.fromCharCode(e) : e; sax.ENTITIES[key] = s; }); for (var s in sax.STATE) { sax.STATE[sax.STATE[s]] = s; } // shorthand S = sax.STATE; function emit(parser, event, data) { parser[event] && parser[event](data); } function emitNode(parser, nodeType, data) { if (parser.textNode) closeText(parser); emit(parser, nodeType, data); } function closeText(parser) { parser.textNode = textopts(parser.opt, parser.textNode); if (parser.textNode) emit(parser, "ontext", parser.textNode); parser.textNode = ""; } function textopts(opt, text) { if (opt.trim) text = text.trim(); if (opt.normalize) text = text.replace(/\s+/g, " "); return text; } function error(parser, er) { closeText(parser); if (parser.trackPosition) { er += "\nLine: " + parser.line + "\nColumn: " + parser.column + "\nChar: " + parser.c; } er = new Error(er); parser.error = er; emit(parser, "onerror", er); return parser; } function end(parser) { if (parser.sawRoot && !parser.closedRoot) strictFail(parser, "Unclosed root tag"); if ( parser.state !== S.BEGIN && parser.state !== S.BEGIN_WHITESPACE && parser.state !== S.TEXT ) { error(parser, "Unexpected end"); } closeText(parser); parser.c = ""; parser.closed = true; emit(parser, "onend"); SAXParser.call(parser, parser.strict, parser.opt); return parser; } function strictFail(parser, message) { if (typeof parser !== "object" || !(parser instanceof SAXParser)) { throw new Error("bad call to strictFail"); } if (parser.strict) { error(parser, message); } } function newTag(parser) { if (!parser.strict) parser.tagName = parser.tagName[parser.looseCase](); var parent = parser.tags[parser.tags.length - 1] || parser; var tag = (parser.tag = { name: parser.tagName, attributes: {} }); // will be overridden if tag contails an xmlns="foo" or xmlns:foo="bar" if (parser.opt.xmlns) { tag.ns = parent.ns; } parser.attribList.length = 0; emitNode(parser, "onopentagstart", tag); } function qname(name, attribute) { var i = name.indexOf(":"); var qualName = i < 0 ? 
["", name] : name.split(":"); var prefix = qualName[0]; var local = qualName[1]; // <x "xmlns"="http://foo"> if (attribute && name === "xmlns") { prefix = "xmlns"; local = ""; } return { prefix: prefix, local: local }; } function attrib(parser) { if (!parser.strict) { parser.attribName = parser.attribName[parser.looseCase](); } if ( parser.attribList.indexOf(parser.attribName) !== -1 || parser.tag.attributes.hasOwnProperty(parser.attribName) ) { parser.attribName = parser.attribValue = ""; return; } if (parser.opt.xmlns) { var qn = qname(parser.attribName, true); var prefix = qn.prefix; var local = qn.local; if (prefix === "xmlns") { // namespace binding attribute. push the binding into scope if (local === "xml" && parser.attribValue !== XML_NAMESPACE) { strictFail( parser, "xml: prefix must be bound to " + XML_NAMESPACE + "\n" + "Actual: " + parser.attribValue ); } else if ( local === "xmlns" && parser.attribValue !== XMLNS_NAMESPACE ) { strictFail( parser, "xmlns: prefix must be bound to " + XMLNS_NAMESPACE + "\n" + "Actual: " + parser.attribValue ); } else { var tag = parser.tag; var parent = parser.tags[parser.tags.length - 1] || parser; if (tag.ns === parent.ns) { tag.ns = Object.create(parent.ns); } tag.ns[local] = parser.attribValue; } } // defer onattribute events until all attributes have been seen // so any new bindings can take effect. 
preserve attribute order // so deferred events can be emitted in document order parser.attribList.push([parser.attribName, parser.attribValue]); } else { // in non-xmlns mode, we can emit the event right away parser.tag.attributes[parser.attribName] = parser.attribValue; emitNode(parser, "onattribute", { name: parser.attribName, value: parser.attribValue, }); } parser.attribName = parser.attribValue = ""; } function openTag(parser, selfClosing) { if (parser.opt.xmlns) { // emit namespace binding events var tag = parser.tag; // add namespace info to tag var qn = qname(parser.tagName); tag.prefix = qn.prefix; tag.local = qn.local; tag.uri = tag.ns[qn.prefix] || ""; if (tag.prefix && !tag.uri) { strictFail( parser, "Unbound namespace prefix: " + JSON.stringify(parser.tagName) ); tag.uri = qn.prefix; } var parent = parser.tags[parser.tags.length - 1] || parser; if (tag.ns && parent.ns !== tag.ns) { Object.keys(tag.ns).forEach(function (p) { emitNode(parser, "onopennamespace", { prefix: p, uri: tag.ns[p], }); }); } // handle deferred onattribute events // Note: do not apply default ns to attributes: // http://www.w3.org/TR/REC-xml-names/#defaulting for (var i = 0, l = parser.attribList.length; i < l; i++) { var nv = parser.attribList[i]; var name = nv[0]; var value = nv[1]; var qualName = qname(name, true); var prefix = qualName.prefix; var local = qualName.local; var uri = prefix === "" ? "" : tag.ns[prefix] || ""; var a = { name: name, value: value, prefix: prefix, local: local, uri: uri, }; // if there's any attributes with an undefined namespace, // then fail on them now. 
if (prefix && prefix !== "xmlns" && !uri) { strictFail( parser, "Unbound namespace prefix: " + JSON.stringify(prefix) ); a.uri = prefix; } parser.tag.attributes[name] = a; emitNode(parser, "onattribute", a); } parser.attribList.length = 0; } parser.tag.isSelfClosing = !!selfClosing; // process the tag parser.sawRoot = true; parser.tags.push(parser.tag); emitNode(parser, "onopentag", parser.tag); if (!selfClosing) { // special case for <script> in non-strict mode. if (!parser.noscript && parser.tagName.toLowerCase() === "script") { parser.state = S.SCRIPT; } else { parser.state = S.TEXT; } parser.tag = null; parser.tagName = ""; } parser.attribName = parser.attribValue = ""; parser.attribList.length = 0; } function closeTag(parser) { if (!parser.tagName) { strictFail(parser, "Weird empty close tag."); parser.textNode += "</>"; parser.state = S.TEXT; return; } if (parser.script) { if (parser.tagName !== "script") { parser.script += "</" + parser.tagName + ">"; parser.tagName = ""; parser.state = S.SCRIPT; return; } emitNode(parser, "onscript", parser.script); parser.script = ""; } // first make sure that the closing tag actually exists. // <a><b></c></b></a> will close everything, otherwise. var t = parser.tags.length; var tagName = parser.tagName; if (!parser.strict) { tagName = tagName[parser.looseCase](); } var closeTo = tagName; while (t--) { var close = parser.tags[t]; if (close.name !== closeTo) { // fail the first time in strict mode strictFail(parser, "Unexpected close tag"); } else { break; } } // didn't find it. we already failed for strict, so just abort. 
if (t < 0) { strictFail(parser, "Unmatched closing tag: " + parser.tagName); parser.textNode += "</" + parser.tagName + ">"; parser.state = S.TEXT; return; } parser.tagName = tagName; var s = parser.tags.length; while (s-- > t) { var tag = (parser.tag = parser.tags.pop()); parser.tagName = parser.tag.name; emitNode(parser, "onclosetag", parser.tagName); var x = {}; for (var i in tag.ns) { x[i] = tag.ns[i]; } var parent = parser.tags[parser.tags.length - 1] || parser; if (parser.opt.xmlns && tag.ns !== parent.ns) { // remove namespace bindings introduced by tag Object.keys(tag.ns).forEach(function (p) { var n = tag.ns[p]; emitNode(parser, "onclosenamespace", { prefix: p, uri: n }); }); } } if (t === 0) parser.closedRoot = true; parser.tagName = parser.attribValue = parser.attribName = ""; parser.attribList.length = 0; parser.state = S.TEXT; } function parseEntity(parser) { var entity = parser.entity; var entityLC = entity.toLowerCase(); var num; var numStr = ""; if (parser.ENTITIES[entity]) { return parser.ENTITIES[entity]; } if (parser.ENTITIES[entityLC]) { return parser.ENTITIES[entityLC]; } entity = entityLC; if (entity.charAt(0) === "#") { if (entity.charAt(1) === "x") { entity = entity.slice(2); num = parseInt(entity, 16); numStr = num.toString(16); } else { entity = entity.slice(1); num = parseInt(entity, 10); numStr = num.toString(10); } } entity = entity.replace(/^0+/, ""); if (isNaN(num) || numStr.toLowerCase() !== entity) { strictFail(parser, "Invalid character entity"); return "&" + parser.entity + ";"; } return String.fromCodePoint(num); } function beginWhiteSpace(parser, c) { if (c === "<") { parser.state = S.OPEN_WAKA; parser.startTagPosition = parser.position; } else if (!isWhitespace(c)) { // have to process this as a text node. // weird, but happens. 
strictFail(parser, "Non-whitespace before first tag."); parser.textNode = c; parser.state = S.TEXT; } } function charAt(chunk, i) { var result = ""; if (i < chunk.length) { result = chunk.charAt(i); } return result; } function write(chunk) { var parser = this; if (this.error) { throw this.error; } if (parser.closed) { return error( parser, "Cannot write after close. Assign an onready handler." ); } if (chunk === null) { return end(parser); } if (typeof chunk === "object") { chunk = chunk.toString(); } var i = 0; var c = ""; while (true) { c = charAt(chunk, i++); parser.c = c; if (!c) { break; } if (parser.trackPosition) { parser.position++; if (c === "\n") { parser.line++; parser.column = 0; } else { parser.column++; } } switch (parser.state) { case S.BEGIN: parser.state = S.BEGIN_WHITESPACE; if (c === "\uFEFF") { continue; } beginWhiteSpace(parser, c); continue; case S.BEGIN_WHITESPACE: beginWhiteSpace(parser, c); continue; case S.TEXT: if (parser.sawRoot && !parser.closedRoot) { var starti = i - 1; while (c && c !== "<" && c !== "&") { c = charAt(chunk, i++); if (c && parser.trackPosition) { parser.position++; if (c === "\n") { parser.line++; parser.column = 0; } else { parser.column++; } } } parser.textNode += chunk.substring(starti, i - 1); } if ( c === "<" && !(parser.sawRoot && parser.closedRoot && !parser.strict) ) { parser.state = S.OPEN_WAKA; parser.startTagPosition = parser.position; } else { if (!isWhitespace(c) && (!parser.sawRoot || parser.closedRoot)) { strictFail(parser, "Text data outside of root node."); } if (c === "&") { parser.state = S.TEXT_ENTITY; } else { parser.textNode += c; } } continue; case S.SCRIPT: // only non-strict if (c === "<") { parser.state = S.SCRIPT_ENDING; } else { parser.script += c; } continue; case S.SCRIPT_ENDING: if (c === "/") { parser.state = S.CLOSE_TAG; } else { parser.script += "<" + c; parser.state = S.SCRIPT; } continue; case S.OPEN_WAKA: // either a /, ?, !, or text is coming next. 
if (c === "!") { parser.state = S.SGML_DECL; parser.sgmlDecl = ""; } else if (isWhitespace(c)) { // wait for it... } else if (isMatch(nameStart, c)) { parser.state = S.OPEN_TAG; parser.tagName = c; } else if (c === "/") { parser.state = S.CLOSE_TAG; parser.tagName = ""; } else if (c === "?") { parser.state = S.PROC_INST; parser.procInstName = parser.procInstBody = ""; } else { strictFail(parser, "Unencoded <"); // if there was some whitespace, then add that in. if (parser.startTagPosition + 1 < parser.position) { var pad = parser.position - parser.startTagPosition; c = new Array(pad).join(" ") + c; } parser.textNode += "<" + c; parser.state = S.TEXT; } continue; case S.SGML_DECL: if ((parser.sgmlDecl + c).toUpperCase() === CDATA) { emitNode(parser, "onopencdata"); parser.state = S.CDATA; parser.sgmlDecl = ""; parser.cdata = ""; } else if (parser.sgmlDecl + c === "--") { parser.state = S.COMMENT; parser.comment = ""; parser.sgmlDecl = ""; } else if ((parser.sgmlDecl + c).toUpperCase() === DOCTYPE) { parser.state = S.DOCTYPE; if (parser.doctype || parser.sawRoot) { strictFail(parser, "Inappropriately located doctype declaration"); } parser.doctype = ""; parser.sgmlDecl = ""; } else if (c === ">") { emitNode(parser, "onsgmldeclaration", parser.sgmlDecl); parser.sgmlDecl = ""; parser.state = S.TEXT; } else if (isQuote(c)) { parser.state = S.SGML_DECL_QUOTED; parser.sgmlDecl += c; } else { parser.sgmlDecl += c; } continue; case S.SGML_DECL_QUOTED: if (c === parser.q) { parser.state = S.SGML_DECL; parser.q = ""; } parser.sgmlDecl += c; continue; case S.DOCTYPE: if (c === ">") { parser.state = S.TEXT; emitNode(parser, "ondoctype", parser.doctype); parser.doctype = true; // just remember that we saw it. 
} else { parser.doctype += c; if (c === "[") { parser.state = S.DOCTYPE_DTD; } else if (isQuote(c)) { parser.state = S.DOCTYPE_QUOTED; parser.q = c; } } continue; case S.DOCTYPE_QUOTED: parser.doctype += c; if (c === parser.q) { parser.q = ""; parser.state = S.DOCTYPE; } continue; case S.DOCTYPE_DTD: parser.doctype += c; if (c === "]") { parser.state = S.DOCTYPE; } else if (isQuote(c)) { parser.state = S.DOCTYPE_DTD_QUOTED; parser.q = c; } continue; case S.DOCTYPE_DTD_QUOTED: parser.doctype += c; if (c === parser.q) { parser.state = S.DOCTYPE_DTD; parser.q = ""; } continue; case S.COMMENT: if (c === "-") { parser.state = S.COMMENT_ENDING; } else { parser.comment += c; } continue; case S.COMMENT_ENDING: if (c === "-") { parser.state = S.COMMENT_ENDED; parser.comment = textopts(parser.opt, parser.comment); if (parser.comment) { emitNode(parser, "oncomment", parser.comment); } parser.comment = ""; } else { parser.comment += "-" + c; parser.state = S.COMMENT; } continue; case S.COMMENT_ENDED: if (c !== ">") { strictFail(parser, "Malformed comment"); // allow <!-- blah -- bloo --> in non-strict mode, // which is a comment of " blah -- bloo " parser.comment += "--" + c; parser.state = S.COMMENT; } else { parser.state = S.TEXT; } continue; case S.CDATA: if (c === "]") { parser.state = S.CDATA_ENDING; } else { parser.cdata += c; } continue; case S.CDATA_ENDING: if (c === "]") { parser.state = S.CDATA_ENDING_2; } else { parser.cdata += "]" + c; parser.state = S.CDATA; } continue; case S.CDATA_ENDING_2: if (c === ">") { if (parser.cdata) { emitNode(parser, "oncdata", parser.cdata); } emitNode(parser, "onclosecdata"); parser.cdata = ""; parser.state = S.TEXT; } else if (c === "]") { parser.cdata += "]"; } else { parser.cdata += "]]" + c; parser.state = S.CDATA; } continue; case S.PROC_INST: if (c === "?") { parser.state = S.PROC_INST_ENDING; } else if (isWhitespace(c)) { parser.state = S.PROC_INST_BODY; } else { parser.procInstName += c; } continue; case S.PROC_INST_BODY: if 
(!parser.procInstBody && isWhitespace(c)) { continue; } else if (c === "?") { parser.state = S.PROC_INST_ENDING; } else { parser.procInstBody += c; } continue; case S.PROC_INST_ENDING: if (c === ">") { emitNode(parser, "onprocessinginstruction", { name: parser.procInstName, body: parser.procInstBody, }); parser.procInstName = parser.procInstBody = ""; parser.state = S.TEXT; } else { parser.procInstBody += "?" + c; parser.state = S.PROC_INST_BODY; } continue; case S.OPEN_TAG: if (isMatch(nameBody, c)) { parser.tagName += c; } else { newTag(parser); if (c === ">") { openTag(parser); } else if (c === "/") { parser.state = S.OPEN_TAG_SLASH; } else { if (!isWhitespace(c)) { strictFail(parser, "Invalid character in tag name"); } parser.state = S.ATTRIB; } } continue; case S.OPEN_TAG_SLASH: if (c === ">") { openTag(parser, true); closeTag(parser); } else { strictFail( parser, "Forward-slash in opening tag not followed by >" ); parser.state = S.ATTRIB; } continue; case S.ATTRIB: // haven't read the attribute name yet. 
if (isWhitespace(c)) { continue; } else if (c === ">") { openTag(parser); } else if (c === "/") { parser.state = S.OPEN_TAG_SLASH; } else if (isMatch(nameStart, c)) { parser.attribName = c; parser.attribValue = ""; parser.state = S.ATTRIB_NAME; } else { strictFail(parser, "Invalid attribute name"); } continue; case S.ATTRIB_NAME: if (c === "=") { parser.state = S.ATTRIB_VALUE; } else if (c === ">") { strictFail(parser, "Attribute without value"); parser.attribValue = parser.attribName; attrib(parser); openTag(parser); } else if (isWhitespace(c)) { parser.state = S.ATTRIB_NAME_SAW_WHITE; } else if (isMatch(nameBody, c)) { parser.attribName += c; } else { strictFail(parser, "Invalid attribute name"); } continue; case S.ATTRIB_NAME_SAW_WHITE: if (c === "=") { parser.state = S.ATTRIB_VALUE; } else if (isWhitespace(c)) { continue; } else { strictFail(parser, "Attribute without value"); parser.tag.attributes[parser.attribName] = ""; parser.attribValue = ""; emitNode(parser, "onattribute", { name: parser.attribName, value: "", }); parser.attribName = ""; if (c === ">") { openTag(parser); } else if (isMatch(nameStart, c)) { parser.attribName = c; parser.state = S.ATTRIB_NAME; } else { strictFail(parser, "Invalid attribute name"); parser.state = S.ATTRIB; } } continue; case S.ATTRIB_VALUE: if (isWhitespace(c)) { continue; } else if (isQuote(c)) { parser.q = c; parser.state = S.ATTRIB_VALUE_QUOTED; } else { strictFail(parser, "Unquoted attribute value"); parser.state = S.ATTRIB_VALUE_UNQUOTED; parser.attribValue = c; } continue; case S.ATTRIB_VALUE_QUOTED: if (c !== parser.q) { if (c === "&") { parser.state = S.ATTRIB_VALUE_ENTITY_Q; } else { parser.attribValue += c; } continue; } attrib(parser); parser.q = ""; parser.state = S.ATTRIB_VALUE_CLOSED; continue; case S.ATTRIB_VALUE_CLOSED: if (isWhitespace(c)) { parser.state = S.ATTRIB; } else if (c === ">") { openTag(parser); } else if (c === "/") { parser.state = S.OPEN_TAG_SLASH; } else if (isMatch(nameStart, c)) { 
strictFail(parser, "No whitespace between attributes"); parser.attribName = c; parser.attribValue = ""; parser.state = S.ATTRIB_NAME; } else { strictFail(parser, "Invalid attribute name"); } continue; case S.ATTRIB_VALUE_UNQUOTED: if (!isAttribEnd(c)) { if (c === "&") { parser.state = S.ATTRIB_VALUE_ENTITY_U; } else { parser.attribValue += c; } continue; } attrib(parser); if (c === ">") { openTag(parser); } else { parser.state = S.ATTRIB; } continue; case S.CLOSE_TAG: if (!parser.tagName) { if (isWhitespace(c)) { continue; } else if (notMatch(nameStart, c)) { if (parser.script) { parser.script += "</" + c; parser.state = S.SCRIPT; } else { strictFail(parser, "Invalid tagname in closing tag."); } } else { parser.tagName = c; } } else if (c === ">") { closeTag(parser); } else if (isMatch(nameBody, c)) { parser.tagName += c; } else if (parser.script) { parser.script += "</" + parser.tagName; parser.tagName = ""; parser.state = S.SCRIPT; } else { if (!isWhitespace(c)) { strictFail(parser, "Invalid tagname in closing tag"); } parser.state = S.CLOSE_TAG_SAW_WHITE; } continue; case S.CLOSE_TAG_SAW_WHITE: if (isWhitespace(c)) { continue; } if (c === ">") { closeTag(parser); } else { strictFail(parser, "Invalid characters in closing tag"); } continue; case S.TEXT_ENTITY: case S.ATTRIB_VALUE_ENTITY_Q: case S.ATTRIB_VALUE_ENTITY_U: var returnState; var buffer; switch (parser.state) { case S.TEXT_ENTITY: returnState = S.TEXT; buffer = "textNode"; break; case S.ATTRIB_VALUE_ENTITY_Q: returnState = S.ATTRIB_VALUE_QUOTED; buffer = "attribValue"; break; case S.ATTRIB_VALUE_ENTITY_U: returnState = S.ATTRIB_VALUE_UNQUOTED; buffer = "attribValue"; break; } if (c === ";") { if (parser.opt.unparsedEntities) { var parsedEntity = parseEntity(parser); parser.entity = ""; parser.state = returnState; parser.write(parsedEntity); } else { parser[buffer] += parseEntity(parser); parser.entity = ""; parser.state = returnState; } } else if ( isMatch(parser.entity.length ? 
entityBody : entityStart, c) ) { parser.entity += c; } else { strictFail(parser, "Invalid character in entity name"); parser[buffer] += "&" + parser.entity + c; parser.entity = ""; parser.state = returnState; } continue; default: /* istanbul ignore next */ { throw new Error(parser, "Unknown state: " + parser.state); } } } // while if (parser.position >= parser.bufferCheckPosition) { checkBufferLength(parser); } return parser; } /*! http://mths.be/fromcodepoint v0.1.0 by @mathias */ /* istanbul ignore next */ if (!String.fromCodePoint) { (function () { var stringFromCharCode = String.fromCharCode; var floor = Math.floor; var fromCodePoint = function () { var MAX_SIZE = 0x4000; var codeUnits = []; var highSurrogate; var lowSurrogate; var index = -1; var length = arguments.length; if (!length) { return ""; } var result = ""; while (++index < length) { var codePoint = Number(arguments[index]); if ( !isFinite(codePoint) || // `NaN`, `+Infinity`, or `-Infinity` codePoint < 0 || // not a valid Unicode code point codePoint > 0x10ffff || // not a valid Unicode code point floor(codePoint) !== codePoint // not an integer ) { throw RangeError("Invalid code point: " + codePoint); } if (codePoint <= 0xffff) { // BMP code point codeUnits.push(codePoint); } else { // Astral code point; split in surrogate halves // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae codePoint -= 0x10000; highSurrogate = (codePoint >> 10) + 0xd800; lowSurrogate = (codePoint % 0x400) + 0xdc00; codeUnits.push(highSurrogate, lowSurrogate); } if (index + 1 === length || codeUnits.length > MAX_SIZE) { result += stringFromCharCode.apply(null, codeUnits); codeUnits.length = 0; } } return result; }; /* istanbul ignore next */ if (Object.defineProperty) { Object.defineProperty(String, "fromCodePoint", { value: fromCodePoint, configurable: true, writable: true, }); } else { String.fromCodePoint = fromCodePoint; } })(); } return sax; }; const sax = /** #__PURE__ */ initializeSax(); export { 
sax };
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/tests/enviroment.test.ts
import { test, expect } from "@jest/globals";
import { getRuntimeEnvironment } from "../env.js";

// Sanity check: when this suite runs under Jest on Node, the environment
// detector must report "node" as the runtime.
test("test getRuntimeEnvironment", async () => {
  const env = await getRuntimeEnvironment();
  console.log(env);
  expect(env.runtime).toEqual("node");
});
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/tests/polyfill_stream.test.ts
import "web-streams-polyfill/polyfill";
import { test, expect } from "@jest/globals";
import { FakeStreamingLLM } from "../testing/index.js";
import { StringOutputParser } from "../../output_parsers/string.js";

// Verifies that streaming survives a pipe through an output parser:
// each character of the prompt arrives as its own chunk, in order.
test("Stream the entire way through", async () => {
  const model = new FakeStreamingLLM({});
  const stream = await model.pipe(new StringOutputParser()).stream("Hi there!");
  const received: string[] = [];
  for await (const piece of stream) {
    received.push(piece);
    console.log(piece);
  }
  expect(received.length).toEqual("Hi there!".length);
  expect(received.join("")).toEqual("Hi there!");
});
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/tests/math_utils.test.ts
import { test, expect } from "@jest/globals"; import { Matrix } from "ml-matrix"; import { cosineSimilarity, euclideanDistance, innerProduct, maximalMarginalRelevance, normalize, } from "../math.js"; test("Test cosine similarity zero", async () => { const X = Matrix.rand(3, 3).to2DArray(); const Y = Matrix.zeros(3, 3).to2DArray(); const expected = Matrix.zeros(3, 3).to2DArray(); const actual = cosineSimilarity(X, Y); expect(actual).toEqual(expected); }); test("Test cosine similarity identity", async () => { const X = Matrix.rand(4, 4).to2DArray(); const actual = cosineSimilarity(X, X); // Diagonal is expected to be [1, 1, 1, 1] for (let i = 0; i < 4; i += 1) { expect(actual[i][i]).toBeCloseTo(1); } }); test("Test cosine similarity", async () => { const X = [ [1.0, 2.0, 3.0], [0.0, 1.0, 0.0], [1.0, 2.0, 0.0], ]; const Y = [ [0.5, 1.0, 1.5], [1.0, 0.0, 0.0], [2.0, 5.0, 2.0], [0.0, 0.0, 0.0], ]; const expected = [ [1, 0.2672612419124244, 0.8374357893586237, 0], [0.5345224838248488, 0, 0.8703882797784892, 0], [0.5976143046671968, 0.4472135954999579, 0.9341987329938275, 0], ]; const actual = cosineSimilarity(X, Y); expect(actual).toEqual(expected); }); test("Test cosine similarity empty", async () => { const X = [[]]; const Y = Matrix.rand(3, 3).to2DArray(); expect(cosineSimilarity(X, X)).toEqual([[]]); expect(cosineSimilarity(X, Y)).toEqual([[]]); }); test("Test cosine similarity wrong shape", async () => { const X = Matrix.rand(2, 2).to2DArray(); const Y = Matrix.rand(2, 4).to2DArray(); expect(() => cosineSimilarity(X, Y)).toThrowError(); }); test("Test cosine similarity different shape", async () => { const X = Matrix.rand(2, 2).to2DArray(); const Y = Matrix.rand(4, 2).to2DArray(); expect(() => cosineSimilarity(X, Y)).not.toThrowError(); }); test("Test maximal marginal relevance lambda zero", async () => { const queryEmbedding = Matrix.rand(5, 1).to1DArray(); const zeros = Matrix.zeros(5, 1).to1DArray(); const embeddingList = [queryEmbedding, queryEmbedding, zeros]; 
const expected = [0, 2]; const actual = maximalMarginalRelevance(queryEmbedding, embeddingList, 0, 2); expect(actual).toEqual(expected); }); test("Test maximal marginal relevance lambda one", async () => { const queryEmbedding = Matrix.rand(5, 1).to1DArray(); const zeros = Matrix.zeros(5, 1).to1DArray(); const embeddingList = [queryEmbedding, queryEmbedding, zeros]; const expected = [0, 1]; const actual = maximalMarginalRelevance(queryEmbedding, embeddingList, 1, 2); expect(actual).toEqual(expected); }); test("Test maximal marginal relevance", async () => { // Vectors that are 30, 45 and 75 degrees from query vector (cosine similarity of // 0.87, 0.71, 0.26) and the latter two are 15 and 60 degree from the first // (cosine similarity 0.97 and 0.71). So for 3rd vector be chosen, must be case that // 0.71lambda - 0.97(1 - lambda) < 0.26lambda - 0.71(1-lambda) -> lambda ~< .26 / .71 const queryEmbedding = [1, 0]; const embeddingList = [ [3 ** 0.5, 1], [1, 1], [1, 2 + 3 ** 0.5], ]; let expected = [0, 2]; let actual = maximalMarginalRelevance( queryEmbedding, embeddingList, 25 / 71, 2 ); expect(actual).toEqual(expected); expected = [0, 1]; actual = maximalMarginalRelevance(queryEmbedding, embeddingList, 27 / 71, 2); expect(actual).toEqual(expected); }); test("Test maximal marginal relevance query dim", async () => { const randomVector = Matrix.rand(5, 1); const queryEmbedding = randomVector.to1DArray(); const queryEmbedding2D = randomVector.transpose().to2DArray(); const embeddingList = Matrix.rand(4, 5).to2DArray(); const first = maximalMarginalRelevance(queryEmbedding, embeddingList, 1, 2); const second = maximalMarginalRelevance( queryEmbedding2D, embeddingList, 1, 2 ); expect(first).toEqual(second); }); test("Test maximal marginal relevance has no duplicates", async () => { const queryEmbedding = Matrix.rand(1, 1536).to1DArray(); const embeddingList = Matrix.rand(200, 1536).to2DArray(); const actual = maximalMarginalRelevance( queryEmbedding, embeddingList, 0.5, 200 
); const expected = new Set(actual).size; expect(actual).toHaveLength(expected); }); test("Test normalize", async () => { const input = [ [1, 2], [3, 4], ]; const expected = [ [0.25, 0.5], [0.75, 1], ]; const actual = normalize(input); expect(actual).toEqual(expected); }); test("Test innerProduct", async () => { const x = [ [1, 2], [5, 6], ]; const y = [ [3, 4], [7, 8], ]; const expected = [ [11, 23], [39, 83], ]; const actual = innerProduct(x, y); expect(actual).toEqual(expected); }); test("Test distance", async () => { const x = [[1, 2]]; const y = [[2, 4]]; const expected = [[2.23606797749979]]; const actual = euclideanDistance(x, y); expect(actual[0][0]).toBeCloseTo(expected[0][0]); });
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/tests/async_caller.test.ts
import { test, expect, jest } from "@jest/globals"; import { AsyncCaller } from "../async_caller.js"; test("AsyncCaller passes on arguments and returns return value", async () => { const caller = new AsyncCaller({}); const callable = jest.fn((arg1, arg2) => Promise.resolve([arg2, arg1])); const resultDirect = await callable(1, 2); const resultWrapped = await caller.call(callable, 1, 2); expect(resultDirect).toEqual([2, 1]); expect(resultWrapped).toEqual([2, 1]); }); test("AsyncCaller retries on failure", async () => { const caller = new AsyncCaller({}); // A direct call throws an error. let callable = jest .fn<() => Promise<number[]>>() .mockRejectedValueOnce("error") .mockResolvedValueOnce([2, 1]); await expect(() => callable()).rejects.toEqual("error"); // A wrapped call retries and succeeds. callable = jest .fn<() => Promise<number[]>>() .mockRejectedValueOnce("error") .mockResolvedValueOnce([2, 1]); const resultWrapped = await caller.call(callable); expect(resultWrapped).toEqual([2, 1]); expect(callable.mock.calls).toHaveLength(2); });
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/tests/function_calling.test.ts
import { z } from "zod";
import { test, expect } from "@jest/globals";
import {
  convertToOpenAIFunction,
  convertToOpenAITool,
} from "../function_calling.js";
import { FakeTool } from "../testing/index.js";

// Shared fixture: a tool whose schema exercises required fields, a described
// field, and a nested optional array-of-objects.
const makeTool = () =>
  new FakeTool({
    name: "faketesttool",
    description: "A fake test tool",
    schema: z.object({
      prop1: z.string(),
      prop2: z.number().describe("Some desc"),
      optionalProp: z.optional(
        z.array(
          z.object({
            nestedRequired: z.string(),
            nestedOptional: z.optional(z.string()),
          })
        )
      ),
    }),
  });

// The JSON Schema the zod schema above is expected to serialize to. Both
// output formats embed this same parameters object.
const expectedParameters = {
  type: "object",
  properties: {
    prop1: {
      type: "string",
    },
    prop2: {
      type: "number",
      description: "Some desc",
    },
    optionalProp: {
      type: "array",
      items: {
        type: "object",
        properties: {
          nestedRequired: {
            type: "string",
          },
          nestedOptional: {
            type: "string",
          },
        },
        required: ["nestedRequired"],
        additionalProperties: false,
      },
    },
  },
  required: ["prop1", "prop2"],
  additionalProperties: false,
  $schema: "http://json-schema.org/draft-07/schema#",
};

test("Can convert tool to OpenAI Functions format", async () => {
  const result = convertToOpenAIFunction(makeTool());
  expect(result).toEqual({
    name: "faketesttool",
    description: "A fake test tool",
    parameters: expectedParameters,
  });
});

test("Can convert tool to OpenAI Tool format", async () => {
  const result = convertToOpenAITool(makeTool());
  expect(result).toEqual({
    type: "function",
    function: {
      name: "faketesttool",
      description: "A fake test tool",
      parameters: expectedParameters,
    },
  });
});
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/testing/index.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ /* eslint-disable @typescript-eslint/no-unused-vars */ import { z } from "zod"; import { BaseCallbackConfig, CallbackManagerForLLMRun, CallbackManagerForToolRun, } from "../../callbacks/manager.js"; import { BaseChatMessageHistory, BaseListChatMessageHistory, } from "../../chat_history.js"; import { Document } from "../../documents/document.js"; import { BaseChatModel, BaseChatModelCallOptions, BaseChatModelParams, } from "../../language_models/chat_models.js"; import { BaseLLMParams, LLM } from "../../language_models/llms.js"; import { BaseMessage, AIMessage, AIMessageChunk, HumanMessage, } from "../../messages/index.js"; import { BaseOutputParser } from "../../output_parsers/base.js"; import { GenerationChunk, type ChatResult, ChatGenerationChunk, } from "../../outputs.js"; import { BaseRetriever } from "../../retrievers/index.js"; import { Runnable, RunnableLambda } from "../../runnables/base.js"; import { StructuredTool, ToolParams } from "../../tools/index.js"; import { BaseTracer, Run } from "../../tracers/base.js"; import { Embeddings, EmbeddingsInterface, EmbeddingsParams, } from "../../embeddings.js"; import { StructuredOutputMethodParams, BaseLanguageModelInput, StructuredOutputMethodOptions, } from "../../language_models/base.js"; import { VectorStore } from "../../vectorstores.js"; import { cosine } from "../ml-distance/similarities.js"; /** * Parser for comma-separated values. It splits the input text by commas * and trims the resulting values. 
*/ export class FakeSplitIntoListParser extends BaseOutputParser<string[]> { lc_namespace = ["tests", "fake"]; getFormatInstructions() { return ""; } async parse(text: string): Promise<string[]> { return text.split(",").map((value) => value.trim()); } } export class FakeRunnable extends Runnable<string, Record<string, any>> { lc_namespace = ["tests", "fake"]; returnOptions?: boolean; constructor(fields: { returnOptions?: boolean }) { super(fields); this.returnOptions = fields.returnOptions; } async invoke( input: string, options?: Partial<BaseCallbackConfig> ): Promise<Record<string, any>> { if (this.returnOptions) { return options ?? {}; } return { input }; } } export class FakeLLM extends LLM { response?: string; thrownErrorString?: string; constructor( fields: { response?: string; thrownErrorString?: string } & BaseLLMParams ) { super(fields); this.response = fields.response; this.thrownErrorString = fields.thrownErrorString; } _llmType() { return "fake"; } async _call( prompt: string, _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<string> { if (this.thrownErrorString) { throw new Error(this.thrownErrorString); } const response = this.response ?? prompt; await runManager?.handleLLMNewToken(response); return response; } } export class FakeStreamingLLM extends LLM { sleep?: number = 50; responses?: string[]; thrownErrorString?: string; constructor( fields: { sleep?: number; responses?: string[]; thrownErrorString?: string; } & BaseLLMParams ) { super(fields); this.sleep = fields.sleep ?? this.sleep; this.responses = fields.responses; this.thrownErrorString = fields.thrownErrorString; } _llmType() { return "fake"; } async _call(prompt: string): Promise<string> { if (this.thrownErrorString) { throw new Error(this.thrownErrorString); } const response = this.responses?.[0]; this.responses = this.responses?.slice(1); return response ?? 
prompt; } async *_streamResponseChunks( input: string, _options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ) { if (this.thrownErrorString) { throw new Error(this.thrownErrorString); } const response = this.responses?.[0]; this.responses = this.responses?.slice(1); for (const c of response ?? input) { await new Promise((resolve) => setTimeout(resolve, this.sleep)); yield { text: c, generationInfo: {} } as GenerationChunk; await runManager?.handleLLMNewToken(c); } } } export class FakeChatModel extends BaseChatModel { _combineLLMOutput() { return []; } _llmType(): string { return "fake"; } async _generate( messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { if (options?.stop?.length) { return { generations: [ { message: new AIMessage(options.stop[0]), text: options.stop[0], }, ], }; } const text = messages .map((m) => { if (typeof m.content === "string") { return m.content; } return JSON.stringify(m.content, null, 2); }) .join("\n"); await runManager?.handleLLMNewToken(text); return { generations: [ { message: new AIMessage(text), text, }, ], llmOutput: {}, }; } } export class FakeStreamingChatModel extends BaseChatModel { sleep?: number = 50; responses?: BaseMessage[]; thrownErrorString?: string; constructor( fields: { sleep?: number; responses?: BaseMessage[]; thrownErrorString?: string; } & BaseLLMParams ) { super(fields); this.sleep = fields.sleep ?? this.sleep; this.responses = fields.responses; this.thrownErrorString = fields.thrownErrorString; } _llmType() { return "fake"; } async _generate( messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { if (this.thrownErrorString) { throw new Error(this.thrownErrorString); } const content = this.responses?.[0].content ?? 
messages[0].content; const generation: ChatResult = { generations: [ { text: "", message: new AIMessage({ content, }), }, ], }; return generation; } async *_streamResponseChunks( messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun ): AsyncGenerator<ChatGenerationChunk> { if (this.thrownErrorString) { throw new Error(this.thrownErrorString); } const content = this.responses?.[0].content ?? messages[0].content; if (typeof content !== "string") { for (const _ of this.responses ?? messages) { yield new ChatGenerationChunk({ text: "", message: new AIMessageChunk({ content, }), }); } } else { for (const _ of this.responses ?? messages) { yield new ChatGenerationChunk({ text: content, message: new AIMessageChunk({ content, }), }); } } } } export class FakeRetriever extends BaseRetriever { lc_namespace = ["test", "fake"]; output = [ new Document({ pageContent: "foo" }), new Document({ pageContent: "bar" }), ]; constructor(fields?: { output: Document[] }) { super(); this.output = fields?.output ?? this.output; } async _getRelevantDocuments( _query: string // eslint-disable-next-line @typescript-eslint/no-explicit-any ): Promise<Document<Record<string, any>>[]> { return this.output; } } /** * Interface for the input parameters specific to the Fake List Chat model. */ export interface FakeChatInput extends BaseChatModelParams { /** Responses to return */ responses: string[]; /** Time to sleep in milliseconds between responses */ sleep?: number; emitCustomEvent?: boolean; } export interface FakeListChatModelCallOptions extends BaseChatModelCallOptions { thrownErrorString?: string; } /** * A fake Chat Model that returns a predefined list of responses. It can be used * for testing purposes. 
* @example * ```typescript * const chat = new FakeListChatModel({ * responses: ["I'll callback later.", "You 'console' them!"] * }); * * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?"); * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?"); * * // Call the chat model with a message and log the response * const firstResponse = await chat.call([firstMessage]); * console.log({ firstResponse }); * * const secondResponse = await chat.call([secondMessage]); * console.log({ secondResponse }); * ``` */ export class FakeListChatModel extends BaseChatModel<FakeListChatModelCallOptions> { static lc_name() { return "FakeListChatModel"; } lc_serializable = true; responses: string[]; i = 0; sleep?: number; emitCustomEvent = false; constructor(params: FakeChatInput) { super(params); const { responses, sleep, emitCustomEvent } = params; this.responses = responses; this.sleep = sleep; this.emitCustomEvent = emitCustomEvent ?? this.emitCustomEvent; } _combineLLMOutput() { return []; } _llmType(): string { return "fake-list"; } async _generate( _messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { await this._sleepIfRequested(); if (options?.thrownErrorString) { throw new Error(options.thrownErrorString); } if (this.emitCustomEvent) { await runManager?.handleCustomEvent("some_test_event", { someval: true, }); } if (options?.stop?.length) { return { generations: [this._formatGeneration(options.stop[0])], }; } else { const response = this._currentResponse(); this._incrementResponse(); return { generations: [this._formatGeneration(response)], llmOutput: {}, }; } } _formatGeneration(text: string) { return { message: new AIMessage(text), text, }; } async *_streamResponseChunks( _messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): AsyncGenerator<ChatGenerationChunk> { const response = 
this._currentResponse(); this._incrementResponse(); if (this.emitCustomEvent) { await runManager?.handleCustomEvent("some_test_event", { someval: true, }); } for await (const text of response) { await this._sleepIfRequested(); if (options?.thrownErrorString) { throw new Error(options.thrownErrorString); } const chunk = this._createResponseChunk(text); yield chunk; void runManager?.handleLLMNewToken(text); } } async _sleepIfRequested() { if (this.sleep !== undefined) { await this._sleep(); } } async _sleep() { return new Promise<void>((resolve) => { setTimeout(() => resolve(), this.sleep); }); } _createResponseChunk(text: string): ChatGenerationChunk { return new ChatGenerationChunk({ message: new AIMessageChunk({ content: text }), text, }); } _currentResponse() { return this.responses[this.i]; } _incrementResponse() { if (this.i < this.responses.length - 1) { this.i += 1; } else { this.i = 0; } } withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( _params: | StructuredOutputMethodParams<RunOutput, false> | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: StructuredOutputMethodOptions<false> ): Runnable<BaseLanguageModelInput, RunOutput>; withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( _params: | StructuredOutputMethodParams<RunOutput, true> | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: StructuredOutputMethodOptions<true> ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>; withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( _params: | StructuredOutputMethodParams<RunOutput, boolean> | z.ZodType<RunOutput> // 
eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, _config?: StructuredOutputMethodOptions<boolean> ): | Runnable<BaseLanguageModelInput, RunOutput> | Runnable< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput } > { return RunnableLambda.from(async (input) => { const message = await this.invoke(input); return JSON.parse(message.content as string); }) as Runnable; } } export class FakeChatMessageHistory extends BaseChatMessageHistory { lc_namespace = ["langchain_core", "message", "fake"]; messages: Array<BaseMessage> = []; constructor() { super(); } async getMessages(): Promise<BaseMessage[]> { return this.messages; } async addMessage(message: BaseMessage): Promise<void> { this.messages.push(message); } async addUserMessage(message: string): Promise<void> { this.messages.push(new HumanMessage(message)); } async addAIChatMessage(message: string): Promise<void> { this.messages.push(new AIMessage(message)); } async clear(): Promise<void> { this.messages = []; } } export class FakeListChatMessageHistory extends BaseListChatMessageHistory { lc_namespace = ["langchain_core", "message", "fake"]; messages: Array<BaseMessage> = []; constructor() { super(); } async addMessage(message: BaseMessage): Promise<void> { this.messages.push(message); } async getMessages(): Promise<BaseMessage[]> { return this.messages; } } export class FakeTracer extends BaseTracer { name = "fake_tracer"; runs: Run[] = []; constructor() { super(); } protected persistRun(run: Run): Promise<void> { this.runs.push(run); return Promise.resolve(); } } export interface FakeToolParams< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends z.ZodObject<any, any, any, any> = z.ZodObject<any, any, any, any> > extends ToolParams { name: string; description: string; schema: T; } export class FakeTool< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends z.ZodObject<any, any, any, any> = z.ZodObject<any, any, any, any> > extends 
StructuredTool<T> { name: string; description: string; schema: T; constructor(fields: FakeToolParams<T>) { super(fields); this.name = fields.name; this.description = fields.description; this.schema = fields.schema; } protected async _call( arg: z.output<T>, _runManager?: CallbackManagerForToolRun ): Promise<string> { return JSON.stringify(arg); } } /** * A class that provides fake embeddings by overriding the embedDocuments * and embedQuery methods to return fixed values. */ export class FakeEmbeddings extends Embeddings { constructor(params?: EmbeddingsParams) { super(params ?? {}); } /** * Generates fixed embeddings for a list of documents. * @param documents List of documents to generate embeddings for. * @returns A promise that resolves with a list of fixed embeddings for each document. */ embedDocuments(documents: string[]): Promise<number[][]> { return Promise.resolve(documents.map(() => [0.1, 0.2, 0.3, 0.4])); } /** * Generates a fixed embedding for a query. * @param _ The query to generate an embedding for. * @returns A promise that resolves with a fixed embedding for the query. */ embedQuery(_: string): Promise<number[]> { return Promise.resolve([0.1, 0.2, 0.3, 0.4]); } } /** * An interface that defines additional parameters specific to the * SyntheticEmbeddings class. */ interface SyntheticEmbeddingsParams extends EmbeddingsParams { vectorSize: number; } /** * A class that provides synthetic embeddings by overriding the * embedDocuments and embedQuery methods to generate embeddings based on * the input documents. The embeddings are generated by converting each * document into chunks, calculating a numerical value for each chunk, and * returning an array of these values as the embedding. */ export class SyntheticEmbeddings extends Embeddings implements SyntheticEmbeddingsParams { vectorSize: number; constructor(params?: SyntheticEmbeddingsParams) { super(params ?? {}); this.vectorSize = params?.vectorSize ?? 
4; } /** * Generates synthetic embeddings for a list of documents. * @param documents List of documents to generate embeddings for. * @returns A promise that resolves with a list of synthetic embeddings for each document. */ async embedDocuments(documents: string[]): Promise<number[][]> { return Promise.all(documents.map((doc) => this.embedQuery(doc))); } /** * Generates a synthetic embedding for a document. The document is * converted into chunks, a numerical value is calculated for each chunk, * and an array of these values is returned as the embedding. * @param document The document to generate an embedding for. * @returns A promise that resolves with a synthetic embedding for the document. */ async embedQuery(document: string): Promise<number[]> { let doc = document; // Only use the letters (and space) from the document, and make them lower case doc = doc.toLowerCase().replaceAll(/[^a-z ]/g, ""); // Pad the document to make sure it has a divisible number of chunks const padMod = doc.length % this.vectorSize; const padGapSize = padMod === 0 ? 0 : this.vectorSize - padMod; const padSize = doc.length + padGapSize; doc = doc.padEnd(padSize, " "); // Break it into chunks const chunkSize = doc.length / this.vectorSize; const docChunk = []; for (let co = 0; co < doc.length; co += chunkSize) { docChunk.push(doc.slice(co, co + chunkSize)); } // Turn each chunk into a number const ret: number[] = docChunk.map((s) => { let sum = 0; // Get a total value by adding the value of each character in the string for (let co = 0; co < s.length; co += 1) { sum += s === " " ? 0 : s.charCodeAt(co); } // Reduce this to a number between 0 and 25 inclusive // Then get the fractional number by dividing it by 26 const ret = (sum % 26) / 26; return ret; }); return ret; } } export class SingleRunExtractor extends BaseTracer { runPromiseResolver: (run: Run) => void; runPromise: Promise<Run>; /** The name of the callback handler. 
*/ name = "single_run_extractor"; constructor() { super(); this.runPromise = new Promise<Run>((extract) => { this.runPromiseResolver = extract; }); } async persistRun(run: Run) { this.runPromiseResolver(run); } async extract(): Promise<Run> { return this.runPromise; } } /** * Interface representing a vector in memory. It includes the content * (text), the corresponding embedding (vector), and any associated * metadata. */ interface MemoryVector { content: string; embedding: number[]; // eslint-disable-next-line @typescript-eslint/no-explicit-any metadata: Record<string, any>; } /** * Interface for the arguments that can be passed to the * `FakeVectorStore` constructor. It includes an optional `similarity` * function. */ export interface FakeVectorStoreArgs { similarity?: typeof cosine; } /** * Class that extends `VectorStore` to store vectors in memory. Provides * methods for adding documents, performing similarity searches, and * creating instances from texts, documents, or an existing index. */ export class FakeVectorStore extends VectorStore { declare FilterType: (doc: Document) => boolean; memoryVectors: MemoryVector[] = []; similarity: typeof cosine; _vectorstoreType(): string { return "memory"; } constructor( embeddings: EmbeddingsInterface, { similarity, ...rest }: FakeVectorStoreArgs = {} ) { super(embeddings, rest); this.similarity = similarity ?? cosine; } /** * Method to add documents to the memory vector store. It extracts the * text from each document, generates embeddings for them, and adds the * resulting vectors to the store. * @param documents Array of `Document` instances to be added to the store. * @returns Promise that resolves when all documents have been added. */ async addDocuments(documents: Document[]): Promise<void> { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents ); } /** * Method to add vectors to the memory vector store. 
It creates * `MemoryVector` instances for each vector and document pair and adds * them to the store. * @param vectors Array of vectors to be added to the store. * @param documents Array of `Document` instances corresponding to the vectors. * @returns Promise that resolves when all vectors have been added. */ async addVectors(vectors: number[][], documents: Document[]): Promise<void> { const memoryVectors = vectors.map((embedding, idx) => ({ content: documents[idx].pageContent, embedding, metadata: documents[idx].metadata, })); this.memoryVectors = this.memoryVectors.concat(memoryVectors); } /** * Method to perform a similarity search in the memory vector store. It * calculates the similarity between the query vector and each vector in * the store, sorts the results by similarity, and returns the top `k` * results along with their scores. * @param query Query vector to compare against the vectors in the store. * @param k Number of top results to return. * @param filter Optional filter function to apply to the vectors before performing the search. * @returns Promise that resolves with an array of tuples, each containing a `Document` and its similarity score. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { const filterFunction = (memoryVector: MemoryVector) => { if (!filter) { return true; } const doc = new Document({ metadata: memoryVector.metadata, pageContent: memoryVector.content, }); return filter(doc); }; const filteredMemoryVectors = this.memoryVectors.filter(filterFunction); const searches = filteredMemoryVectors .map((vector, index) => ({ similarity: this.similarity(query, vector.embedding), index, })) .sort((a, b) => (a.similarity > b.similarity ? 
-1 : 0)) .slice(0, k); const result: [Document, number][] = searches.map((search) => [ new Document({ metadata: filteredMemoryVectors[search.index].metadata, pageContent: filteredMemoryVectors[search.index].content, }), search.similarity, ]); return result; } /** * Static method to create a `FakeVectorStore` instance from an array of * texts. It creates a `Document` for each text and metadata pair, and * adds them to the store. * @param texts Array of texts to be added to the store. * @param metadatas Array or single object of metadata corresponding to the texts. * @param embeddings `Embeddings` instance used to generate embeddings for the texts. * @param dbConfig Optional `FakeVectorStoreArgs` to configure the `FakeVectorStore` instance. * @returns Promise that resolves with a new `FakeVectorStore` instance. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig?: FakeVectorStoreArgs ): Promise<FakeVectorStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return FakeVectorStore.fromDocuments(docs, embeddings, dbConfig); } /** * Static method to create a `FakeVectorStore` instance from an array of * `Document` instances. It adds the documents to the store. * @param docs Array of `Document` instances to be added to the store. * @param embeddings `Embeddings` instance used to generate embeddings for the documents. * @param dbConfig Optional `FakeVectorStoreArgs` to configure the `FakeVectorStore` instance. * @returns Promise that resolves with a new `FakeVectorStore` instance. 
*/ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig?: FakeVectorStoreArgs ): Promise<FakeVectorStore> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } /** * Static method to create a `FakeVectorStore` instance from an existing * index. It creates a new `FakeVectorStore` instance without adding any * documents or vectors. * @param embeddings `Embeddings` instance used to generate embeddings for the documents. * @param dbConfig Optional `FakeVectorStoreArgs` to configure the `FakeVectorStore` instance. * @returns Promise that resolves with a new `FakeVectorStore` instance. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig?: FakeVectorStoreArgs ): Promise<FakeVectorStore> { const instance = new this(embeddings, dbConfig); return instance; } }
0
lc_public_repos/langchainjs/langchain-core/src/utils/testing
lc_public_repos/langchainjs/langchain-core/src/utils/testing/tests/chatfake.test.ts
// Unit tests for FakeListChatModel: ordered canned responses, index reset,
// stop-sequence handling, optional sleep, and token-by-token streaming.
import { describe, test, expect, jest } from "@jest/globals";
import { HumanMessage } from "../../../messages/index.js";
import { StringOutputParser } from "../../../output_parsers/string.js";
import { FakeListChatModel } from "../index.js";

describe("Test FakeListChatLLM", () => {
  test("Should exist", async () => {
    const chat = new FakeListChatModel({ responses: ["test response"] });
    const message = new HumanMessage("test message");
    const response = await chat.invoke([message]);
    expect(typeof response.content).toBe("string");
  });

  test("Should return responses in order", async () => {
    const chat = new FakeListChatModel({
      responses: ["test response 1", "test response 2"],
    });
    const message = new HumanMessage("test message");
    const response1 = await chat.invoke([message]);
    const response2 = await chat.invoke([message]);
    expect(response1.content).toBe("test response 1");
    expect(response2.content).toBe("test response 2");
  });

  test("Should reset index when all responses have been returned", async () => {
    const chat = new FakeListChatModel({
      responses: ["test response 1", "test response 2"],
    });
    const message = new HumanMessage("test message");
    const first_response = await chat.invoke([message]);
    const second_response = await chat.invoke([message]);
    // Third call wraps around to the first canned response.
    const third_response = await chat.invoke([message]);
    expect(first_response.content).toBe("test response 1");
    expect(second_response.content).toBe("test response 2");
    expect(third_response.content).toBe("test response 1");
  });

  test("Should return stop value as response when provided", async () => {
    const chat = new FakeListChatModel({
      responses: ["test response 1", "test response 2"],
    });
    const message = new HumanMessage("test message");
    const response = await chat.invoke([message], { stop: ["stop"] });
    expect(response.content).toBe("stop");
  });

  test("Should not increment index when stop value is provided", async () => {
    const chat = new FakeListChatModel({
      responses: ["test response 1", "test response 2"],
    });
    const message = new HumanMessage("test message");
    const first_response = await chat.invoke([message], { stop: ["stop"] });
    const second_response = await chat.invoke([message]);
    expect(first_response.content).toBe("stop");
    expect(second_response.content).toBe("test response 1");
  });

  test("Should return responses after sleep if requested", async () => {
    const chat = new FakeListChatModel({
      responses: ["test response 1", "test response 2"],
      sleep: 10,
    });
    const sleepSpy = jest.spyOn(chat, "_sleep");
    const message = new HumanMessage("test message");
    await chat.invoke([message]);
    expect(sleepSpy).toHaveBeenCalledTimes(1);
  }, 30000);

  test("Should stream responses if requested", async () => {
    const chat = new FakeListChatModel({
      responses: ["test response 1", "test response 2"],
    });
    const chunks = [];
    const response = await chat
      .pipe(new StringOutputParser())
      .stream("Test message");
    for await (const chunk of response) {
      chunks.push(chunk);
    }
    // Streaming splits the response into multiple chunks that join back
    // into the original text.
    expect(chunks.length).toBeGreaterThan(1);
    expect(chunks.join("")).toBe("test response 1");
  });

  test("Should return responses in order when streaming", async () => {
    const chat = new FakeListChatModel({
      responses: ["test response 1", "test response 2"],
    });
    const chunks1 = [];
    const chunks2 = [];
    const response1 = await chat
      .pipe(new StringOutputParser())
      .stream("Test message");
    for await (const chunk of response1) {
      chunks1.push(chunk);
    }
    const response2 = await chat
      .pipe(new StringOutputParser())
      .stream("Test message");
    for await (const chunk of response2) {
      chunks2.push(chunk);
    }
    expect(chunks1.join("")).toBe("test response 1");
    expect(chunks2.join("")).toBe("test response 2");
  });

  test("Should stream responses after sleep if requested", async () => {
    const chat = new FakeListChatModel({
      responses: ["test response 1", "test response 2"],
      sleep: 10,
    });
    const sleepSpy = jest.spyOn(chat, "_sleep");
    const chunks = [];
    const response = await chat
      .pipe(new StringOutputParser())
      .stream("Test message");
    for await (const chunk of response) {
      chunks.push(chunk);
    }
    // One sleep per emitted chunk.
    expect(sleepSpy).toHaveBeenCalledTimes(chunks.length);
  }, 30000);
});
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/ml-distance/LICENSE
The MIT License (MIT) Copyright (c) 2014 ml.js Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/ml-distance/similarities.ts
/** * Returns the average of cosine distances between vectors a and b * @param a - first vector * @param b - second vector * */ export function cosine(a: number[], b: number[]): number { let p = 0; let p2 = 0; let q2 = 0; for (let i = 0; i < a.length; i++) { p += a[i] * b[i]; p2 += a[i] * a[i]; q2 += b[i] * b[i]; } return p / (Math.sqrt(p2) * Math.sqrt(q2)); }
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/ml-distance/distances.ts
/** *Returns the Inner Product similarity between vectors a and b * @link [Inner Product Similarity algorithm](https://www.naun.org/main/NAUN/ijmmas/mmmas-49.pdf) * @param a - first vector * @param b - second vector * */ export function innerProduct(a: number[], b: number[]): number { let ans = 0; for (let i = 0; i < a.length; i++) { ans += a[i] * b[i]; } return ans; }
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/ml-distance-euclidean/LICENSE
The MIT License (MIT) Copyright (c) 2015 ml.js Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/langchain-core/src/utils
lc_public_repos/langchainjs/langchain-core/src/utils/ml-distance-euclidean/euclidean.ts
export function squaredEuclidean(p: number[], q: number[]) { let d = 0; for (let i = 0; i < p.length; i++) { d += (p[i] - q[i]) * (p[i] - q[i]); } return d; } export function euclidean(p: number[], q: number[]) { return Math.sqrt(squaredEuclidean(p, q)); }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/singletons/tracer.ts
import { Client } from "langsmith"; import { getEnvironmentVariable } from "../utils/env.js"; let client: Client; export const getDefaultLangChainClientSingleton = () => { if (client === undefined) { const clientParams = getEnvironmentVariable("LANGCHAIN_CALLBACKS_BACKGROUND") === "false" ? { // LangSmith has its own backgrounding system blockOnRootRunFinalization: true, } : {}; client = new Client(clientParams); } return client; }; export const setDefaultLangChainClientSingleton = (newClient: Client) => { client = newClient; };
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/singletons/callbacks.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ import PQueueMod from "p-queue"; import { getGlobalAsyncLocalStorageInstance } from "./async_local_storage/globals.js"; let queue: typeof import("p-queue")["default"]["prototype"]; /** * Creates a queue using the p-queue library. The queue is configured to * auto-start and has a concurrency of 1, meaning it will process tasks * one at a time. */ function createQueue() { const PQueue: any = "default" in PQueueMod ? PQueueMod.default : PQueueMod; return new PQueue({ autoStart: true, concurrency: 1, }); } export function getQueue() { if (typeof queue === "undefined") { queue = createQueue(); } return queue; } /** * Consume a promise, either adding it to the queue or waiting for it to resolve * @param promiseFn Promise to consume * @param wait Whether to wait for the promise to resolve or resolve immediately */ export async function consumeCallback<T>( promiseFn: () => Promise<T> | T | void, wait: boolean ): Promise<void> { if (wait === true) { // Clear config since callbacks are not part of the root run // Avoid using global singleton due to circuluar dependency issues if (getGlobalAsyncLocalStorageInstance() !== undefined) { await getGlobalAsyncLocalStorageInstance().run(undefined, async () => promiseFn() ); } else { await promiseFn(); } } else { queue = getQueue(); void queue.add(async () => { if (getGlobalAsyncLocalStorageInstance() !== undefined) { await getGlobalAsyncLocalStorageInstance().run(undefined, async () => promiseFn() ); } else { await promiseFn(); } }); } } /** * Waits for all promises in the queue to resolve. If the queue is * undefined, it immediately resolves a promise. */ export function awaitAllCallbacks(): Promise<void> { return typeof queue !== "undefined" ? queue.onIdle() : Promise.resolve(); }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/singletons/index.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ import { type AsyncLocalStorageInterface, AsyncLocalStorageProviderSingleton, _CONTEXT_VARIABLES_KEY, MockAsyncLocalStorage, } from "./async_local_storage/index.js"; export { type AsyncLocalStorageInterface, AsyncLocalStorageProviderSingleton, _CONTEXT_VARIABLES_KEY, MockAsyncLocalStorage, };
0
lc_public_repos/langchainjs/langchain-core/src/singletons
lc_public_repos/langchainjs/langchain-core/src/singletons/async_local_storage/globals.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ export interface AsyncLocalStorageInterface { getStore: () => any | undefined; run: <T>(store: any, callback: () => T) => T; enterWith: (store: any) => void; } export const TRACING_ALS_KEY = Symbol.for("ls:tracing_async_local_storage"); export const setGlobalAsyncLocalStorageInstance = ( instance: AsyncLocalStorageInterface ) => { (globalThis as any)[TRACING_ALS_KEY] = instance; }; export const getGlobalAsyncLocalStorageInstance = () => { return (globalThis as any)[TRACING_ALS_KEY]; };
0
lc_public_repos/langchainjs/langchain-core/src/singletons
lc_public_repos/langchainjs/langchain-core/src/singletons/async_local_storage/index.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
import { RunTree } from "langsmith";
import {
  AsyncLocalStorageInterface,
  getGlobalAsyncLocalStorageInstance,
  setGlobalAsyncLocalStorageInstance,
} from "./globals.js";
import { CallbackManager } from "../../callbacks/manager.js";
import { LangChainTracer } from "../../tracers/tracer_langchain.js";

/**
 * No-op AsyncLocalStorage used until a real implementation is installed.
 * `run` simply invokes the callback without establishing any context.
 */
export class MockAsyncLocalStorage implements AsyncLocalStorageInterface {
  getStore(): any {
    return undefined;
  }

  run<T>(_store: any, callback: () => T): T {
    return callback();
  }

  enterWith(_store: any) {
    return undefined;
  }
}

const mockAsyncLocalStorage = new MockAsyncLocalStorage();

// Key under which the active runnable config is stashed on the run tree.
const LC_CHILD_KEY = Symbol.for("lc:child_config");

export const _CONTEXT_VARIABLES_KEY = Symbol.for("lc:context_variables");

class AsyncLocalStorageProvider {
  /** Returns the installed global storage, or the no-op mock. */
  getInstance(): AsyncLocalStorageInterface {
    return getGlobalAsyncLocalStorageInstance() ?? mockAsyncLocalStorage;
  }

  /** Reads the runnable config stored on the current run tree, if any. */
  getRunnableConfig() {
    const storage = this.getInstance();
    // this has the runnable config
    // which means that we should also have an instance of a LangChainTracer
    // with the run map prepopulated
    return storage.getStore()?.extra?.[LC_CHILD_KEY];
  }

  /**
   * Runs `callback` with `config` made available through async-local storage,
   * attaching it to an existing trace when a LangChainTracer with a parent
   * run is present, otherwise to a fresh non-tracing placeholder root.
   */
  runWithConfig<T>(
    config: any,
    callback: () => T,
    avoidCreatingRootRunTree?: boolean
  ): T {
    const callbackManager = CallbackManager._configureSync(
      config?.callbacks,
      undefined,
      config?.tags,
      undefined,
      config?.metadata
    );
    const storage = this.getInstance();
    const previousValue = storage.getStore();
    const parentRunId = callbackManager?.getParentRunId();

    const langChainTracer = callbackManager?.handlers?.find(
      (handler) => handler?.name === "langchain_tracer"
    ) as LangChainTracer | undefined;

    let runTree;
    if (langChainTracer && parentRunId) {
      // Continue the existing trace under the parent run.
      runTree = langChainTracer.convertToRunTree(parentRunId);
    } else if (!avoidCreatingRootRunTree) {
      // No active trace: create a placeholder root with tracing disabled.
      runTree = new RunTree({
        name: "<runnable_lambda>",
        tracingEnabled: false,
      });
    }
    if (runTree) {
      runTree.extra = { ...runTree.extra, [LC_CHILD_KEY]: config };
    }
    if (
      previousValue !== undefined &&
      previousValue[_CONTEXT_VARIABLES_KEY] !== undefined
    ) {
      // Propagate user context variables from the enclosing scope.
      (runTree as any)[_CONTEXT_VARIABLES_KEY] =
        previousValue[_CONTEXT_VARIABLES_KEY];
    }
    return storage.run(runTree, callback);
  }

  /** Installs `instance` globally; first caller wins, later calls no-op. */
  initializeGlobalInstance(instance: AsyncLocalStorageInterface) {
    if (getGlobalAsyncLocalStorageInstance() === undefined) {
      setGlobalAsyncLocalStorageInstance(instance);
    }
  }
}

const AsyncLocalStorageProviderSingleton = new AsyncLocalStorageProvider();

export { AsyncLocalStorageProviderSingleton, type AsyncLocalStorageInterface };
0
lc_public_repos/langchainjs/langchain-core/src/singletons
lc_public_repos/langchainjs/langchain-core/src/singletons/tests/async_local_storage.test.ts
// Tests that installing a global AsyncLocalStorage makes runnable configs
// (tags, configurable values, callbacks) propagate into nested runnables
// and stream-event pipelines.
import { test, expect } from "@jest/globals";
import { v4 } from "uuid";
import { AsyncLocalStorage } from "node:async_hooks";
import { AsyncLocalStorageProviderSingleton } from "../index.js";
import { RunnableLambda } from "../../runnables/base.js";
import { FakeListChatModel } from "../../utils/testing/index.js";
import { getCallbackManagerForConfig } from "../../runnables/config.js";
import { BaseCallbackHandler } from "../../callbacks/base.js";

class FakeCallbackHandler extends BaseCallbackHandler {
  name = `fake-${v4()}`;
}

test("Config should be automatically populated after setting global async local storage", async () => {
  const inner = RunnableLambda.from((_, config) => config);
  const outer = RunnableLambda.from(async (input) => {
    const res = await inner.invoke(input);
    return res;
  });

  // Before the global instance is installed, nested calls see no tags.
  const res1 = await outer.invoke(
    { hi: true },
    {
      configurable: {
        sampleKey: "sampleValue",
      },
      tags: ["tester"],
    }
  );
  expect(res1?.tags).toEqual([]);

  AsyncLocalStorageProviderSingleton.initializeGlobalInstance(
    new AsyncLocalStorage()
  );

  // After installation, config propagates into the nested runnable.
  const res2 = await outer.invoke(
    { hi: true },
    {
      configurable: {
        sampleKey: "sampleValue",
      },
      tags: ["tester"],
    }
  );
  expect(res2?.tags).toEqual(["tester"]);

  const stream = await outer.stream(
    { hi2: true },
    {
      configurable: {
        sampleKey: "sampleValue",
      },
      tags: ["stream_tester"],
    }
  );
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  expect(chunks.length).toEqual(1);
  expect(chunks[0]).toEqual(
    expect.objectContaining({
      configurable: {
        sampleKey: "sampleValue",
      },
      tags: ["stream_tester"],
    })
  );

  // Returning a runnable from a lambda should also inherit the config.
  const outer2 = RunnableLambda.from(async () => inner);

  const res3 = await outer2.invoke(
    {},
    {
      configurable: {
        sampleKey: "sampleValue",
      },
      tags: ["test_recursive"],
    }
  );
  expect(res3?.tags).toEqual(["test_recursive"]);

  const stream2 = await outer2.stream(
    {},
    {
      configurable: {
        sampleKey: "sampleValue",
      },
      tags: ["stream_test_recursive"],
    }
  );
  const chunks2 = [];
  for await (const chunk of stream2) {
    chunks2.push(chunk);
  }
  expect(chunks2.length).toEqual(1);
  expect(chunks2[0]).toEqual(
    expect.objectContaining({
      configurable: {
        sampleKey: "sampleValue",
      },
      tags: ["stream_test_recursive"],
    })
  );

  const inner2 = RunnableLambda.from((_, config) => config).withConfig({
    runName: "inner_test_run",
  });
  const outer3 = RunnableLambda.from(async (input) => {
    const res = await inner2.invoke(input);
    return res;
  });

  const res4 = await outer3.invoke(
    { hi: true },
    {
      configurable: {
        sampleKey: "sampleValue",
      },
      tags: ["tester_with_config"],
    }
  );
  expect(res4?.tags).toEqual(["tester_with_config"]);

  const chatModel = new FakeListChatModel({ responses: ["test"] });
  const outer4 = RunnableLambda.from(async () => {
    const res = await chatModel.invoke("hey");
    return res;
  });

  const eventStream = await outer4.streamEvents(
    { hi: true },
    { version: "v1" }
  );
  const events = [];
  for await (const event of eventStream) {
    events.push(event);
  }
  expect(
    events.filter((event) => event.event === "on_llm_start").length
  ).toEqual(1);
});

test("Runnable streamEvents method with streaming nested in a RunnableLambda", async () => {
  AsyncLocalStorageProviderSingleton.initializeGlobalInstance(
    new AsyncLocalStorage()
  );
  const chat = new FakeListChatModel({
    responses: ["Hello"],
  });
  const outerRunId = v4();
  const innerRunId = v4();
  const innerRunId2 = v4();
  const dummyHandler = new FakeCallbackHandler();
  const myFunc = async (input: string) => {
    const outerCallbackManager = await getCallbackManagerForConfig(
      AsyncLocalStorageProviderSingleton.getRunnableConfig()
    );
    expect(outerCallbackManager?.getParentRunId()).toEqual(outerRunId);

    // Passing `callbacks: []` overrides inherited handlers (tracer aside).
    const nestedLambdaWithOverriddenCallbacks = RunnableLambda.from(
      async (_: string, config) => {
        expect(
          config?.callbacks?.handlers.filter(
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            (handler: any) => handler.name !== "langchain_tracer"
          )
        ).toEqual([]);
      }
    );
    await nestedLambdaWithOverriddenCallbacks.invoke(input, {
      runId: innerRunId,
      callbacks: [],
    });

    // Without overrides, the dummy handler is inherited from the outer call.
    const nestedLambdaWithoutOverriddenCallbacks = RunnableLambda.from(
      async (_: string, config) => {
        const innerCallbackManager = await getCallbackManagerForConfig(
          AsyncLocalStorageProviderSingleton.getRunnableConfig()
        );
        expect(innerCallbackManager?.getParentRunId()).toEqual(innerRunId2);
        expect(config?.callbacks?.handlers).toContain(dummyHandler);
      }
    );
    await nestedLambdaWithoutOverriddenCallbacks.invoke(input, {
      runId: innerRunId2,
    });

    for await (const _ of await chat.stream(input)) {
      // no-op
    }
  };

  const myNestedLambda = RunnableLambda.from(myFunc);

  const events = [];
  for await (const event of myNestedLambda.streamEvents("hello", {
    version: "v1",
    runId: outerRunId,
    callbacks: [dummyHandler],
  })) {
    events.push(event);
  }
  const chatModelStreamEvent = events.find((event) => {
    return event.event === "on_llm_stream";
  });
  expect(chatModelStreamEvent).toBeDefined();
});
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/document_loaders/langsmith.ts
import { KVMap } from "langsmith/schemas";
import { Client } from "langsmith";
import { Document, DocumentInterface } from "../documents/document.js";
import { AsyncCallerParams } from "../utils/async_caller.js";
import { BaseDocumentLoader } from "./base.js";

// TODO: Replace with import from `langsmith` once exposed.
interface ClientConfig {
  apiUrl?: string;
  apiKey?: string;
  callerOptions?: AsyncCallerParams;
  timeout_ms?: number;
  webUrl?: string;
  anonymizer?: (values: KVMap) => KVMap;
  hideInputs?: boolean | ((inputs: KVMap) => KVMap);
  hideOutputs?: boolean | ((outputs: KVMap) => KVMap);
  autoBatchTracing?: boolean;
  pendingAutoBatchedRunLimit?: number;
  fetchOptions?: RequestInit;
}

/** Constructor options for {@link LangSmithLoader}. */
export interface LangSmithLoaderFields {
  datasetId?: string;
  datasetName?: string;
  exampleIds?: Array<string>;
  asOf?: Date | string;
  splits?: string[];
  inlineS3Urls?: boolean;
  offset?: number;
  limit?: number;
  metadata?: KVMap;
  filter?: string;
  // Dotted path into `example.inputs` selecting the page content.
  contentKey?: string;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  formatContent?: (content: any) => string;
  client?: Client;
  clientConfig?: ClientConfig;
}

/**
 * Document loader integration with LangSmith.
 *
 * ## [Constructor args](https://api.js.langchain.com/interfaces/_langchain_core.document_loaders_langsmith.LangSmithLoaderFields.html)
 *
 * <details open>
 * <summary><strong>Load</strong></summary>
 *
 * ```typescript
 * import { LangSmithLoader } from '@langchain/core/document_loaders/langsmith';
 * import { Client } from 'langsmith';
 *
 * const langSmithClient = new Client({
 *   apiKey: process.env.LANGSMITH_API_KEY,
 * })
 *
 * const loader = new LangSmithLoader({
 *   datasetId: "9a3b36f7-b308-40a5-9b46-6613853b6330",
 *   limit: 1,
 * });
 *
 * const docs = await loader.load();
 * ```
 *
 * ```txt
 * [
 *   {
 *     pageContent: '{\n  "input_key_str": "string",\n  "input_key_bool": true\n}',
 *     metadata: {
 *       id: '8523d9e9-c123-4b23-9b46-21021nds289e',
 *       created_at: '2024-08-19T17:09:14.806441+00:00',
 *       modified_at: '2024-08-19T17:09:14.806441+00:00',
 *       name: '#8517 @ brace-test-dataset',
 *       dataset_id: '9a3b36f7-b308-40a5-9b46-6613853b6330',
 *       source_run_id: null,
 *       metadata: [Object],
 *       inputs: [Object],
 *       outputs: [Object]
 *     }
 *   }
 * ]
 * ```
 * </details>
 */
export class LangSmithLoader extends BaseDocumentLoader {
  datasetId?: string;

  datasetName?: string;

  exampleIds?: Array<string>;

  asOf?: Date | string;

  splits?: string[];

  inlineS3Urls?: boolean;

  offset?: number;

  limit?: number;

  metadata?: KVMap;

  filter?: string;

  // Parsed from the dotted `contentKey` option; empty means "whole inputs".
  contentKey: string[];

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  formatContent: (content: any) => string;

  client: Client;

  /**
   * @param fields Loader configuration. Exactly one of `client` /
   *   `clientConfig` may be supplied; providing both throws.
   */
  constructor(fields: LangSmithLoaderFields) {
    super();

    if (fields.client && fields.clientConfig) {
      throw new Error("client and clientConfig cannot both be provided.");
    }
    this.client = fields.client ?? new Client(fields?.clientConfig);
    this.contentKey = fields.contentKey ? fields.contentKey.split(".") : [];
    this.formatContent = fields.formatContent ?? _stringify;
    this.datasetId = fields.datasetId;
    this.datasetName = fields.datasetName;
    this.exampleIds = fields.exampleIds;
    this.asOf = fields.asOf;
    this.splits = fields.splits;
    this.inlineS3Urls = fields.inlineS3Urls;
    this.offset = fields.offset;
    this.limit = fields.limit;
    this.metadata = fields.metadata;
    this.filter = fields.filter;
  }

  /**
   * Fetches examples from the configured LangSmith dataset and converts
   * each into a Document: the (optionally keyed and formatted) example
   * inputs become `pageContent`, and the example itself becomes `metadata`.
   */
  async load(): Promise<Document[]> {
    const documents: DocumentInterface[] = [];
    for await (const example of this.client.listExamples({
      datasetId: this.datasetId,
      datasetName: this.datasetName,
      exampleIds: this.exampleIds,
      asOf: this.asOf,
      splits: this.splits,
      inlineS3Urls: this.inlineS3Urls,
      offset: this.offset,
      limit: this.limit,
      metadata: this.metadata,
      filter: this.filter,
    })) {
      // Walk the dotted contentKey path into the example inputs.
      let content = example.inputs;
      for (const key of this.contentKey) {
        content = content[key];
      }
      const contentStr = this.formatContent(content);

      // NOTE(review): `metadata` aliases `example`, so the date-to-string
      // conversion below mutates the example object in place — confirm
      // callers don't reuse `example` afterwards.
      const metadata: KVMap = example;
      ["created_at", "modified_at"].forEach((k) => {
        if (k in metadata) {
          if (typeof metadata[k] === "object") {
            // Dates are of type `object`, we want to convert them to strings.
            metadata[k] = metadata[k].toString();
          }
        }
      });

      documents.push({
        pageContent: contentStr,
        metadata,
      });
    }
    return documents;
  }
}

// Default content formatter: pass strings through, pretty-print objects,
// and fall back to String() for non-JSON-serializable values.
function _stringify(x: string | KVMap): string {
  if (typeof x === "string") {
    return x;
  } else {
    try {
      return JSON.stringify(x, null, 2);
    } catch (error) {
      return String(x);
    }
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/document_loaders/base.ts
import { Document } from "../documents/document.js"; import { BaseDocumentTransformer } from "../documents/transformers.js"; /** * Interface that defines the methods for loading and splitting documents. */ export interface DocumentLoader { load(): Promise<Document[]>; loadAndSplit(textSplitter?: BaseDocumentTransformer): Promise<Document[]>; } /** * Abstract class that provides a default implementation for the * loadAndSplit() method from the DocumentLoader interface. The load() * method is left abstract and needs to be implemented by subclasses. */ export abstract class BaseDocumentLoader implements DocumentLoader { /** * Loads the documents. * @returns A Promise that resolves with an array of Document instances. */ abstract load(): Promise<Document[]>; /** * @deprecated Use `this.load()` and `splitter.splitDocuments()` individually. * Loads the documents and splits them using a specified text splitter. * @param textSplitter The TextSplitter instance to use for splitting the loaded documents. Defaults to a RecursiveCharacterTextSplitter instance. * @returns A Promise that resolves with an array of Document instances, each split according to the provided TextSplitter. */ async loadAndSplit(splitter?: BaseDocumentTransformer): Promise<Document[]> { if (splitter === undefined) { throw new Error("You must pass a text splitter to use this method."); } const docs = await this.load(); return splitter.invoke(docs); } }
0
lc_public_repos/langchainjs/langchain-core/src/document_loaders
lc_public_repos/langchainjs/langchain-core/src/document_loaders/tests/langsmith.int.test.ts
// Integration tests for LangSmithLoader against a live LangSmith dataset.
// Requires LANGCHAIN_API_KEY and access to the fixture dataset below.
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { Client } from "langsmith";
import { LangSmithLoader } from "../langsmith.js";

const DATASET_NAME = "brace-test-dataset";
const DATASET_ID = "9a3b36f7-a297-40a5-944d-6613853b6330";

test("LangSmithLoader can load with client passed in", async () => {
  const lsClient = new Client();

  const loader = new LangSmithLoader({
    datasetId: DATASET_ID,
    client: lsClient,
  });

  const docs = await loader.load();
  expect(docs.length).toBeGreaterThanOrEqual(1);
  console.log(docs[0]);
  const parsedContent = JSON.parse(docs[0].pageContent);
  expect(parsedContent).toHaveProperty("input_key_str");
  expect(parsedContent.input_key_str).toBe("string");
  expect(parsedContent).toHaveProperty("input_key_bool");
  expect(parsedContent.input_key_bool).toBe(true);

  // Date fields should have been converted to strings by the loader.
  expect(docs[0].metadata).toHaveProperty("created_at");
  expect(typeof docs[0].metadata.created_at).toBe("string");
  expect(docs[0].metadata).toHaveProperty("modified_at");
  expect(typeof docs[0].metadata.modified_at).toBe("string");
});

test("LangSmithLoader can load with client options passed in", async () => {
  const lsApiKey = process.env.LANGCHAIN_API_KEY;
  // unassign the API key to confirm the client isn't overriding what we passed in.
  process.env.LANGCHAIN_API_KEY = "";
  try {
    const lsConfigArgs = {
      apiKey: lsApiKey,
    };
    const loader = new LangSmithLoader({
      datasetId: DATASET_ID,
      clientConfig: lsConfigArgs,
    });
    const docs = await loader.load();
    expect(docs.length).toBeGreaterThanOrEqual(1);
  } finally {
    // Restore the key regardless of test outcome.
    process.env.LANGCHAIN_API_KEY = lsApiKey;
  }
});

test("LangSmithLoader can load with dataset name", async () => {
  const loader = new LangSmithLoader({ datasetName: DATASET_NAME });
  const docs = await loader.load();
  expect(docs.length).toBeGreaterThanOrEqual(1);
});

test("Passing content key correctly loads that value", async () => {
  const loader = new LangSmithLoader({
    datasetName: DATASET_NAME,
    contentKey: "input_key_str",
  });
  const docs = await loader.load();
  expect(docs[0].pageContent).toBe("string");
});
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/documents/transformers.ts
import { Runnable } from "../runnables/base.js";
import type { BaseCallbackConfig } from "../callbacks/manager.js";
import type { DocumentInterface } from "./document.js";

/**
 * Abstract base class for document transformation systems.
 *
 * A document transformation system takes an array of Documents and returns an
 * array of transformed Documents. These arrays do not necessarily have to have
 * the same length.
 *
 * One example of this is a text splitter that splits a large document into
 * many smaller documents.
 */
export abstract class BaseDocumentTransformer<
  RunInput extends DocumentInterface[] = DocumentInterface[],
  RunOutput extends DocumentInterface[] = DocumentInterface[]
> extends Runnable<RunInput, RunOutput> {
  lc_namespace = ["langchain_core", "documents", "transformers"];

  /**
   * Transform a list of documents.
   * @param documents A sequence of documents to be transformed.
   * @returns A list of transformed documents.
   */
  abstract transformDocuments(documents: RunInput): Promise<RunOutput>;

  /**
   * Method to invoke the document transformation. This method calls the
   * transformDocuments method with the provided input.
   * @param input The input documents to be transformed.
   * @param _options Optional configuration object to customize the behavior of callbacks.
   * @returns A Promise that resolves to the transformed documents.
   */
  invoke(input: RunInput, _options?: BaseCallbackConfig): Promise<RunOutput> {
    // The config is intentionally unused: transformation delegates directly
    // to transformDocuments without dispatching callbacks.
    return this.transformDocuments(input);
  }
}

/**
 * Class for document transformers that return exactly one transformed document
 * for each input document.
 */
export abstract class MappingDocumentTransformer extends BaseDocumentTransformer {
  async transformDocuments(
    documents: DocumentInterface[]
  ): Promise<DocumentInterface[]> {
    const newDocuments = [];
    // Transforms sequentially, preserving input order.
    for (const document of documents) {
      const transformedDocument = await this._transformDocument(document);
      newDocuments.push(transformedDocument);
    }
    return newDocuments;
  }

  /** Transforms a single document into its replacement. */
  abstract _transformDocument(
    document: DocumentInterface
  ): Promise<DocumentInterface>;
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/documents/document.ts
export interface DocumentInput< // eslint-disable-next-line @typescript-eslint/no-explicit-any Metadata extends Record<string, any> = Record<string, any> > { pageContent: string; metadata?: Metadata; /** * An optional identifier for the document. * * Ideally this should be unique across the document collection and formatted * as a UUID, but this will not be enforced. */ id?: string; } export interface DocumentInterface< // eslint-disable-next-line @typescript-eslint/no-explicit-any Metadata extends Record<string, any> = Record<string, any> > { pageContent: string; metadata: Metadata; /** * An optional identifier for the document. * * Ideally this should be unique across the document collection and formatted * as a UUID, but this will not be enforced. */ id?: string; } /** * Interface for interacting with a document. */ export class Document< // eslint-disable-next-line @typescript-eslint/no-explicit-any Metadata extends Record<string, any> = Record<string, any> > implements DocumentInput, DocumentInterface { pageContent: string; metadata: Metadata; // The ID field is optional at the moment. // It will likely become required in a future major release after // it has been adopted by enough vectorstore implementations. /** * An optional identifier for the document. * * Ideally this should be unique across the document collection and formatted * as a UUID, but this will not be enforced. */ id?: string; constructor(fields: DocumentInput<Metadata>) { this.pageContent = fields.pageContent !== undefined ? fields.pageContent.toString() : ""; this.metadata = fields.metadata ?? ({} as Metadata); this.id = fields.id; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/documents/index.ts
export * from "./document.js"; export * from "./transformers.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/indexing/record_manager.ts
import { Serializable } from "../load/serializable.js";

// Arbitrary value, used for generating namespaced UUIDs.
export const UUIDV5_NAMESPACE = "10f90ea3-90a4-4962-bf75-83a0f3c1c62a";

// Options accepted by `RecordManagerInterface.update`.
export type UpdateOptions = {
  // Optional group ids, expected to align one-to-one with the keys updated.
  groupIds?: (string | null)[];
  // Only perform the update if the manager's current time is at least this.
  timeAtLeast?: number;
};

// Filters accepted by `RecordManagerInterface.listKeys`.
export type ListKeyOptions = {
  // Return only keys updated before this timestamp.
  before?: number;
  // Return only keys updated after this timestamp.
  after?: number;
  // Return only keys belonging to these groups.
  groupIds?: (string | null)[];
  // Maximum number of keys to return.
  limit?: number;
};

/**
 * Contract for a store that records which keys have been written and when —
 * used by the indexing API to detect adds, updates and deletes.
 */
export interface RecordManagerInterface {
  /**
   * Creates schema in the record manager.
   * @returns Promise
   */
  createSchema(): Promise<void>;

  /**
   * Returns current time from the record manager.
   * @returns Current time
   */
  getTime(): Promise<number>;

  /**
   * Updates keys in the record manager.
   * @param keys List of keys to update
   * @param updateOptions Options carrying `groupIds` (list of groupIds to
   *   update, aligned with `keys`) and `timeAtLeast` (update only if current
   *   time is at least this value)
   * @returns Promise
   * @throws Error if timeAtLeast is provided and current time is less than timeAtLeast
   * @throws Error if number of keys does not match number of groupIds
   */
  update(keys: string[], updateOptions: UpdateOptions): Promise<void>;

  /**
   * Checks if keys exist in the record manager.
   * @param keys List of keys to check
   * @returns List of booleans indicating if key exists in same order as provided keys
   */
  exists(keys: string[]): Promise<boolean[]>;

  /**
   * Lists keys from the record manager.
   * @param options Filters: `before` / `after` timestamps, `groupIds`, and a
   *   result `limit`
   * @returns List of keys
   */
  listKeys(options: ListKeyOptions): Promise<string[]>;

  /**
   * Deletes keys from the record manager.
   * @param keys List of keys to delete
   */
  deleteKeys(keys: string[]): Promise<void>;
}

/**
 * Serializable base class for record managers. All operations are abstract;
 * note the abstract signatures make the options parameters optional, which
 * is a (backward-compatible) loosening of `RecordManagerInterface`.
 */
export abstract class RecordManager
  extends Serializable
  implements RecordManagerInterface
{
  // Namespace used by LangChain's serialization (load/save) machinery.
  lc_namespace = ["langchain", "recordmanagers"];

  abstract createSchema(): Promise<void>;

  abstract getTime(): Promise<number>;

  abstract update(keys: string[], updateOptions?: UpdateOptions): Promise<void>;

  abstract exists(keys: string[]): Promise<boolean[]>;

  abstract listKeys(options?: ListKeyOptions): Promise<string[]>;

  abstract deleteKeys(keys: string[]): Promise<void>;
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/indexing/index.ts
export * from "./record_manager.js"; export * from "./base.js";
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/indexing/base.ts
import { v5 as uuidv5 } from "uuid"; import { VectorStore } from "../vectorstores.js"; import { RecordManagerInterface, UUIDV5_NAMESPACE } from "./record_manager.js"; import { insecureHash } from "../utils/hash.js"; import { DocumentInterface, Document } from "../documents/document.js"; import { BaseDocumentLoader } from "../document_loaders/base.js"; type Metadata = Record<string, unknown>; type IndexingResult = { numAdded: number; numDeleted: number; numUpdated: number; numSkipped: number; }; type StringOrDocFunc = string | ((doc: DocumentInterface) => string); export interface HashedDocumentInterface extends DocumentInterface { uid: string; hash_?: string; contentHash?: string; metadataHash?: string; pageContent: string; metadata: Metadata; calculateHashes(): void; toDocument(): DocumentInterface; } interface HashedDocumentArgs { pageContent: string; metadata: Metadata; uid: string; } /** * HashedDocument is a Document with hashes calculated. * Hashes are calculated based on page content and metadata. * It is used for indexing. */ export class _HashedDocument implements HashedDocumentInterface { uid: string; hash_?: string; contentHash?: string; metadataHash?: string; pageContent: string; metadata: Metadata; constructor(fields: HashedDocumentArgs) { this.uid = fields.uid; this.pageContent = fields.pageContent; this.metadata = fields.metadata; } calculateHashes(): void { const forbiddenKeys = ["hash_", "content_hash", "metadata_hash"]; for (const key of forbiddenKeys) { if (key in this.metadata) { throw new Error( `Metadata cannot contain key ${key} as it is reserved for internal use. Restricted keys: [${forbiddenKeys.join( ", " )}]` ); } } const contentHash = this._hashStringToUUID(this.pageContent); try { const metadataHash = this._hashNestedDictToUUID(this.metadata); this.contentHash = contentHash; this.metadataHash = metadataHash; } catch (e) { throw new Error( `Failed to hash metadata: ${e}. 
Please use a dict that can be serialized using json.` ); } this.hash_ = this._hashStringToUUID(this.contentHash + this.metadataHash); if (!this.uid) { this.uid = this.hash_; } } toDocument(): DocumentInterface { return new Document({ pageContent: this.pageContent, metadata: this.metadata, }); } static fromDocument( document: DocumentInterface, uid?: string ): _HashedDocument { const doc = new this({ pageContent: document.pageContent, metadata: document.metadata, uid: uid || (document as DocumentInterface & { uid: string }).uid, }); doc.calculateHashes(); return doc; } private _hashStringToUUID(inputString: string): string { const hash_value = insecureHash(inputString); return uuidv5(hash_value, UUIDV5_NAMESPACE); } private _hashNestedDictToUUID(data: Record<string, unknown>): string { const serialized_data = JSON.stringify(data, Object.keys(data).sort()); const hash_value = insecureHash(serialized_data); return uuidv5(hash_value, UUIDV5_NAMESPACE); } } export type CleanupMode = "full" | "incremental"; export type IndexOptions = { /** * The number of documents to index in one batch. */ batchSize?: number; /** * The cleanup mode to use. Can be "full", "incremental" or undefined. * - **Incremental**: Cleans up all documents that haven't been updated AND * that are associated with source ids that were seen * during indexing. * Clean up is done continuously during indexing helping * to minimize the probability of users seeing duplicated * content. * - **Full**: Delete all documents that haven to been returned by the loader. * Clean up runs after all documents have been indexed. * This means that users may see duplicated content during indexing. * - **undefined**: Do not delete any documents. */ cleanup?: CleanupMode; /** * Optional key that helps identify the original source of the document. * Must either be a string representing the key of the source in the metadata * or a function that takes a document and returns a string representing the source. 
* **Required when cleanup is incremental**. */ sourceIdKey?: StringOrDocFunc; /** * Batch size to use when cleaning up documents. */ cleanupBatchSize?: number; /** * Force update documents even if they are present in the * record manager. Useful if you are re-indexing with updated embeddings. */ forceUpdate?: boolean; }; export function _batch<T>(size: number, iterable: T[]): T[][] { const batches: T[][] = []; let currentBatch: T[] = []; iterable.forEach((item) => { currentBatch.push(item); if (currentBatch.length >= size) { batches.push(currentBatch); currentBatch = []; } }); if (currentBatch.length > 0) { batches.push(currentBatch); } return batches; } export function _deduplicateInOrder( hashedDocuments: HashedDocumentInterface[] ): HashedDocumentInterface[] { const seen = new Set<string>(); const deduplicated: HashedDocumentInterface[] = []; for (const hashedDoc of hashedDocuments) { if (!hashedDoc.hash_) { throw new Error("Hashed document does not have a hash"); } if (!seen.has(hashedDoc.hash_)) { seen.add(hashedDoc.hash_); deduplicated.push(hashedDoc); } } return deduplicated; } export function _getSourceIdAssigner( sourceIdKey: StringOrDocFunc | null ): (doc: DocumentInterface) => string | null { if (sourceIdKey === null) { return (_doc: DocumentInterface) => null; } else if (typeof sourceIdKey === "string") { return (doc: DocumentInterface) => doc.metadata[sourceIdKey]; } else if (typeof sourceIdKey === "function") { return sourceIdKey; } else { throw new Error( `sourceIdKey should be null, a string or a function, got ${typeof sourceIdKey}` ); } } // eslint-disable-next-line @typescript-eslint/no-explicit-any export const _isBaseDocumentLoader = (arg: any): arg is BaseDocumentLoader => { if ( "load" in arg && typeof arg.load === "function" && "loadAndSplit" in arg && typeof arg.loadAndSplit === "function" ) { return true; } return false; }; interface IndexArgs { docsSource: BaseDocumentLoader | DocumentInterface[]; recordManager: RecordManagerInterface; 
vectorStore: VectorStore; options?: IndexOptions; } /** * Index data from the doc source into the vector store. * * Indexing functionality uses a manager to keep track of which documents * are in the vector store. * * This allows us to keep track of which documents were updated, and which * documents were deleted, which documents should be skipped. * * For the time being, documents are indexed using their hashes, and users * are not able to specify the uid of the document. * * @param {IndexArgs} args * @param {BaseDocumentLoader | DocumentInterface[]} args.docsSource The source of documents to index. Can be a DocumentLoader or a list of Documents. * @param {RecordManagerInterface} args.recordManager The record manager to use for keeping track of indexed documents. * @param {VectorStore} args.vectorStore The vector store to use for storing the documents. * @param {IndexOptions | undefined} args.options Options for indexing. * @returns {Promise<IndexingResult>} */ export async function index(args: IndexArgs): Promise<IndexingResult> { const { docsSource, recordManager, vectorStore, options } = args; const { batchSize = 100, cleanup, sourceIdKey, cleanupBatchSize = 1000, forceUpdate = false, } = options ?? {}; if (cleanup === "incremental" && !sourceIdKey) { throw new Error( "sourceIdKey is required when cleanup mode is incremental. Please provide through 'options.sourceIdKey'." ); } const docs = _isBaseDocumentLoader(docsSource) ? await docsSource.load() : docsSource; const sourceIdAssigner = _getSourceIdAssigner(sourceIdKey ?? null); const indexStartDt = await recordManager.getTime(); let numAdded = 0; let numDeleted = 0; let numUpdated = 0; let numSkipped = 0; const batches = _batch<DocumentInterface>(batchSize ?? 
100, docs); for (const batch of batches) { const hashedDocs = _deduplicateInOrder( batch.map((doc) => _HashedDocument.fromDocument(doc)) ); const sourceIds = hashedDocs.map((doc) => sourceIdAssigner(doc)); if (cleanup === "incremental") { hashedDocs.forEach((_hashedDoc, index) => { const source = sourceIds[index]; if (source === null) { throw new Error( "sourceIdKey must be provided when cleanup is incremental" ); } }); } const batchExists = await recordManager.exists( hashedDocs.map((doc) => doc.uid) ); const uids: string[] = []; const docsToIndex: DocumentInterface[] = []; const docsToUpdate: string[] = []; const seenDocs = new Set<string>(); hashedDocs.forEach((hashedDoc, i) => { const docExists = batchExists[i]; if (docExists) { if (forceUpdate) { seenDocs.add(hashedDoc.uid); } else { docsToUpdate.push(hashedDoc.uid); return; } } uids.push(hashedDoc.uid); docsToIndex.push(hashedDoc.toDocument()); }); if (docsToUpdate.length > 0) { await recordManager.update(docsToUpdate, { timeAtLeast: indexStartDt }); numSkipped += docsToUpdate.length; } if (docsToIndex.length > 0) { await vectorStore.addDocuments(docsToIndex, { ids: uids }); numAdded += docsToIndex.length - seenDocs.size; numUpdated += seenDocs.size; } await recordManager.update( hashedDocs.map((doc) => doc.uid), { timeAtLeast: indexStartDt, groupIds: sourceIds } ); if (cleanup === "incremental") { sourceIds.forEach((sourceId) => { if (!sourceId) throw new Error("Source id cannot be null"); }); const uidsToDelete = await recordManager.listKeys({ before: indexStartDt, groupIds: sourceIds, }); await vectorStore.delete({ ids: uidsToDelete }); await recordManager.deleteKeys(uidsToDelete); numDeleted += uidsToDelete.length; } } if (cleanup === "full") { let uidsToDelete = await recordManager.listKeys({ before: indexStartDt, limit: cleanupBatchSize, }); while (uidsToDelete.length > 0) { await vectorStore.delete({ ids: uidsToDelete }); await recordManager.deleteKeys(uidsToDelete); numDeleted += uidsToDelete.length; 
uidsToDelete = await recordManager.listKeys({ before: indexStartDt, limit: cleanupBatchSize, }); } } return { numAdded, numDeleted, numUpdated, numSkipped, }; }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/callbacks/promises.ts
import { awaitAllCallbacks, consumeCallback } from "../singletons/callbacks.js"; export { awaitAllCallbacks, consumeCallback };
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/callbacks/manager.ts
import { v4 as uuidv4 } from "uuid"; import { AgentAction, AgentFinish } from "../agents.js"; import type { ChainValues } from "../utils/types/index.js"; import { LLMResult } from "../outputs.js"; import { BaseCallbackHandler, CallbackHandlerMethods, HandleLLMNewTokenCallbackFields, NewTokenIndices, } from "./base.js"; import { ConsoleCallbackHandler } from "../tracers/console.js"; import { type BaseMessage } from "../messages/base.js"; import { getBufferString } from "../messages/utils.js"; import { getEnvironmentVariable } from "../utils/env.js"; import { LangChainTracer, LangChainTracerFields, } from "../tracers/tracer_langchain.js"; import { consumeCallback } from "./promises.js"; import { Serialized } from "../load/serializable.js"; import type { DocumentInterface } from "../documents/document.js"; import { isTracingEnabled } from "../utils/callbacks.js"; import { isBaseTracer } from "../tracers/base.js"; type BaseCallbackManagerMethods = { [K in keyof CallbackHandlerMethods]?: ( ...args: Parameters<Required<CallbackHandlerMethods>[K]> ) => Promise<unknown>; }; export interface CallbackManagerOptions { verbose?: boolean; tracing?: boolean; } export type Callbacks = | CallbackManager | (BaseCallbackHandler | CallbackHandlerMethods)[]; export interface BaseCallbackConfig { /** * Name for the tracer run for this call. Defaults to the name of the class. */ runName?: string; /** * Tags for this call and any sub-calls (eg. a Chain calling an LLM). * You can use these to filter calls. */ tags?: string[]; /** * Metadata for this call and any sub-calls (eg. a Chain calling an LLM). * Keys should be strings, values should be JSON-serializable. */ metadata?: Record<string, unknown>; /** * Callbacks for this call and any sub-calls (eg. a Chain calling an LLM). * Tags are passed to all callbacks, metadata is passed to handle*Start callbacks. */ callbacks?: Callbacks; /** * Unique identifier for the tracer run for this call. If not provided, a new UUID * will be generated. 
*/ runId?: string; } export function parseCallbackConfigArg( arg: Callbacks | BaseCallbackConfig | undefined ): BaseCallbackConfig { if (!arg) { return {}; } else if (Array.isArray(arg) || "name" in arg) { return { callbacks: arg }; } else { return arg; } } /** * Manage callbacks from different components of LangChain. */ export abstract class BaseCallbackManager { abstract addHandler(handler: BaseCallbackHandler): void; abstract removeHandler(handler: BaseCallbackHandler): void; abstract setHandlers(handlers: BaseCallbackHandler[]): void; setHandler(handler: BaseCallbackHandler): void { return this.setHandlers([handler]); } } /** * Base class for run manager in LangChain. */ export class BaseRunManager { constructor( public readonly runId: string, public readonly handlers: BaseCallbackHandler[], protected readonly inheritableHandlers: BaseCallbackHandler[], protected readonly tags: string[], protected readonly inheritableTags: string[], protected readonly metadata: Record<string, unknown>, protected readonly inheritableMetadata: Record<string, unknown>, protected readonly _parentRunId?: string ) {} get parentRunId() { return this._parentRunId; } async handleText(text: string): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { try { await handler.handleText?.( text, this.runId, this._parentRunId, this.tags ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleText: ${err}` ); if (handler.raiseError) { throw err; } } }, handler.awaitHandlers) ) ); } async handleCustomEvent( eventName: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any data: any, _runId?: string, _tags?: string[], // eslint-disable-next-line @typescript-eslint/no-explicit-any _metadata?: Record<string, any> ): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { try { await handler.handleCustomEvent?.( eventName, data, this.runId, this.tags, this.metadata ); } catch (err) { const logFunction = handler.raiseError ? console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleCustomEvent: ${err}` ); if (handler.raiseError) { throw err; } } }, handler.awaitHandlers) ) ); } } /** * Manages callbacks for retriever runs. */ export class CallbackManagerForRetrieverRun extends BaseRunManager implements BaseCallbackManagerMethods { getChild(tag?: string): CallbackManager { // eslint-disable-next-line @typescript-eslint/no-use-before-define const manager = new CallbackManager(this.runId); manager.setHandlers(this.inheritableHandlers); manager.addTags(this.inheritableTags); manager.addMetadata(this.inheritableMetadata); if (tag) { manager.addTags([tag], false); } return manager; } async handleRetrieverEnd(documents: DocumentInterface[]): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreRetriever) { try { await handler.handleRetrieverEnd?.( documents, this.runId, this._parentRunId, this.tags ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleRetriever` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } async handleRetrieverError(err: Error | unknown): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreRetriever) { try { await handler.handleRetrieverError?.( err, this.runId, this._parentRunId, this.tags ); } catch (error) { const logFunction = handler.raiseError ? console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleRetrieverError: ${error}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } } export class CallbackManagerForLLMRun extends BaseRunManager implements BaseCallbackManagerMethods { async handleLLMNewToken( token: string, idx?: NewTokenIndices, _runId?: string, _parentRunId?: string, _tags?: string[], fields?: HandleLLMNewTokenCallbackFields ): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreLLM) { try { await handler.handleLLMNewToken?.( token, idx ?? { prompt: 0, completion: 0 }, this.runId, this._parentRunId, this.tags, fields ); } catch (err) { const logFunction = handler.raiseError ? console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleLLMNewToken: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } async handleLLMError(err: Error | unknown): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreLLM) { try { await handler.handleLLMError?.( err, this.runId, this._parentRunId, this.tags ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleLLMError: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } async handleLLMEnd(output: LLMResult): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreLLM) { try { await handler.handleLLMEnd?.( output, this.runId, this._parentRunId, this.tags ); } catch (err) { const logFunction = handler.raiseError ? console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleLLMEnd: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } } export class CallbackManagerForChainRun extends BaseRunManager implements BaseCallbackManagerMethods { getChild(tag?: string): CallbackManager { // eslint-disable-next-line @typescript-eslint/no-use-before-define const manager = new CallbackManager(this.runId); manager.setHandlers(this.inheritableHandlers); manager.addTags(this.inheritableTags); manager.addMetadata(this.inheritableMetadata); if (tag) { manager.addTags([tag], false); } return manager; } async handleChainError( err: Error | unknown, _runId?: string, _parentRunId?: string, _tags?: string[], kwargs?: { inputs?: Record<string, unknown> } ): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreChain) { try { await handler.handleChainError?.( err, this.runId, this._parentRunId, this.tags, kwargs ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleChainError: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } async handleChainEnd( output: ChainValues, _runId?: string, _parentRunId?: string, _tags?: string[], kwargs?: { inputs?: Record<string, unknown> } ): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreChain) { try { await handler.handleChainEnd?.( output, this.runId, this._parentRunId, this.tags, kwargs ); } catch (err) { const logFunction = handler.raiseError ? console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleChainEnd: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } async handleAgentAction(action: AgentAction): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreAgent) { try { await handler.handleAgentAction?.( action, this.runId, this._parentRunId, this.tags ); } catch (err) { const logFunction = handler.raiseError ? console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleAgentAction: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } async handleAgentEnd(action: AgentFinish): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreAgent) { try { await handler.handleAgentEnd?.( action, this.runId, this._parentRunId, this.tags ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleAgentEnd: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } } export class CallbackManagerForToolRun extends BaseRunManager implements BaseCallbackManagerMethods { getChild(tag?: string): CallbackManager { // eslint-disable-next-line @typescript-eslint/no-use-before-define const manager = new CallbackManager(this.runId); manager.setHandlers(this.inheritableHandlers); manager.addTags(this.inheritableTags); manager.addMetadata(this.inheritableMetadata); if (tag) { manager.addTags([tag], false); } return manager; } async handleToolError(err: Error | unknown): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreAgent) { try { await handler.handleToolError?.( err, this.runId, this._parentRunId, this.tags ); } catch (err) { const logFunction = handler.raiseError ? console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleToolError: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } // eslint-disable-next-line @typescript-eslint/no-explicit-any async handleToolEnd(output: any): Promise<void> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreAgent) { try { await handler.handleToolEnd?.( output, this.runId, this._parentRunId, this.tags ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleToolEnd: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } } /** * @example * ```typescript * const prompt = PromptTemplate.fromTemplate("What is the answer to {question}?"); * * // Example of using LLMChain with OpenAI and a simple prompt * const chain = new LLMChain({ * llm: new ChatOpenAI({ temperature: 0.9 }), * prompt, * }); * * // Running the chain with a single question * const result = await chain.call({ * question: "What is the airspeed velocity of an unladen swallow?", * }); * console.log("The answer is:", result); * ``` */ export class CallbackManager extends BaseCallbackManager implements BaseCallbackManagerMethods { handlers: BaseCallbackHandler[] = []; inheritableHandlers: BaseCallbackHandler[] = []; tags: string[] = []; inheritableTags: string[] = []; metadata: Record<string, unknown> = {}; inheritableMetadata: Record<string, unknown> = {}; name = "callback_manager"; public _parentRunId?: string; constructor( parentRunId?: string, options?: { handlers?: BaseCallbackHandler[]; inheritableHandlers?: BaseCallbackHandler[]; tags?: string[]; inheritableTags?: string[]; metadata?: Record<string, unknown>; inheritableMetadata?: Record<string, unknown>; } ) { super(); this.handlers = options?.handlers ?? this.handlers; this.inheritableHandlers = options?.inheritableHandlers ?? this.inheritableHandlers; this.tags = options?.tags ?? this.tags; this.inheritableTags = options?.inheritableTags ?? this.inheritableTags; this.metadata = options?.metadata ?? this.metadata; this.inheritableMetadata = options?.inheritableMetadata ?? this.inheritableMetadata; this._parentRunId = parentRunId; } /** * Gets the parent run ID, if any. * * @returns The parent run ID. 
*/ getParentRunId() { return this._parentRunId; } async handleLLMStart( llm: Serialized, prompts: string[], runId: string | undefined = undefined, _parentRunId: string | undefined = undefined, extraParams: Record<string, unknown> | undefined = undefined, _tags: string[] | undefined = undefined, _metadata: Record<string, unknown> | undefined = undefined, runName: string | undefined = undefined ): Promise<CallbackManagerForLLMRun[]> { return Promise.all( prompts.map(async (prompt, idx) => { // Can't have duplicate runs with the same run ID (if provided) const runId_ = idx === 0 && runId ? runId : uuidv4(); await Promise.all( this.handlers.map((handler) => { if (handler.ignoreLLM) { return; } if (isBaseTracer(handler)) { // Create and add run to the run map. // We do this synchronously to avoid race conditions // when callbacks are backgrounded. handler._createRunForLLMStart( llm, [prompt], runId_, this._parentRunId, extraParams, this.tags, this.metadata, runName ); } return consumeCallback(async () => { try { await handler.handleLLMStart?.( llm, [prompt], runId_, this._parentRunId, extraParams, this.tags, this.metadata, runName ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleLLMStart: ${err}` ); if (handler.raiseError) { throw err; } } }, handler.awaitHandlers); }) ); return new CallbackManagerForLLMRun( runId_, this.handlers, this.inheritableHandlers, this.tags, this.inheritableTags, this.metadata, this.inheritableMetadata, this._parentRunId ); }) ); } async handleChatModelStart( llm: Serialized, messages: BaseMessage[][], runId: string | undefined = undefined, _parentRunId: string | undefined = undefined, extraParams: Record<string, unknown> | undefined = undefined, _tags: string[] | undefined = undefined, _metadata: Record<string, unknown> | undefined = undefined, runName: string | undefined = undefined ): Promise<CallbackManagerForLLMRun[]> { return Promise.all( messages.map(async (messageGroup, idx) => { // Can't have duplicate runs with the same run ID (if provided) const runId_ = idx === 0 && runId ? runId : uuidv4(); await Promise.all( this.handlers.map((handler) => { if (handler.ignoreLLM) { return; } if (isBaseTracer(handler)) { // Create and add run to the run map. // We do this synchronously to avoid race conditions // when callbacks are backgrounded. handler._createRunForChatModelStart( llm, [messageGroup], runId_, this._parentRunId, extraParams, this.tags, this.metadata, runName ); } return consumeCallback(async () => { try { if (handler.handleChatModelStart) { await handler.handleChatModelStart?.( llm, [messageGroup], runId_, this._parentRunId, extraParams, this.tags, this.metadata, runName ); } else if (handler.handleLLMStart) { const messageString = getBufferString(messageGroup); await handler.handleLLMStart?.( llm, [messageString], runId_, this._parentRunId, extraParams, this.tags, this.metadata, runName ); } } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleLLMStart: ${err}` ); if (handler.raiseError) { throw err; } } }, handler.awaitHandlers); }) ); return new CallbackManagerForLLMRun( runId_, this.handlers, this.inheritableHandlers, this.tags, this.inheritableTags, this.metadata, this.inheritableMetadata, this._parentRunId ); }) ); } async handleChainStart( chain: Serialized, inputs: ChainValues, runId = uuidv4(), runType: string | undefined = undefined, _tags: string[] | undefined = undefined, _metadata: Record<string, unknown> | undefined = undefined, runName: string | undefined = undefined ): Promise<CallbackManagerForChainRun> { await Promise.all( this.handlers.map((handler) => { if (handler.ignoreChain) { return; } if (isBaseTracer(handler)) { // Create and add run to the run map. // We do this synchronously to avoid race conditions // when callbacks are backgrounded. handler._createRunForChainStart( chain, inputs, runId, this._parentRunId, this.tags, this.metadata, runType, runName ); } return consumeCallback(async () => { try { await handler.handleChainStart?.( chain, inputs, runId, this._parentRunId, this.tags, this.metadata, runType, runName ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleChainStart: ${err}` ); if (handler.raiseError) { throw err; } } }, handler.awaitHandlers); }) ); return new CallbackManagerForChainRun( runId, this.handlers, this.inheritableHandlers, this.tags, this.inheritableTags, this.metadata, this.inheritableMetadata, this._parentRunId ); } async handleToolStart( tool: Serialized, input: string, runId = uuidv4(), _parentRunId: string | undefined = undefined, _tags: string[] | undefined = undefined, _metadata: Record<string, unknown> | undefined = undefined, runName: string | undefined = undefined ): Promise<CallbackManagerForToolRun> { await Promise.all( this.handlers.map((handler) => { if (handler.ignoreAgent) { return; } if (isBaseTracer(handler)) { // Create and add run to the run map. // We do this synchronously to avoid race conditions // when callbacks are backgrounded. handler._createRunForToolStart( tool, input, runId, this._parentRunId, this.tags, this.metadata, runName ); } return consumeCallback(async () => { try { await handler.handleToolStart?.( tool, input, runId, this._parentRunId, this.tags, this.metadata, runName ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleToolStart: ${err}` ); if (handler.raiseError) { throw err; } } }, handler.awaitHandlers); }) ); return new CallbackManagerForToolRun( runId, this.handlers, this.inheritableHandlers, this.tags, this.inheritableTags, this.metadata, this.inheritableMetadata, this._parentRunId ); } async handleRetrieverStart( retriever: Serialized, query: string, runId: string = uuidv4(), _parentRunId: string | undefined = undefined, _tags: string[] | undefined = undefined, _metadata: Record<string, unknown> | undefined = undefined, runName: string | undefined = undefined ): Promise<CallbackManagerForRetrieverRun> { await Promise.all( this.handlers.map((handler) => { if (handler.ignoreRetriever) { return; } if (isBaseTracer(handler)) { // Create and add run to the run map. // We do this synchronously to avoid race conditions // when callbacks are backgrounded. handler._createRunForRetrieverStart( retriever, query, runId, this._parentRunId, this.tags, this.metadata, runName ); } return consumeCallback(async () => { try { await handler.handleRetrieverStart?.( retriever, query, runId, this._parentRunId, this.tags, this.metadata, runName ); } catch (err) { const logFunction = handler.raiseError ? 
console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleRetrieverStart: ${err}` ); if (handler.raiseError) { throw err; } } }, handler.awaitHandlers); }) ); return new CallbackManagerForRetrieverRun( runId, this.handlers, this.inheritableHandlers, this.tags, this.inheritableTags, this.metadata, this.inheritableMetadata, this._parentRunId ); } async handleCustomEvent?( eventName: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any data: any, runId: string, _tags?: string[], // eslint-disable-next-line @typescript-eslint/no-explicit-any _metadata?: Record<string, any> ): // eslint-disable-next-line @typescript-eslint/no-explicit-any Promise<any> { await Promise.all( this.handlers.map((handler) => consumeCallback(async () => { if (!handler.ignoreCustomEvent) { try { await handler.handleCustomEvent?.( eventName, data, runId, this.tags, this.metadata ); } catch (err) { const logFunction = handler.raiseError ? console.error : console.warn; logFunction( `Error in handler ${handler.constructor.name}, handleCustomEvent: ${err}` ); if (handler.raiseError) { throw err; } } } }, handler.awaitHandlers) ) ); } addHandler(handler: BaseCallbackHandler, inherit = true): void { this.handlers.push(handler); if (inherit) { this.inheritableHandlers.push(handler); } } removeHandler(handler: BaseCallbackHandler): void { this.handlers = this.handlers.filter((_handler) => _handler !== handler); this.inheritableHandlers = this.inheritableHandlers.filter( (_handler) => _handler !== handler ); } setHandlers(handlers: BaseCallbackHandler[], inherit = true): void { this.handlers = []; this.inheritableHandlers = []; for (const handler of handlers) { this.addHandler(handler, inherit); } } addTags(tags: string[], inherit = true): void { this.removeTags(tags); // Remove duplicates this.tags.push(...tags); if (inherit) { this.inheritableTags.push(...tags); } } removeTags(tags: string[]): void { this.tags = this.tags.filter((tag) => 
!tags.includes(tag)); this.inheritableTags = this.inheritableTags.filter( (tag) => !tags.includes(tag) ); } addMetadata(metadata: Record<string, unknown>, inherit = true): void { this.metadata = { ...this.metadata, ...metadata }; if (inherit) { this.inheritableMetadata = { ...this.inheritableMetadata, ...metadata }; } } removeMetadata(metadata: Record<string, unknown>): void { for (const key of Object.keys(metadata)) { delete this.metadata[key]; delete this.inheritableMetadata[key]; } } copy( additionalHandlers: BaseCallbackHandler[] = [], inherit = true ): CallbackManager { const manager = new CallbackManager(this._parentRunId); for (const handler of this.handlers) { const inheritable = this.inheritableHandlers.includes(handler); manager.addHandler(handler, inheritable); } for (const tag of this.tags) { const inheritable = this.inheritableTags.includes(tag); manager.addTags([tag], inheritable); } for (const key of Object.keys(this.metadata)) { const inheritable = Object.keys(this.inheritableMetadata).includes(key); manager.addMetadata({ [key]: this.metadata[key] }, inheritable); } for (const handler of additionalHandlers) { if ( // Prevent multiple copies of console_callback_handler manager.handlers .filter((h) => h.name === "console_callback_handler") .some((h) => h.name === handler.name) ) { continue; } manager.addHandler(handler, inherit); } return manager; } static fromHandlers(handlers: CallbackHandlerMethods) { class Handler extends BaseCallbackHandler { name = uuidv4(); constructor() { super(); Object.assign(this, handlers); } } const manager = new this(); manager.addHandler(new Handler()); return manager; } static configure( inheritableHandlers?: Callbacks, localHandlers?: Callbacks, inheritableTags?: string[], localTags?: string[], inheritableMetadata?: Record<string, unknown>, localMetadata?: Record<string, unknown>, options?: CallbackManagerOptions ): CallbackManager | undefined { return this._configureSync( inheritableHandlers, localHandlers, 
inheritableTags, localTags, inheritableMetadata, localMetadata, options ); } // TODO: Deprecate async method in favor of this one. static _configureSync( inheritableHandlers?: Callbacks, localHandlers?: Callbacks, inheritableTags?: string[], localTags?: string[], inheritableMetadata?: Record<string, unknown>, localMetadata?: Record<string, unknown>, options?: CallbackManagerOptions ) { let callbackManager: CallbackManager | undefined; if (inheritableHandlers || localHandlers) { if (Array.isArray(inheritableHandlers) || !inheritableHandlers) { callbackManager = new CallbackManager(); callbackManager.setHandlers( inheritableHandlers?.map(ensureHandler) ?? [], true ); } else { callbackManager = inheritableHandlers; } callbackManager = callbackManager.copy( Array.isArray(localHandlers) ? localHandlers.map(ensureHandler) : localHandlers?.handlers, false ); } const verboseEnabled = getEnvironmentVariable("LANGCHAIN_VERBOSE") === "true" || options?.verbose; const tracingV2Enabled = LangChainTracer.getTraceableRunTree()?.tracingEnabled || isTracingEnabled(); const tracingEnabled = tracingV2Enabled || (getEnvironmentVariable("LANGCHAIN_TRACING") ?? false); if (verboseEnabled || tracingEnabled) { if (!callbackManager) { callbackManager = new CallbackManager(); } if ( verboseEnabled && !callbackManager.handlers.some( (handler) => handler.name === ConsoleCallbackHandler.prototype.name ) ) { const consoleHandler = new ConsoleCallbackHandler(); callbackManager.addHandler(consoleHandler, true); } if ( tracingEnabled && !callbackManager.handlers.some( (handler) => handler.name === "langchain_tracer" ) ) { if (tracingV2Enabled) { const tracerV2 = new LangChainTracer(); callbackManager.addHandler(tracerV2, true); // handoff between langchain and langsmith/traceable // override the parent run ID callbackManager._parentRunId = LangChainTracer.getTraceableRunTree()?.id ?? 
callbackManager._parentRunId; } } } if (inheritableTags || localTags) { if (callbackManager) { callbackManager.addTags(inheritableTags ?? []); callbackManager.addTags(localTags ?? [], false); } } if (inheritableMetadata || localMetadata) { if (callbackManager) { callbackManager.addMetadata(inheritableMetadata ?? {}); callbackManager.addMetadata(localMetadata ?? {}, false); } } return callbackManager; } } export function ensureHandler( handler: BaseCallbackHandler | CallbackHandlerMethods ): BaseCallbackHandler { if ("name" in handler) { return handler; } return BaseCallbackHandler.fromMethods(handler); } /** * @deprecated Use [`traceable`](https://docs.smith.langchain.com/observability/how_to_guides/tracing/annotate_code) * from "langsmith" instead. */ export class TraceGroup { private runManager?: CallbackManagerForChainRun; constructor( private groupName: string, private options?: { projectName?: string; exampleId?: string; } ) {} private async getTraceGroupCallbackManager( group_name: string, inputs?: ChainValues, options?: LangChainTracerFields ): Promise<CallbackManagerForChainRun> { const cb = new LangChainTracer(options); const cm = await CallbackManager.configure([cb]); const runManager = await cm?.handleChainStart( { lc: 1, type: "not_implemented", id: ["langchain", "callbacks", "groups", group_name], }, inputs ?? {} ); if (!runManager) { throw new Error("Failed to create run group callback manager."); } return runManager; } async start(inputs?: ChainValues): Promise<CallbackManager> { if (!this.runManager) { this.runManager = await this.getTraceGroupCallbackManager( this.groupName, inputs, this.options ); } return this.runManager.getChild(); } async error(err: Error | unknown): Promise<void> { if (this.runManager) { await this.runManager.handleChainError(err); this.runManager = undefined; } } async end(output?: ChainValues): Promise<void> { if (this.runManager) { await this.runManager.handleChainEnd(output ?? 
{}); this.runManager = undefined; } } } // eslint-disable-next-line @typescript-eslint/no-explicit-any function _coerceToDict(value: any, defaultKey: string) { return value && !Array.isArray(value) && typeof value === "object" ? value : { [defaultKey]: value }; } // eslint-disable-next-line @typescript-eslint/no-explicit-any export async function traceAsGroup<T, A extends any[]>( groupOptions: { name: string; } & LangChainTracerFields, enclosedCode: (manager: CallbackManager, ...args: A) => Promise<T>, ...args: A ): Promise<T> { const traceGroup = new TraceGroup(groupOptions.name, groupOptions); const callbackManager = await traceGroup.start({ ...args }); try { const result = await enclosedCode(callbackManager, ...args); await traceGroup.end(_coerceToDict(result, "output")); return result; } catch (err) { await traceGroup.error(err); throw err; } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/callbacks/base.ts
import * as uuid from "uuid";
import type { ChainValues } from "../utils/types/index.js";
import type { BaseMessage } from "../messages/base.js";
import type { AgentAction, AgentFinish } from "../agents.js";
import type {
  ChatGenerationChunk,
  GenerationChunk,
  LLMResult,
} from "../outputs.js";
import {
  Serializable,
  Serialized,
  SerializedNotImplemented,
  get_lc_unique_name,
} from "../load/serializable.js";
import type { SerializedFields } from "../load/map_keys.js";
import type { DocumentInterface } from "../documents/document.js";
import { getEnvironmentVariable } from "../utils/env.js";

// Loose alias used for the `err` parameter of the handle*Error hooks below:
// handlers may receive any thrown value, not just Error instances.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type Error = any;

/**
 * Interface for the input parameters of the BaseCallbackHandler class. It
 * allows to specify which types of events should be ignored by the
 * callback handler.
 */
export interface BaseCallbackHandlerInput {
  ignoreLLM?: boolean;
  ignoreChain?: boolean;
  ignoreAgent?: boolean;
  ignoreRetriever?: boolean;
  ignoreCustomEvent?: boolean;
  // When true, callback invocations are awaited rather than run in the
  // background (see the constructor's handling of `awaitHandlers`).
  _awaitHandler?: boolean;
  // When true, errors thrown inside handler methods are re-raised to the
  // caller instead of being logged and swallowed.
  raiseError?: boolean;
}

/**
 * Interface for the indices of a new token produced by an LLM or Chat
 * Model in streaming mode.
 */
export interface NewTokenIndices {
  prompt: number;
  completion: number;
}

// TODO: Add all additional callback fields here
export type HandleLLMNewTokenCallbackFields = {
  chunk?: GenerationChunk | ChatGenerationChunk;
};

/**
 * Abstract class that provides a set of optional methods that can be
 * overridden in derived classes to handle various events during the
 * execution of a LangChain application.
 */
abstract class BaseCallbackHandlerMethodsClass {
  /**
   * Called at the start of an LLM or Chat Model run, with the prompt(s)
   * and the run ID.
   */
  handleLLMStart?(
    llm: Serialized,
    prompts: string[],
    runId: string,
    parentRunId?: string,
    extraParams?: Record<string, unknown>,
    tags?: string[],
    metadata?: Record<string, unknown>,
    runName?: string
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called when an LLM/ChatModel in `streaming` mode produces a new token
   */
  handleLLMNewToken?(
    token: string,
    /**
     * idx.prompt is the index of the prompt that produced the token
     * (if there are multiple prompts)
     * idx.completion is the index of the completion that produced the token
     * (if multiple completions per prompt are requested)
     */
    idx: NewTokenIndices,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    fields?: HandleLLMNewTokenCallbackFields
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called if an LLM/ChatModel run encounters an error
   */
  handleLLMError?(
    err: Error,
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called at the end of an LLM/ChatModel run, with the output and the run ID.
   */
  handleLLMEnd?(
    output: LLMResult,
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called at the start of a Chat Model run, with the prompt(s)
   * and the run ID.
   */
  handleChatModelStart?(
    llm: Serialized,
    messages: BaseMessage[][],
    runId: string,
    parentRunId?: string,
    extraParams?: Record<string, unknown>,
    tags?: string[],
    metadata?: Record<string, unknown>,
    runName?: string
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called at the start of a Chain run, with the chain name and inputs
   * and the run ID.
   */
  handleChainStart?(
    chain: Serialized,
    inputs: ChainValues,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: Record<string, unknown>,
    runType?: string,
    runName?: string
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called if a Chain run encounters an error
   */
  handleChainError?(
    err: Error,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    kwargs?: { inputs?: Record<string, unknown> }
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called at the end of a Chain run, with the outputs and the run ID.
   */
  handleChainEnd?(
    outputs: ChainValues,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    kwargs?: { inputs?: Record<string, unknown> }
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called at the start of a Tool run, with the tool name and input
   * and the run ID.
   */
  handleToolStart?(
    tool: Serialized,
    input: string,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: Record<string, unknown>,
    runName?: string
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called if a Tool run encounters an error
   */
  handleToolError?(
    err: Error,
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called at the end of a Tool run, with the tool output and the run ID.
   */
  handleToolEnd?(
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    output: any,
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called with arbitrary text emitted during a run, with the run ID.
   */
  handleText?(
    text: string,
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): Promise<void> | void;

  /**
   * Called when an agent is about to execute an action,
   * with the action and the run ID.
   */
  handleAgentAction?(
    action: AgentAction,
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): Promise<void> | void;

  /**
   * Called when an agent finishes execution, before it exits.
   * with the final output and the run ID.
   */
  handleAgentEnd?(
    action: AgentFinish,
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): Promise<void> | void;

  /**
   * Called at the start of a Retriever run, with the query and the run ID.
   */
  handleRetrieverStart?(
    retriever: Serialized,
    query: string,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: Record<string, unknown>,
    name?: string
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called at the end of a Retriever run, with the retrieved documents
   * and the run ID.
   */
  handleRetrieverEnd?(
    documents: DocumentInterface[],
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called if a Retriever run encounters an error
   */
  handleRetrieverError?(
    err: Error,
    runId: string,
    parentRunId?: string,
    tags?: string[]
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;

  /**
   * Called when a user-defined custom event is dispatched during a run,
   * with the event name and an arbitrary payload.
   */
  handleCustomEvent?(
    eventName: string,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    data: any,
    runId: string,
    tags?: string[],
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    metadata?: Record<string, any>
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any> | any;
}

/**
 * Base interface for callbacks. All methods are optional. If a method is not
 * implemented, it will be ignored. If a method is implemented, it will be
 * called at the appropriate time. All methods are called with the run ID of
 * the LLM/ChatModel/Chain that is running, which is generated by the
 * CallbackManager.
 *
 * @interface
 */
export type CallbackHandlerMethods = BaseCallbackHandlerMethodsClass;

/**
 * Abstract base class for creating callback handlers in the LangChain
 * framework. It provides a set of optional methods that can be overridden
 * in derived classes to handle various events during the execution of a
 * LangChain application.
 */
export abstract class BaseCallbackHandler
  extends BaseCallbackHandlerMethodsClass
  implements BaseCallbackHandlerInput, Serializable
{
  // Handlers are not serializable by default; subclasses may opt in.
  lc_serializable = false;

  get lc_namespace(): ["langchain_core", "callbacks", string] {
    return ["langchain_core", "callbacks", this.name];
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return undefined;
  }

  get lc_attributes(): { [key: string]: string } | undefined {
    return undefined;
  }

  get lc_aliases(): { [key: string]: string } | undefined {
    return undefined;
  }

  /**
   * The name of the serializable. Override to provide an alias or
   * to preserve the serialized module name in minified environments.
   *
   * Implemented as a static method to support loading logic.
   */
  static lc_name(): string {
    return this.name;
  }

  /**
   * The final serialized identifier for the module.
   */
  get lc_id(): string[] {
    return [
      ...this.lc_namespace,
      get_lc_unique_name(this.constructor as typeof BaseCallbackHandler),
    ];
  }

  // Constructor inputs, retained for serialization.
  lc_kwargs: SerializedFields;

  // Unique handler name; subclasses must define it.
  abstract name: string;

  // Event-type ignore flags; see BaseCallbackHandlerInput.
  ignoreLLM = false;

  ignoreChain = false;

  ignoreAgent = false;

  ignoreRetriever = false;

  ignoreCustomEvent = false;

  // When true, handler errors are rethrown to the caller.
  raiseError = false;

  // Whether callbacks should be awaited rather than backgrounded. Defaults
  // to true only when LANGCHAIN_CALLBACKS_BACKGROUND is explicitly "false".
  awaitHandlers =
    getEnvironmentVariable("LANGCHAIN_CALLBACKS_BACKGROUND") === "false";

  constructor(input?: BaseCallbackHandlerInput) {
    super();
    this.lc_kwargs = input || {};
    if (input) {
      this.ignoreLLM = input.ignoreLLM ?? this.ignoreLLM;
      this.ignoreChain = input.ignoreChain ?? this.ignoreChain;
      this.ignoreAgent = input.ignoreAgent ?? this.ignoreAgent;
      this.ignoreRetriever = input.ignoreRetriever ?? this.ignoreRetriever;
      this.ignoreCustomEvent =
        input.ignoreCustomEvent ?? this.ignoreCustomEvent;
      this.raiseError = input.raiseError ?? this.raiseError;
      // raiseError implies awaiting: errors can only propagate to the
      // caller if the callback invocation is awaited.
      this.awaitHandlers =
        this.raiseError || (input._awaitHandler ?? this.awaitHandlers);
    }
  }

  /**
   * Returns a new handler of the same concrete class, constructed from
   * this handler's current flags.
   */
  copy(): BaseCallbackHandler {
    return new (this.constructor as new (
      input?: BaseCallbackHandlerInput
    ) => BaseCallbackHandler)(this);
  }

  toJSON(): Serialized {
    return Serializable.prototype.toJSON.call(this);
  }

  toJSONNotImplemented(): SerializedNotImplemented {
    return Serializable.prototype.toJSONNotImplemented.call(this);
  }

  /**
   * Builds a concrete handler from a plain object of callback methods,
   * assigning it a random unique name.
   */
  static fromMethods(methods: CallbackHandlerMethods) {
    class Handler extends BaseCallbackHandler {
      name = uuid.v4();

      constructor() {
        super();
        Object.assign(this, methods);
      }
    }
    return new Handler();
  }
}
0
lc_public_repos/langchainjs/langchain-core/src/callbacks
lc_public_repos/langchainjs/langchain-core/src/callbacks/dispatch/web.ts
import { type RunnableConfig, getCallbackManagerForConfig, } from "../../runnables/config.js"; /** * Dispatch a custom event. Requires an explicit config object. * @param name The name of the custom event. * @param payload The data for the custom event. * Ideally should be JSON serializable to avoid serialization issues downstream, but not enforced. * @param config Config object. * * @example * ```typescript * import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch"; * * const foo = RunnableLambda.from(async (input: string, config?: RunnableConfig) => { * await dispatchCustomEvent( * "my_custom_event", * { arbitraryField: "someval" }, * config * ); * return input; * }); * * const callbacks = [{ * handleCustomEvent: (eventName: string, payload: any) => { * // Logs "my_custom_event" and { arbitraryField: "someval" } * console.log(eventName, payload); * } * }]; * * await foo.invoke("hi", { callbacks }) * ``` */ export async function dispatchCustomEvent( name: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any payload: any, config?: RunnableConfig ) { const callbackManager = await getCallbackManagerForConfig(config); const parentRunId = callbackManager?.getParentRunId(); // We want to get the callback manager for the parent run. // This is a work-around for now to be able to dispatch adhoc events from // within a tool or a lambda and have the metadata events associated // with the parent run rather than have a new run id generated for each. 
if (callbackManager === undefined || parentRunId === undefined) { throw new Error( [ "Unable to dispatch a custom event without a parent run id.", "This function can only be called from within an existing run (e.g.,", "inside a tool or a RunnableLambda).", `\n\nIf you continue to see this error, please import from "@langchain/core/callbacks/dispatch/web"`, "and explicitly pass in a config parameter.", `\n\nOr, if you are calling this from a custom tool, ensure you're using the "tool" helper constructor as documented here:`, "\n |", "\n └-> https://js.langchain.com/docs/how_to/custom_tools#tool-function", "\n", ].join(" ") ); } // We pass parent id as the current run id here intentionally since events dispatch // from within things like RunnableLambda await callbackManager.handleCustomEvent?.(name, payload, parentRunId); }
0
lc_public_repos/langchainjs/langchain-core/src/callbacks
lc_public_repos/langchainjs/langchain-core/src/callbacks/dispatch/index.ts
/* __LC_ALLOW_ENTRYPOINT_SIDE_EFFECTS__ */ import { AsyncLocalStorage } from "node:async_hooks"; import { dispatchCustomEvent as dispatchCustomEventWeb } from "./web.js"; import { type RunnableConfig, ensureConfig } from "../../runnables/config.js"; import { AsyncLocalStorageProviderSingleton } from "../../singletons/index.js"; AsyncLocalStorageProviderSingleton.initializeGlobalInstance( new AsyncLocalStorage() ); /** * Dispatch a custom event. * * Note: this method is only supported in non-web environments * due to usage of async_hooks to infer config. * * If you are using this method in the browser, please import and use * from "@langchain/core/callbacks/dispatch/web". * * @param name The name of the custom event. * @param payload The data for the custom event. * Ideally should be JSON serializable to avoid serialization issues downstream, but not enforced. * @param config Optional config object. * * @example * ```typescript * import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch"; * * const foo = RunnableLambda.from(async (input: string) => { * await dispatchCustomEvent("my_custom_event", { arbitraryField: "someval" }); * return input; * }); * * const callbacks = [{ * handleCustomEvent: (eventName: string, payload: any) => { * // Logs "my_custom_event" and { arbitraryField: "someval" } * console.log(eventName, payload); * } * }]; * * await foo.invoke("hi", { callbacks }) * ``` */ export async function dispatchCustomEvent( eventName: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any payload: any, config?: RunnableConfig ) { const ensuredConfig = ensureConfig(config); await dispatchCustomEventWeb(eventName, payload, ensuredConfig); }
0
lc_public_repos/langchainjs/langchain-core/src/callbacks
lc_public_repos/langchainjs/langchain-core/src/callbacks/tests/manager.int.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { PromptTemplate } from "../../prompts/prompt.js";
import { FakeLLM } from "../../utils/testing/index.js";
import { CallbackManager, traceAsGroup, TraceGroup } from "../manager.js";
import { StringOutputParser } from "../../output_parsers/string.js";

test("Test grouping traces", async () => {
  // Save and restore LANGCHAIN_TRACING_V2: mutating it without cleanup
  // leaks tracing state into every subsequent test in this process.
  const prevTracingV2 = process.env.LANGCHAIN_TRACING_V2;
  process.env.LANGCHAIN_TRACING_V2 = "true";
  try {
    // Two simple prompt -> fake LLM -> string parser chains to trace.
    const chain = PromptTemplate.fromTemplate("hello world")
      .pipe(new FakeLLM({}))
      .pipe(new StringOutputParser());
    const nextChain = PromptTemplate.fromTemplate("This is the day {input2}")
      .pipe(new FakeLLM({}))
      .pipe(new StringOutputParser());
    // Both invocations share the group's callback manager so their runs are
    // recorded under the single "my_chain_group" trace.
    const result = await traceAsGroup(
      { name: "my_chain_group" },
      async (manager: CallbackManager, arg1: string, { chain, nextChain }) => {
        const result = await chain.invoke({ input: arg1 }, manager);
        const nextResult = await nextChain.invoke({ input2: result }, manager);
        return nextResult;
      },
      "I'm arg1",
      { chain, nextChain }
    );
    console.log(result);
  } finally {
    if (prevTracingV2 === undefined) {
      delete process.env.LANGCHAIN_TRACING_V2;
    } else {
      process.env.LANGCHAIN_TRACING_V2 = prevTracingV2;
    }
  }
});

test("Test TraceGroup object", async () => {
  // start() opens the group run and hands back a child callback manager
  // that nests subsequent runs under the group.
  const traceGroup = new TraceGroup("my_trace_group");
  const childManager = await traceGroup.start({ input: "Hello, World" });
  const prompt = PromptTemplate.fromTemplate("Hello, world!");
  const result = await prompt.invoke({}, { callbacks: childManager });
  await traceGroup.end({ value: result.value });
  expect(result.value).toBe("Hello, world!");
});
0
lc_public_repos/langchainjs/langchain-core/src/callbacks
lc_public_repos/langchainjs/langchain-core/src/callbacks/tests/callbacks.test.ts
/* eslint-disable no-promise-executor-return */ import { test, expect } from "@jest/globals"; import * as uuid from "uuid"; import { AsyncLocalStorage } from "node:async_hooks"; import { CallbackManager } from "../manager.js"; import { BaseCallbackHandler, type BaseCallbackHandlerInput } from "../base.js"; import type { Serialized } from "../../load/serializable.js"; import { Document } from "../../documents/document.js"; import type { ChainValues } from "../../utils/types/index.js"; import type { AgentAction, AgentFinish } from "../../agents.js"; import { BaseMessage, HumanMessage } from "../../messages/index.js"; import type { LLMResult } from "../../outputs.js"; import { RunnableLambda } from "../../runnables/base.js"; import { AsyncLocalStorageProviderSingleton } from "../../singletons/index.js"; import { awaitAllCallbacks } from "../promises.js"; class FakeCallbackHandler extends BaseCallbackHandler { name = `fake-${uuid.v4()}`; starts = 0; ends = 0; errors = 0; chainStarts = 0; chainEnds = 0; llmStarts = 0; llmEnds = 0; llmStreams = 0; toolStarts = 0; toolEnds = 0; agentEnds = 0; retrieverStarts = 0; retrieverEnds = 0; texts = 0; constructor(inputs?: BaseCallbackHandlerInput) { super(inputs); } async handleLLMStart(_llm: Serialized, _prompts: string[]): Promise<void> { this.starts += 1; this.llmStarts += 1; } async handleLLMEnd(_output: LLMResult): Promise<void> { this.ends += 1; this.llmEnds += 1; } async handleLLMNewToken(_token: string): Promise<void> { this.llmStreams += 1; } async handleLLMError(_err: Error): Promise<void> { this.errors += 1; } async handleChainStart( _chain: Serialized, _inputs: ChainValues ): Promise<void> { this.starts += 1; this.chainStarts += 1; } async handleChainEnd(_outputs: ChainValues): Promise<void> { this.ends += 1; this.chainEnds += 1; } async handleChainError(_err: Error): Promise<void> { this.errors += 1; } async handleToolStart(_tool: Serialized, _input: string): Promise<void> { this.starts += 1; this.toolStarts += 1; } 
async handleToolEnd(_output: string): Promise<void> { this.ends += 1; this.toolEnds += 1; } async handleToolError(_err: Error): Promise<void> { this.errors += 1; } async handleText(_text: string): Promise<void> { this.texts += 1; } async handleAgentAction(_action: AgentAction): Promise<void> { this.starts += 1; this.toolStarts += 1; } async handleAgentEnd(_action: AgentFinish): Promise<void> { this.ends += 1; this.agentEnds += 1; } async handleRetrieverStart( _retriever: Serialized, _query: string ): Promise<void> { this.starts += 1; this.retrieverStarts += 1; } async handleRetrieverEnd( _documents: Document<Record<string, unknown>>[] ): Promise<void> { this.ends += 1; this.retrieverEnds += 1; } async handleRetrieverError(_err: Error): Promise<void> { this.errors += 1; } copy(): FakeCallbackHandler { const newInstance = new FakeCallbackHandler(); newInstance.name = this.name; newInstance.starts = this.starts; newInstance.ends = this.ends; newInstance.errors = this.errors; newInstance.chainStarts = this.chainStarts; newInstance.chainEnds = this.chainEnds; newInstance.llmStarts = this.llmStarts; newInstance.llmEnds = this.llmEnds; newInstance.llmStreams = this.llmStreams; newInstance.toolStarts = this.toolStarts; newInstance.toolEnds = this.toolEnds; newInstance.agentEnds = this.agentEnds; newInstance.retrieverStarts = this.retrieverStarts; newInstance.retrieverEnds = this.retrieverEnds; newInstance.texts = this.texts; return newInstance; } } class FakeCallbackHandlerWithChatStart extends FakeCallbackHandler { chatModelStarts = 0; async handleChatModelStart( _llm: Serialized, _messages: BaseMessage[][] ): Promise<void> { this.starts += 1; this.chatModelStarts += 1; } } const serialized: Serialized = { lc: 1, type: "constructor", id: ["test"], kwargs: {}, }; test("CallbackManager", async () => { const manager = new CallbackManager(); const handler1 = new FakeCallbackHandler(); const handler2 = new FakeCallbackHandler(); manager.addHandler(handler1); 
manager.addHandler(handler2); const llmCbs = await manager.handleLLMStart(serialized, ["test"]); await Promise.all( llmCbs.map(async (llmCb) => { await llmCb.handleLLMEnd({ generations: [] }); await llmCb.handleLLMNewToken("test"); await llmCb.handleLLMError(new Error("test")); }) ); const chainCb = await manager.handleChainStart(serialized, { test: "test" }); await chainCb.handleChainEnd({ test: "test" }); await chainCb.handleChainError(new Error("test")); const toolCb = await manager.handleToolStart(serialized, "test"); await toolCb.handleToolEnd("test"); await toolCb.handleToolError(new Error("test")); await chainCb.handleText("test"); await chainCb.handleAgentAction({ tool: "test", toolInput: "test", log: "test", }); await chainCb.handleAgentEnd({ returnValues: { test: "test" }, log: "test" }); const retrieverCb = await manager.handleRetrieverStart(serialized, "test"); await retrieverCb.handleRetrieverEnd([ new Document({ pageContent: "test", metadata: { test: "test" } }), ]); await retrieverCb.handleRetrieverError(new Error("test")); // In case background mode is on while running this test await new Promise((resolve) => setTimeout(resolve, 100)); for (const handler of [handler1, handler2]) { expect(handler.starts).toBe(5); expect(handler.ends).toBe(5); expect(handler.errors).toBe(4); expect(handler.retrieverStarts).toBe(1); expect(handler.retrieverEnds).toBe(1); expect(handler.llmStarts).toBe(1); expect(handler.llmEnds).toBe(1); expect(handler.llmStreams).toBe(1); expect(handler.chainStarts).toBe(1); expect(handler.chainEnds).toBe(1); expect(handler.toolStarts).toBe(2); expect(handler.toolEnds).toBe(1); expect(handler.agentEnds).toBe(1); expect(handler.texts).toBe(1); } }); test("CallbackManager Chat Message Handling", async () => { const manager = new CallbackManager(); const handler1 = new FakeCallbackHandler(); const handler2 = new FakeCallbackHandlerWithChatStart(); manager.addHandler(handler1); manager.addHandler(handler2); const llmCbs = await 
manager.handleChatModelStart(serialized, [ [new HumanMessage("test")], ]); await Promise.all( llmCbs.map(async (llmCb) => { await llmCb.handleLLMEnd({ generations: [] }); }) ); // Everything treated as llm in handler 1 expect(handler1.llmStarts).toBe(1); expect(handler2.llmStarts).toBe(0); expect(handler2.chatModelStarts).toBe(1); // These should all be treated the same for (const handler of [handler1, handler2]) { expect(handler.starts).toBe(1); expect(handler.ends).toBe(1); expect(handler.errors).toBe(0); expect(handler.llmEnds).toBe(1); } }); test("CallbackHandler with ignoreLLM", async () => { const handler = new FakeCallbackHandler({ ignoreLLM: true, }); const manager = new CallbackManager(); manager.addHandler(handler); const llmCbs = await manager.handleLLMStart(serialized, ["test"]); await Promise.all( llmCbs.map(async (llmCb) => { await llmCb.handleLLMEnd({ generations: [] }); await llmCb.handleLLMNewToken("test"); await llmCb.handleLLMError(new Error("test")); }) ); expect(handler.starts).toBe(0); expect(handler.ends).toBe(0); expect(handler.errors).toBe(0); expect(handler.llmStarts).toBe(0); expect(handler.llmEnds).toBe(0); expect(handler.llmStreams).toBe(0); }); test("CallbackHandler with ignoreRetriever", async () => { const handler = new FakeCallbackHandler({ ignoreRetriever: true, }); const manager = new CallbackManager(); manager.addHandler(handler); const retrieverCb = await manager.handleRetrieverStart(serialized, "test"); await retrieverCb.handleRetrieverEnd([ new Document({ pageContent: "test", metadata: { test: "test" } }), ]); await retrieverCb.handleRetrieverError(new Error("test")); expect(handler.starts).toBe(0); expect(handler.ends).toBe(0); expect(handler.errors).toBe(0); expect(handler.retrieverStarts).toBe(0); expect(handler.retrieverEnds).toBe(0); }); test("CallbackHandler with ignoreChain", async () => { const handler = new FakeCallbackHandler({ ignoreChain: true, }); const manager = new CallbackManager(); manager.addHandler(handler); 
const chainCb = await manager.handleChainStart(serialized, { test: "test" }); await chainCb.handleChainEnd({ test: "test" }); await chainCb.handleChainError(new Error("test")); expect(handler.starts).toBe(0); expect(handler.ends).toBe(0); expect(handler.errors).toBe(0); expect(handler.chainStarts).toBe(0); expect(handler.chainEnds).toBe(0); }); test("CallbackHandler with ignoreAgent", async () => { const handler = new FakeCallbackHandler({ ignoreAgent: true, }); const manager = new CallbackManager(); manager.addHandler(handler); const toolCb = await manager.handleToolStart(serialized, "test"); await toolCb.handleToolEnd("test"); await toolCb.handleToolError(new Error("test")); const chainCb = await manager.handleChainStart(serialized, {}); await chainCb.handleAgentAction({ tool: "test", toolInput: "test", log: "test", }); await chainCb.handleAgentEnd({ returnValues: { test: "test" }, log: "test" }); expect(handler.starts).toBe(1); expect(handler.ends).toBe(0); expect(handler.errors).toBe(0); expect(handler.toolStarts).toBe(0); expect(handler.toolEnds).toBe(0); expect(handler.agentEnds).toBe(0); }); test("CallbackManager with child manager", async () => { const chainRunId = "chainRunId"; let llmWasCalled = false; let chainWasCalled = false; const manager = CallbackManager.fromHandlers({ async handleLLMStart( _llm: Serialized, _prompts: string[], _runId?: string, parentRunId?: string ) { expect(parentRunId).toBe(chainRunId); llmWasCalled = true; }, async handleChainStart( _chain: Serialized, _inputs: ChainValues, runId?: string, parentRunId?: string ) { expect(runId).toBe(chainRunId); expect(parentRunId).toBe(undefined); chainWasCalled = true; }, }); const chainCb = await manager.handleChainStart( serialized, { test: "test" }, chainRunId ); await chainCb.getChild().handleLLMStart(serialized, ["test"]); expect(llmWasCalled).toBe(true); expect(chainWasCalled).toBe(true); }); test("CallbackManager with child manager inherited handlers", async () => { const 
callbackManager1 = new CallbackManager(); const handler1 = new FakeCallbackHandler(); const handler2 = new FakeCallbackHandler(); const handler3 = new FakeCallbackHandler(); const handler4 = new FakeCallbackHandler(); callbackManager1.setHandlers([handler1, handler2]); expect(callbackManager1.handlers).toEqual([handler1, handler2]); expect(callbackManager1.inheritableHandlers).toEqual([handler1, handler2]); const callbackManager2 = callbackManager1.copy([handler3, handler4]); expect(callbackManager2.handlers).toEqual([ handler1, handler2, handler3, handler4, ]); expect(callbackManager2.inheritableHandlers).toEqual([ handler1, handler2, handler3, handler4, ]); const callbackManager3 = callbackManager1.copy([handler3, handler4], false); expect(callbackManager3.handlers).toEqual([ handler1, handler2, handler3, handler4, ]); expect(callbackManager3.inheritableHandlers).toEqual([handler1, handler2]); const chainCb = await callbackManager3.handleChainStart(serialized, { test: "test", }); const childManager = chainCb.getChild(); expect(childManager.handlers.map((h) => h.name)).toEqual([ handler1.name, handler2.name, ]); expect(childManager.inheritableHandlers.map((h) => h.name)).toEqual([ handler1.name, handler2.name, ]); const toolCb = await childManager.handleToolStart(serialized, "test"); const childManager2 = toolCb.getChild(); expect(childManager2.handlers.map((h) => h.name)).toEqual([ handler1.name, handler2.name, ]); expect(childManager2.inheritableHandlers.map((h) => h.name)).toEqual([ handler1.name, handler2.name, ]); }); test("CallbackManager.copy()", () => { const callbackManager1 = new CallbackManager(); const handler1 = new FakeCallbackHandler(); const handler2 = new FakeCallbackHandler(); const handler3 = new FakeCallbackHandler(); const handler4 = new FakeCallbackHandler(); callbackManager1.addHandler(handler1, true); callbackManager1.addHandler(handler2, false); callbackManager1.addTags(["a"], true); callbackManager1.addTags(["b"], false); 
callbackManager1.addMetadata({ a: "a" }, true); callbackManager1.addMetadata({ b: "b" }, false); expect(callbackManager1.handlers).toEqual([handler1, handler2]); expect(callbackManager1.inheritableHandlers).toEqual([handler1]); expect(callbackManager1.tags).toEqual(["a", "b"]); expect(callbackManager1.inheritableTags).toEqual(["a"]); expect(callbackManager1.metadata).toEqual({ a: "a", b: "b" }); expect(callbackManager1.inheritableMetadata).toEqual({ a: "a" }); const callbackManager2 = callbackManager1.copy([handler3]); expect(callbackManager2.handlers.map((h) => h.name)).toEqual([ handler1.name, handler2.name, handler3.name, ]); expect(callbackManager2.inheritableHandlers.map((h) => h.name)).toEqual([ handler1.name, handler3.name, ]); expect(callbackManager2.tags).toEqual(["a", "b"]); expect(callbackManager2.inheritableTags).toEqual(["a"]); const callbackManager3 = callbackManager2.copy([handler4], false); expect(callbackManager3.handlers.map((h) => h.name)).toEqual([ handler1.name, handler2.name, handler3.name, handler4.name, ]); expect(callbackManager3.inheritableHandlers.map((h) => h.name)).toEqual([ handler1.name, handler3.name, ]); }); class FakeCallbackHandlerWithErrors extends FakeCallbackHandler { constructor(input: BaseCallbackHandlerInput) { super({ ...input, raiseError: true }); } async handleChainStart( _chain: Serialized, _inputs: ChainValues ): Promise<void> { throw Error("error!"); } async handleLLMStart(_llm: Serialized, _prompts: string[]): Promise<void> { throw Error("llm start error!"); } } test("error handling in chain start", async () => { const handler = new FakeCallbackHandlerWithErrors({ ignoreLLM: true, }); const manager = new CallbackManager(undefined); manager.addHandler(handler); await expect(async () => { await manager.handleChainStart(serialized, ["test"]); }).rejects.toThrowError(); await manager.handleLLMStart(serialized, ["test"]); }); test("error handling in llm start", async () => { const handler = new 
FakeCallbackHandlerWithErrors({ ignoreChain: true, }); const manager = new CallbackManager(undefined); manager.addHandler(handler); await manager.handleChainStart(serialized, ["test"]); await expect(async () => { await manager.handleLLMStart(serialized, ["test"]); }).rejects.toThrowError(); }); test("chain should still run if a normal callback handler throws an error", async () => { const chain = RunnableLambda.from(async () => "hello world"); const res = await chain.invoke( {}, { callbacks: [ { handleChainStart: () => { throw new Error("Bad"); }, }, ], } ); expect(res).toEqual("hello world"); }); test("runnables in callbacks should be root runs", async () => { AsyncLocalStorageProviderSingleton.initializeGlobalInstance( new AsyncLocalStorage() ); const nestedChain = RunnableLambda.from(async () => { const subRun = RunnableLambda.from(async () => "hello world"); return await subRun.invoke({ foo: "bar" }); }); let error; let finalInputs; const res = await nestedChain.invoke( {}, { callbacks: [ { handleChainStart: (_chain, inputs) => { finalInputs = inputs; try { expect( AsyncLocalStorageProviderSingleton.getRunnableConfig() ).toEqual(undefined); } catch (e) { error = e; } }, }, ], } ); await awaitAllCallbacks(); expect(res).toEqual("hello world"); expect(error).toBe(undefined); expect(finalInputs).toEqual({ foo: "bar" }); });
0
lc_public_repos/langchainjs/langchain-core/src/callbacks
lc_public_repos/langchainjs/langchain-core/src/callbacks/tests/run_collector.test.ts
import { v4 as uuidv4, validate } from "uuid"; import { Run } from "langsmith/schemas"; import { describe, it, expect } from "@jest/globals"; import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "../../prompts/chat.js"; import { BaseLLM } from "../../language_models/llms.js"; import { StringOutputParser } from "../../output_parsers/string.js"; import type { LLMResult } from "../../outputs.js"; import { RunCollectorCallbackHandler } from "../../tracers/run_collector.js"; class FakeLLM extends BaseLLM { nrMapCalls = 0; nrReduceCalls = 0; _llmType(): string { return "fake_1"; } async _generate(_prompts: string[]): Promise<LLMResult> { return { generations: [ [ { text: "Foo.", }, ], ], }; } } describe("RunCollectorCallbackHandler", () => { it("should persist the given run object and set the reference_example_id to the exampleId", async () => { // Create a chain that uses the dataset const prompt = ChatPromptTemplate.fromMessages([ SystemMessagePromptTemplate.fromTemplate("You are in a rap battle."), HumanMessagePromptTemplate.fromTemplate("Write the following {input}"), ]); const model = new FakeLLM({}); const chain = prompt.pipe(model).pipe(new StringOutputParser()); const exampleId = uuidv4(); const collector = new RunCollectorCallbackHandler({ exampleId }); await chain.invoke({ input: "foo" }, { callbacks: [collector] }); expect(collector.tracedRuns.length).toBe(1); const tracedRun = collector.tracedRuns[0]; expect(tracedRun.id).toBeDefined(); if (tracedRun.id && validate(tracedRun.id)) { expect(validate(tracedRun.id)).toBe(true); } expect(tracedRun.reference_example_id).toBe(exampleId); expect((tracedRun as Run)?.child_runs?.length).toBe(3); }); });
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "../src/callbacks/promises.js"; afterAll(awaitAllCallbacks);
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-dynamic-sessions/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-dynamic-sessions/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.