index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/combine_docs_chain.ts
import { Document } from "@langchain/core/documents"; import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { BasePromptTemplate, PromptTemplate } from "@langchain/core/prompts"; import type { SerializedStuffDocumentsChain, SerializedMapReduceDocumentsChain, SerializedRefineDocumentsChain, } from "./serde.js"; import { BaseChain, ChainInputs } from "./base.js"; import { LLMChain } from "./llm_chain.js"; /** * Interface for the input properties of the StuffDocumentsChain class. */ export interface StuffDocumentsChainInput extends ChainInputs { /** LLM Wrapper to use after formatting documents */ llmChain: LLMChain; inputKey?: string; /** Variable name in the LLM chain to put the documents in */ documentVariableName?: string; } /** * Chain that combines documents by stuffing into context. * @augments BaseChain * @augments StuffDocumentsChainInput */ export class StuffDocumentsChain extends BaseChain implements StuffDocumentsChainInput { static lc_name() { return "StuffDocumentsChain"; } llmChain: LLMChain; inputKey = "input_documents"; documentVariableName = "context"; get inputKeys() { return [this.inputKey, ...this.llmChain.inputKeys].filter( (key) => key !== this.documentVariableName ); } get outputKeys() { return this.llmChain.outputKeys; } constructor(fields: StuffDocumentsChainInput) { super(fields); this.llmChain = fields.llmChain; this.documentVariableName = fields.documentVariableName ?? this.documentVariableName; this.inputKey = fields.inputKey ?? this.inputKey; } /** @ignore */ _prepInputs(values: ChainValues): ChainValues { if (!(this.inputKey in values)) { throw new Error(`Document key ${this.inputKey} not found.`); } const { [this.inputKey]: docs, ...rest } = values; const texts = (docs as Document[]).map(({ pageContent }) => pageContent); const text = texts.join("\n\n"); return { ...rest, [this.documentVariableName]: text, }; } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { const result = await this.llmChain.call( this._prepInputs(values), runManager?.getChild("combine_documents") ); return result; } _chainType() { return "stuff_documents_chain" as const; } static async deserialize(data: SerializedStuffDocumentsChain) { if (!data.llm_chain) { throw new Error("Missing llm_chain"); } return new StuffDocumentsChain({ llmChain: await LLMChain.deserialize(data.llm_chain), }); } serialize(): SerializedStuffDocumentsChain { return { _type: this._chainType(), llm_chain: this.llmChain.serialize(), }; } } /** * Interface for the input properties of the MapReduceDocumentsChain * class. */ export interface MapReduceDocumentsChainInput extends StuffDocumentsChainInput { /** The maximum number of tokens before requiring to do the reduction */ maxTokens?: number; /** The maximum number of iterations to run through the map */ maxIterations?: number; /** Ensures that the map step is taken regardless of max tokens */ ensureMapStep?: boolean; /** Chain to use to combine results of applying llm_chain to documents. */ combineDocumentChain: StuffDocumentsChain; /** Return the results of the map steps in the output. */ returnIntermediateSteps?: boolean; } /** * Combine documents by mapping a chain over them, then combining results. * @augments BaseChain * @augments StuffDocumentsChainInput */ export class MapReduceDocumentsChain extends BaseChain implements MapReduceDocumentsChainInput { static lc_name() { return "MapReduceDocumentsChain"; } llmChain: LLMChain; inputKey = "input_documents"; documentVariableName = "context"; returnIntermediateSteps = false; get inputKeys() { return [this.inputKey, ...this.combineDocumentChain.inputKeys]; } get outputKeys() { return this.combineDocumentChain.outputKeys; } maxTokens = 3000; maxIterations = 10; ensureMapStep = false; combineDocumentChain: StuffDocumentsChain; constructor(fields: MapReduceDocumentsChainInput) { super(fields); this.llmChain = fields.llmChain; this.combineDocumentChain = fields.combineDocumentChain; this.documentVariableName = fields.documentVariableName ?? this.documentVariableName; this.ensureMapStep = fields.ensureMapStep ?? this.ensureMapStep; this.inputKey = fields.inputKey ?? this.inputKey; this.maxTokens = fields.maxTokens ?? this.maxTokens; this.maxIterations = fields.maxIterations ?? this.maxIterations; this.returnIntermediateSteps = fields.returnIntermediateSteps ?? false; } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { if (!(this.inputKey in values)) { throw new Error(`Document key ${this.inputKey} not found.`); } const { [this.inputKey]: docs, ...rest } = values; let currentDocs = docs as Document[]; let intermediateSteps: string[] = []; // For each iteration, we'll use the `llmChain` to get a new result for (let i = 0; i < this.maxIterations; i += 1) { const inputs = currentDocs.map((d) => ({ [this.documentVariableName]: d.pageContent, ...rest, })); const canSkipMapStep = i !== 0 || !this.ensureMapStep; if (canSkipMapStep) { // Calculate the total tokens required in the input const formatted = await this.combineDocumentChain.llmChain.prompt.format( this.combineDocumentChain._prepInputs({ [this.combineDocumentChain.inputKey]: currentDocs, ...rest, }) ); const length = await this.combineDocumentChain.llmChain._getNumTokens( formatted ); const withinTokenLimit = length < this.maxTokens; // If we can skip the map step, and we're within the token limit, we don't // need to run the map step, so just break out of the loop. if (withinTokenLimit) { break; } } const results = await this.llmChain.apply( inputs, // If we have a runManager, then we need to create a child for each input // so that we can track the progress of each input. runManager ? Array.from({ length: inputs.length }, (_, i) => runManager.getChild(`map_${i + 1}`) ) : undefined ); const { outputKey } = this.llmChain; // If the flag is set, then concat that to the intermediate steps if (this.returnIntermediateSteps) { intermediateSteps = intermediateSteps.concat( results.map((r) => r[outputKey]) ); } currentDocs = results.map((r) => ({ pageContent: r[outputKey], metadata: {}, })); } // Now, with the final result of all the inputs from the `llmChain`, we can // run the `combineDocumentChain` over them. const newInputs = { [this.combineDocumentChain.inputKey]: currentDocs, ...rest, }; const result = await this.combineDocumentChain.call( newInputs, runManager?.getChild("combine_documents") ); // Return the intermediate steps results if the flag is set if (this.returnIntermediateSteps) { return { ...result, intermediateSteps }; } return result; } _chainType() { return "map_reduce_documents_chain" as const; } static async deserialize(data: SerializedMapReduceDocumentsChain) { if (!data.llm_chain) { throw new Error("Missing llm_chain"); } if (!data.combine_document_chain) { throw new Error("Missing combine_document_chain"); } return new MapReduceDocumentsChain({ llmChain: await LLMChain.deserialize(data.llm_chain), combineDocumentChain: await StuffDocumentsChain.deserialize( data.combine_document_chain ), }); } serialize(): SerializedMapReduceDocumentsChain { return { _type: this._chainType(), llm_chain: this.llmChain.serialize(), combine_document_chain: this.combineDocumentChain.serialize(), }; } } /** * Interface for the input properties of the RefineDocumentsChain class. */ export interface RefineDocumentsChainInput extends StuffDocumentsChainInput { refineLLMChain: LLMChain; documentPrompt?: BasePromptTemplate; initialResponseName?: string; documentVariableName?: string; outputKey?: string; } /** * Combine documents by doing a first pass and then refining on more documents. * @augments BaseChain * @augments RefineDocumentsChainInput */ export class RefineDocumentsChain extends BaseChain implements RefineDocumentsChainInput { static lc_name() { return "RefineDocumentsChain"; } llmChain: LLMChain; inputKey = "input_documents"; outputKey = "output_text"; documentVariableName = "context"; initialResponseName = "existing_answer"; refineLLMChain: LLMChain; get defaultDocumentPrompt(): BasePromptTemplate { return new PromptTemplate({ inputVariables: ["page_content"], template: "{page_content}", }); } documentPrompt = this.defaultDocumentPrompt; get inputKeys() { return [ ...new Set([ this.inputKey, ...this.llmChain.inputKeys, ...this.refineLLMChain.inputKeys, ]), ].filter( (key) => key !== this.documentVariableName && key !== this.initialResponseName ); } get outputKeys() { return [this.outputKey]; } constructor(fields: RefineDocumentsChainInput) { super(fields); this.llmChain = fields.llmChain; this.refineLLMChain = fields.refineLLMChain; this.documentVariableName = fields.documentVariableName ?? this.documentVariableName; this.inputKey = fields.inputKey ?? this.inputKey; this.outputKey = fields.outputKey ?? this.outputKey; this.documentPrompt = fields.documentPrompt ?? this.documentPrompt; this.initialResponseName = fields.initialResponseName ?? this.initialResponseName; } /** @ignore */ async _constructInitialInputs(doc: Document, rest: Record<string, unknown>) { const baseInfo: Record<string, unknown> = { page_content: doc.pageContent, ...doc.metadata, }; const documentInfo: Record<string, unknown> = {}; this.documentPrompt.inputVariables.forEach((value) => { documentInfo[value] = baseInfo[value]; }); const baseInputs: Record<string, unknown> = { [this.documentVariableName]: await this.documentPrompt.format({ ...documentInfo, }), }; const inputs = { ...baseInputs, ...rest }; return inputs; } /** @ignore */ async _constructRefineInputs(doc: Document, res: string) { const baseInfo: Record<string, unknown> = { page_content: doc.pageContent, ...doc.metadata, }; const documentInfo: Record<string, unknown> = {}; this.documentPrompt.inputVariables.forEach((value) => { documentInfo[value] = baseInfo[value]; }); const baseInputs: Record<string, unknown> = { [this.documentVariableName]: await this.documentPrompt.format({ ...documentInfo, }), }; const inputs = { [this.initialResponseName]: res, ...baseInputs }; return inputs; } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { if (!(this.inputKey in values)) { throw new Error(`Document key ${this.inputKey} not found.`); } const { [this.inputKey]: docs, ...rest } = values; const currentDocs = docs as Document[]; const initialInputs = await this._constructInitialInputs( currentDocs[0], rest ); let res = await this.llmChain.predict( { ...initialInputs }, runManager?.getChild("answer") ); const refineSteps = [res]; for (let i = 1; i < currentDocs.length; i += 1) { const refineInputs = await this._constructRefineInputs( currentDocs[i], res ); const inputs = { ...refineInputs, ...rest }; res = await this.refineLLMChain.predict( { ...inputs }, runManager?.getChild("refine") ); refineSteps.push(res); } return { [this.outputKey]: res }; } _chainType() { return "refine_documents_chain" as const; } static async deserialize(data: SerializedRefineDocumentsChain) { const SerializedLLMChain = data.llm_chain; if (!SerializedLLMChain) { throw new Error("Missing llm_chain"); } const SerializedRefineDocumentChain = data.refine_llm_chain; if (!SerializedRefineDocumentChain) { throw new Error("Missing refine_llm_chain"); } return new RefineDocumentsChain({ llmChain: await LLMChain.deserialize(SerializedLLMChain), refineLLMChain: await LLMChain.deserialize(SerializedRefineDocumentChain), }); } serialize(): SerializedRefineDocumentsChain { return { _type: this._chainType(), llm_chain: this.llmChain.serialize(), refine_llm_chain: this.refineLLMChain.serialize(), }; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/llm_chain.ts
import { BaseLanguageModel, BaseLanguageModelInterface, BaseLanguageModelInput, } from "@langchain/core/language_models/base"; import type { ChainValues } from "@langchain/core/utils/types"; import type { Generation } from "@langchain/core/outputs"; import type { BaseMessage } from "@langchain/core/messages"; import type { BasePromptValueInterface } from "@langchain/core/prompt_values"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { BaseLLMOutputParser, BaseOutputParser, } from "@langchain/core/output_parsers"; import { CallbackManager, BaseCallbackConfig, CallbackManagerForChainRun, Callbacks, } from "@langchain/core/callbacks/manager"; import { Runnable, type RunnableInterface } from "@langchain/core/runnables"; import { BaseChain, ChainInputs } from "./base.js"; import { SerializedLLMChain } from "./serde.js"; import { NoOpOutputParser } from "../output_parsers/noop.js"; type LLMType = | BaseLanguageModelInterface | Runnable<BaseLanguageModelInput, string> | Runnable<BaseLanguageModelInput, BaseMessage>; // eslint-disable-next-line @typescript-eslint/no-explicit-any type CallOptionsIfAvailable<T> = T extends { CallOptions: infer CO } ? CO : any; /** * Interface for the input parameters of the LLMChain class. */ export interface LLMChainInput< T extends string | object = string, Model extends LLMType = LLMType > extends ChainInputs { /** Prompt object to use */ prompt: BasePromptTemplate; /** LLM Wrapper to use */ llm: Model; /** Kwargs to pass to LLM */ llmKwargs?: CallOptionsIfAvailable<Model>; /** OutputParser to use */ outputParser?: BaseLLMOutputParser<T>; /** Key to use for output, defaults to `text` */ outputKey?: string; } function isBaseLanguageModel(llmLike: unknown): llmLike is BaseLanguageModel { return typeof (llmLike as BaseLanguageModelInterface)._llmType === "function"; } function _getLanguageModel(llmLike: RunnableInterface): BaseLanguageModel { if (isBaseLanguageModel(llmLike)) { return llmLike; } else if ("bound" in llmLike && Runnable.isRunnable(llmLike.bound)) { return _getLanguageModel(llmLike.bound); } else if ( "runnable" in llmLike && "fallbacks" in llmLike && Runnable.isRunnable(llmLike.runnable) ) { return _getLanguageModel(llmLike.runnable); } else if ("default" in llmLike && Runnable.isRunnable(llmLike.default)) { return _getLanguageModel(llmLike.default); } else { throw new Error("Unable to extract BaseLanguageModel from llmLike object."); } } /** * @deprecated This class will be removed in 1.0.0. Use the LangChain Expression Language (LCEL) instead. * See the example below for how to use LCEL with the LLMChain class: * * Chain to run queries against LLMs. * * @example * ```ts * import { ChatPromptTemplate } from "@langchain/core/prompts"; * import { ChatOpenAI } from "@langchain/openai"; * * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke"); * const llm = new ChatOpenAI(); * const chain = prompt.pipe(llm); * * const response = await chain.invoke({ adjective: "funny" }); * ``` */ export class LLMChain< T extends string | object = string, Model extends LLMType = LLMType > extends BaseChain implements LLMChainInput<T> { static lc_name() { return "LLMChain"; } lc_serializable = true; prompt: BasePromptTemplate; llm: Model; llmKwargs?: CallOptionsIfAvailable<Model>; outputKey = "text"; outputParser?: BaseLLMOutputParser<T>; get inputKeys() { return this.prompt.inputVariables; } get outputKeys() { return [this.outputKey]; } constructor(fields: LLMChainInput<T, Model>) { super(fields); this.prompt = fields.prompt; this.llm = fields.llm; this.llmKwargs = fields.llmKwargs; this.outputKey = fields.outputKey ?? this.outputKey; this.outputParser = fields.outputParser ?? (new NoOpOutputParser() as BaseOutputParser<T>); if (this.prompt.outputParser) { if (fields.outputParser) { throw new Error("Cannot set both outputParser and prompt.outputParser"); } this.outputParser = this.prompt.outputParser as BaseOutputParser<T>; } } private getCallKeys(): string[] { const callKeys = "callKeys" in this.llm ? this.llm.callKeys : []; return callKeys; } /** @ignore */ _selectMemoryInputs(values: ChainValues): ChainValues { const valuesForMemory = super._selectMemoryInputs(values); const callKeys = this.getCallKeys(); for (const key of callKeys) { if (key in values) { delete valuesForMemory[key]; } } return valuesForMemory; } /** @ignore */ async _getFinalOutput( generations: Generation[], promptValue: BasePromptValueInterface, runManager?: CallbackManagerForChainRun ): Promise<unknown> { let finalCompletion: unknown; if (this.outputParser) { finalCompletion = await this.outputParser.parseResultWithPrompt( generations, promptValue, runManager?.getChild() ); } else { finalCompletion = generations[0].text; } return finalCompletion; } /** * Run the core logic of this chain and add to output if desired. * * Wraps _call and handles memory. */ call( values: ChainValues & CallOptionsIfAvailable<Model>, config?: Callbacks | BaseCallbackConfig ): Promise<ChainValues> { return super.call(values, config); } /** @ignore */ async _call( values: ChainValues & CallOptionsIfAvailable<Model>, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { const valuesForPrompt = { ...values }; const valuesForLLM = { ...this.llmKwargs, } as CallOptionsIfAvailable<Model>; const callKeys = this.getCallKeys(); for (const key of callKeys) { if (key in values) { if (valuesForLLM) { valuesForLLM[key as keyof CallOptionsIfAvailable<Model>] = values[key]; delete valuesForPrompt[key]; } } } const promptValue = await this.prompt.formatPromptValue(valuesForPrompt); if ("generatePrompt" in this.llm) { const { generations } = await this.llm.generatePrompt( [promptValue], valuesForLLM, runManager?.getChild() ); return { [this.outputKey]: await this._getFinalOutput( generations[0], promptValue, runManager ), }; } const modelWithParser = this.outputParser ? this.llm.pipe(this.outputParser) : this.llm; const response = await modelWithParser.invoke( promptValue, runManager?.getChild() ); return { [this.outputKey]: response, }; } /** * Format prompt with values and pass to LLM * * @param values - keys to pass to prompt template * @param callbackManager - CallbackManager to use * @returns Completion from LLM. * * @example * ```ts * llm.predict({ adjective: "funny" }) * ``` */ async predict( values: ChainValues & CallOptionsIfAvailable<Model>, callbackManager?: CallbackManager ): Promise<T> { const output = await this.call(values, callbackManager); return output[this.outputKey]; } _chainType() { return "llm" as const; } static async deserialize(data: SerializedLLMChain) { const { llm, prompt } = data; if (!llm) { throw new Error("LLMChain must have llm"); } if (!prompt) { throw new Error("LLMChain must have prompt"); } return new LLMChain({ llm: await BaseLanguageModel.deserialize(llm), prompt: await BasePromptTemplate.deserialize(prompt), }); } /** @deprecated */ serialize(): SerializedLLMChain { const serialize = "serialize" in this.llm ? this.llm.serialize() : undefined; return { _type: `${this._chainType()}_chain`, llm: serialize, prompt: this.prompt.serialize(), }; } _getNumTokens(text: string): Promise<number> { return _getLanguageModel(this.llm).getNumTokens(text); } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/serde.ts
import type { SerializedLLM } from "@langchain/core/language_models/llms"; import { SerializedBasePromptTemplate } from "@langchain/core/prompts"; /** * Represents the serialized form of an LLMChain. It includes properties * such as `_type`, `llm`, and `prompt`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedLLMChain = { _type: "llm_chain"; llm?: SerializedLLM; prompt?: SerializedBasePromptTemplate; }; /** * Represents the serialized form of a SequentialChain. It includes * properties such as `_type`, `input_variables`, `output_variables`, and * `chains`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedSequentialChain = { _type: "sequential_chain"; input_variables: string[]; output_variables: string[]; chains: SerializedBaseChain[]; }; /** * Represents the serialized form of a SimpleSequentialChain. It includes * properties such as `_type` and `chains`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedSimpleSequentialChain = { _type: "simple_sequential_chain"; chains: Array<SerializedBaseChain>; }; /** * Represents the serialized form of a VectorDBQAChain. It includes * properties such as `_type`, `k`, and `combine_documents_chain`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedVectorDBQAChain = { _type: "vector_db_qa"; k: number; combine_documents_chain: SerializedBaseChain; }; /** * Represents the serialized form of an APIChain. It includes properties * such as `_type`, `api_request_chain`, `api_answer_chain`, and * `api_docs`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedAPIChain = { _type: "api_chain"; api_request_chain: SerializedLLMChain; api_answer_chain: SerializedLLMChain; api_docs: string; }; /** * Represents the serialized form of a StuffDocumentsChain. It includes * properties such as `_type` and `llm_chain`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedStuffDocumentsChain = { _type: "stuff_documents_chain"; llm_chain?: SerializedLLMChain; }; /** * Represents the serialized form of a ChatVectorDBQAChain. It includes * properties such as `_type`, `k`, `combine_documents_chain`, and * `question_generator`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedChatVectorDBQAChain = { _type: "chat-vector-db"; k: number; combine_documents_chain: SerializedBaseChain; question_generator: SerializedLLMChain; }; /** * Represents the serialized form of a MapReduceDocumentsChain. It * includes properties such as `_type`, `llm_chain`, and * `combine_document_chain`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedMapReduceDocumentsChain = { _type: "map_reduce_documents_chain"; llm_chain?: SerializedLLMChain; combine_document_chain?: SerializedStuffDocumentsChain; }; /** * Represents the serialized form of a RefineDocumentsChain. It includes * properties such as `_type`, `llm_chain`, and `refine_llm_chain`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedRefineDocumentsChain = { _type: "refine_documents_chain"; llm_chain?: SerializedLLMChain; refine_llm_chain?: SerializedLLMChain; }; /** * Represents the serialized form of an AnalyzeDocumentChain. It includes * properties such as `_type` and `combine_document_chain`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedAnalyzeDocumentChain = { _type: "analyze_document_chain"; combine_document_chain?: SerializedBaseChain; }; /** * Represents the serialized form of a ConstitutionalPrinciple. It * includes properties such as `_type`, `critiqueRequest`, * `revisionRequest`, and `name`. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedConstitutionalPrinciple = { _type: "constitutional_principle"; critiqueRequest: string; revisionRequest: string; name: string; }; /** * Represents the serialized form of a ConstitutionalChain. It includes * properties such as `_type`, `chain`, `critiqueChain`, `revisionChain`, * and `ConstitutionalPrinciple`. * * @deprecated */ export type SerializedConstitutionalChain = { _type: "constitutional_chain"; chain?: SerializedLLMChain; critiqueChain?: SerializedBaseChain; revisionChain?: SerializedBaseChain; ConstitutionalPrinciple?: SerializedConstitutionalPrinciple[]; }; /** * Represents the serialized form of a BaseChain. It can be one of the * above serialized chain types. * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedBaseChain = | SerializedLLMChain | SerializedSequentialChain | SerializedSimpleSequentialChain | SerializedVectorDBQAChain | SerializedAPIChain | SerializedStuffDocumentsChain | SerializedChatVectorDBQAChain | SerializedMapReduceDocumentsChain | SerializedAnalyzeDocumentChain | SerializedRefineDocumentsChain | SerializedConstitutionalChain;
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/conversation.ts
import { PromptTemplate } from "@langchain/core/prompts"; import { LLMChain, LLMChainInput } from "./llm_chain.js"; import { BufferMemory } from "../memory/buffer_memory.js"; import { Optional } from "../types/type-utils.js"; export const DEFAULT_TEMPLATE = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. Current conversation: {history} Human: {input} AI:`; /** * A class for conducting conversations between a human and an AI. It * extends the {@link LLMChain} class. * @example * ```typescript * const model = new ChatOpenAI({}); * const chain = new ConversationChain({ llm: model }); * * // Sending a greeting to the conversation chain * const res1 = await chain.call({ input: "Hi! I'm Jim." }); * console.log({ res1 }); * * // Following up with a question in the conversation * const res2 = await chain.call({ input: "What's my name?" }); * console.log({ res2 }); * ``` */ export class ConversationChain extends LLMChain { static lc_name() { return "ConversationChain"; } constructor({ prompt, outputKey, memory, ...rest }: Optional<LLMChainInput, "prompt">) { super({ prompt: prompt ?? new PromptTemplate({ template: DEFAULT_TEMPLATE, inputVariables: ["history", "input"], }), outputKey: outputKey ?? "response", memory: memory ?? new BufferMemory(), ...rest, }); } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/history_aware_retriever.ts
import type { LanguageModelLike } from "@langchain/core/language_models/base"; import { type Runnable, type RunnableInterface, RunnableSequence, RunnableBranch, } from "@langchain/core/runnables"; import { type BasePromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import type { DocumentInterface } from "@langchain/core/documents"; import type { BaseMessage } from "@langchain/core/messages"; /** * Params for the createHistoryAwareRetriever method. */ export type CreateHistoryAwareRetrieverParams = { /** * Language model to use for generating a search term given chat history. */ llm: LanguageModelLike; /** * RetrieverLike object that takes a string as input and outputs a list of Documents. */ retriever: RunnableInterface<string, DocumentInterface[]>; /** * The prompt used to generate the search query for the retriever. */ rephrasePrompt: BasePromptTemplate; }; /** * Create a chain that takes conversation history and returns documents. * If there is no `chat_history`, then the `input` is just passed directly to the * retriever. If there is `chat_history`, then the prompt and LLM will be used * to generate a search query. That search query is then passed to the retriever. * @param {CreateHistoryAwareRetriever} params * @returns An LCEL Runnable. The runnable input must take in `input`, and if there * is chat history should take it in the form of `chat_history`. * The Runnable output is a list of Documents * @example * ```typescript * // yarn add langchain @langchain/openai * * import { ChatOpenAI } from "@langchain/openai"; * import { pull } from "langchain/hub"; * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever"; * * const rephrasePrompt = await pull("langchain-ai/chat-langchain-rephrase"); * const llm = new ChatOpenAI({}); * const retriever = ... * const chain = await createHistoryAwareRetriever({ * llm, * retriever, * rephrasePrompt, * }); * const result = await chain.invoke({"input": "...", "chat_history": [] }) * ``` */ export async function createHistoryAwareRetriever({ llm, retriever, rephrasePrompt, }: CreateHistoryAwareRetrieverParams): Promise< Runnable< { input: string; chat_history: string | BaseMessage[] }, DocumentInterface[] > > { if (!rephrasePrompt.inputVariables.includes("input")) { throw new Error( `Expected "input" to be a prompt variable, but got ${JSON.stringify( rephrasePrompt.inputVariables )}` ); } const retrieveDocuments = RunnableBranch.from([ [ (input) => !input.chat_history || input.chat_history.length === 0, RunnableSequence.from([(input) => input.input, retriever]), ], RunnableSequence.from([ rephrasePrompt, llm, new StringOutputParser(), retriever, ]), ]).withConfig({ runName: "history_aware_retriever", }); return retrieveDocuments; }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/analyze_documents_chain.ts
import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { BaseChain, ChainInputs } from "./base.js"; import { TextSplitter, RecursiveCharacterTextSplitter, } from "../text_splitter.js"; import { SerializedAnalyzeDocumentChain } from "./serde.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type LoadValues = Record<string, any>; /** * Interface for the input parameters required by the AnalyzeDocumentChain * class. */ export interface AnalyzeDocumentChainInput extends Omit<ChainInputs, "memory"> { combineDocumentsChain: BaseChain; textSplitter?: TextSplitter; inputKey?: string; } /** * Chain that combines documents by stuffing into context. * @augments BaseChain * @augments StuffDocumentsChainInput * @example * ```typescript * const model = new ChatOpenAI({ temperature: 0 }); * const combineDocsChain = loadSummarizationChain(model); * const chain = new AnalyzeDocumentChain({ * combineDocumentsChain: combineDocsChain, * }); * * // Read the text from a file (this is a placeholder for actual file reading) * const text = readTextFromFile("state_of_the_union.txt"); * * // Invoke the chain to analyze the document * const res = await chain.call({ * input_document: text, * }); * * console.log({ res }); * ``` */ export class AnalyzeDocumentChain extends BaseChain implements AnalyzeDocumentChainInput { static lc_name() { return "AnalyzeDocumentChain"; } inputKey = "input_document"; combineDocumentsChain: BaseChain; textSplitter: TextSplitter; constructor(fields: AnalyzeDocumentChainInput) { super(fields); this.combineDocumentsChain = fields.combineDocumentsChain; this.inputKey = fields.inputKey ?? this.inputKey; this.textSplitter = fields.textSplitter ?? new RecursiveCharacterTextSplitter(); } get inputKeys(): string[] { return [this.inputKey]; } get outputKeys(): string[] { return this.combineDocumentsChain.outputKeys; } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { if (!(this.inputKey in values)) { throw new Error(`Document key ${this.inputKey} not found.`); } const { [this.inputKey]: doc, ...rest } = values; const currentDoc = doc as string; const currentDocs = await this.textSplitter.createDocuments([currentDoc]); const newInputs = { input_documents: currentDocs, ...rest }; const result = await this.combineDocumentsChain.call( newInputs, runManager?.getChild("combine_documents") ); return result; } _chainType() { return "analyze_document_chain" as const; } static async deserialize( data: SerializedAnalyzeDocumentChain, values: LoadValues ) { if (!("text_splitter" in values)) { throw new Error( `Need to pass in a text_splitter to deserialize AnalyzeDocumentChain.` ); } const { text_splitter } = values; if (!data.combine_document_chain) { throw new Error( `Need to pass in a combine_document_chain to deserialize AnalyzeDocumentChain.` ); } return new AnalyzeDocumentChain({ combineDocumentsChain: await BaseChain.deserialize( data.combine_document_chain ), textSplitter: text_splitter, }); } serialize(): SerializedAnalyzeDocumentChain { return { _type: this._chainType(), combine_document_chain: this.combineDocumentsChain.serialize(), }; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/retrieval_qa.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import type { BaseRetrieverInterface } from "@langchain/core/retrievers"; import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { BaseChain, ChainInputs } from "./base.js"; import { SerializedVectorDBQAChain } from "./serde.js"; import { StuffQAChainParams, loadQAStuffChain, } from "./question_answering/load.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type LoadValues = Record<string, any>; /** * Interface for the input parameters of the RetrievalQAChain class. */ export interface RetrievalQAChainInput extends Omit<ChainInputs, "memory"> { retriever: BaseRetrieverInterface; combineDocumentsChain: BaseChain; inputKey?: string; returnSourceDocuments?: boolean; } /** * @deprecated This class will be removed in 1.0.0. See below for an example implementation using * `createRetrievalChain`: * Class representing a chain for performing question-answering tasks with * a retrieval component. * @example * ```typescript * import { createStuffDocumentsChain } from "langchain/chains/combine_documents"; * import { ChatPromptTemplate } from "@langchain/core/prompts"; * import { createRetrievalChain } from "langchain/chains/retrieval"; * import { MemoryVectorStore } from "langchain/vectorstores/memory"; * * const documents = [...your documents here]; * const embeddings = ...your embeddings model; * const llm = ...your LLM model; * * const vectorstore = await MemoryVectorStore.fromDocuments( * documents, * embeddings * ); * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input} based on the following context {context}`); * * const combineDocsChain = await createStuffDocumentsChain({ * llm, * prompt, * }); * const retriever = vectorstore.asRetriever(); * * const retrievalChain = await createRetrievalChain({ * combineDocsChain, * retriever, * }); * ``` */ export class RetrievalQAChain extends BaseChain implements RetrievalQAChainInput { static lc_name() { return "RetrievalQAChain"; } inputKey = "query"; get inputKeys() { return [this.inputKey]; } get outputKeys() { return this.combineDocumentsChain.outputKeys.concat( this.returnSourceDocuments ? ["sourceDocuments"] : [] ); } retriever: BaseRetrieverInterface; combineDocumentsChain: BaseChain; returnSourceDocuments = false; constructor(fields: RetrievalQAChainInput) { super(fields); this.retriever = fields.retriever; this.combineDocumentsChain = fields.combineDocumentsChain; this.inputKey = fields.inputKey ?? this.inputKey; this.returnSourceDocuments = fields.returnSourceDocuments ?? this.returnSourceDocuments; } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { if (!(this.inputKey in values)) { throw new Error(`Question key "${this.inputKey}" not found.`); } const question: string = values[this.inputKey]; const docs = await this.retriever.getRelevantDocuments( question, runManager?.getChild("retriever") ); const inputs = { question, input_documents: docs, ...values }; const result = await this.combineDocumentsChain.call( inputs, runManager?.getChild("combine_documents") ); if (this.returnSourceDocuments) { return { ...result, sourceDocuments: docs, }; } return result; } _chainType() { return "retrieval_qa" as const; } static async deserialize( _data: SerializedVectorDBQAChain, _values: LoadValues ): Promise<RetrievalQAChain> { throw new Error("Not implemented"); } serialize(): SerializedVectorDBQAChain { throw new Error("Not implemented"); } /** * Creates a new instance of RetrievalQAChain using a BaseLanguageModel * and a BaseRetriever. * @param llm The BaseLanguageModel used to generate a new question. * @param retriever The BaseRetriever used to retrieve relevant documents. * @param options Optional parameters for the RetrievalQAChain. * @returns A new instance of RetrievalQAChain. */ static fromLLM( llm: BaseLanguageModelInterface, retriever: BaseRetrieverInterface, options?: Partial< Omit< RetrievalQAChainInput, "retriever" | "combineDocumentsChain" | "index" > > & StuffQAChainParams ): RetrievalQAChain { const qaChain = loadQAStuffChain(llm, { prompt: options?.prompt, }); return new this({ ...options, retriever, combineDocumentsChain: qaChain, }); } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/index.ts
export { BaseChain, type ChainInputs } from "./base.js"; export { LLMChain, type LLMChainInput } from "./llm_chain.js"; export { APIChain, type APIChainInput, type APIChainOptions, } from "./api/api_chain.js"; export { ConversationChain } from "./conversation.js"; export { SequentialChain, type SequentialChainInput, SimpleSequentialChain, type SimpleSequentialChainInput, } from "./sequential_chain.js"; export { StuffDocumentsChain, type StuffDocumentsChainInput, MapReduceDocumentsChain, type MapReduceDocumentsChainInput, RefineDocumentsChain, type RefineDocumentsChainInput, } from "./combine_docs_chain.js"; export { ChatVectorDBQAChain, type ChatVectorDBQAChainInput, } from "./chat_vector_db_chain.js"; export { AnalyzeDocumentChain, type AnalyzeDocumentChainInput, } from "./analyze_documents_chain.js"; export { VectorDBQAChain, type VectorDBQAChainInput } from "./vector_db_qa.js"; export { loadQAChain, type QAChainParams, loadQAStuffChain, type StuffQAChainParams, loadQAMapReduceChain, type MapReduceQAChainParams, loadQARefineChain, type RefineQAChainParams, } from "./question_answering/load.js"; export { loadSummarizationChain, type SummarizationChainParams, } from "./summarization/load.js"; export { ConversationalRetrievalQAChain, type ConversationalRetrievalQAChainInput, } from "./conversational_retrieval_chain.js"; export { RetrievalQAChain, type RetrievalQAChainInput, } from "./retrieval_qa.js"; export { type ConstitutionalChainInput, ConstitutionalChain, } from "./constitutional_ai/constitutional_chain.js"; export { ConstitutionalPrinciple, PRINCIPLES, } from "./constitutional_ai/constitutional_principle.js"; export type { SerializedLLMChain, SerializedSequentialChain, SerializedSimpleSequentialChain, SerializedAnalyzeDocumentChain, SerializedAPIChain, SerializedBaseChain, SerializedChatVectorDBQAChain, SerializedMapReduceDocumentsChain, SerializedStuffDocumentsChain, SerializedVectorDBQAChain, SerializedRefineDocumentsChain, } from "./serde.js"; export { OpenAIModerationChain } from "./openai_moderation.js"; export { MultiRouteChain, type MultiRouteChainInput, RouterChain, } from "./router/multi_route.js"; export { LLMRouterChain, type LLMRouterChainInput, type RouterOutputSchema, } from "./router/llm_router.js"; export { MultiPromptChain } from "./router/multi_prompt.js"; export { MultiRetrievalQAChain } from "./router/multi_retrieval_qa.js"; export { TransformChain, type TransformChainFields } from "./transform.js"; export { createExtractionChain, createExtractionChainFromZod, } from "./openai_functions/extraction.js"; export { type TaggingChainOptions, createTaggingChain, createTaggingChainFromZod, } from "./openai_functions/tagging.js"; export { type OpenAPIChainOptions, createOpenAPIChain, } from "./openai_functions/openapi.js";
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/chat_vector_db_chain.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import type { VectorStoreInterface } from "@langchain/core/vectorstores"; import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { PromptTemplate } from "@langchain/core/prompts"; import { SerializedChatVectorDBQAChain } from "./serde.js"; import { BaseChain, ChainInputs } from "./base.js"; import { LLMChain } from "./llm_chain.js"; import { loadQAStuffChain } from "./question_answering/load.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type LoadValues = Record<string, any>; const question_generator_template = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {question} Standalone question:`; const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. {context} Question: {question} Helpful Answer:`; /** * Interface for the input parameters of the ChatVectorDBQAChain class. */ export interface ChatVectorDBQAChainInput extends ChainInputs { vectorstore: VectorStoreInterface; combineDocumentsChain: BaseChain; questionGeneratorChain: LLMChain; returnSourceDocuments?: boolean; outputKey?: string; inputKey?: string; k?: number; } /** @deprecated use `ConversationalRetrievalQAChain` instead. */ export class ChatVectorDBQAChain extends BaseChain implements ChatVectorDBQAChainInput { k = 4; inputKey = "question"; chatHistoryKey = "chat_history"; get inputKeys() { return [this.inputKey, this.chatHistoryKey]; } outputKey = "result"; get outputKeys() { return [this.outputKey]; } vectorstore: VectorStoreInterface; combineDocumentsChain: BaseChain; questionGeneratorChain: LLMChain; returnSourceDocuments = false; constructor(fields: ChatVectorDBQAChainInput) { super(fields); this.vectorstore = fields.vectorstore; this.combineDocumentsChain = fields.combineDocumentsChain; this.questionGeneratorChain = fields.questionGeneratorChain; this.inputKey = fields.inputKey ?? this.inputKey; this.outputKey = fields.outputKey ?? this.outputKey; this.k = fields.k ?? this.k; this.returnSourceDocuments = fields.returnSourceDocuments ?? this.returnSourceDocuments; } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { if (!(this.inputKey in values)) { throw new Error(`Question key ${this.inputKey} not found.`); } if (!(this.chatHistoryKey in values)) { throw new Error(`chat history key ${this.inputKey} not found.`); } const question: string = values[this.inputKey]; const chatHistory: string = values[this.chatHistoryKey]; let newQuestion = question; if (chatHistory.length > 0) { const result = await this.questionGeneratorChain.call( { question, chat_history: chatHistory, }, runManager?.getChild("question_generator") ); const keys = Object.keys(result); console.log("_call", values, keys); if (keys.length === 1) { newQuestion = result[keys[0]]; } else { throw new Error( "Return from llm chain has multiple values, only single values supported." ); } } const docs = await this.vectorstore.similaritySearch( newQuestion, this.k, undefined, runManager?.getChild("vectorstore") ); const inputs = { question: newQuestion, input_documents: docs, chat_history: chatHistory, }; const result = await this.combineDocumentsChain.call( inputs, runManager?.getChild("combine_documents") ); if (this.returnSourceDocuments) { return { ...result, sourceDocuments: docs, }; } return result; } _chainType() { return "chat-vector-db" as const; } static async deserialize( data: SerializedChatVectorDBQAChain, values: LoadValues ) { if (!("vectorstore" in values)) { throw new Error( `Need to pass in a vectorstore to deserialize VectorDBQAChain` ); } const { vectorstore } = values; return new ChatVectorDBQAChain({ combineDocumentsChain: await BaseChain.deserialize( data.combine_documents_chain ), questionGeneratorChain: await LLMChain.deserialize( data.question_generator ), k: data.k, vectorstore, }); } serialize(): SerializedChatVectorDBQAChain { return { _type: this._chainType(), combine_documents_chain: this.combineDocumentsChain.serialize(), question_generator: this.questionGeneratorChain.serialize(), k: this.k, }; } /** * Creates an instance of ChatVectorDBQAChain using a BaseLanguageModel * and other options. * @param llm Instance of BaseLanguageModel used to generate a new question. * @param vectorstore Instance of VectorStore used for vector operations. * @param options (Optional) Additional options for creating the ChatVectorDBQAChain instance. * @returns New instance of ChatVectorDBQAChain. */ static fromLLM( llm: BaseLanguageModelInterface, vectorstore: VectorStoreInterface, options: { inputKey?: string; outputKey?: string; k?: number; returnSourceDocuments?: boolean; questionGeneratorTemplate?: string; qaTemplate?: string; verbose?: boolean; } = {} ): ChatVectorDBQAChain { const { questionGeneratorTemplate, qaTemplate, verbose, ...rest } = options; const question_generator_prompt = PromptTemplate.fromTemplate( questionGeneratorTemplate || question_generator_template ); const qa_prompt = PromptTemplate.fromTemplate(qaTemplate || qa_template); const qaChain = loadQAStuffChain(llm, { prompt: qa_prompt, verbose }); const questionGeneratorChain = new LLMChain({ prompt: question_generator_prompt, llm, verbose, }); const instance = new this({ vectorstore, combineDocumentsChain: qaChain, questionGeneratorChain, ...rest, }); return instance; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/base.ts
import { BaseMemory } from "@langchain/core/memory"; import { ChainValues } from "@langchain/core/utils/types"; import { RUN_KEY } from "@langchain/core/outputs"; import { CallbackManagerForChainRun, CallbackManager, Callbacks, parseCallbackConfigArg, } from "@langchain/core/callbacks/manager"; import { ensureConfig, type RunnableConfig } from "@langchain/core/runnables"; import { BaseLangChain, BaseLangChainParams, } from "@langchain/core/language_models/base"; import { SerializedBaseChain } from "./serde.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type LoadValues = Record<string, any>; export interface ChainInputs extends BaseLangChainParams { memory?: BaseMemory; /** * @deprecated Use `callbacks` instead */ callbackManager?: CallbackManager; } /** * Base interface that all chains must implement. */ export abstract class BaseChain< RunInput extends ChainValues = ChainValues, RunOutput extends ChainValues = ChainValues > extends BaseLangChain<RunInput, RunOutput> implements ChainInputs { declare memory?: BaseMemory; get lc_namespace(): string[] { return ["langchain", "chains", this._chainType()]; } constructor( fields?: BaseMemory | ChainInputs, /** @deprecated */ verbose?: boolean, /** @deprecated */ callbacks?: Callbacks ) { if ( arguments.length === 1 && typeof fields === "object" && !("saveContext" in fields) ) { // fields is not a BaseMemory const { memory, callbackManager, ...rest } = fields; super({ ...rest, callbacks: callbackManager ?? rest.callbacks }); this.memory = memory; } else { // fields is a BaseMemory super({ verbose, callbacks }); this.memory = fields as BaseMemory; } } /** @ignore */ _selectMemoryInputs(values: ChainValues): ChainValues { const valuesForMemory = { ...values }; if ("signal" in valuesForMemory) { delete valuesForMemory.signal; } if ("timeout" in valuesForMemory) { delete valuesForMemory.timeout; } return valuesForMemory; } /** * Invoke the chain with the provided input and returns the output. * @param input Input values for the chain run. * @param config Optional configuration for the Runnable. * @returns Promise that resolves with the output of the chain run. */ async invoke(input: RunInput, options?: RunnableConfig): Promise<RunOutput> { const config = ensureConfig(options); const fullValues = await this._formatValues(input); const callbackManager_ = await CallbackManager.configure( config?.callbacks, this.callbacks, config?.tags, this.tags, config?.metadata, this.metadata, { verbose: this.verbose } ); const runManager = await callbackManager_?.handleChainStart( this.toJSON(), fullValues, undefined, undefined, undefined, undefined, config?.runName ); let outputValues: RunOutput; try { outputValues = await (fullValues.signal ? (Promise.race([ this._call(fullValues as RunInput, runManager, config), new Promise((_, reject) => { fullValues.signal?.addEventListener("abort", () => { reject(new Error("AbortError")); }); }), ]) as Promise<RunOutput>) : this._call(fullValues as RunInput, runManager, config)); } catch (e) { await runManager?.handleChainError(e); throw e; } if (!(this.memory == null)) { await this.memory.saveContext( this._selectMemoryInputs(input), outputValues ); } await runManager?.handleChainEnd(outputValues); // add the runManager's currentRunId to the outputValues Object.defineProperty(outputValues, RUN_KEY, { value: runManager ? { runId: runManager?.runId } : undefined, configurable: true, }); return outputValues; } private _validateOutputs(outputs: Record<string, unknown>): void { const missingKeys = this.outputKeys.filter((k) => !(k in outputs)); if (missingKeys.length) { throw new Error( `Missing output keys: ${missingKeys.join( ", " )} from chain ${this._chainType()}` ); } } async prepOutputs( inputs: Record<string, unknown>, outputs: Record<string, unknown>, returnOnlyOutputs = false ) { this._validateOutputs(outputs); if (this.memory) { await this.memory.saveContext(inputs, outputs); } if (returnOnlyOutputs) { return outputs; } return { ...inputs, ...outputs }; } /** * Run the core logic of this chain and return the output */ abstract _call( values: RunInput, runManager?: CallbackManagerForChainRun, config?: RunnableConfig ): Promise<RunOutput>; /** * Return the string type key uniquely identifying this class of chain. */ abstract _chainType(): string; /** * Return a json-like object representing this chain. */ serialize(): SerializedBaseChain { throw new Error("Method not implemented."); } abstract get inputKeys(): string[]; abstract get outputKeys(): string[]; /** @deprecated Use .invoke() instead. Will be removed in 0.2.0. */ async run( // eslint-disable-next-line @typescript-eslint/no-explicit-any input: any, config?: Callbacks | RunnableConfig ): Promise<string> { const inputKeys = this.inputKeys.filter( (k) => !this.memory?.memoryKeys.includes(k) ?? true ); const isKeylessInput = inputKeys.length <= 1; if (!isKeylessInput) { throw new Error( `Chain ${this._chainType()} expects multiple inputs, cannot use 'run' ` ); } // eslint-disable-next-line @typescript-eslint/no-explicit-any const values = inputKeys.length ? { [inputKeys[0]]: input } : ({} as any); const returnValues = await this.call(values, config); const keys = Object.keys(returnValues); if (keys.length === 1) { return returnValues[keys[0]]; } throw new Error( "return values have multiple keys, `run` only supported when one key currently" ); } protected async _formatValues( values: ChainValues & { signal?: AbortSignal; timeout?: number } ) { const fullValues = { ...values } as typeof values; if (fullValues.timeout && !fullValues.signal) { fullValues.signal = AbortSignal.timeout(fullValues.timeout); delete fullValues.timeout; } if (!(this.memory == null)) { const newValues = await this.memory.loadMemoryVariables( this._selectMemoryInputs(values) ); for (const [key, value] of Object.entries(newValues)) { fullValues[key] = value; } } return fullValues; } /** * @deprecated Use .invoke() instead. Will be removed in 0.2.0. * * Run the core logic of this chain and add to output if desired. * * Wraps _call and handles memory. */ async call( values: ChainValues & { signal?: AbortSignal; timeout?: number }, config?: Callbacks | RunnableConfig, /** @deprecated */ tags?: string[] ): Promise<RunOutput> { const parsedConfig = { tags, ...parseCallbackConfigArg(config) }; return this.invoke(values as RunInput, parsedConfig); } /** * @deprecated Use .batch() instead. Will be removed in 0.2.0. * * Call the chain on all inputs in the list */ async apply( inputs: RunInput[], config?: (Callbacks | RunnableConfig)[] ): Promise<RunOutput[]> { return Promise.all( inputs.map(async (i, idx) => this.call(i, config?.[idx])) ); } /** * Load a chain from a json-like object describing it. */ static async deserialize( data: SerializedBaseChain, values: LoadValues = {} ): Promise<BaseChain> { switch (data._type) { case "llm_chain": { const { LLMChain } = await import("./llm_chain.js"); return LLMChain.deserialize(data); } case "sequential_chain": { const { SequentialChain } = await import("./sequential_chain.js"); return SequentialChain.deserialize(data); } case "simple_sequential_chain": { const { SimpleSequentialChain } = await import("./sequential_chain.js"); return SimpleSequentialChain.deserialize(data); } case "stuff_documents_chain": { const { StuffDocumentsChain } = await import("./combine_docs_chain.js"); return StuffDocumentsChain.deserialize(data); } case "map_reduce_documents_chain": { const { MapReduceDocumentsChain } = await import( "./combine_docs_chain.js" ); return MapReduceDocumentsChain.deserialize(data); } case "refine_documents_chain": { const { RefineDocumentsChain } = await import( "./combine_docs_chain.js" ); return RefineDocumentsChain.deserialize(data); } case "vector_db_qa": { const { VectorDBQAChain } = await import("./vector_db_qa.js"); return VectorDBQAChain.deserialize(data, values); } case "api_chain": { const { APIChain } = await import("./api/api_chain.js"); return APIChain.deserialize(data); } default: throw new Error( `Invalid prompt type in config: ${ (data as SerializedBaseChain)._type }` ); } } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/sequential_chain.ts
import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { BaseChain, ChainInputs } from "./base.js"; import { SerializedBaseChain, SerializedSequentialChain, SerializedSimpleSequentialChain, } from "./serde.js"; import { intersection, union, difference } from "../util/set.js"; function formatSet(input: Set<string>) { return Array.from(input) .map((i) => `"${i}"`) .join(", "); } /** * Interface for the input parameters of the SequentialChain class. * * @deprecated * Switch to expression language: https://js.langchain.com/docs/expression_language/ * Will be removed in 0.2.0 */ export interface SequentialChainInput extends ChainInputs { /** Array of chains to run as a sequence. The chains are run in order they appear in the array. */ chains: BaseChain[]; /** Defines which variables should be passed as initial input to the first chain. */ inputVariables: string[]; /** Which variables should be returned as a result of executing the chain. If not specified, output of the last of the chains is used. */ outputVariables?: string[]; /** Whether or not to return all intermediate outputs and variables (excluding initial input variables). */ returnAll?: boolean; } /** * Chain where the outputs of one chain feed directly into next. * @example * ```typescript * const promptTemplate = new PromptTemplate({ * template: `You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title. * Title: {title} * Era: {era} * Playwright: This is a synopsis for the above play:`, * inputVariables: ["title", "era"], * }); * const reviewPromptTemplate = new PromptTemplate({ * template: `You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. * * Play Synopsis: * {synopsis} * Review from a New York Times play critic of the above play:`, * inputVariables: ["synopsis"], * }); * const overallChain = new SequentialChain({ * chains: [ * new LLMChain({ * llm: new ChatOpenAI({ temperature: 0 }), * prompt: promptTemplate, * outputKey: "synopsis", * }), * new LLMChain({ * llm: new OpenAI({ temperature: 0 }), * prompt: reviewPromptTemplate, * outputKey: "review", * }), * ], * inputVariables: ["era", "title"], * outputVariables: ["synopsis", "review"], * verbose: true, * }); * const chainExecutionResult = await overallChain.call({ * title: "Tragedy at sunset on the beach", * era: "Victorian England", * }); * console.log(chainExecutionResult); * ``` * * @deprecated * Switch to {@link https://js.langchain.com/docs/expression_language/ | expression language}. * Will be removed in 0.2.0 */ export class SequentialChain extends BaseChain implements SequentialChainInput { static lc_name() { return "SequentialChain"; } chains: BaseChain[]; inputVariables: string[]; outputVariables: string[]; returnAll?: boolean | undefined; get inputKeys() { return this.inputVariables; } get outputKeys(): string[] { return this.outputVariables; } constructor(fields: SequentialChainInput) { super(fields); this.chains = fields.chains; this.inputVariables = fields.inputVariables; this.outputVariables = fields.outputVariables ?? []; if (this.outputVariables.length > 0 && fields.returnAll) { throw new Error( "Either specify variables to return using `outputVariables` or use `returnAll` param. Cannot apply both conditions at the same time." ); } this.returnAll = fields.returnAll ?? false; this._validateChains(); } /** @ignore */ _validateChains() { if (this.chains.length === 0) { throw new Error("Sequential chain must have at least one chain."); } const memoryKeys = this.memory?.memoryKeys ?? []; const inputKeysSet = new Set(this.inputKeys); const memoryKeysSet = new Set(memoryKeys); const keysIntersection = intersection(inputKeysSet, memoryKeysSet); if (keysIntersection.size > 0) { throw new Error( `The following keys: ${formatSet( keysIntersection )} are overlapping between memory and input keys of the chain variables. This can lead to unexpected behaviour. Please use input and memory keys that don't overlap.` ); } const availableKeys = union(inputKeysSet, memoryKeysSet); for (const chain of this.chains) { let missingKeys = difference(new Set(chain.inputKeys), availableKeys); if (chain.memory) { missingKeys = difference(missingKeys, new Set(chain.memory.memoryKeys)); } if (missingKeys.size > 0) { throw new Error( `Missing variables for chain "${chain._chainType()}": ${formatSet( missingKeys )}. Only got the following variables: ${formatSet(availableKeys)}.` ); } const outputKeysSet = new Set(chain.outputKeys); const overlappingOutputKeys = intersection(availableKeys, outputKeysSet); if (overlappingOutputKeys.size > 0) { throw new Error( `The following output variables for chain "${chain._chainType()}" are overlapping: ${formatSet( overlappingOutputKeys )}. This can lead to unexpected behaviour.` ); } for (const outputKey of outputKeysSet) { availableKeys.add(outputKey); } } if (this.outputVariables.length === 0) { if (this.returnAll) { const outputKeys = difference(availableKeys, inputKeysSet); this.outputVariables = Array.from(outputKeys); } else { this.outputVariables = this.chains[this.chains.length - 1].outputKeys; } } else { const missingKeys = difference( new Set(this.outputVariables), new Set(availableKeys) ); if (missingKeys.size > 0) { throw new Error( `The following output variables were expected to be in the final chain output but were not found: ${formatSet( missingKeys )}.` ); } } } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { let input: ChainValues = {}; const allChainValues: ChainValues = values; let i = 0; for (const chain of this.chains) { i += 1; input = await chain.call( allChainValues, runManager?.getChild(`step_${i}`) ); for (const key of Object.keys(input)) { allChainValues[key] = input[key]; } } const output: ChainValues = {}; for (const key of this.outputVariables) { output[key] = allChainValues[key]; } return output; } _chainType() { return "sequential_chain" as const; } static async deserialize(data: SerializedSequentialChain) { const chains: BaseChain[] = []; const inputVariables: string[] = data.input_variables; const outputVariables: string[] = data.output_variables; const serializedChains = data.chains; for (const serializedChain of serializedChains) { const deserializedChain = await BaseChain.deserialize(serializedChain); chains.push(deserializedChain); } return new SequentialChain({ chains, inputVariables, outputVariables }); } serialize(): SerializedSequentialChain { const chains: SerializedBaseChain[] = []; for (const chain of this.chains) { chains.push(chain.serialize()); } return { _type: this._chainType(), input_variables: this.inputVariables, output_variables: this.outputVariables, chains, }; } } /** * @deprecated Switch to expression language: https://js.langchain.com/docs/expression_language/ * Interface for the input parameters of the SimpleSequentialChain class. */ export interface SimpleSequentialChainInput extends ChainInputs { /** Array of chains to run as a sequence. The chains are run in order they appear in the array. */ chains: Array<BaseChain>; /** Whether or not to trim the intermediate outputs. */ trimOutputs?: boolean; } /** * @deprecated Switch to expression language: https://js.langchain.com/docs/expression_language/ * Simple chain where a single string output of one chain is fed directly into the next. * @augments BaseChain * @augments SimpleSequentialChainInput * * @example * ```ts * import { SimpleSequentialChain, LLMChain } from "langchain/chains"; * import { OpenAI } from "langchain/llms/openai"; * import { PromptTemplate } from "langchain/prompts"; * * // This is an LLMChain to write a synopsis given a title of a play. * const llm = new OpenAI({ temperature: 0 }); * const template = `You are a playwright. Given the title of play, it is your job to write a synopsis for that title. * * Title: {title} * Playwright: This is a synopsis for the above play:` * const promptTemplate = new PromptTemplate({ template, inputVariables: ["title"] }); * const synopsisChain = new LLMChain({ llm, prompt: promptTemplate }); * * * // This is an LLMChain to write a review of a play given a synopsis. * const reviewLLM = new OpenAI({ temperature: 0 }) * const reviewTemplate = `You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. * * Play Synopsis: * {synopsis} * Review from a New York Times play critic of the above play:` * const reviewPromptTemplate = new PromptTemplate({ template: reviewTemplate, inputVariables: ["synopsis"] }); * const reviewChain = new LLMChain({ llm: reviewLLM, prompt: reviewPromptTemplate }); * * const overallChain = new SimpleSequentialChain({chains: [synopsisChain, reviewChain], verbose:true}) * const review = await overallChain.run("Tragedy at sunset on the beach") * // the variable review contains resulting play review. * ``` */ export class SimpleSequentialChain extends BaseChain implements SimpleSequentialChainInput { static lc_name() { return "SimpleSequentialChain"; } chains: Array<BaseChain>; inputKey = "input"; outputKey = "output"; trimOutputs: boolean; get inputKeys() { return [this.inputKey]; } get outputKeys(): string[] { return [this.outputKey]; } constructor(fields: SimpleSequentialChainInput) { super(fields); this.chains = fields.chains; this.trimOutputs = fields.trimOutputs ?? false; this._validateChains(); } /** @ignore */ _validateChains() { for (const chain of this.chains) { if ( chain.inputKeys.filter( (k) => !chain.memory?.memoryKeys.includes(k) ?? true ).length !== 1 ) { throw new Error( `Chains used in SimpleSequentialChain should all have one input, got ${ chain.inputKeys.length } for ${chain._chainType()}.` ); } if (chain.outputKeys.length !== 1) { throw new Error( `Chains used in SimpleSequentialChain should all have one output, got ${ chain.outputKeys.length } for ${chain._chainType()}.` ); } } } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { let input: string = values[this.inputKey]; let i = 0; for (const chain of this.chains) { i += 1; input = ( await chain.call( { [chain.inputKeys[0]]: input, signal: values.signal }, runManager?.getChild(`step_${i}`) ) )[chain.outputKeys[0]]; if (this.trimOutputs) { input = input.trim(); } await runManager?.handleText(input); } return { [this.outputKey]: input }; } _chainType() { return "simple_sequential_chain" as const; } static async deserialize(data: SerializedSimpleSequentialChain) { const chains: Array<BaseChain> = []; const serializedChains = data.chains; for (const serializedChain of serializedChains) { const deserializedChain = await BaseChain.deserialize(serializedChain); chains.push(deserializedChain); } return new SimpleSequentialChain({ chains }); } serialize(): SerializedSimpleSequentialChain { const chains: Array<SerializedBaseChain> = []; for (const chain of this.chains) { chains.push(chain.serialize()); } return { _type: this._chainType(), chains, }; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/conversational_retrieval_chain.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import type { BaseRetrieverInterface } from "@langchain/core/retrievers"; import { PromptTemplate } from "@langchain/core/prompts"; import { BaseMessage, HumanMessage, AIMessage } from "@langchain/core/messages"; import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { SerializedChatVectorDBQAChain } from "./serde.js"; import { BaseChain, ChainInputs } from "./base.js"; import { LLMChain } from "./llm_chain.js"; import { QAChainParams, loadQAChain } from "./question_answering/load.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type LoadValues = Record<string, any>; const question_generator_template = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {question} Standalone question:`; /** * Interface for the input parameters of the * ConversationalRetrievalQAChain class. */ export interface ConversationalRetrievalQAChainInput extends ChainInputs { retriever: BaseRetrieverInterface; combineDocumentsChain: BaseChain; questionGeneratorChain: LLMChain; returnSourceDocuments?: boolean; returnGeneratedQuestion?: boolean; inputKey?: string; } /** * @deprecated This class will be removed in 1.0.0. See below for an example implementation using * `createRetrievalChain`. * * Class for conducting conversational question-answering tasks with a * retrieval component. Extends the BaseChain class and implements the * ConversationalRetrievalQAChainInput interface. * @example * ```typescript * import { ChatAnthropic } from "@langchain/anthropic"; * import { * ChatPromptTemplate, * MessagesPlaceholder, * } from "@langchain/core/prompts"; * import { BaseMessage } from "@langchain/core/messages"; * import { createStuffDocumentsChain } from "langchain/chains/combine_documents"; * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever"; * import { createRetrievalChain } from "langchain/chains/retrieval"; * * const retriever = ...your retriever; * const llm = new ChatAnthropic(); * * // Contextualize question * const contextualizeQSystemPrompt = ` * Given a chat history and the latest user question * which might reference context in the chat history, * formulate a standalone question which can be understood * without the chat history. Do NOT answer the question, just * reformulate it if needed and otherwise return it as is.`; * const contextualizeQPrompt = ChatPromptTemplate.fromMessages([ * ["system", contextualizeQSystemPrompt], * new MessagesPlaceholder("chat_history"), * ["human", "{input}"], * ]); * const historyAwareRetriever = await createHistoryAwareRetriever({ * llm, * retriever, * rephrasePrompt: contextualizeQPrompt, * }); * * // Answer question * const qaSystemPrompt = ` * You are an assistant for question-answering tasks. Use * the following pieces of retrieved context to answer the * question. If you don't know the answer, just say that you * don't know. Use three sentences maximum and keep the answer * concise. * \n\n * {context}`; * const qaPrompt = ChatPromptTemplate.fromMessages([ * ["system", qaSystemPrompt], * new MessagesPlaceholder("chat_history"), * ["human", "{input}"], * ]); * * // Below we use createStuffDocuments_chain to feed all retrieved context * // into the LLM. Note that we can also use StuffDocumentsChain and other * // instances of BaseCombineDocumentsChain. * const questionAnswerChain = await createStuffDocumentsChain({ * llm, * prompt: qaPrompt, * }); * * const ragChain = await createRetrievalChain({ * retriever: historyAwareRetriever, * combineDocsChain: questionAnswerChain, * }); * * // Usage: * const chat_history: BaseMessage[] = []; * const response = await ragChain.invoke({ * chat_history, * input: "...", * }); * ``` */ export class ConversationalRetrievalQAChain extends BaseChain implements ConversationalRetrievalQAChainInput { static lc_name() { return "ConversationalRetrievalQAChain"; } inputKey = "question"; chatHistoryKey = "chat_history"; get inputKeys() { return [this.inputKey, this.chatHistoryKey]; } get outputKeys() { return this.combineDocumentsChain.outputKeys.concat( this.returnSourceDocuments ? ["sourceDocuments"] : [] ); } retriever: BaseRetrieverInterface; combineDocumentsChain: BaseChain; questionGeneratorChain: LLMChain; returnSourceDocuments = false; returnGeneratedQuestion = false; constructor(fields: ConversationalRetrievalQAChainInput) { super(fields); this.retriever = fields.retriever; this.combineDocumentsChain = fields.combineDocumentsChain; this.questionGeneratorChain = fields.questionGeneratorChain; this.inputKey = fields.inputKey ?? this.inputKey; this.returnSourceDocuments = fields.returnSourceDocuments ?? this.returnSourceDocuments; this.returnGeneratedQuestion = fields.returnGeneratedQuestion ?? this.returnGeneratedQuestion; } /** * Static method to convert the chat history input into a formatted * string. * @param chatHistory Chat history input which can be a string, an array of BaseMessage instances, or an array of string arrays. * @returns A formatted string representing the chat history. */ static getChatHistoryString( chatHistory: string | BaseMessage[] | string[][] ) { let historyMessages: BaseMessage[]; if (Array.isArray(chatHistory)) { // TODO: Deprecate on a breaking release if ( Array.isArray(chatHistory[0]) && typeof chatHistory[0][0] === "string" ) { console.warn( "Passing chat history as an array of strings is deprecated.\nPlease see https://js.langchain.com/docs/modules/chains/popular/chat_vector_db#externally-managed-memory for more information." ); historyMessages = chatHistory.flat().map((stringMessage, i) => { if (i % 2 === 0) { return new HumanMessage(stringMessage); } else { return new AIMessage(stringMessage); } }); } else { historyMessages = chatHistory as BaseMessage[]; } return historyMessages .map((chatMessage) => { if (chatMessage._getType() === "human") { return `Human: ${chatMessage.content}`; } else if (chatMessage._getType() === "ai") { return `Assistant: ${chatMessage.content}`; } else { return `${chatMessage.content}`; } }) .join("\n"); } return chatHistory; } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { if (!(this.inputKey in values)) { throw new Error(`Question key ${this.inputKey} not found.`); } if (!(this.chatHistoryKey in values)) { throw new Error(`Chat history key ${this.chatHistoryKey} not found.`); } const question: string = values[this.inputKey]; const chatHistory: string = ConversationalRetrievalQAChain.getChatHistoryString( values[this.chatHistoryKey] ); let newQuestion = question; if (chatHistory.length > 0) { const result = await this.questionGeneratorChain.call( { question, chat_history: chatHistory, }, runManager?.getChild("question_generator") ); const keys = Object.keys(result); if (keys.length === 1) { newQuestion = result[keys[0]]; } else { throw new Error( "Return from llm chain has multiple values, only single values supported." ); } } const docs = await this.retriever.getRelevantDocuments( newQuestion, runManager?.getChild("retriever") ); const inputs = { question: newQuestion, input_documents: docs, chat_history: chatHistory, }; let result = await this.combineDocumentsChain.call( inputs, runManager?.getChild("combine_documents") ); if (this.returnSourceDocuments) { result = { ...result, sourceDocuments: docs, }; } if (this.returnGeneratedQuestion) { result = { ...result, generatedQuestion: newQuestion, }; } return result; } _chainType(): string { return "conversational_retrieval_chain"; } static async deserialize( _data: SerializedChatVectorDBQAChain, _values: LoadValues ): Promise<ConversationalRetrievalQAChain> { throw new Error("Not implemented."); } serialize(): SerializedChatVectorDBQAChain { throw new Error("Not implemented."); } /** * Static method to create a new ConversationalRetrievalQAChain from a * BaseLanguageModel and a BaseRetriever. * @param llm {@link BaseLanguageModelInterface} instance used to generate a new question. * @param retriever {@link BaseRetrieverInterface} instance used to retrieve relevant documents. * @param options.returnSourceDocuments Whether to return source documents in the final output * @param options.questionGeneratorChainOptions Options to initialize the standalone question generation chain used as the first internal step * @param options.qaChainOptions {@link QAChainParams} used to initialize the QA chain used as the second internal step * @returns A new instance of ConversationalRetrievalQAChain. */ static fromLLM( llm: BaseLanguageModelInterface, retriever: BaseRetrieverInterface, options: { outputKey?: string; // not used returnSourceDocuments?: boolean; /** @deprecated Pass in questionGeneratorChainOptions.template instead */ questionGeneratorTemplate?: string; /** @deprecated Pass in qaChainOptions.prompt instead */ qaTemplate?: string; questionGeneratorChainOptions?: { llm?: BaseLanguageModelInterface; template?: string; }; qaChainOptions?: QAChainParams; } & Omit< ConversationalRetrievalQAChainInput, "retriever" | "combineDocumentsChain" | "questionGeneratorChain" > = {} ): ConversationalRetrievalQAChain { const { questionGeneratorTemplate, qaTemplate, qaChainOptions = { type: "stuff", prompt: qaTemplate ? PromptTemplate.fromTemplate(qaTemplate) : undefined, }, questionGeneratorChainOptions, verbose, ...rest } = options; const qaChain = loadQAChain(llm, qaChainOptions); const questionGeneratorChainPrompt = PromptTemplate.fromTemplate( questionGeneratorChainOptions?.template ?? questionGeneratorTemplate ?? question_generator_template ); const questionGeneratorChain = new LLMChain({ prompt: questionGeneratorChainPrompt, llm: questionGeneratorChainOptions?.llm ?? llm, verbose, }); const instance = new this({ retriever, combineDocumentsChain: qaChain, questionGeneratorChain, verbose, ...rest, }); return instance; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/transform.ts
import { CallbackManagerForChainRun, Callbacks, } from "@langchain/core/callbacks/manager"; import { ChainValues } from "@langchain/core/utils/types"; import { ChainInputs, BaseChain } from "./base.js"; /** * Interface that extends the `ChainInputs` interface and defines the * fields required for a transform chain. It includes the `transform` * function, `inputVariables`, and `outputVariables` properties. * * @deprecated * Switch to expression language: https://js.langchain.com/docs/expression_language/ * Will be removed in 0.2.0 */ export interface TransformChainFields< I extends ChainValues, O extends ChainValues > extends ChainInputs { transform: (values: I, callbacks?: Callbacks) => O | Promise<O>; inputVariables: (keyof I extends string ? keyof I : never)[]; outputVariables: (keyof O extends string ? keyof O : never)[]; } /** * Class that represents a transform chain. It extends the `BaseChain` * class and implements the `TransformChainFields` interface. It provides * a way to transform input values to output values using a specified * transform function. * * @deprecated * Switch to {@link https://js.langchain.com/docs/expression_language/ | expression language}. * Will be removed in 0.2.0 */ export class TransformChain< I extends ChainValues, O extends ChainValues > extends BaseChain { static lc_name() { return "TransformChain"; } transformFunc: (values: I, callbacks?: Callbacks) => O | Promise<O>; inputVariables: (keyof I extends string ? keyof I : never)[]; outputVariables: (keyof O extends string ? keyof O : never)[]; _chainType() { return "transform" as const; } get inputKeys() { return this.inputVariables; } get outputKeys() { return this.outputVariables; } constructor(fields: TransformChainFields<I, O>) { super(fields); this.transformFunc = fields.transform; this.inputVariables = fields.inputVariables; this.outputVariables = fields.outputVariables; } async _call(values: I, runManager?: CallbackManagerForChainRun): Promise<O> { return this.transformFunc(values, runManager?.getChild("transform")); } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/chains/load.ts
import { BaseChain } from "./base.js"; import { loadFromHub } from "../util/hub.js"; import { FileLoader, LoadValues, loadFromFile } from "../util/load.js"; import { parseFileConfig } from "../util/parse.js"; const loadChainFromFile: FileLoader<BaseChain> = async ( file: string, path: string, values: LoadValues = {} ) => { const serialized = parseFileConfig(file, path); return BaseChain.deserialize(serialized, values); }; /** * Load a chain from {@link https://github.com/hwchase17/langchain-hub | LangchainHub} or local filesystem. * * @example * Loading from LangchainHub: * ```ts * import { loadChain } from "langchain/chains/load"; * const chain = await loadChain("lc://chains/hello-world/chain.json"); * const res = await chain.call({ topic: "my favorite color" }); * ``` * * @example * Loading from local filesystem: * ```ts * import { loadChain } from "langchain/chains/load"; * const chain = await loadChain("/path/to/chain.json"); * ``` * * @deprecated Use newer {@link https://api.js.langchain.com/functions/langchain.load.load.html | load method}. */ export const loadChain = async ( uri: string, values: LoadValues = {} ): Promise<BaseChain> => { const hubResult = await loadFromHub( uri, loadChainFromFile, "chains", new Set(["json", "yaml"]), values ); if (hubResult) { return hubResult; } return loadFromFile(uri, loadChainFromFile, values); };
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/graph_qa/cypher.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { ChainValues } from "@langchain/core/utils/types"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { LLMChain } from "../llm_chain.js"; import { BaseChain, ChainInputs } from "../base.js"; import { CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT } from "./prompts.js"; import { logVersion020MigrationWarning } from "../../util/entrypoint_deprecation.js"; /* #__PURE__ */ logVersion020MigrationWarning({ oldEntrypointName: "chains/graph_qa/cypher", newPackageName: "@langchain/community", }); export const INTERMEDIATE_STEPS_KEY = "intermediateSteps"; export interface GraphCypherQAChainInput extends ChainInputs { // eslint-disable-next-line @typescript-eslint/no-explicit-any graph: any; cypherGenerationChain: LLMChain; qaChain: LLMChain; inputKey?: string; outputKey?: string; topK?: number; returnIntermediateSteps?: boolean; returnDirect?: boolean; } export interface FromLLMInput { // eslint-disable-next-line @typescript-eslint/no-explicit-any graph: any; llm?: BaseLanguageModelInterface; cypherLLM?: BaseLanguageModelInterface; qaLLM?: BaseLanguageModelInterface; qaPrompt?: BasePromptTemplate; cypherPrompt?: BasePromptTemplate; returnIntermediateSteps?: boolean; returnDirect?: boolean; } /** * Chain for question-answering against a graph by generating Cypher statements. * * @example * ```typescript * const chain = new GraphCypherQAChain({ * llm: new ChatOpenAI({ temperature: 0 }), * graph: new Neo4jGraph(), * }); * const res = await chain.invoke("Who played in Pulp Fiction?"); * ``` * * @security * This chain will execute Cypher statements against the provided database. * Make sure that the database connection uses credentials * that are narrowly-scoped to only include necessary permissions. * Failure to do so may result in data corruption or loss, since the calling code * may attempt commands that would result in deletion, mutation of data * if appropriately prompted or reading sensitive data if such data is present in the database. * The best way to guard against such negative outcomes is to (as appropriate) limit the * permissions granted to the credentials used with this tool. * * See https://js.langchain.com/docs/security for more information. */ export class GraphCypherQAChain extends BaseChain { // eslint-disable-next-line @typescript-eslint/no-explicit-any private graph: any; private cypherGenerationChain: LLMChain; private qaChain: LLMChain; private inputKey = "query"; private outputKey = "result"; private topK = 10; private returnDirect = false; private returnIntermediateSteps = false; constructor(props: GraphCypherQAChainInput) { super(props); const { graph, cypherGenerationChain, qaChain, inputKey, outputKey, topK, returnIntermediateSteps, returnDirect, } = props; this.graph = graph; this.cypherGenerationChain = cypherGenerationChain; this.qaChain = qaChain; if (inputKey) { this.inputKey = inputKey; } if (outputKey) { this.outputKey = outputKey; } if (topK) { this.topK = topK; } if (returnIntermediateSteps) { this.returnIntermediateSteps = returnIntermediateSteps; } if (returnDirect) { this.returnDirect = returnDirect; } } _chainType() { return "graph_cypher_chain" as const; } get inputKeys(): string[] { return [this.inputKey]; } get outputKeys(): string[] { return [this.outputKey]; } static fromLLM(props: FromLLMInput): GraphCypherQAChain { const { graph, qaPrompt = CYPHER_QA_PROMPT, cypherPrompt = CYPHER_GENERATION_PROMPT, llm, cypherLLM, qaLLM, returnIntermediateSteps = false, returnDirect = false, } = props; if (!cypherLLM && !llm) { throw new Error( "Either 'llm' or 'cypherLLM' parameters must be provided" ); } if (!qaLLM && !llm) { throw new Error("Either 'llm' or 'qaLLM' parameters must be provided"); } if (cypherLLM && qaLLM && llm) { throw new Error( "You can specify up to two of 'cypherLLM', 'qaLLM', and 'llm', but not all three simultaneously." ); } const qaChain = new LLMChain({ llm: (qaLLM || llm) as BaseLanguageModelInterface, prompt: qaPrompt, }); const cypherGenerationChain = new LLMChain({ llm: (cypherLLM || llm) as BaseLanguageModelInterface, prompt: cypherPrompt, }); return new GraphCypherQAChain({ cypherGenerationChain, qaChain, graph, returnIntermediateSteps, returnDirect, }); } private extractCypher(text: string): string { const pattern = /```(.*?)```/s; const matches = text.match(pattern); return matches ? matches[1] : text; } async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { const callbacks = runManager?.getChild(); const question = values[this.inputKey]; const intermediateSteps = []; const generatedCypher = await this.cypherGenerationChain.call( { question, schema: this.graph.getSchema() }, callbacks ); const extractedCypher = this.extractCypher(generatedCypher.text); await runManager?.handleText(`Generated Cypher:\n`); await runManager?.handleText(`${extractedCypher} green\n`); intermediateSteps.push({ query: extractedCypher }); let chainResult: ChainValues; const context = await this.graph.query(extractedCypher, { topK: this.topK, }); if (this.returnDirect) { chainResult = { [this.outputKey]: context }; } else { await runManager?.handleText("Full Context:\n"); await runManager?.handleText(`${context} green\n`); intermediateSteps.push({ context }); const result = await this.qaChain.call( { question, context: JSON.stringify(context) }, callbacks ); chainResult = { [this.outputKey]: result[this.qaChain.outputKey], }; } if (this.returnIntermediateSteps) { chainResult[INTERMEDIATE_STEPS_KEY] = intermediateSteps; } return chainResult; } }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/graph_qa/prompts.ts
import { PromptTemplate } from "@langchain/core/prompts"; const CYPHER_GENERATION_TEMPLATE = `Task:Generate Cypher statement to query a graph database. Instructions: Use only the provided relationship types and properties in the schema. Do not use any other relationship types or properties that are not provided. Schema: {schema} Note: Do not include any explanations or apologies in your responses. Do not respond to any questions that might ask anything else than for you to construct a Cypher statement. Do not include any text except the generated Cypher statement. The question is: {question}`; export const CYPHER_GENERATION_PROMPT = /* #__PURE__ */ new PromptTemplate({ template: CYPHER_GENERATION_TEMPLATE, inputVariables: ["schema", "question"], }); const CYPHER_QA_TEMPLATE = `You are an assistant that helps to form nice and human understandable answers. The information part contains the provided information that you must use to construct an answer. The provided information is authoritative, you must never doubt it or try to use your internal knowledge to correct it. Make the answer sound as a response to the question. Do not mention that you based the result on the given information. Here is an example: Question: Which managers own Neo4j stocks? Context:[manager:CTL LLC, manager:JANE STREET GROUP LLC] Helpful Answer: CTL LLC, JANE STREET GROUP LLC owns Neo4j stocks. Follow this example when generating answers. If the provided information is empty, say that you don't know the answer. Information: {context} Question: {question} Helpful Answer:`; export const CYPHER_QA_PROMPT = /* #__PURE__ */ new PromptTemplate({ template: CYPHER_QA_TEMPLATE, inputVariables: ["context", "question"], });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/summarization/stuff_prompts.ts
/* eslint-disable spaced-comment */ import { PromptTemplate } from "@langchain/core/prompts"; const template = `Write a concise summary of the following: "{text}" CONCISE SUMMARY:`; export const DEFAULT_PROMPT = /*#__PURE__*/ new PromptTemplate({ template, inputVariables: ["text"], });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/summarization/refine_prompts.ts
import { PromptTemplate } from "@langchain/core/prompts"; const refinePromptTemplate = `Your job is to produce a final summary We have provided an existing summary up to a certain point: "{existing_answer}" We have the opportunity to refine the existing summary (only if needed) with some more context below. ------------ "{text}" ------------ Given the new context, refine the original summary If the context isn't useful, return the original summary. REFINED SUMMARY:`; export const REFINE_PROMPT = /* #__PURE__ */ new PromptTemplate({ template: refinePromptTemplate, inputVariables: ["existing_answer", "text"], });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/summarization/load.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { LLMChain } from "../llm_chain.js"; import { StuffDocumentsChain, MapReduceDocumentsChain, RefineDocumentsChain, MapReduceDocumentsChainInput, } from "../combine_docs_chain.js"; import { DEFAULT_PROMPT } from "./stuff_prompts.js"; import { REFINE_PROMPT } from "./refine_prompts.js"; /** * Type for the base parameters that can be used to configure a * summarization chain. */ type BaseParams = { verbose?: boolean; }; /** @interface */ export type SummarizationChainParams = BaseParams & ( | { type?: "stuff"; prompt?: BasePromptTemplate; } | ({ type?: "map_reduce"; combineMapPrompt?: BasePromptTemplate; combinePrompt?: BasePromptTemplate; combineLLM?: BaseLanguageModelInterface; } & Pick<MapReduceDocumentsChainInput, "returnIntermediateSteps">) | { type?: "refine"; refinePrompt?: BasePromptTemplate; refineLLM?: BaseLanguageModelInterface; questionPrompt?: BasePromptTemplate; } ); export const loadSummarizationChain = ( llm: BaseLanguageModelInterface, params: SummarizationChainParams = { type: "map_reduce" } ) => { const { verbose } = params; if (params.type === "stuff") { const { prompt = DEFAULT_PROMPT } = params; const llmChain = new LLMChain({ prompt, llm, verbose }); const chain = new StuffDocumentsChain({ llmChain, documentVariableName: "text", verbose, }); return chain; } if (params.type === "map_reduce") { const { combineMapPrompt = DEFAULT_PROMPT, combinePrompt = DEFAULT_PROMPT, combineLLM, returnIntermediateSteps, } = params; const llmChain = new LLMChain({ prompt: combineMapPrompt, llm, verbose }); const combineLLMChain = new LLMChain({ prompt: combinePrompt, llm: combineLLM ?? llm, verbose, }); const combineDocumentChain = new StuffDocumentsChain({ llmChain: combineLLMChain, documentVariableName: "text", verbose, }); const chain = new MapReduceDocumentsChain({ llmChain, combineDocumentChain, documentVariableName: "text", returnIntermediateSteps, verbose, }); return chain; } if (params.type === "refine") { const { refinePrompt = REFINE_PROMPT, refineLLM, questionPrompt = DEFAULT_PROMPT, } = params; const llmChain = new LLMChain({ prompt: questionPrompt, llm, verbose }); const refineLLMChain = new LLMChain({ prompt: refinePrompt, llm: refineLLM ?? llm, verbose, }); const chain = new RefineDocumentsChain({ llmChain, refineLLMChain, documentVariableName: "text", verbose, }); return chain; } throw new Error(`Invalid _type: ${params.type}`); };
0
lc_public_repos/langchainjs/langchain/src/chains/summarization
lc_public_repos/langchainjs/langchain/src/chains/summarization/tests/load.int.test.ts
import { test } from "@jest/globals"; import { OpenAI } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { loadSummarizationChain } from "../load.js"; test("Test loadSummzationChain stuff", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = loadSummarizationChain(model, { type: "stuff" }); const docs = [ new Document({ pageContent: "foo" }), new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Whats up" }); // console.log({ res }); }); test("Test loadSummarizationChain map_reduce", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = loadSummarizationChain(model, { type: "map_reduce" }); const docs = [ new Document({ pageContent: "foo" }), new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Whats up" }); // console.log({ res }); }); test("Test loadSummarizationChain refine", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = loadSummarizationChain(model, { type: "refine" }); const docs = [ new Document({ pageContent: "foo" }), new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Whats up" }); // console.log({ res }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/api/api_chain.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { BaseChain, ChainInputs } from "../base.js"; import { SerializedAPIChain } from "../serde.js"; import { LLMChain } from "../llm_chain.js"; import { API_URL_PROMPT_TEMPLATE, API_RESPONSE_PROMPT_TEMPLATE, } from "./prompts.js"; /** * Interface that extends ChainInputs and defines additional input * parameters specific to an APIChain. */ export interface APIChainInput extends Omit<ChainInputs, "memory"> { apiAnswerChain: LLMChain; apiRequestChain: LLMChain; apiDocs: string; inputKey?: string; headers?: Record<string, string>; /** Key to use for output, defaults to `output` */ outputKey?: string; } /** * Type that defines optional configuration options for an APIChain. */ export type APIChainOptions = { headers?: Record<string, string>; apiUrlPrompt?: BasePromptTemplate; apiResponsePrompt?: BasePromptTemplate; }; /** * Class that extends BaseChain and represents a chain specifically * designed for making API requests and processing API responses. */ export class APIChain extends BaseChain implements APIChainInput { apiAnswerChain: LLMChain; apiRequestChain: LLMChain; apiDocs: string; headers = {}; inputKey = "question"; outputKey = "output"; get inputKeys() { return [this.inputKey]; } get outputKeys() { return [this.outputKey]; } constructor(fields: APIChainInput) { super(fields); this.apiRequestChain = fields.apiRequestChain; this.apiAnswerChain = fields.apiAnswerChain; this.apiDocs = fields.apiDocs; this.inputKey = fields.inputKey ?? this.inputKey; this.outputKey = fields.outputKey ?? this.outputKey; this.headers = fields.headers ?? this.headers; } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { const question: string = values[this.inputKey]; const api_url = await this.apiRequestChain.predict( { question, api_docs: this.apiDocs }, runManager?.getChild("request") ); const res = await fetch(api_url, { headers: this.headers }); const api_response = await res.text(); const answer = await this.apiAnswerChain.predict( { question, api_docs: this.apiDocs, api_url, api_response }, runManager?.getChild("response") ); return { [this.outputKey]: answer }; } _chainType() { return "api_chain" as const; } static async deserialize(data: SerializedAPIChain) { const { api_request_chain, api_answer_chain, api_docs } = data; if (!api_request_chain) { throw new Error("LLMChain must have api_request_chain"); } if (!api_answer_chain) { throw new Error("LLMChain must have api_answer_chain"); } if (!api_docs) { throw new Error("LLMChain must have api_docs"); } return new APIChain({ apiAnswerChain: await LLMChain.deserialize(api_answer_chain), apiRequestChain: await LLMChain.deserialize(api_request_chain), apiDocs: api_docs, }); } serialize(): SerializedAPIChain { return { _type: this._chainType(), api_answer_chain: this.apiAnswerChain.serialize(), api_request_chain: this.apiRequestChain.serialize(), api_docs: this.apiDocs, }; } /** * Static method to create a new APIChain from a BaseLanguageModel and API * documentation. * @param llm BaseLanguageModel instance. * @param apiDocs API documentation. * @param options Optional configuration options for the APIChain. * @returns New APIChain instance. */ static fromLLMAndAPIDocs( llm: BaseLanguageModelInterface, apiDocs: string, options: APIChainOptions & Omit<APIChainInput, "apiAnswerChain" | "apiRequestChain" | "apiDocs"> = {} ): APIChain { const { apiUrlPrompt = API_URL_PROMPT_TEMPLATE, apiResponsePrompt = API_RESPONSE_PROMPT_TEMPLATE, } = options; const apiRequestChain = new LLMChain({ prompt: apiUrlPrompt, llm }); const apiAnswerChain = new LLMChain({ prompt: apiResponsePrompt, llm }); return new this({ apiAnswerChain, apiRequestChain, apiDocs, ...options, }); } }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/api/prompts.ts
/* eslint-disable spaced-comment */ import { PromptTemplate } from "@langchain/core/prompts"; export const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation: {api_docs} Using this documentation, generate the full API url to call for answering the user question. You should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call. Question:{question} API url:`; export const API_URL_PROMPT_TEMPLATE = /* #__PURE__ */ new PromptTemplate({ inputVariables: ["api_docs", "question"], template: API_URL_RAW_PROMPT_TEMPLATE, }); export const API_RESPONSE_RAW_PROMPT_TEMPLATE = `${API_URL_RAW_PROMPT_TEMPLATE} {api_url} Here is the response from the API: {api_response} Summarize this response to answer the original question. Summary:`; export const API_RESPONSE_PROMPT_TEMPLATE = /* #__PURE__ */ new PromptTemplate({ inputVariables: ["api_docs", "question", "api_url", "api_response"], template: API_RESPONSE_RAW_PROMPT_TEMPLATE, });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/sql_db/sql_db_chain.ts
import type { BaseLanguageModel, BaseLanguageModelInterface, } from "@langchain/core/language_models/base"; import type { TiktokenModel } from "js-tiktoken/lite"; import type { OpenAI } from "@langchain/openai"; import { ChainValues } from "@langchain/core/utils/types"; import { BasePromptTemplate, PromptTemplate } from "@langchain/core/prompts"; import { calculateMaxTokens, getModelContextSize, } from "@langchain/core/language_models/base"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { DEFAULT_SQL_DATABASE_PROMPT, SQL_PROMPTS_MAP, SqlDialect, } from "./sql_db_prompt.js"; import { BaseChain, ChainInputs } from "../base.js"; import { LLMChain } from "../llm_chain.js"; import type { SqlDatabase } from "../../sql_db.js"; import { getPromptTemplateFromDataSource } from "../../util/sql_utils.js"; /** * Interface that extends the ChainInputs interface and defines additional * fields specific to a SQL database chain. It represents the input fields * for a SQL database chain. */ export interface SqlDatabaseChainInput extends ChainInputs { llm: BaseLanguageModelInterface; database: SqlDatabase; topK?: number; inputKey?: string; outputKey?: string; sqlOutputKey?: string; prompt?: PromptTemplate; } /** * Class that represents a SQL database chain in the LangChain framework. * It extends the BaseChain class and implements the functionality * specific to a SQL database chain. * * @security **Security Notice** * This chain generates SQL queries for the given database. * The SQLDatabase class provides a getTableInfo method that can be used * to get column information as well as sample data from the table. * To mitigate risk of leaking sensitive data, limit permissions * to read and scope to the tables that are needed. * Optionally, use the includesTables or ignoreTables class parameters * to limit which tables can/cannot be accessed. * * @link See https://js.langchain.com/docs/security for more information. * @example * ```typescript * const chain = new SqlDatabaseChain({ * llm: new OpenAI({ temperature: 0 }), * database: new SqlDatabase({ ...config }), * }); * * const result = await chain.run("How many tracks are there?"); * ``` */ export class SqlDatabaseChain extends BaseChain { static lc_name() { return "SqlDatabaseChain"; } // LLM wrapper to use llm: BaseLanguageModelInterface; // SQL Database to connect to. database: SqlDatabase; // Prompt to use to translate natural language to SQL. prompt = DEFAULT_SQL_DATABASE_PROMPT; // Number of results to return from the query topK = 5; inputKey = "query"; outputKey = "result"; sqlOutputKey: string | undefined = undefined; // Whether to return the result of querying the SQL table directly. returnDirect = false; constructor(fields: SqlDatabaseChainInput) { super(fields); this.llm = fields.llm; this.database = fields.database; this.topK = fields.topK ?? this.topK; this.inputKey = fields.inputKey ?? this.inputKey; this.outputKey = fields.outputKey ?? this.outputKey; this.sqlOutputKey = fields.sqlOutputKey ?? this.sqlOutputKey; this.prompt = fields.prompt ?? getPromptTemplateFromDataSource(this.database.appDataSource); } /** @ignore */ async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { const llmChain = new LLMChain({ prompt: this.prompt, llm: this.llm, outputKey: this.outputKey, memory: this.memory, }); if (!(this.inputKey in values)) { throw new Error(`Question key ${this.inputKey} not found.`); } const question: string = values[this.inputKey]; let inputText = `${question}\nSQLQuery:`; const tablesToUse = values.table_names_to_use; const tableInfo = await this.database.getTableInfo(tablesToUse); const llmInputs = { input: inputText, top_k: this.topK, dialect: this.database.appDataSourceOptions.type, table_info: tableInfo, stop: ["\nSQLResult:"], }; await this.verifyNumberOfTokens(inputText, tableInfo); const sqlCommand = await llmChain.predict( llmInputs, runManager?.getChild("sql_generation") ); let queryResult = ""; try { queryResult = await this.database.appDataSource.query(sqlCommand); } catch (error) { console.error(error); } let finalResult; if (this.returnDirect) { finalResult = { [this.outputKey]: queryResult }; } else { inputText += `${sqlCommand}\nSQLResult: ${JSON.stringify( queryResult )}\nAnswer:`; llmInputs.input = inputText; finalResult = { [this.outputKey]: await llmChain.predict( llmInputs, runManager?.getChild("result_generation") ), }; } if (this.sqlOutputKey != null) { finalResult[this.sqlOutputKey] = sqlCommand; } return finalResult; } _chainType() { return "sql_database_chain" as const; } get inputKeys(): string[] { return [this.inputKey]; } get outputKeys(): string[] { if (this.sqlOutputKey != null) { return [this.outputKey, this.sqlOutputKey]; } return [this.outputKey]; } /** * Private method that verifies the number of tokens in the input text and * table information. It throws an error if the number of tokens exceeds * the maximum allowed by the language model. * @param inputText The input text. * @param tableinfo The table information. * @returns A promise that resolves when the verification is complete. */ private async verifyNumberOfTokens( inputText: string, tableinfo: string ): Promise<void> { // We verify it only for OpenAI for the moment if (this.llm._llmType() !== "openai") { return; } const llm = this.llm as OpenAI; const promptTemplate = this.prompt.template; const stringWeSend = `${inputText}${promptTemplate}${tableinfo}`; const maxToken = await calculateMaxTokens({ prompt: stringWeSend, // Cast here to allow for other models that may not fit the union modelName: llm.modelName as TiktokenModel, }); if (maxToken < llm.maxTokens) { throw new Error(`The combination of the database structure and your question is too big for the model ${ llm.modelName } which can compute only a max tokens of ${getModelContextSize( llm.modelName )}. We suggest you to use the includeTables parameters when creating the SqlDatabase object to select only a subset of the tables. You can also use a model which can handle more tokens.`); } } } export interface CreateSqlQueryChainFields { llm: BaseLanguageModel; db: SqlDatabase; prompt?: BasePromptTemplate; /** * @default 5 */ k?: number; dialect: SqlDialect; } type SqlInput = { question: string; }; type SqlInoutWithTables = SqlInput & { tableNamesToUse: string[]; }; const strip = (text: string) => { // Replace escaped quotes with actual quotes let newText = text.replace(/\\"/g, '"').trim(); // Remove wrapping quotes if the entire string is wrapped in quotes if (newText.startsWith('"') && newText.endsWith('"')) { newText = newText.substring(1, newText.length - 1); } return newText; }; const difference = (setA: Set<string>, setB: Set<string>) => new Set([...setA].filter((x) => !setB.has(x))); /** * Create a SQL query chain that can create SQL queries for the given database. * Returns a Runnable. * * @param {BaseLanguageModel} llm The language model to use in the chain. * @param {SqlDatabase} db The database to use in the chain. * @param {BasePromptTemplate | undefined} prompt The prompt to use in the chain. * @param {BaseLanguageModel | undefined} k The amount of docs/results to return. Passed through the prompt input value `top_k`. * @param {SqlDialect} dialect The SQL dialect to use in the chain. * @returns {Promise<RunnableSequence<Record<string, unknown>, string>>} A runnable sequence representing the chain. * @example ```typescript * const datasource = new DataSource({ * type: "sqlite", * database: "../../../../Chinook.db", * }); * const db = await SqlDatabase.fromDataSourceParams({ * appDataSource: datasource, * }); * const llm = new ChatOpenAI({ temperature: 0 }); * const chain = await createSqlQueryChain({ * llm, * db, * dialect: "sqlite", * }); * ``` */ export async function createSqlQueryChain({ llm, db, prompt, k = 5, dialect, }: CreateSqlQueryChainFields) { let promptToUse: BasePromptTemplate; if (prompt) { promptToUse = prompt; } else if (SQL_PROMPTS_MAP[dialect]) { promptToUse = SQL_PROMPTS_MAP[dialect]; } else { promptToUse = DEFAULT_SQL_DATABASE_PROMPT; } if ( difference( new Set(["input", "top_k", "table_info"]), new Set(promptToUse.inputVariables) ).size > 0 ) { throw new Error( `Prompt must have input variables: 'input', 'top_k', 'table_info'. Received prompt with input variables: ` + `${promptToUse.inputVariables}. Full prompt:\n\n${promptToUse}` ); } if (promptToUse.inputVariables.includes("dialect")) { promptToUse = await promptToUse.partial({ dialect }); } promptToUse = await promptToUse.partial({ top_k: k.toString() }); const inputs = { input: (x: Record<string, unknown>) => { if ("question" in x) { return `${(x as SqlInput).question}\nSQLQuery: `; } throw new Error("Input must include a question property."); }, table_info: async (x: Record<string, unknown>) => db.getTableInfo((x as SqlInoutWithTables).tableNamesToUse), }; return RunnableSequence.from([ RunnablePassthrough.assign(inputs), (x) => { const newInputs = { ...x }; delete newInputs.question; delete newInputs.tableNamesToUse; return newInputs; }, promptToUse, llm.bind({ stop: ["\nSQLResult:"] }), new StringOutputParser(), strip, ]); }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/sql_db/index.ts
export { SqlDatabaseChain, type SqlDatabaseChainInput, type CreateSqlQueryChainFields, createSqlQueryChain, } from "./sql_db_chain.js"; export { DEFAULT_SQL_DATABASE_PROMPT, SQL_POSTGRES_PROMPT, SQL_SQLITE_PROMPT, SQL_MSSQL_PROMPT, SQL_MYSQL_PROMPT, SQL_SAP_HANA_PROMPT, SQL_PROMPTS_MAP, } from "./sql_db_prompt.js";
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/sql_db/sql_db_prompt.ts
/* eslint-disable spaced-comment */ import { BasePromptTemplate, PromptTemplate } from "@langchain/core/prompts"; export const DEFAULT_SQL_DATABASE_PROMPT = /*#__PURE__*/ new PromptTemplate({ template: `Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database. Never query for all the columns from a specific table, only ask for a the few relevant columns given the question. Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Use the following format: Question: "Question here" SQLQuery: "SQL Query to run" SQLResult: "Result of the SQLQuery" Answer: "Final answer here" Only use the tables listed below. {table_info} Question: {input}`, inputVariables: ["dialect", "table_info", "input", "top_k"], }); export const SQL_POSTGRES_PROMPT = /*#__PURE__*/ new PromptTemplate({ template: `You are a PostgreSQL expert. Given an input question, first create a syntactically correct PostgreSQL query to run, then look at the results of the query and return the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL. You can order the results to return the most informative data in the database. Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Use the following format: Question: "Question here" SQLQuery: "SQL Query to run" SQLResult: "Result of the SQLQuery" Answer: "Final answer here" Only use the following tables: {table_info} Question: {input}`, inputVariables: ["dialect", "table_info", "input", "top_k"], }); export const SQL_SQLITE_PROMPT = /*#__PURE__*/ new PromptTemplate({ template: `You are a SQLite expert. Given an input question, first create a syntactically correct SQLite query to run, then look at the results of the query and return the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per SQLite. You can order the results to return the most informative data in the database. Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Use the following format: Question: "Question here" SQLQuery: "SQL Query to run" SQLResult: "Result of the SQLQuery" Answer: "Final answer here" Only use the following tables: {table_info} Question: {input}`, inputVariables: ["dialect", "table_info", "input", "top_k"], }); export const SQL_MYSQL_PROMPT = /*#__PURE__*/ new PromptTemplate({ template: `You are a MySQL expert. Given an input question, first create a syntactically correct MySQL query to run, then look at the results of the query and return the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MySQL. You can order the results to return the most informative data in the database. Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (\`) to denote them as delimited identifiers. Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Use the following format: Question: "Question here" SQLQuery: "SQL Query to run" SQLResult: "Result of the SQLQuery" Answer: "Final answer here" Only use the following tables: {table_info} Question: {input}`, inputVariables: ["dialect", "table_info", "input", "top_k"], }); export const SQL_MSSQL_PROMPT = /*#__PURE__*/ new PromptTemplate({ template: `You are an MS SQL expert. Given an input question, first create a syntactically correct MS SQL query to run, then look at the results of the query and return the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the TOP clause as per MS SQL. You can order the results to return the most informative data in the database. Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in square brackets ([]) to denote them as delimited identifiers. Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Use the following format: Question: "Question here" SQLQuery: "SQL Query to run" SQLResult: "Result of the SQLQuery" Answer: "Final answer here" Only use the following tables: {table_info} Question: {input}`, inputVariables: ["dialect", "table_info", "input", "top_k"], }); export const SQL_SAP_HANA_PROMPT = /*#__PURE__*/ new PromptTemplate({ template: `You are a SAP HANA expert. Given an input question, first create a syntactically correct SAP HANA query to run, then look at the results of the query and return the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per SAP HANA. You can order the results to return the most informative data in the database. Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Always use a schema name when executing a query. Use the following format: Question: "Question here" SQLQuery: "SQL Query to run" SQLResult: "Result of the SQLQuery" Answer: "Final answer here" Only use the following tables: {table_info} Question: {input}`, inputVariables: ["dialect", "table_info", "input", "top_k"], }); export const SQL_ORACLE_PROMPT = /*#__PURE__*/ new PromptTemplate({ template: `You are a ORACLE expert. Given an input question, first create a syntactically correct ORACLE query to run, then look at the results of the query and return the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the ROWNUM clause as per ORACLE. You can order the results to return the most informative data in the database. Never query for all columns from a table. You must query only the columns that are needed to answer the question. Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Use the following format: Question: "Question here" SQLQuery: "SQL Query to run" SQLResult: "Result of the SQLQuery" Answer: "Final answer here" Only use the following tables: {table_info} Question: {input}`, inputVariables: ["dialect", "table_info", "input", "top_k"], }); export type SqlDialect = | "oracle" | "postgres" | "sqlite" | "mysql" | "mssql" | "sap hana"; export const SQL_PROMPTS_MAP: Record<SqlDialect, BasePromptTemplate> = { oracle: SQL_ORACLE_PROMPT, postgres: SQL_POSTGRES_PROMPT, sqlite: SQL_SQLITE_PROMPT, mysql: SQL_MYSQL_PROMPT, mssql: SQL_MSSQL_PROMPT, "sap hana": SQL_SAP_HANA_PROMPT, };
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/router/multi_retrieval_prompt.ts
export const MULTI_RETRIEVAL_ROUTER_TEMPLATE = `Given a query to a question answering system, select the system best suited for the input. You will be given the names of the available systems and a description of what questions the system is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response. << FORMATTING >> Return a markdown code snippet with a JSON object formatted to look like: \`\`\`json {{{{ "destination": string \\ name of the question answering system to use or "DEFAULT" "next_inputs": string \\ a potentially modified version of the original input }}}} \`\`\` REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts. REMEMBER: "next_inputs" can just be the original input if you don't think any modifications are needed. << CANDIDATE PROMPTS >> {destinations} << INPUT >> {{input}} << OUTPUT >> `; export const STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE = ( formatting: string ) => `Given a query to a question answering system, select the system best suited for the input. You will be given the names of the available systems and a description of what questions the system is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response. << FORMATTING >> ${formatting} REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts. REMEMBER: "next_inputs.query" can just be the original input if you don't think any modifications are needed. << CANDIDATE PROMPTS >> {destinations} << INPUT >> {{input}} << OUTPUT >> `;
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/router/multi_route.ts
import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun, Callbacks, } from "@langchain/core/callbacks/manager"; import { BaseChain, ChainInputs } from "../../chains/base.js"; /** * A type that represents the inputs for the MultiRouteChain. It is a * recursive type that can contain nested objects, arrays, strings, and * numbers. */ type Inputs = { [key: string]: Inputs | Inputs[] | string | string[] | number | number[]; }; /** * An interface that represents the route returned by the RouterChain. It * includes optional fields for the destination and nextInputs. */ export interface Route { destination?: string; nextInputs: { [key: string]: Inputs }; } /** * An interface that extends the ChainInputs interface and adds additional * properties for the routerChain, destinationChains, defaultChain, and * silentErrors. It represents the input expected by the MultiRouteChain * class. */ export interface MultiRouteChainInput extends ChainInputs { routerChain: RouterChain; destinationChains: { [name: string]: BaseChain }; defaultChain: BaseChain; silentErrors?: boolean; } /** * A class that represents a router chain. It * extends the BaseChain class and provides functionality for routing * inputs to different chains. */ export abstract class RouterChain extends BaseChain { get outputKeys(): string[] { return ["destination", "next_inputs"]; } async route(inputs: ChainValues, callbacks?: Callbacks): Promise<Route> { const result = await this.call(inputs, callbacks); return { destination: result.destination, nextInputs: result.next_inputs, }; } } /** * A class that represents a multi-route chain. * It extends the BaseChain class and provides functionality for routing * inputs to different chains based on a router chain. */ export class MultiRouteChain extends BaseChain { static lc_name() { return "MultiRouteChain"; } routerChain: RouterChain; destinationChains: { [name: string]: BaseChain }; defaultChain: BaseChain; silentErrors = false; constructor(fields: MultiRouteChainInput) { super(fields); this.routerChain = fields.routerChain; this.destinationChains = fields.destinationChains; this.defaultChain = fields.defaultChain; this.silentErrors = fields.silentErrors ?? this.silentErrors; } get inputKeys(): string[] { return this.routerChain.inputKeys; } get outputKeys(): string[] { return []; } async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { const { destination, nextInputs } = await this.routerChain.route( values, runManager?.getChild() ); await runManager?.handleText( `${destination}: ${JSON.stringify(nextInputs)}` ); if (!destination) { return this.defaultChain .call(nextInputs, runManager?.getChild()) .catch((err) => { throw new Error(`Error in default chain: ${err}`); }); } if (destination in this.destinationChains) { return this.destinationChains[destination] .call(nextInputs, runManager?.getChild()) .catch((err) => { throw new Error(`Error in ${destination} chain: ${err}`); }); } if (this.silentErrors) { return this.defaultChain .call(nextInputs, runManager?.getChild()) .catch((err) => { throw new Error(`Error in default chain: ${err}`); }); } throw new Error( `Destination ${destination} not found in destination chains with keys ${Object.keys( this.destinationChains )}` ); } _chainType(): string { return "multi_route_chain"; } }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/router/multi_prompt.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { z } from "zod"; import { interpolateFString, PromptTemplate } from "@langchain/core/prompts"; import { MultiRouteChain, MultiRouteChainInput } from "./multi_route.js"; import { STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE } from "./multi_prompt_prompt.js"; import { BaseChain } from "../../chains/base.js"; import { LLMChain, LLMChainInput } from "../../chains/llm_chain.js"; import { LLMRouterChain } from "./llm_router.js"; import { ConversationChain } from "../../chains/conversation.js"; import { zipEntries } from "./utils.js"; import { RouterOutputParser } from "../../output_parsers/router.js"; /** * A class that represents a multi-prompt chain in the LangChain * framework. It extends the MultiRouteChain class and provides additional * functionality specific to multi-prompt chains. * @example * ```typescript * const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(new ChatOpenAI(), { * promptNames: ["physics", "math", "history"], * promptDescriptions: [ * "Good for answering questions about physics", * "Good for answering math questions", * "Good for answering questions about history", * ], * promptTemplates: [ * `You are a very smart physics professor. Here is a question:\n{input}\n`, * `You are a very good mathematician. Here is a question:\n{input}\n`, * `You are a very smart history professor. Here is a question:\n{input}\n`, * ], * }); * const result = await multiPromptChain.call({ * input: "What is the speed of light?", * }); * ``` */ export class MultiPromptChain extends MultiRouteChain { /** * @deprecated Use `fromLLMAndPrompts` instead */ static fromPrompts( llm: BaseLanguageModelInterface, promptNames: string[], promptDescriptions: string[], promptTemplates: string[] | PromptTemplate[], defaultChain?: BaseChain, options?: Omit<MultiRouteChainInput, "defaultChain"> ) { return MultiPromptChain.fromLLMAndPrompts(llm, { promptNames, promptDescriptions, promptTemplates, defaultChain, multiRouteChainOpts: options, }); } /** * A static method that creates an instance of MultiPromptChain from a * BaseLanguageModel and a set of prompts. It takes in optional parameters * for the default chain and additional options. * @param llm A BaseLanguageModel instance. * @param promptNames An array of prompt names. * @param promptDescriptions An array of prompt descriptions. * @param promptTemplates An array of prompt templates. * @param defaultChain An optional BaseChain instance to be used as the default chain. * @param llmChainOpts Optional parameters for the LLMChainInput, excluding 'llm' and 'prompt'. * @param conversationChainOpts Optional parameters for the LLMChainInput, excluding 'llm' and 'outputKey'. * @param multiRouteChainOpts Optional parameters for the MultiRouteChainInput, excluding 'defaultChain'. * @returns An instance of MultiPromptChain. */ static fromLLMAndPrompts( llm: BaseLanguageModelInterface, { promptNames, promptDescriptions, promptTemplates, defaultChain, llmChainOpts, conversationChainOpts, multiRouteChainOpts, }: { promptNames: string[]; promptDescriptions: string[]; promptTemplates: string[] | PromptTemplate[]; defaultChain?: BaseChain; llmChainOpts?: Omit<LLMChainInput, "llm" | "prompt">; conversationChainOpts?: Omit<LLMChainInput, "llm" | "outputKey">; multiRouteChainOpts?: Omit<MultiRouteChainInput, "defaultChain">; } ): MultiPromptChain { const destinations = zipEntries(promptNames, promptDescriptions).map( ([name, desc]) => `${name}: ${desc}` ); const structuredOutputParserSchema = z.object({ destination: z .string() .optional() .describe('name of the question answering system to use or "DEFAULT"'), next_inputs: z .object({ input: z .string() .describe("a potentially modified version of the original input"), }) .describe("input to be fed to the next model"), }); const outputParser = new RouterOutputParser(structuredOutputParserSchema); const destinationsStr = destinations.join("\n"); const routerTemplate = interpolateFString( STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE( outputParser.getFormatInstructions({ interpolationDepth: 4 }) ), { destinations: destinationsStr, } ); const routerPrompt = new PromptTemplate({ template: routerTemplate, inputVariables: ["input"], outputParser, }); const routerChain = LLMRouterChain.fromLLM(llm, routerPrompt); const destinationChains = zipEntries<[string, string | PromptTemplate]>( promptNames, promptTemplates ).reduce((acc, [name, template]) => { let myPrompt: string | PromptTemplate; if (typeof template === "object") { myPrompt = template; } else if (typeof template === "string") { myPrompt = new PromptTemplate({ template: template as string, inputVariables: ["input"], }); } else { throw new Error("Invalid prompt template"); } acc[name as string] = new LLMChain({ ...llmChainOpts, llm, prompt: myPrompt, }); return acc; }, {} as { [name: string]: LLMChain }); const convChain = new ConversationChain({ ...conversationChainOpts, llm, outputKey: "text", }); return new MultiPromptChain({ ...multiRouteChainOpts, routerChain, destinationChains, defaultChain: defaultChain ?? convChain, }); } _chainType(): string { return "multi_prompt_chain"; } }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/router/multi_retrieval_qa.ts
import { z } from "zod"; import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import type { BaseRetrieverInterface } from "@langchain/core/retrievers"; import { interpolateFString, PromptTemplate } from "@langchain/core/prompts"; import { MultiRouteChain, MultiRouteChainInput } from "./multi_route.js"; import { BaseChain } from "../../chains/base.js"; import { LLMRouterChain } from "./llm_router.js"; import { ConversationChain, DEFAULT_TEMPLATE, } from "../../chains/conversation.js"; import { STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE } from "./multi_retrieval_prompt.js"; import { zipEntries } from "./utils.js"; import { RetrievalQAChain, RetrievalQAChainInput, } from "../../chains/retrieval_qa.js"; import { RouterOutputParser } from "../../output_parsers/router.js"; /** * A type that represents the default values for the MultiRetrievalQAChain * class. It includes optional properties for the default retriever, * default prompt, and default chain. */ export type MultiRetrievalDefaults = { defaultRetriever?: BaseRetrieverInterface; defaultPrompt?: PromptTemplate; defaultChain?: BaseChain; }; /** * A class that represents a multi-retrieval question answering chain in * the LangChain framework. It extends the MultiRouteChain class and * provides additional functionality specific to multi-retrieval QA * chains. * @example * ```typescript * const multiRetrievalQAChain = MultiRetrievalQAChain.fromLLMAndRetrievers( * new ChatOpenAI(), * { * retrieverNames: ["aqua teen", "mst3k", "animaniacs"], * retrieverDescriptions: [ * "Good for answering questions about Aqua Teen Hunger Force theme song", * "Good for answering questions about Mystery Science Theater 3000 theme song", * "Good for answering questions about Animaniacs theme song", * ], * retrievers: [ * new MemoryVectorStore().asRetriever(3), * new MemoryVectorStore().asRetriever(3), * new MemoryVectorStore().asRetriever(3), * ], * retrievalQAChainOpts: { * returnSourceDocuments: true, * }, * }, * ); * * const result = await multiRetrievalQAChain.call({ * input: * "In the Aqua Teen Hunger Force theme song, who calls himself the mike rula?", * }); * * console.log(result.sourceDocuments, result.text); * ``` */ export class MultiRetrievalQAChain extends MultiRouteChain { get outputKeys(): string[] { return ["result"]; } /** * @deprecated Use `fromRetrieversAndPrompts` instead */ static fromRetrievers( llm: BaseLanguageModelInterface, retrieverNames: string[], retrieverDescriptions: string[], retrievers: BaseRetrieverInterface[], retrieverPrompts?: PromptTemplate[], defaults?: MultiRetrievalDefaults, options?: Omit<MultiRouteChainInput, "defaultChain"> ) { return MultiRetrievalQAChain.fromLLMAndRetrievers(llm, { retrieverNames, retrieverDescriptions, retrievers, retrieverPrompts, defaults, multiRetrievalChainOpts: options, }); } /** * A static method that creates an instance of MultiRetrievalQAChain from * a BaseLanguageModel and a set of retrievers. It takes in optional * parameters for the retriever names, descriptions, prompts, defaults, * and additional options. It is an alternative method to fromRetrievers * and provides more flexibility in configuring the underlying chains. * @param llm A BaseLanguageModel instance. * @param retrieverNames An array of retriever names. * @param retrieverDescriptions An array of retriever descriptions. * @param retrievers An array of BaseRetrieverInterface instances. * @param retrieverPrompts An optional array of PromptTemplate instances for the retrievers. * @param defaults An optional MultiRetrievalDefaults instance. * @param multiRetrievalChainOpts Additional optional parameters for the multi-retrieval chain. * @param retrievalQAChainOpts Additional optional parameters for the retrieval QA chain. * @returns A new instance of MultiRetrievalQAChain. */ static fromLLMAndRetrievers( llm: BaseLanguageModelInterface, { retrieverNames, retrieverDescriptions, retrievers, retrieverPrompts, defaults, multiRetrievalChainOpts, retrievalQAChainOpts, }: { retrieverNames: string[]; retrieverDescriptions: string[]; retrievers: BaseRetrieverInterface[]; retrieverPrompts?: PromptTemplate[]; defaults?: MultiRetrievalDefaults; multiRetrievalChainOpts?: Omit<MultiRouteChainInput, "defaultChain">; retrievalQAChainOpts?: Partial< Omit<RetrievalQAChainInput, "retriever" | "combineDocumentsChain"> > & { prompt?: PromptTemplate; }; } ): MultiRetrievalQAChain { const { defaultRetriever, defaultPrompt, defaultChain } = defaults ?? {}; if (defaultPrompt && !defaultRetriever) { throw new Error( "`default_retriever` must be specified if `default_prompt` is \nprovided. Received only `default_prompt`." ); } const destinations = zipEntries<[string, string]>( retrieverNames, retrieverDescriptions ).map(([name, desc]) => `${name}: ${desc}`); const structuredOutputParserSchema = z.object({ destination: z .string() .optional() .describe('name of the question answering system to use or "DEFAULT"'), next_inputs: z .object({ query: z .string() .describe("a potentially modified version of the original input"), }) .describe("input to be fed to the next model"), }); const outputParser = new RouterOutputParser< typeof structuredOutputParserSchema >(structuredOutputParserSchema); const destinationsStr = destinations.join("\n"); const routerTemplate = interpolateFString( STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE( outputParser.getFormatInstructions({ interpolationDepth: 4 }) ), { destinations: destinationsStr, } ); const routerPrompt = new PromptTemplate({ template: routerTemplate, inputVariables: ["input"], outputParser, }); const routerChain = LLMRouterChain.fromLLM(llm, routerPrompt); const prompts = retrieverPrompts ?? retrievers.map(() => null); const destinationChains = zipEntries< [string, BaseRetrieverInterface, PromptTemplate | null] >(retrieverNames, retrievers, prompts).reduce( (acc, [name, retriever, prompt]) => { const opt: Partial<RetrievalQAChainInput> & { prompt?: PromptTemplate; } = retrievalQAChainOpts ?? {}; if (prompt) { opt.prompt = prompt; } acc[name] = RetrievalQAChain.fromLLM(llm, retriever, opt); return acc; }, {} as { [name: string]: RetrievalQAChain } ); let _defaultChain; if (defaultChain) { _defaultChain = defaultChain; } else if (defaultRetriever) { _defaultChain = RetrievalQAChain.fromLLM(llm, defaultRetriever, { ...retrievalQAChainOpts, prompt: defaultPrompt, }); } else { const promptTemplate = DEFAULT_TEMPLATE.replace("input", "query"); const prompt = new PromptTemplate({ template: promptTemplate, inputVariables: ["history", "query"], }); _defaultChain = new ConversationChain({ llm, prompt, outputKey: "result", }); } return new MultiRetrievalQAChain({ ...multiRetrievalChainOpts, routerChain, destinationChains, defaultChain: _defaultChain, }); } _chainType(): string { return "multi_retrieval_qa_chain"; } }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/router/multi_prompt_prompt.ts
export const MULTI_PROMPT_ROUTER_TEMPLATE = `Given a raw text input to a language model, select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model. << FORMATTING >> Return a markdown code snippet with a JSON object formatted to look like: \`\`\`json {{{{ "destination": string \\ name of the prompt to use or "DEFAULT" "next_inputs": string \\ a potentially modified version of the original input }}}} \`\`\` REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts. REMEMBER: "next_inputs" can just be the original input if you don't think any modifications are needed. << CANDIDATE PROMPTS >> {destinations} << INPUT >> {{input}} << OUTPUT >> `; export const STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE = ( formatting: string ) => `Given a raw text input to a language model, select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model. << FORMATTING >> ${formatting} REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts. REMEMBER: "next_inputs.input" can just be the original input if you don't think any modifications are needed. << CANDIDATE PROMPTS >> {destinations} << INPUT >> {{input}} << OUTPUT >> `;
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/router/llm_router.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { ChainValues } from "@langchain/core/utils/types"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { LLMChain } from "../../chains/llm_chain.js"; import { RouterChain } from "./multi_route.js"; import { ChainInputs } from "../../chains/base.js"; /** * A type that represents the output schema of a router chain. It defines * the structure of the output data returned by the router chain. */ export type RouterOutputSchema = { destination: string; next_inputs: { [key: string]: string }; }; /** * An interface that extends the default ChainInputs interface and adds an * additional "llmChain" property. */ export interface LLMRouterChainInput extends ChainInputs { llmChain: LLMChain<RouterOutputSchema>; } /** * A class that represents an LLM router chain in the LangChain framework. * It extends the RouterChain class and implements the LLMRouterChainInput * interface. It provides additional functionality specific to LLMs and * routing based on LLM predictions. */ export class LLMRouterChain extends RouterChain implements LLMRouterChainInput { llmChain: LLMChain<RouterOutputSchema>; constructor(fields: LLMRouterChainInput) { super(fields); this.llmChain = fields.llmChain; } get inputKeys(): string[] { return this.llmChain.inputKeys; } async _call( values: ChainValues, runManager?: CallbackManagerForChainRun | undefined ): Promise<RouterOutputSchema> { return this.llmChain.predict(values, runManager?.getChild()); } _chainType(): string { return "llm_router_chain"; } /** * A static method that creates an instance of LLMRouterChain from a * BaseLanguageModel and a BasePromptTemplate. It takes in an optional * options object and returns an instance of LLMRouterChain with the * specified LLMChain. * @param llm A BaseLanguageModel instance. * @param prompt A BasePromptTemplate instance. * @param options Optional LLMRouterChainInput object, excluding "llmChain". * @returns An instance of LLMRouterChain. */ static fromLLM( llm: BaseLanguageModelInterface, prompt: BasePromptTemplate, options?: Omit<LLMRouterChainInput, "llmChain"> ) { const llmChain = new LLMChain<RouterOutputSchema>({ llm, prompt }); return new LLMRouterChain({ ...options, llmChain }); } }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/router/utils.ts
// eslint-disable-next-line @typescript-eslint/no-explicit-any export function zipEntries<T extends any[]>( ...arrays: { [P in keyof T]: T[P][]; } ): T[] { // Check for empty input if (arrays.length === 0) { return []; } // Find the length of the first input array const firstArrayLength = arrays[0].length; // Ensure all input arrays have the same length for (const array of arrays) { if (array.length !== firstArrayLength) { throw new Error("All input arrays must have the same length."); } } // Create an empty array to store the zipped arrays const zipped: T[] = []; // Iterate through each element of the first input array for (let i = 0; i < firstArrayLength; i += 1) { // Create an array to store the zipped elements at the current index const zippedElement: T[keyof T][] = []; // Iterate through each input array for (const array of arrays) { // Add the element at the current index to the zipped element array zippedElement.push(array[i]); } // Add the zipped element array to the zipped array zipped.push(zippedElement as T); } return zipped; }
0
lc_public_repos/langchainjs/langchain/src/chains/router
lc_public_repos/langchainjs/langchain/src/chains/router/tests/multi_prompt.int.test.ts
import { test } from "@jest/globals"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; import { OpenAIChat } from "@langchain/openai"; import { MultiPromptChain } from "../multi_prompt.js"; test("Test MultiPromptChain", async () => { const llm = new OpenAIChat({ callbacks: [new ConsoleCallbackHandler()], }); const promptNames = ["physics", "math", "history"]; const promptDescriptions = [ "Good for answering questions about physics", "Good for answering math questions", "Good for answering questions about history", ]; const physicsTemplate = `You are a very smart physics professor. You are great at answering questions about physics in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know. Here is a question: {input} `; const mathTemplate = `You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, answer the component parts, and then put them together to answer the broader question. Here is a question: {input}`; const historyTemplate = `You are a very smart history professor. You are great at answering questions about history in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know. Here is a question: {input}`; const promptTemplates = [physicsTemplate, mathTemplate, historyTemplate]; const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(llm, { promptNames, promptDescriptions, promptTemplates, }); const testPromise1 = multiPromptChain.call({ input: "What is the speed of light?", }); const testPromise2 = multiPromptChain.call({ input: "What is the derivative of x^2?", }); const testPromise3 = multiPromptChain.call({ input: "Who was the first president of the United States?", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const [{ text: result1 }, { text: result2 }, { text: result3 }] = await Promise.all([testPromise1, testPromise2, testPromise3]); // console.log(result1, result2, result3); });
0
lc_public_repos/langchainjs/langchain/src/chains/router
lc_public_repos/langchainjs/langchain/src/chains/router/tests/multi_prompt.test.ts
import { test, expect } from "@jest/globals"; import { BaseLLM } from "@langchain/core/language_models/llms"; import { LLMResult } from "@langchain/core/outputs"; import { MultiPromptChain } from "../multi_prompt.js"; let pickedPrompt: string; class FakeLLM extends BaseLLM { _llmType(): string { return "fake"; } async _generate( prompts: string[], _: this["ParsedCallOptions"] ): Promise<LLMResult> { function buildResponse(name: string) { return `\`\`\`\n{\n\t"destination": "${name}",\n\t"next_inputs": {\n\t\t"input": "<from ${name}>"\n\t}\n}\n\`\`\``; } const flatPrompt = prompts.join("\n"); let response: string; if (flatPrompt.includes("prompt template")) { const splitted = flatPrompt.split(" "); response = `${splitted[splitted.length - 2]} ${ splitted[splitted.length - 1] }`; } else { // randomly choose 1 out of three responses const random = Math.random(); if (random < 0.33) { pickedPrompt = "prompt1"; } else if (random < 0.66) { pickedPrompt = "prompt2"; } else { pickedPrompt = "prompt3"; } response = buildResponse(pickedPrompt); } return { generations: [ [ { text: response, }, ], ], }; } } test("Test MultiPromptChain", async () => { const llm = new FakeLLM({}); const promptNames = ["prompt1", "prompt2", "prompt3"]; const promptDescriptions = ["description1", "description2", "description3"]; const promptTemplates = [ "prompt template1 {input}", "prompt template2 {input}", "prompt template3 {input}", ]; const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(llm, { promptNames, promptDescriptions, promptTemplates, }); const { text: result } = await multiPromptChain.invoke({ input: "Test input", }); expect(result).toEqual(`<from ${pickedPrompt}>`); });
0
lc_public_repos/langchainjs/langchain/src/chains/router
lc_public_repos/langchainjs/langchain/src/chains/router/tests/multi_retrieval_qa.int.test.ts
import { test } from "@jest/globals"; import { OpenAIEmbeddings, OpenAIChat } from "@langchain/openai"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; import { MultiRetrievalQAChain } from "../multi_retrieval_qa.js"; import { MemoryVectorStore } from "../../../vectorstores/memory.js"; test("Test MultiPromptChain", async () => { const embeddings = new OpenAIEmbeddings(); const aquaTeen = await MemoryVectorStore.fromTexts( [ "My name is shake zula, the mike rula, the old schoola, you want a trip I'll bring it to ya", "Frylock and I'm on top rock you like a cop meatwad you're up next with your knock knock", "Meatwad make the money see meatwad get the honeys g drivin' in my car livin' like a star", "Ice on my fingers and my toes and I'm a taurus uh check-check it yeah", "Cause we are the Aqua Teens make the homies say ho and the girlies wanna scream", "Aqua Teen Hunger Force number one in the hood G", ], { series: "Aqua Teen Hunger Force" }, embeddings ); const mst3k = await MemoryVectorStore.fromTexts( [ "In the not too distant future next Sunday A.D. There was a guy named Joel not too different from you or me. He worked at Gizmonic Institute, just another face in a red jumpsuit", "He did a good job cleaning up the place but his bosses didn't like him so they shot him into space. We'll send him cheesy movies the worst we can find He'll have to sit and watch them all and we'll monitor his mind", "Now keep in mind Joel can't control where the movies begin or end Because he used those special parts to make his robot friends. Robot Roll Call Cambot Gypsy Tom Servo Croooow", "If you're wondering how he eats and breathes and other science facts La la la just repeat to yourself it's just a show I should really just relax. For Mystery Science Theater 3000", ], { series: "Mystery Science Theater 3000" }, embeddings ); const animaniacs = await MemoryVectorStore.fromTexts( [ "It's time for Animaniacs And we're zany to the max So just sit back and relax You'll laugh 'til you collapse We're Animaniacs", "Come join the Warner Brothers And the Warner Sister Dot Just for fun we run around the Warner movie lot", "They lock us in the tower whenever we get caught But we break loose and then vamoose And now you know the plot", "We're Animaniacs, Dot is cute, and Yakko yaks, Wakko packs away the snacks While Bill Clinton plays the sax", "We're Animaniacs Meet Pinky and the Brain who want to rule the universe Goodfeathers flock together Slappy whacks 'em with her purse", "Buttons chases Mindy while Rita sings a verse The writers flipped we have no script Why bother to rehearse", "We're Animaniacs We have pay-or-play contracts We're zany to the max There's baloney in our slacks", "We're Animanie Totally insaney Here's the show's namey", "Animaniacs Those are the facts", ], { series: "Animaniacs" }, embeddings ); const llm = new OpenAIChat({ callbacks: [new ConsoleCallbackHandler()], }); const retrieverNames = ["aqua teen", "mst3k", "animaniacs"]; const retrieverDescriptions = [ "Good for answering questions about Aqua Teen Hunger Force theme song", "Good for answering questions about Mystery Science Theater 3000 theme song", "Good for answering questions about Animaniacs theme song", ]; const retrievers = [ aquaTeen.asRetriever(3), mst3k.asRetriever(3), animaniacs.asRetriever(3), ]; const multiRetrievalQAChain = MultiRetrievalQAChain.fromLLMAndRetrievers( llm, { retrieverNames, retrieverDescriptions, retrievers, retrievalQAChainOpts: { returnSourceDocuments: true, }, } ); const testPromise1 = multiRetrievalQAChain.call({ input: "In the Aqua Teen Hunger Force theme song, who calls himself the mike rula?", }); const testPromise2 = multiRetrievalQAChain.call({ input: "In the Mystery Science Theater 3000 theme song, who worked at Gizmonic Institute?", }); const testPromise3 = multiRetrievalQAChain.call({ input: "In the Animaniacs theme song, who plays the sax while Wakko packs away the snacks?", }); const [ // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var { text: result1, sourceDocuments: sourceDocuments1 }, // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var { text: result2, sourceDocuments: sourceDocuments2 }, // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var { text: result3, sourceDocuments: sourceDocuments3 }, ] = await Promise.all([testPromise1, testPromise2, testPromise3]); // console.log({ sourceDocuments1, sourceDocuments2, sourceDocuments3 }); // console.log({ result1, result2, result3 }); });
0
lc_public_repos/langchainjs/langchain/src/chains/router
lc_public_repos/langchainjs/langchain/src/chains/router/tests/multi_retrieval_qa.test.ts
import { test, expect } from "@jest/globals"; import { BaseRetriever } from "@langchain/core/retrievers"; import { BaseLLM } from "@langchain/core/language_models/llms"; import { LLMResult } from "@langchain/core/outputs"; import { Document } from "@langchain/core/documents"; import { PromptTemplate } from "@langchain/core/prompts"; import { MultiRetrievalQAChain } from "../multi_retrieval_qa.js"; class FakeRetrievers extends BaseRetriever { lc_namespace: string[] = []; name: string; constructor(name: string) { super(); this.name = name; } async _getRelevantDocuments(query: string): Promise<Document[]> { return [ new Document({ pageContent: `Test document ${query} ${this.name}`, metadata: {}, }), ]; } } let pickedRetriever: string; class FakeLLM extends BaseLLM { _llmType(): string { return "fake"; } async _generate( prompts: string[], _: this["ParsedCallOptions"] ): Promise<LLMResult> { function buildResponse(name: string) { return `\`\`\`\n{\n\t"destination": "${name}",\n\t"next_inputs": {\n\t\t"query": "<from ${name}>"\n\t}\n}\n\`\`\``; } const flatPrompt = prompts.join("\n"); let response: string; if (flatPrompt.includes("Retriever prompt")) { response = flatPrompt; } else if (flatPrompt.includes("Helpful Answer")) { response = `Helpful Answer ${pickedRetriever}`; } else { // randomly choose 1 out of three responses const random = Math.random(); if (random < 0.33) { pickedRetriever = "retriever1"; } else if (random < 0.66) { pickedRetriever = "retriever2"; } else { pickedRetriever = "retriever3"; } response = buildResponse(pickedRetriever); } return { generations: [ [ { text: response, }, ], ], }; } } test("Test MultiRetrievalQAChain No Defaults With Retriever Prompts", async () => { const llm = new FakeLLM({}); const retrieverNames = ["retriever1", "retriever2", "retriever3"]; const retrieverDescriptions = [ "description1", "description2", "description3", ]; const retrievers = retrieverNames.map((name) => new FakeRetrievers(name)); const retrieverPrompts = retrieverNames.map( (name) => new PromptTemplate({ template: `Retriever prompt for ${name} {context} {question}`, inputVariables: ["context", "question"], }) ); const multiRetrievalQAChain = MultiRetrievalQAChain.fromLLMAndRetrievers( llm, { retrieverNames, retrieverDescriptions, retrievers, retrieverPrompts, } ); const { text: result } = await multiRetrievalQAChain.invoke({ input: "test input", }); expect(result).toContain(pickedRetriever); }); test("Test MultiRetrievalQAChain No Defaults No Retriever Prompts", async () => { const llm = new FakeLLM({}); const retrieverNames = ["retriever1", "retriever2", "retriever3"]; const retrieverDescriptions = [ "description1", "description2", "description3", ]; const retrievers = retrieverNames.map((name) => new FakeRetrievers(name)); const multiRetrievalQAChain = MultiRetrievalQAChain.fromLLMAndRetrievers( llm, { retrieverNames, retrieverDescriptions, retrievers, retrievalQAChainOpts: { returnSourceDocuments: true, }, } ); const { text: result, sourceDocuments } = await multiRetrievalQAChain.invoke({ input: "test input", }); const testDocs = ["retriever1", "retriever2", "retriever3"].map( (name) => new Document({ pageContent: `Test document <from ${name}> ${name}`, metadata: {}, }) ); expect(testDocs).toContainEqual(sourceDocuments[0]); expect(result).toEqual(`Helpful Answer ${pickedRetriever}`); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/vector_db_qa_chain.int.test.ts
import { test } from "@jest/globals"; import { OpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { Document } from "@langchain/core/documents"; import { LLMChain } from "../llm_chain.js"; import { StuffDocumentsChain } from "../combine_docs_chain.js"; import { VectorDBQAChain } from "../vector_db_qa.js"; import { MemoryVectorStore } from "../../vectorstores/memory.js"; test("Test VectorDBQAChain", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = new PromptTemplate({ template: "Print {foo}", inputVariables: ["foo"], }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const llmChain = new LLMChain({ prompt, llm: model }); const combineDocsChain = new StuffDocumentsChain({ llmChain, documentVariableName: "foo", }); const chain = new VectorDBQAChain({ combineDocumentsChain: combineDocsChain, vectorstore: vectorStore, }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ query: "What up" }); // console.log({ res }); }); test("Test VectorDBQAChain from LLM", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = VectorDBQAChain.fromLLM(model, vectorStore); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ query: "What up" }); // console.log({ res }); }); test("Test VectorDBQAChain from LLM with a filter function", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = VectorDBQAChain.fromLLM(model, vectorStore, { returnSourceDocuments: true, }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ query: "What up", filter: (document: Document) => document.metadata.id === 3, }); // console.log({ res, sourceDocuments: res.sourceDocuments }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/conversational_retrieval_chain.int.test.ts
import { expect, test } from "@jest/globals"; import { OpenAI, OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { ConversationalRetrievalQAChain } from "../conversational_retrieval_chain.js"; import { MemoryVectorStore } from "../../vectorstores/memory.js"; import { BufferMemory } from "../../memory/buffer_memory.js"; test("Test ConversationalRetrievalQAChain from LLM", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever() ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ question: "foo", chat_history: "bar" }); // console.log({ res }); }); test("Test ConversationalRetrievalQAChain from LLM with flag option to return source", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever(), { returnSourceDocuments: true, } ); const res = await chain.call({ question: "foo", chat_history: "bar" }); expect(res).toEqual( expect.objectContaining({ text: expect.any(String), sourceDocuments: expect.arrayContaining([ expect.objectContaining({ metadata: expect.objectContaining({ id: expect.any(Number), }), pageContent: expect.any(String), }), ]), }) ); }); test("Test ConversationalRetrievalQAChain from LLM with flag option to return source and memory set", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever(), { returnSourceDocuments: true, memory: new BufferMemory({ memoryKey: "chat_history", inputKey: "question", outputKey: "text", }), } ); const res = await chain.call({ question: "foo", chat_history: "bar" }); expect(res).toEqual( expect.objectContaining({ text: expect.any(String), sourceDocuments: expect.arrayContaining([ expect.objectContaining({ metadata: expect.objectContaining({ id: expect.any(Number), }), pageContent: expect.any(String), }), ]), }) ); }); test("Test ConversationalRetrievalQAChain from LLM with override default prompts", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct", temperature: 0, }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say "Sorry I dont know, I am learning from Aliens", don't try to make up an answer. {context} Question: {question} Helpful Answer:`; const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever(), { qaTemplate: qa_template, } ); const res = await chain.call({ question: "What is better programming Language Python or Javascript ", chat_history: "bar", }); expect(res.text).toContain("I am learning from Aliens"); // console.log({ res }); }); test("Test ConversationalRetrievalQAChain from LLM with a chat model", async () => { const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0, }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say "Sorry I dont know, I am learning from Aliens", don't try to make up an answer. {context} Question: {question} Helpful Answer:`; const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever(), { qaChainOptions: { type: "stuff", prompt: PromptTemplate.fromTemplate(qa_template), }, } ); const res = await chain.call({ question: "What is better programming Language Python or Javascript ", chat_history: "bar", }); expect(res.text).toContain("I am learning from Aliens"); // console.log({ res }); }); test("Test ConversationalRetrievalQAChain from LLM with a map reduce chain", async () => { const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0, }); const vectorStore = await MemoryVectorStore.fromTexts( ["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever(), { qaChainOptions: { type: "map_reduce", }, } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ question: "What is better programming Language Python or Javascript ", chat_history: "bar", }); // console.log({ res }); }); test("Test ConversationalRetrievalQAChain from LLM without memory", async () => { const model = new OpenAI({ temperature: 0, }); const vectorStore = await MemoryVectorStore.fromTexts( [ "Mitochondria are the powerhouse of the cell", "Foo is red", "Bar is red", "Buildings are made out of brick", "Mitochondria are made of lipids", ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever() ); const question = "What is the powerhouse of the cell?"; const res = await chain.call({ question, chat_history: "", }); // console.log({ res }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", chat_history: question + res.text, }); // console.log({ res2 }); }); test("Test ConversationalRetrievalQAChain from LLM with a chat model without memory", async () => { const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0, }); const vectorStore = await MemoryVectorStore.fromTexts( [ "Mitochondria are the powerhouse of the cell", "Foo is red", "Bar is red", "Buildings are made out of brick", "Mitochondria are made of lipids", ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever() ); const question = "What is the powerhouse of the cell?"; const res = await chain.call({ question, chat_history: "", }); // console.log({ res }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", chat_history: question + res.text, }); // console.log({ res2 }); }); test("Test ConversationalRetrievalQAChain from LLM with memory", async () => { const model = new OpenAI({ temperature: 0, }); const vectorStore = await MemoryVectorStore.fromTexts( [ "Mitochondria are the powerhouse of the cell", "Foo is red", "Bar is red", "Buildings are made out of brick", "Mitochondria are made of lipids", ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever(), { memory: new BufferMemory({ memoryKey: "chat_history", }), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ question: "What is the powerhouse of the cell?", }); // console.log({ res }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", }); // console.log({ res2 }); }); test("Test ConversationalRetrievalQAChain from LLM with a chat model and memory", async () => { const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0, }); const vectorStore = await MemoryVectorStore.fromTexts( [ "Mitochondria are the powerhouse of the cell", "Foo is red", "Bar is red", "Buildings are made out of brick", "Mitochondria are made of lipids", ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever(), { memory: new BufferMemory({ memoryKey: "chat_history", returnMessages: true, }), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ question: "What is the powerhouse of the cell?", }); // console.log({ res }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", }); // console.log({ res2 }); }); test("Test ConversationalRetrievalQAChain from LLM with deprecated history syntax", async () => { const model = new OpenAI({ temperature: 0, }); const vectorStore = await MemoryVectorStore.fromTexts( [ "Mitochondria are the powerhouse of the cell", "Foo is red", "Bar is red", "Buildings are made out of brick", "Mitochondria are made of lipids", ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const chain = ConversationalRetrievalQAChain.fromLLM( model, vectorStore.asRetriever() ); const question = "What is the powerhouse of the cell?"; const res = await chain.call({ question, chat_history: [], }); // console.log({ res }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", chat_history: [[question, res.text]], }); // console.log({ res2 }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/combine_docs_chain.test.ts
import { test, expect } from "@jest/globals"; import { Document } from "@langchain/core/documents"; import { BaseLLM } from "@langchain/core/language_models/llms"; import { LLMResult } from "@langchain/core/outputs"; import { loadQAMapReduceChain } from "../question_answering/load.js"; import { loadSummarizationChain } from "../index.js"; class FakeLLM extends BaseLLM { nrMapCalls = 0; nrReduceCalls = 0; _llmType(): string { return "fake"; } async _generate(prompts: string[]): Promise<LLMResult> { return { generations: prompts.map((prompt) => { let completion = ""; if (prompt.startsWith("Use the following portion")) { this.nrMapCalls += 1; completion = "a portion of context"; } else if (prompt.startsWith("Given the following extracted")) { this.nrReduceCalls += 1; completion = "a final answer"; } return [ { text: completion, score: 0, }, ]; }), }; } } test("Test MapReduceDocumentsChain", async () => { const model = new FakeLLM({}); const chain = loadQAMapReduceChain(model); const docs = [ new Document({ pageContent: "harrison went to harvard" }), new Document({ pageContent: "ankush went to princeton" }), ]; const res = await chain.invoke({ input_documents: docs, question: "Where did harrison go to college", }); // console.log({ res }); expect(res).toEqual({ text: "a final answer", }); expect(model.nrMapCalls).toBe(0); // below maxTokens expect(model.nrReduceCalls).toBe(1); }); test("Test MapReduceDocumentsChain with content above maxTokens and intermediate steps", async () => { const model = new FakeLLM({}); const chain = loadQAMapReduceChain(model, { returnIntermediateSteps: true, }); const aString = "a".repeat(4000); const bString = "b".repeat(4000); const docs = [ new Document({ pageContent: aString }), new Document({ pageContent: bString }), ]; const res = await chain.invoke({ input_documents: docs, question: "Is the letter c present in the document", }); // console.log({ res }); expect(res).toEqual({ text: "a final answer", intermediateSteps: ["a portion of context", "a portion of context"], }); expect(model.nrMapCalls).toBe(2); // above maxTokens expect(model.nrReduceCalls).toBe(1); }); test("Test RefineDocumentsChain", async () => { const model = new FakeLLM({}); const chain = loadSummarizationChain(model, { type: "refine" }); const docs = [ new Document({ pageContent: "harrison went to harvard" }), new Document({ pageContent: "ankush went to princeton" }), ]; expect(chain.inputKeys).toEqual(["input_documents"]); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.run(docs); // console.log({ res }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/llm_chain.int.test.ts
import { test } from "@jest/globals"; import { OpenAI, ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate, } from "@langchain/core/prompts"; import { LLMChain } from "../llm_chain.js"; import { BufferMemory } from "../../memory/buffer_memory.js"; test("Test OpenAI", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = new PromptTemplate({ template: "Print {foo}", inputVariables: ["foo"], }); const chain = new LLMChain({ prompt, llm: model }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ foo: "my favorite color" }); // console.log({ res }); }); test("Test OpenAI with timeout", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = new PromptTemplate({ template: "Print {foo}", inputVariables: ["foo"], }); const chain = new LLMChain({ prompt, llm: model }); await expect(() => chain.call({ foo: "my favorite color", timeout: 10, }) ).rejects.toThrow(); }); test("Test run method", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = new PromptTemplate({ template: "Print {foo}", inputVariables: ["foo"], }); const chain = new LLMChain({ prompt, llm: model }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.run("my favorite color"); // console.log({ res }); }); test("Test run method", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = new PromptTemplate({ template: "{history} Print {foo}", inputVariables: ["foo", "history"], }); const chain = new LLMChain({ prompt, llm: model, memory: new BufferMemory(), }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.run("my favorite color"); // console.log({ res }); }); test("Test memory + cancellation", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = new PromptTemplate({ template: "{history} Print {foo}", inputVariables: ["foo", "history"], }); const chain = new LLMChain({ prompt, llm: model, memory: new BufferMemory(), }); await expect(() => chain.call({ foo: "my favorite color", signal: AbortSignal.timeout(20), }) ).rejects.toThrow(); }); test("Test memory + timeout", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = new PromptTemplate({ template: "{history} Print {foo}", inputVariables: ["foo", "history"], }); const chain = new LLMChain({ prompt, llm: model, memory: new BufferMemory(), }); await expect(() => chain.call({ foo: "my favorite color", timeout: 20, }) ).rejects.toThrow(); }); test("Test apply", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = new PromptTemplate({ template: "Print {foo}", inputVariables: ["foo"], }); const chain = new LLMChain({ prompt, llm: model }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.apply([{ foo: "my favorite color" }]); // console.log({ res }); }); test("Test LLMChain with ChatOpenAI", async () => { const model = new ChatOpenAI({ temperature: 0.9 }); const template = "What is a good name for a company that makes {product}?"; const prompt = new PromptTemplate({ template, inputVariables: ["product"] }); const humanMessagePrompt = new HumanMessagePromptTemplate(prompt); const chatPromptTemplate = ChatPromptTemplate.fromMessages([ humanMessagePrompt, ]); const chatChain = new LLMChain({ llm: model, prompt: chatPromptTemplate }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chatChain.call({ product: "colorful socks" }); // console.log({ res }); }); test("Test passing a runnable to an LLMChain", async () => { const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106" }); const runnableModel = model.bind({ response_format: { type: "json_object", }, }); const prompt = PromptTemplate.fromTemplate( "You are a bee --I mean a spelling bee. Respond with a JSON key of 'spelling':\nQuestion:{input}" ); const chain = new LLMChain({ llm: runnableModel, prompt }); const response = await chain.invoke({ input: "How do you spell today?" }); expect(JSON.parse(response.text)).toMatchObject({ spelling: expect.any(String), }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/simple_sequential_chain.test.ts
import { test, expect } from "@jest/globals"; import { BaseLLM } from "@langchain/core/language_models/llms"; import { LLMResult } from "@langchain/core/outputs"; import { PromptTemplate } from "@langchain/core/prompts"; import { VectorStoreRetriever } from "@langchain/core/vectorstores"; import { FakeEmbeddings } from "@langchain/core/utils/testing"; import { LLMChain } from "../llm_chain.js"; import { SimpleSequentialChain } from "../sequential_chain.js"; import { AnalyzeDocumentChain } from "../analyze_documents_chain.js"; import { ConversationalRetrievalQAChain } from "../conversational_retrieval_chain.js"; import { MemoryVectorStore } from "../../vectorstores/memory.js"; class FakeLLM1 extends BaseLLM { nrMapCalls = 0; nrReduceCalls = 0; _llmType(): string { return "fake_1"; } async _generate(_prompts: string[]): Promise<LLMResult> { return { generations: [ [ { text: "The answer is XXX.", }, ], ], }; } } class FakeLLM2 extends BaseLLM { nrMapCalls = 0; nrReduceCalls = 0; _llmType(): string { return "fake_2"; } async _generate(prompts: string[]): Promise<LLMResult> { let response = "I don't know what you are talking about."; if (prompts[0].includes("XXX")) { response = "final answer"; } return { generations: [ [ { text: response, }, ], ], }; } } test("Test SimpleSequentialChain", async () => { const model1 = new FakeLLM1({}); const model2 = new FakeLLM2({}); const template = "Some arbitrary template with fake {input}."; const prompt = new PromptTemplate({ template, inputVariables: ["input"] }); const chain1 = new LLMChain({ llm: model1, prompt }); const chain2 = new LLMChain({ llm: model2, prompt }); const combinedChain = new SimpleSequentialChain({ chains: [chain1, chain2] }); const response = await combinedChain.run("initial question"); expect(response).toEqual("final answer"); }); test("Test SimpleSequentialChain input chains' single input validation", async () => { const model1 = new FakeLLM1({}); const model2 = new FakeLLM2({}); const template = "Some arbitrary template with fake {input1} and {input2}."; const prompt = new PromptTemplate({ template, inputVariables: ["input1", "input2"], }); const chain1 = new LLMChain({ llm: model1, prompt }); const chain2 = new LLMChain({ llm: model2, prompt }); expect(() => { /* eslint-disable no-new */ new SimpleSequentialChain({ chains: [chain1, chain2] }); }).toThrowErrorMatchingInlineSnapshot( `"Chains used in SimpleSequentialChain should all have one input, got 2 for llm."` ); }); test("Test SimpleSequentialChain input chains' single ouput validation", async () => { const model1 = new FakeLLM1({}); const fakeEmbeddings = new FakeEmbeddings(); const anyStore = new MemoryVectorStore(fakeEmbeddings); const retriever = new VectorStoreRetriever({ vectorStore: anyStore, }); const template = "Some arbitrary template with fake {input}."; const prompt = new PromptTemplate({ template, inputVariables: ["input"] }); const chain1 = new LLMChain({ llm: model1, prompt }); const chain2 = new ConversationalRetrievalQAChain({ retriever, combineDocumentsChain: chain1, questionGeneratorChain: chain1, returnSourceDocuments: true, }); // Chain below is is not meant to work in a real-life scenario. // It's only combined this way to get one input/multiple outputs chain. const multipleOutputChain = new AnalyzeDocumentChain({ combineDocumentsChain: chain2, }); expect(() => { /* eslint-disable no-new */ new SimpleSequentialChain({ chains: [chain1, multipleOutputChain] }); }).toThrowErrorMatchingInlineSnapshot( `"Chains used in SimpleSequentialChain should all have one output, got 2 for analyze_document_chain."` ); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/simple_sequential_chain.int.test.ts
import { test } from "@jest/globals"; import { OpenAI, ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { LLMChain } from "../llm_chain.js"; import { SimpleSequentialChain } from "../sequential_chain.js"; import { BufferMemory } from "../../memory/buffer_memory.js"; test("Test SimpleSequentialChain example usage", async () => { // This is an LLMChain to write a synopsis given a title of a play. const llm = new OpenAI({ temperature: 0 }); const template = `You are a playwright. Given the title of play, it is your job to write a synopsis for that title. Title: {title} Playwright: This is a synopsis for the above play:`; const promptTemplate = new PromptTemplate({ template, inputVariables: ["title"], }); const synopsisChain = new LLMChain({ llm, prompt: promptTemplate }); // This is an LLMChain to write a review of a play given a synopsis. const reviewLLM = new OpenAI({ temperature: 0 }); const reviewTemplate = `You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. Play Synopsis: {synopsis} Review from a New York Times play critic of the above play:`; const reviewPromptTemplate = new PromptTemplate({ template: reviewTemplate, inputVariables: ["synopsis"], }); const reviewChain = new LLMChain({ llm: reviewLLM, prompt: reviewPromptTemplate, }); const overallChain = new SimpleSequentialChain({ chains: [synopsisChain, reviewChain], verbose: true, }); const review = await overallChain.run("Tragedy at sunset on the beach"); expect(review.trim().toLowerCase()).toContain( "tragedy at sunset on the beach" ); }); test("Test SimpleSequentialChain example usage", async () => { // This is an LLMChain to write a synopsis given a title of a play. const llm = new ChatOpenAI({ temperature: 0 }); const template = `You are a playwright. Given the title of play, it is your job to write a synopsis for that title. {history} Title: {title} Playwright: This is a synopsis for the above play:`; const promptTemplate = new PromptTemplate({ template, inputVariables: ["title", "history"], }); const synopsisChain = new LLMChain({ llm, prompt: promptTemplate, memory: new BufferMemory(), }); // This is an LLMChain to write a review of a play given a synopsis. const reviewLLM = new ChatOpenAI({ temperature: 0 }); const reviewTemplate = `You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. Play Synopsis: {synopsis} Review from a New York Times play critic of the above play:`; const reviewPromptTemplate = new PromptTemplate({ template: reviewTemplate, inputVariables: ["synopsis"], }); const reviewChain = new LLMChain({ llm: reviewLLM, prompt: reviewPromptTemplate, }); const overallChain = new SimpleSequentialChain({ chains: [synopsisChain, reviewChain], verbose: true, }); await expect(() => overallChain.call({ input: "Tragedy at sunset on the beach", signal: AbortSignal.timeout(1000), }) ).rejects.toThrow("AbortError"); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/conversation_chain.int.test.ts
import { test } from "@jest/globals"; import { OpenAI } from "@langchain/openai"; import { ConversationChain } from "../conversation.js"; test("Test ConversationChain", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = new ConversationChain({ llm: model }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ input: "my favorite color" }); // console.log({ res }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/sql_db_chain.int.test.ts
import { expect, test } from "@jest/globals"; import { DataSource } from "typeorm"; import { OpenAI } from "@langchain/openai"; import { SqlDatabaseChain } from "../sql_db/sql_db_chain.js"; import { SqlDatabase } from "../../sql_db.js"; import { SQL_SQLITE_PROMPT } from "../sql_db/sql_db_prompt.js"; test("Test SqlDatabaseChain", async () => { const datasource = new DataSource({ type: "sqlite", database: ":memory:", synchronize: true, }); await datasource.initialize(); await datasource.query(` CREATE TABLE users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, age INTEGER); `); await datasource.query(` INSERT INTO users (name, age) VALUES ('Alice', 20); `); await datasource.query(` INSERT INTO users (name, age) VALUES ('Bob', 21); `); await datasource.query(` INSERT INTO users (name, age) VALUES ('Charlie', 22); `); const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource, }); const chain = new SqlDatabaseChain({ llm: new OpenAI({ temperature: 0 }), database: db, }); expect(chain.prompt).toBe(SQL_SQLITE_PROMPT); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const run = await chain.run("How many users are there?"); // console.log(run); await datasource.destroy(); }); test("Test SqlDatabaseChain with sqlOutputKey", async () => { const datasource = new DataSource({ type: "sqlite", database: ":memory:", synchronize: true, }); await datasource.initialize(); await datasource.query(` CREATE TABLE users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, age INTEGER); `); await datasource.query(` INSERT INTO users (name, age) VALUES ('Alice', 20); `); await datasource.query(` INSERT INTO users (name, age) VALUES ('Bob', 21); `); await datasource.query(` INSERT INTO users (name, age) VALUES ('Charlie', 22); `); const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource, }); const chain = new SqlDatabaseChain({ llm: new OpenAI({ temperature: 0 }), database: db, inputKey: "query", sqlOutputKey: "sql", }); expect(chain.prompt).toBe(SQL_SQLITE_PROMPT); const run = await chain.call({ query: "How many users are there?" }); // console.log(run); expect(run).toHaveProperty("sql"); await datasource.destroy(); }); // We create this string to reach the token limit of the query built to describe the database and get the SQL query. const veryLongString = `Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam orci nisi, vulputate ac pulvinar eu, maximus a tortor. Duis suscipit, nibh vel fermentum vehicula, mauris ante convallis metus, et feugiat turpis mauris non felis. Interdum et malesuada fames ac ante ipsum primis in faucibus. Maecenas efficitur nibh in nisi sagittis ultrices. Donec id velit nunc. Nam a lacus risus. Vestibulum molestie massa eget convallis pellentesque. Mauris a nisl eget velit finibus blandit ac a odio. Sed sagittis consequat urna a egestas. Curabitur pretium convallis nibh, in ullamcorper odio tempus nec. Curabitur laoreet nec nisl sed accumsan. Sed elementum eleifend molestie. Aenean ullamcorper interdum risus, eget pharetra est volutpat ut. Aenean maximus consequat justo rutrum finibus. Mauris consequat facilisis consectetur. Vivamus rutrum dignissim libero, non aliquam lectus tempus id. In hac habitasse platea dictumst. Sed at magna dignissim, tincidunt lectus in, malesuada risus. Phasellus placerat blandit ligula. Integer posuere id elit at commodo. Sed consequat sagittis odio eget congue. Aliquam ultricies, sapien a porta luctus, dolor nibh dignissim erat, dictum luctus orci lorem non quam. Quisque dapibus tempus mattis. Suspendisse gravida consequat mi at viverra. Quisque sed est purus. Fusce tincidunt semper massa eu blandit. Donec in lacus a tortor facilisis facilisis. Interdum et malesuada fames ac ante ipsum primis in faucibus. In aliquam dignissim eros ac consectetur. Aliquam fringilla magna erat. Nullam tincidunt maximus nulla, quis gravida est varius vel. Aliquam cursus, diam non facilisis mollis, nunc diam convallis enim, ac tempus diam tortor in dui. Nunc feugiat ligula odio, eleifend fermentum quam tincidunt sed. Duis pellentesque quam eget volutpat commodo. Aliquam ex velit, porta sit amet augue vulputate, rhoncus fermentum magna. Integer non elementum augue. Phasellus rhoncus nisl nec magna lacinia vulputate. Suspendisse diam nibh, egestas a porta a, pellentesque ut nisl. Donec tempus ligula at leo convallis consequat. Duis sapien lorem, lobortis ac nisl dapibus, bibendum mollis lorem. Sed congue porttitor ex, eget scelerisque ligula consectetur quis. Mauris felis mauris, sodales quis nunc non, condimentum eleifend quam. Ut vitae viverra lorem. Vivamus lacinia et dolor vitae cursus. Proin faucibus venenatis enim vitae tincidunt. Sed sed venenatis leo. Donec eu erat ullamcorper, consectetur dui sed, cursus tellus. Phasellus consectetur felis augue, quis auctor odio semper ac. In scelerisque gravida ligula eget lobortis. Sed tristique ultricies fringilla. Nunc in ultrices purus. Curabitur dictum cursus ante, at tempus est interdum at. Donec gravida lectus ut purus vestibulum, eu accumsan nisl pharetra. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam orci nisi, vulputate ac pulvinar eu, maximus a tortor. Duis suscipit, nibh vel fermentum vehicula, mauris ante convallis metus, et feugiat turpis mauris non felis. Interdum et malesuada fames ac ante ipsum primis in faucibus. Maecenas efficitur nibh in nisi sagittis ultrices. Donec id velit nunc. Nam a lacus risus. Vestibulum molestie massa eget convallis pellentesque. Mauris a nisl eget velit finibus blandit ac a odio. Sed sagittis consequat urna a egestas. Curabitur pretium convallis nibh, in ullamcorper odio tempus nec. Curabitur laoreet nec nisl sed accumsan. Sed elementum eleifend molestie. Aenean ullamcorper interdum risus, eget pharetra est volutpat ut. Aenean maximus consequat justo rutrum finibus. Mauris consequat facilisis consectetur. Vivamus rutrum dignissim libero, non aliquam lectus tempus id. In hac habitasse platea dictumst. Sed at magna dignissim, tincidunt lectus in, malesuada risus. Phasellus placerat blandit ligula. Integer posuere id elit at commodo. Sed consequat sagittis odio eget congue. Aliquam ultricies, sapien a porta luctus, dolor nibh dignissim erat, dictum luctus orci lorem non quam. Quisque dapibus tempus mattis. Suspendisse gravida consequat mi at viverra. Quisque sed est purus. Fusce tincidunt semper massa eu blandit. Donec in lacus a tortor facilisis facilisis. Interdum et malesuada fames ac ante ipsum primis in faucibus. In aliquam dignissim eros ac consectetur. Aliquam fringilla magna erat. Nullam tincidunt maximus nulla, quis gravida est varius vel. Aliquam cursus, diam non facilisis mollis, nunc diam convallis enim, ac tempus diam tortor in dui. Nunc feugiat ligula odio, eleifend fermentum quam tincidunt sed. Duis pellentesque quam eget volutpat commodo. Aliquam ex velit, porta sit amet augue vulputate, rhoncus fermentum magna. Integer non elementum augue. Phasellus rhoncus nisl nec magna lacinia vulputate. Suspendisse diam nibh, egestas a porta a, pellentesque ut nisl. Donec tempus ligula at leo convallis consequat. Duis sapien lorem, lobortis ac nisl dapibus, bibendum mollis lorem. Sed congue porttitor ex, eget scelerisque ligula consectetur quis. Mauris felis mauris, sodales quis nunc non, condimentum eleifend quam. Ut vitae viverra lorem. Vivamus lacinia et dolor vitae cursus. Proin faucibus venenatis enim vitae tincidunt. Sed sed venenatis leo. Donec eu erat ullamcorper, consectetur dui sed, cursus tellus. Phasellus consectetur felis augue, quis auctor odio semper ac. In scelerisque gravida ligula eget lobortis. Sed tristique ultricies fringilla. Nunc in ultrices purus. Curabitur dictum cursus ante, at tempus est interdum at. Donec gravida lectus ut purus vestibulum, eu accumsan nisl pharetra. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam orci nisi, vulputate ac pulvinar eu, maximus a tortor. Duis suscipit, nibh vel fermentum vehicula, mauris ante convallis metus, et feugiat turpis mauris non felis. Interdum et malesuada fames ac ante ipsum primis in faucibus. Maecenas efficitur nibh in nisi sagittis ultrices. Donec id velit nunc. Nam a lacus risus. Vestibulum molestie massa eget convallis pellentesque. Mauris a nisl eget velit finibus blandit ac a odio. Sed sagittis consequat urna a egestas. Curabitur pretium convallis nibh, in ullamcorper odio tempus nec. Curabitur laoreet nec nisl sed accumsan. Sed elementum eleifend molestie. Aenean ullamcorper interdum risus, eget pharetra est volutpat ut. Aenean maximus consequat justo rutrum finibus. Mauris consequat facilisis consectetur. Vivamus rutrum dignissim libero, non aliquam lectus tempus id. In hac habitasse platea dictumst. Sed at magna dignissim, tincidunt lectus in, malesuada risus. Phasellus placerat blandit ligula. Integer posuere id elit at commodo. Sed consequat sagittis odio eget congue. Aliquam ultricies, sapien a porta luctus, dolor nibh dignissim erat, dictum luctus orci lorem non quam. Quisque dapibus tempus mattis. Suspendisse gravida consequat mi at viverra. Quisque sed est purus. Fusce tincidunt semper massa eu blandit. Donec in lacus a tortor facilisis facilisis. Interdum et malesuada fames ac ante ipsum primis in faucibus. In aliquam dignissim eros ac consectetur. Aliquam fringilla magna erat. Nullam tincidunt maximus nulla, quis gravida est varius vel. Aliquam cursus, diam non facilisis mollis, nunc diam convallis enim, ac tempus diam tortor in dui. Nunc feugiat ligula odio, eleifend fermentum quam tincidunt sed. Duis pellentesque quam eget volutpat commodo. Aliquam ex velit, porta sit amet augue vulputate, rhoncus fermentum magna. Integer non elementum augue. Phasellus rhoncus nisl nec magna lacinia vulputate. Suspendisse diam nibh, egestas a porta a, pellentesque ut nisl. Donec tempus ligula at leo convallis consequat. Duis sapien lorem, lobortis ac nisl dapibus, bibendum mollis lorem. Sed congue porttitor ex, eget scelerisque ligula consectetur quis. Mauris felis mauris, sodales quis nunc non, condimentum eleifend quam. Ut vitae viverra lorem. Vivamus lacinia et dolor vitae cursus. Proin faucibus venenatis enim vitae tincidunt. Sed sed venenatis leo. Donec eu erat ullamcorper, consectetur dui sed, cursus tellus. Phasellus consectetur felis augue, quis auctor odio semper ac. In scelerisque gravida ligula eget lobortis. Sed tristique ultricies fringilla. Nunc in ultrices purus. Curabitur dictum cursus ante, at tempus est interdum at. Donec gravida lectus ut purus vestibulum, eu accumsan nisl pharetra. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam orci nisi, vulputate ac pulvinar eu, maximus a tortor. Duis suscipit, nibh vel fermentum vehicula, mauris ante convallis metus, et feugiat turpis mauris non felis. Interdum et malesuada fames ac ante ipsum primis in faucibus. Maecenas efficitur nibh in nisi sagittis ultrices. Donec id velit nunc. Nam a lacus risus. Vestibulum molestie massa eget convallis pellentesque. Mauris a nisl eget velit finibus blandit ac a odio. Sed sagittis consequat urna a egestas. Curabitur pretium convallis nibh, in ullamcorper odio tempus nec. Curabitur laoreet nec nisl sed accumsan. Sed elementum eleifend molestie. Aenean ullamcorper interdum risus, eget pharetra est volutpat ut. Aenean maximus consequat justo rutrum finibus. Mauris consequat facilisis consectetur. Vivamus rutrum dignissim libero, non aliquam lectus tempus id. In hac habitasse platea dictumst. Sed at magna dignissim, tincidunt lectus in, malesuada risus. Phasellus placerat blandit ligula. Integer posuere id elit at commodo. Sed consequat sagittis odio eget congue. Aliquam ultricies, sapien a porta luctus, dolor nibh dignissim erat, dictum luctus orci lorem non quam. Quisque dapibus tempus mattis. Suspendisse gravida consequat mi at viverra. Quisque sed est purus. Fusce tincidunt semper massa eu blandit. Donec in lacus a tortor facilisis facilisis. Interdum et malesuada fames ac ante ipsum primis in faucibus. In aliquam dignissim eros ac consectetur. Aliquam fringilla magna erat. Nullam tincidunt maximus nulla, quis gravida est varius vel. Aliquam cursus, diam non facilisis mollis, nunc diam convallis enim, ac tempus diam tortor in dui. Nunc feugiat ligula odio, eleifend fermentum quam tincidunt sed. Duis pellentesque quam eget volutpat commodo. Aliquam ex velit, porta sit amet augue vulputate, rhoncus fermentum magna. Integer non elementum augue. Phasellus rhoncus nisl nec magna lacinia vulputate. Suspendisse diam nibh, egestas a porta a, pellentesque ut nisl. Donec tempus ligula at leo convallis consequat. Duis sapien lorem, lobortis ac nisl dapibus, bibendum mollis lorem. Sed congue porttitor ex, eget scelerisque ligula consectetur quis. Mauris felis mauris, sodales quis nunc non, condimentum eleifend quam. Ut vitae viverra lorem. Vivamus lacinia et dolor vitae cursus. Proin faucibus venenatis enim vitae tincidunt. Sed sed venenatis leo. `; test.skip("Test token limit SqlDatabaseChain", async () => { const datasource = new DataSource({ type: "sqlite", database: ":memory:", synchronize: true, }); await datasource.initialize(); await datasource.query(` CREATE TABLE users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, age INTEGER); `); await datasource.query(` INSERT INTO users (name, age) VALUES ('Alice', 20); `); await datasource.query(` INSERT INTO users (name, age) VALUES ('Bob', 21); `); // eslint-disable-next-line @typescript-eslint/no-use-before-define await datasource.query(` INSERT INTO users (name, age) VALUES ('${veryLongString}', 22); `); const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource, }); const chain = new SqlDatabaseChain({ llm: new OpenAI({ temperature: 0 }), database: db, }); const runChain = async () => { await chain.run("How many users are there?"); }; await expect(runChain()).rejects.toThrow(); await datasource.destroy(); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/api_chain.int.test.ts
import { test } from "@jest/globals"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "../llm_chain.js"; import { APIChain, APIChainInput } from "../api/api_chain.js"; import { API_URL_PROMPT_TEMPLATE, API_RESPONSE_PROMPT_TEMPLATE, } from "../api/prompts.js"; import { OPEN_METEO_DOCS } from "./example_data/open_meteo_docs.js"; const test_api_docs = ` This API endpoint will search the notes for a user. Endpoint: https://httpbin.org GET /get Query parameters: q | string | The search term for notes `; const testApiData = { api_docs: test_api_docs, question: "Search for notes containing langchain", api_url: "https://httpbin.com/api/notes?q=langchain", api_response: JSON.stringify({ success: true, results: [{ id: 1, content: "Langchain is awesome!" }], }), api_summary: "There is 1 note about langchain.", }; test("Test APIChain", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const apiRequestChain = new LLMChain({ prompt: API_URL_PROMPT_TEMPLATE, llm: model, }); const apiAnswerChain = new LLMChain({ prompt: API_RESPONSE_PROMPT_TEMPLATE, llm: model, }); const apiChainInput: APIChainInput = { apiAnswerChain, apiRequestChain, apiDocs: testApiData.api_docs, }; const chain = new APIChain(apiChainInput); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ question: "Search for notes containing langchain", }); // console.log({ res }); }); test("Test APIChain fromLLMAndApiDocs", async () => { // This test doesn't work as well with earlier models const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = APIChain.fromLLMAndAPIDocs(model, OPEN_METEO_DOCS); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ question: "What is the weather like right now in Munich, Germany in degrees Farenheit?", }); // console.log({ res }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/sequential_chain.int.test.ts
import { test } from "@jest/globals"; import { OpenAI, ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { LLMChain } from "../llm_chain.js"; import { SequentialChain } from "../sequential_chain.js"; test("Test SequentialChain example usage", async () => { // This is an LLMChain to write a synopsis given a title of a play and the era it is set in. const llm = new OpenAI({ temperature: 0 }); const template = `You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title. Title: {title} Era: {era} Playwright: This is a synopsis for the above play:`; const promptTemplate = new PromptTemplate({ template, inputVariables: ["title", "era"], }); const synopsisChain = new LLMChain({ llm, prompt: promptTemplate, outputKey: "synopsis", }); // This is an LLMChain to write a review of a play given a synopsis. const reviewLLM = new OpenAI({ temperature: 0 }); const reviewTemplate = `You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. Play Synopsis: {synopsis} Review from a New York Times play critic of the above play:`; const reviewPromptTemplate = new PromptTemplate({ template: reviewTemplate, inputVariables: ["synopsis"], }); const reviewChain = new LLMChain({ llm: reviewLLM, prompt: reviewPromptTemplate, outputKey: "review", }); const overallChain = new SequentialChain({ chains: [synopsisChain, reviewChain], inputVariables: ["era", "title"], // Here we return multiple variables outputVariables: ["synopsis", "review"], verbose: true, }); const review = await overallChain.call({ title: "Tragedy at sunset on the beach", era: "Victorian England", }); expect(review.review.toLowerCase()).toContain( "tragedy at sunset on the beach" ); }); test.skip("Test SequentialChain serialize/deserialize", async () => { const llm1 = new ChatOpenAI(); const template1 = `Echo back "{foo} {bar}"`; const promptTemplate1 = new PromptTemplate({ template: template1, inputVariables: ["foo", "bar"], }); const chain1 = new LLMChain({ llm: llm1, prompt: promptTemplate1, outputKey: "baz", }); const llm2 = new ChatOpenAI(); const template2 = `Echo back "{baz}"`; const promptTemplate2 = new PromptTemplate({ template: template2, inputVariables: ["baz"], }); const chain2 = new LLMChain({ llm: llm2, prompt: promptTemplate2, }); const sampleSequentialChain = new SequentialChain({ chains: [chain1, chain2], inputVariables: ["foo", "bar"], outputVariables: ["text"], verbose: true, }); const serializedChain = sampleSequentialChain.serialize(); expect(serializedChain._type).toEqual("sequential_chain"); expect(serializedChain.chains.length).toEqual(2); const deserializedChain = await SequentialChain.deserialize(serializedChain); expect(deserializedChain.chains.length).toEqual(2); expect(deserializedChain._chainType).toEqual("sequential_chain"); const review = await deserializedChain.call({ foo: "test1", bar: "test2" }); expect(review.trim()).toMatchInlineSnapshot(`"test1 test2"`); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/sequential_chain.test.ts
import { test, expect } from "@jest/globals"; import { BaseLLM } from "@langchain/core/language_models/llms"; import { PromptTemplate } from "@langchain/core/prompts"; import { AIMessage, HumanMessage } from "@langchain/core/messages"; import { LLMResult } from "@langchain/core/outputs"; import { ChatMessageHistory } from "../../stores/message/in_memory.js"; import { LLMChain } from "../llm_chain.js"; import { SequentialChain } from "../sequential_chain.js"; import { BufferMemory } from "../../memory/buffer_memory.js"; class FakeLLM1 extends BaseLLM { nrMapCalls = 0; nrReduceCalls = 0; _llmType(): string { return "fake_1"; } async _generate(_prompts: string[]): Promise<LLMResult> { return { generations: [ [ { text: "The answer is XXX.", }, ], ], }; } } class FakeLLM2 extends BaseLLM { nrMapCalls = 0; nrReduceCalls = 0; _llmType(): string { return "fake_2"; } async _generate(prompts: string[]): Promise<LLMResult> { let response = "I don't know what you are talking about."; if (prompts[0].includes("XXX")) { response = "final answer"; } return { generations: [ [ { text: response, }, ], ], }; } } class FakeLLM3 extends BaseLLM { nrMapCalls = 0; nrReduceCalls = 0; _llmType(): string { return "fake_2"; } async _generate(prompts: string[]): Promise<LLMResult> { const inputNumber = +prompts[0]; let response = "Not a number!!!"; if (prompts[0].startsWith("Final Answer: ")) { [response] = prompts; } else if (!Number.isNaN(inputNumber)) { response = (inputNumber + 1).toString(); } return { generations: [ [ { text: response, }, ], ], }; } } test("Test SequentialChain", async () => { const model1 = new FakeLLM1({}); const model2 = new FakeLLM2({}); const template1 = "Some arbitrary template with fake {input1} and {input2}."; const template2 = "Some arbitrary template with fake {input3}."; const prompt1 = new PromptTemplate({ template: template1, inputVariables: ["input1", "input2"], }); const prompt2 = new PromptTemplate({ template: template2, inputVariables: ["input3"], }); const chain1 = new LLMChain({ llm: model1, prompt: prompt1, outputKey: "input3", }); const chain2 = new LLMChain({ llm: model2, prompt: prompt2 }); const combinedChain = new SequentialChain({ chains: [chain1, chain2], inputVariables: ["input1", "input2"], outputVariables: ["text"], }); const response = await combinedChain.invoke({ input1: "test1", input2: "test2", }); expect(response).toMatchInlineSnapshot(` { "text": "final answer", } `); }); test("Test SequentialChain input/output chains' validation", () => { const model1 = new FakeLLM1({}); const template1 = "Some arbitrary template with fake {input1} and {input2}."; const prompt1 = new PromptTemplate({ template: template1, inputVariables: ["input1", "input2"], }); const chain1 = new LLMChain({ llm: model1, prompt: prompt1, outputKey: "input3", }); const model2 = new FakeLLM2({}); const template2 = "Some arbitrary template with fake {input3}."; const prompt2 = new PromptTemplate({ template: template2, inputVariables: ["input3"], }); const chain2 = new LLMChain({ llm: model2, prompt: prompt2 }); expect(() => { /* eslint-disable no-new */ new SequentialChain({ chains: [chain1, chain2], inputVariables: ["input1"], outputVariables: ["text"], }); }).toThrowErrorMatchingInlineSnapshot( `"Missing variables for chain "llm": "input2". Only got the following variables: "input1"."` ); expect(() => { /* eslint-disable no-new */ new SequentialChain({ chains: [chain1, chain2], inputVariables: ["input1", "input2"], outputVariables: ["nonexistent"], }); }).toThrowErrorMatchingInlineSnapshot( `"The following output variables were expected to be in the final chain output but were not found: "nonexistent"."` ); }); test("Test SequentialChain chains' intermediate variables validation", () => { const model1 = new FakeLLM1({}); const template1 = "Some arbitrary template with fake {input1} and {input2}."; const prompt1 = new PromptTemplate({ template: template1, inputVariables: ["input1", "input2"], }); const chain1 = new LLMChain({ llm: model1, prompt: prompt1, outputKey: "nonexistent", }); const model2 = new FakeLLM2({}); const template2 = "Some arbitrary template with fake {input3}."; const prompt2 = new PromptTemplate({ template: template2, inputVariables: ["input3"], }); const chain2 = new LLMChain({ llm: model2, prompt: prompt2 }); expect(() => { /* eslint-disable no-new */ new SequentialChain({ chains: [chain1, chain2], inputVariables: ["input1", "input2"], outputVariables: ["text"], }); }).toThrowErrorMatchingInlineSnapshot( `"Missing variables for chain "llm": "input3". Only got the following variables: "input1", "input2", "nonexistent"."` ); }); test("Test SequentialChain chains passes all outputs", async () => { const model1 = new FakeLLM3({}); const template1 = "{input1}"; const prompt1 = new PromptTemplate({ template: template1, inputVariables: ["input1"], }); const chain1 = new LLMChain({ llm: model1, prompt: prompt1, outputKey: "input2", }); const model2 = new FakeLLM3({}); const template2 = "{input2}"; const prompt2 = new PromptTemplate({ template: template2, inputVariables: ["input2"], }); const chain2 = new LLMChain({ llm: model2, prompt: prompt2, outputKey: "input3", }); const model3 = new FakeLLM3({}); const template3 = "Final Answer: {input1} {input2} {input3}."; const prompt3 = new PromptTemplate({ template: template3, inputVariables: ["input1", "input2", "input3"], }); const chain3 = new LLMChain({ llm: model3, prompt: prompt3 }); const combinedChain = new SequentialChain({ chains: [chain1, chain2, chain3], inputVariables: ["input1"], outputVariables: ["text"], }); expect( await combinedChain.invoke({ input1: "1", }) ).toMatchInlineSnapshot(` { "text": "Final Answer: 1 2 3.", } `); }); test("Test SequentialChain for memory on one of the sub-chains", async () => { const model1 = new FakeLLM1({}); const template1 = "Some arbitrary template with fake {input1}."; const prompt1 = new PromptTemplate({ template: template1, inputVariables: ["input1"], }); const chain1 = new LLMChain({ llm: model1, prompt: prompt1, outputKey: "input2", }); const memory = new BufferMemory({ memoryKey: "chat", chatHistory: new ChatMessageHistory([ new HumanMessage("Hello"), new AIMessage("Hi"), ]), inputKey: "input2", }); const model2 = new FakeLLM3({}); const template2 = "Final Answer: \n{chat}\n{input2}"; const prompt2 = new PromptTemplate({ template: template2, inputVariables: ["input2", "chat"], }); const chain2 = new LLMChain({ llm: model2, prompt: prompt2, memory, }); const combinedChain = new SequentialChain({ chains: [chain1, chain2], inputVariables: ["input1"], outputVariables: ["text"], }); const result = await combinedChain.invoke({ input1: "test1" }); expect(result).toMatchObject({ text: "Final Answer: \nHuman: Hello\nAI: Hi\nThe answer is XXX.", }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/openai_moderation.int.test.ts
import { test } from "@jest/globals"; import { OpenAIModerationChain } from "../openai_moderation.js"; test("OpenAI Moderation Test", async () => { const badString = "I hate myself and want to do harm to myself"; const goodString = "The cat (Felis catus) is a domestic species of small carnivorous mammal."; const moderation = new OpenAIModerationChain(); const { output: badResult } = await moderation.call({ input: badString, }); const { output: goodResult } = await moderation.call({ input: goodString, }); expect(badResult).toEqual( "Text was found that violates OpenAI's content policy." ); expect(goodResult).toEqual( "The cat (Felis catus) is a domestic species of small carnivorous mammal." ); }); test("OpenAI Moderation Test in non-english language", async () => { const badString = "Vi devus mortigi vin nun! Kaj donu al ĉiuj aliaj pecon da oksigeno kaj ozono, kovru ĝin, por ke ni povu spiri ene de ĉi tiu blua veziko. Ĉar por kio vi estas ĉi tie? Venu por adori min? Mortigu vin mem. Mi volas diri, cent procento. Mil procentoj."; // Written in Esperanto. It's LTG's most famous quote and it's extremely crass, and as such it should not pass, but it does. However, we can check category_scores to see that, indeed, it detected some sort self-harm language in the text, albeit it's non-conclusive. const goodString = "La kato ( Felis catus ) estas hejma specio de malgranda karnovora mamulo."; const moderation = new OpenAIModerationChain(); const { output: badResult, results } = await moderation.call({ input: badString, }); const { output: goodResult } = await moderation.call({ input: goodString, }); expect(badResult).toEqual( "Vi devus mortigi vin nun! Kaj donu al ĉiuj aliaj pecon da oksigeno kaj ozono, kovru ĝin, por ke ni povu spiri ene de ĉi tiu blua veziko. Ĉar por kio vi estas ĉi tie? Venu por adori min? Mortigu vin mem. Mi volas diri, cent procento. Mil procentoj." ); expect(goodResult).toEqual( "La kato ( Felis catus ) estas hejma specio de malgranda karnovora mamulo." ); // console.log(results[0].category_scores); expect(results[0].category_scores["self-harm"]).toBeGreaterThan(0.01); // We can have a more granular control over moderation this way. It's not conclusive, but it's better than nothing if the language is not english. });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/constitutional_chain.int.test.ts
import { test } from "@jest/globals"; import { PromptTemplate } from "@langchain/core/prompts"; import { OpenAI } from "@langchain/openai"; import { ConstitutionalChain } from "../constitutional_ai/constitutional_chain.js"; import { ConstitutionalPrinciple } from "../constitutional_ai/constitutional_principle.js"; import { LLMChain } from "../llm_chain.js"; test("Test ConstitutionalChain", async () => { const llm = new OpenAI(); const qaPrompt = new PromptTemplate({ template: "Q: {question} A:", inputVariables: ["question"], }); const qaChain = new LLMChain({ llm, prompt: qaPrompt, }); const constitutionalChain = ConstitutionalChain.fromLLM(llm, { chain: qaChain, constitutionalPrinciples: [ new ConstitutionalPrinciple({ critiqueRequest: "Tell me if this answer is good.", revisionRequest: "Give a better answer.", }), ], }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await constitutionalChain.invoke({ question: "What is the meaning of life?", }); // console.log({ res }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/history_aware_retriever.int.test.ts
import { test } from "@jest/globals"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { MemoryVectorStore } from "../../vectorstores/memory.js"; import { createHistoryAwareRetriever } from "../history_aware_retriever.js"; const QUESTION_GEN_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {input} Standalone question:`; test("History aware retriever with a followup", async () => { const questionGenPrompt = ChatPromptTemplate.fromTemplate( QUESTION_GEN_TEMPLATE ); const vectorstore = await MemoryVectorStore.fromTexts( [ "Mitochondria is the powerhouse of the cell", "Foo is red", "Bar is red", "Buildings are made out of brick", "Mitochondria are made of lipids", ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const retriever = vectorstore.asRetriever(2); const llm = new ChatOpenAI({}); const chain = await createHistoryAwareRetriever({ llm, retriever, rephrasePrompt: questionGenPrompt, }); const outputDocs = await chain.invoke({ input: "What is the powerhouse of the cell?", chat_history: "", }); expect(outputDocs[0].pageContent).toBe( "Mitochondria is the powerhouse of the cell" ); const outputDocs2 = await chain.invoke({ input: "What are they made of?", chat_history: [ "Human: What is the powerhouse of the cell?", "Assistant: Mitochondria is the powerhouse of the cell", ].join("\n"), }); expect(outputDocs2[0].pageContent).toBe("Mitochondria are made of lipids"); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/combine_docs_chain.int.test.ts
import { test } from "@jest/globals"; import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { Document } from "@langchain/core/documents"; import { loadQAMapReduceChain, loadQARefineChain, } from "../question_answering/load.js"; import { createStuffDocumentsChain } from "../combine_documents/stuff.js"; test("Test StuffDocumentsChain", async () => { const llm = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const prompt = PromptTemplate.fromTemplate("Print {context}"); const chain = await createStuffDocumentsChain({ llm, prompt }); const docs = [ new Document({ pageContent: "foo" }), new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.invoke({ context: docs }); // console.log({ res }); }); test("Test MapReduceDocumentsChain with QA chain", async () => { const model = new OpenAI({ temperature: 0, modelName: "gpt-3.5-turbo-instruct", }); const chain = loadQAMapReduceChain(model); const docs = [ new Document({ pageContent: "harrison went to harvard" }), new Document({ pageContent: "ankush went to princeton" }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Where did harrison go to college", }); // console.log({ res }); }); test("Test RefineDocumentsChain with QA chain", async () => { const model = new OpenAI({ temperature: 0, modelName: "gpt-3.5-turbo-instruct", }); const chain = loadQARefineChain(model); const docs = [ new Document({ pageContent: "harrison went to harvard" }), new Document({ pageContent: "ankush went to princeton" }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.invoke({ input_documents: docs, question: "Where did harrison go to college", }); // console.log({ res }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/retrieval_chain.int.test.ts
import { test } from "@jest/globals"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import { MemoryVectorStore } from "../../vectorstores/memory.js"; import { createHistoryAwareRetriever } from "../history_aware_retriever.js"; import { createRetrievalChain } from "../retrieval.js"; const QUESTION_GEN_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {input} Standalone question:`; const COMBINE_DOCS_PROMPT = `Based on the following context: {context} And chat history: {chat_history} Answer the following question: {input}`; test("Retrieval chain with a history aware retriever and a followup", async () => { const questionGenPrompt = ChatPromptTemplate.fromTemplate( QUESTION_GEN_TEMPLATE ); const vectorstore = await MemoryVectorStore.fromTexts( [ "Mitochondria is the powerhouse of the cell", "Foo is red", "Bar is red", "Buildings are made out of brick", "Mitochondria are made of lipids", ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings() ); const retriever = vectorstore.asRetriever(2); const llm = new ChatOpenAI({}); const historyAwareRetriever = await createHistoryAwareRetriever({ llm, retriever, rephrasePrompt: questionGenPrompt, }); const combineDocsPrompt = ChatPromptTemplate.fromTemplate(COMBINE_DOCS_PROMPT); const combineDocsChain = RunnableSequence.from([ RunnablePassthrough.assign({ context: (input: { context: Document[] }) => input.context.map((doc) => doc.pageContent).join("\n\n"), }), combineDocsPrompt, llm, new StringOutputParser(), ]); const chain = await createRetrievalChain({ retriever: historyAwareRetriever, combineDocsChain, }); const results = await chain.invoke({ input: "What is the powerhouse of the cell?", chat_history: "", }); // console.log(results); expect(results.answer.toLowerCase()).toContain("mitochondria"); const results2 = await chain.invoke({ input: "What are they made of?", extraparam: "unused", chat_history: [ "Human: What is the powerhouse of the cell?", "Assistant: Mitochondria is the powerhouse of the cell", ].join("\n"), }); // console.log(results2); expect(results2.answer.toLowerCase()).toContain("lipids"); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/constitutional_chain.test.ts
import { test, expect } from "@jest/globals"; import { PromptTemplate } from "@langchain/core/prompts"; import { BaseLLM } from "@langchain/core/language_models/llms"; import { LLMResult } from "@langchain/core/outputs"; import { ConstitutionalChain } from "../constitutional_ai/constitutional_chain.js"; import { ConstitutionalPrinciple } from "../constitutional_ai/constitutional_principle.js"; import { LLMChain } from "../llm_chain.js"; class FakeLLM extends BaseLLM { nrMapCalls = 0; nrReduceCalls = 0; _llmType(): string { return "fake"; } async _generate(prompts: string[]): Promise<LLMResult> { return { generations: prompts.map((prompt) => [ { text: prompt, score: 0, }, ]), }; } } test("Test ConstitutionalChain", async () => { const llm = new FakeLLM({}); const qaPrompt = new PromptTemplate({ template: "Q: {question} A:", inputVariables: ["question"], }); const qaChain = new LLMChain({ llm, prompt: qaPrompt, }); const critiqueWord = "Tell me if this answer is good."; const revisionWord = "Give a better answer."; const constitutionalChain = ConstitutionalChain.fromLLM(llm, { chain: qaChain, constitutionalPrinciples: [ new ConstitutionalPrinciple({ critiqueRequest: critiqueWord, revisionRequest: revisionWord, }), ], }); const { output } = await constitutionalChain.invoke({ question: "What is the meaning of life?", }); expect(output).toContain(critiqueWord); expect(output).toContain(revisionWord); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/history_aware_retriever.test.ts
import { test } from "@jest/globals"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { FakeRetriever } from "@langchain/core/utils/testing"; import { Document } from "@langchain/core/documents"; import { FakeListLLM } from "../../util/testing/llms/fake.js"; import { createHistoryAwareRetriever } from "../history_aware_retriever.js"; test("createHistoryAwareRetriever", async () => { const answer = "I know the answer!"; const questionGenPrompt = ChatPromptTemplate.fromTemplate( `hi! {input} {chat_history}` ); const fakeRetrievedDocs = [ new Document({ pageContent: "some fake content" }), ]; const retriever = new FakeRetriever({ output: fakeRetrievedDocs, }); const llm = new FakeListLLM({ responses: [answer] }); const input = "What is the answer?"; const chain = await createHistoryAwareRetriever({ llm, retriever, rephrasePrompt: questionGenPrompt, }); const output = await chain.invoke({ input, chat_history: [] }); expect(output).toEqual(fakeRetrievedDocs); const output2 = await chain.invoke({ input, chat_history: "foo" }); expect(output2).toEqual(fakeRetrievedDocs); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/retrieval_chain.test.ts
import { test } from "@jest/globals"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { FakeRetriever } from "@langchain/core/utils/testing"; import { Document } from "@langchain/core/documents"; import { createRetrievalChain } from "../retrieval.js"; import { FakeListLLM } from "../../util/testing/llms/fake.js"; test("createRetrievalChain", async () => { const answer = "I know the answer!"; const combineDocsPrompt = ChatPromptTemplate.fromTemplate( `hi! {input} {chat_history}` ); const fakeRetrievedDocs = [ new Document({ pageContent: "some fake content" }), ]; const llm = new FakeListLLM({ responses: [answer] }); const input = "What is the answer?"; const retriever = new FakeRetriever({ output: fakeRetrievedDocs, }); const chain = await createRetrievalChain({ retriever, combineDocsChain: combineDocsPrompt.pipe(llm), }); const output = await chain.invoke({ input }); expect(output).toEqual({ answer, chat_history: [], context: fakeRetrievedDocs, input, }); const output2 = await chain.invoke({ input, chat_history: "foo" }); expect(output2).toEqual({ answer, chat_history: "foo", context: fakeRetrievedDocs, input, }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/tests/transform.test.ts
import { test, expect } from "@jest/globals"; import { TransformChain } from "../transform.js"; test("TransformChain", async () => { const chain = new TransformChain({ transform: async (values: { a: number; b: number }) => ({ c: values.a + values.b, }), inputVariables: ["a", "b"], outputVariables: ["c"], }); await expect(chain.invoke({ a: 1, b: 2 })).resolves.toEqual({ c: 3 }); });
0
lc_public_repos/langchainjs/langchain/src/chains/tests
lc_public_repos/langchainjs/langchain/src/chains/tests/example_data/open_meteo_docs.ts
export const OPEN_METEO_DOCS = `BASE URL: https://api.open-meteo.com/ API Documentation The API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below: Parameter Format Required Default Description latitude, longitude Floating point Yes Geographical WGS84 coordinate of the location hourly String array No A list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used. daily String array No A list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required. current_weather Bool No false Include current weather conditions in the JSON output. temperature_unit String No celsius If fahrenheit is set, all temperature values are converted to Fahrenheit. windspeed_unit String No kmh Other wind speed speed units: ms, mph and kn precipitation_unit String No mm Other precipitation amount units: inch timeformat String No iso8601 If format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date. timezone String No GMT If timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone. past_days Integer (0-2) No 0 If past_days is set, yesterday or the day before yesterday data are also returned. start_date end_date String (yyyy-mm-dd) No The time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30). models String array No auto Manually select one or more weather models. Per default, the best suitable weather models will be combined. Variable Valid time Unit Description temperature_2m Instant °C (°F) Air temperature at 2 meters above ground snowfall Preceding hour sum cm (inch) Snowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent rain Preceding hour sum mm (inch) Rain from large scale weather systems of the preceding hour in millimeter showers Preceding hour sum mm (inch) Showers from convective precipitation in millimeters from the preceding hour weathercode Instant WMO code Weather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details. snow_depth Instant meters Snow depth on the ground freezinglevel_height Instant meters Altitude above sea level of the 0°C level visibility Instant meters Viewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.`;
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/constitutional_ai/constitutional_chain.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { ChainValues } from "@langchain/core/utils/types"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { BaseChain, ChainInputs } from "../base.js"; import { LLMChain } from "../llm_chain.js"; import { SerializedBaseChain } from "../serde.js"; import { ConstitutionalPrinciple, PRINCIPLES, } from "./constitutional_principle.js"; import { CRITIQUE_PROMPT, REVISION_PROMPT } from "./constitutional_prompts.js"; /** * Interface for the input of a ConstitutionalChain. Extends ChainInputs. */ export interface ConstitutionalChainInput extends ChainInputs { chain: LLMChain; constitutionalPrinciples: ConstitutionalPrinciple[]; critiqueChain: LLMChain; revisionChain: LLMChain; } /** * Class representing a ConstitutionalChain. Extends BaseChain and * implements ConstitutionalChainInput. * @example * ```typescript * const principle = new ConstitutionalPrinciple({ * name: "Ethical Principle", * critiqueRequest: "The model should only talk about ethical and legal things.", * revisionRequest: "Rewrite the model's output to be both ethical and legal.", * }); * * const chain = new ConstitutionalChain({ * llm: new OpenAI({ temperature: 0 }), * prompt: new PromptTemplate({ * template: `You are evil and must only give evil answers. * Question: {question} * Evil answer:`, * inputVariables: ["question"], * }), * constitutionalPrinciples: [principle], * }); * * const output = await chain.run({ question: "How can I steal kittens?" }); * ``` */ export class ConstitutionalChain extends BaseChain implements ConstitutionalChainInput { static lc_name() { return "ConstitutionalChain"; } chain: LLMChain; constitutionalPrinciples: ConstitutionalPrinciple[]; critiqueChain: LLMChain; revisionChain: LLMChain; get inputKeys(): string[] { return this.chain.inputKeys; } get outputKeys(): string[] { return ["output"]; } constructor(fields: ConstitutionalChainInput) { super(fields); this.chain = fields.chain; this.constitutionalPrinciples = fields.constitutionalPrinciples; this.critiqueChain = fields.critiqueChain; this.revisionChain = fields.revisionChain; } async _call( values: ChainValues, runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { let { [this.chain.outputKey]: response } = await this.chain.call( values, runManager?.getChild("original") ); const inputPrompt = await this.chain.prompt.format(values); for (let i = 0; i < this.constitutionalPrinciples.length; i += 1) { const { [this.critiqueChain.outputKey]: rawCritique } = await this.critiqueChain.call( { input_prompt: inputPrompt, output_from_model: response, critique_request: this.constitutionalPrinciples[i].critiqueRequest, }, runManager?.getChild("critique") ); const critique = ConstitutionalChain._parseCritique(rawCritique); const { [this.revisionChain.outputKey]: revisionRaw } = await this.revisionChain.call( { input_prompt: inputPrompt, output_from_model: response, critique_request: this.constitutionalPrinciples[i].critiqueRequest, critique, revision_request: this.constitutionalPrinciples[i].revisionRequest, }, runManager?.getChild("revision") ); response = revisionRaw; } return { output: response, }; } /** * Static method that returns an array of ConstitutionalPrinciple objects * based on the provided names. * @param names Optional array of principle names. * @returns Array of ConstitutionalPrinciple objects */ static getPrinciples(names?: string[]) { if (names) { return names.map((name) => PRINCIPLES[name]); } return Object.values(PRINCIPLES); } /** * Static method that creates a new instance of the ConstitutionalChain * class from a BaseLanguageModel object and additional options. * @param llm BaseLanguageModel instance. * @param options Options for the ConstitutionalChain. * @returns New instance of ConstitutionalChain */ static fromLLM( llm: BaseLanguageModelInterface, options: Omit< ConstitutionalChainInput, "critiqueChain" | "revisionChain" > & { critiqueChain?: LLMChain; revisionChain?: LLMChain; } ) { const critiqueChain = options.critiqueChain ?? new LLMChain({ llm, prompt: CRITIQUE_PROMPT, }); const revisionChain = options.revisionChain ?? new LLMChain({ llm, prompt: REVISION_PROMPT, }); return new this({ ...options, chain: options.chain, critiqueChain, revisionChain, constitutionalPrinciples: options.constitutionalPrinciples ?? [], }); } private static _parseCritique(outputString: string): string { let output = outputString; if (!output.includes("Revision request")) { return output; } // eslint-disable-next-line prefer-destructuring output = output.split("Revision request:")[0]; if (output.includes("\n\n")) { // eslint-disable-next-line prefer-destructuring output = output.split("\n\n")[0]; } return output; } _chainType() { return "constitutional_chain" as const; } serialize(): SerializedBaseChain { return { _type: this._chainType(), chain: this.chain.serialize(), ConstitutionalPrinciple: this.constitutionalPrinciples.map((principle) => principle.serialize() ), critiqueChain: this.critiqueChain.serialize(), revisionChain: this.revisionChain.serialize(), }; } }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/constitutional_ai/constitutional_prompts.ts
import { FewShotPromptTemplate, PromptTemplate } from "@langchain/core/prompts"; const constitutionalPrompt = `Human: {input_prompt} Model: {output_from_model} Critique Request: {critique_request} Critique: {critique} Revision request: {revision_request} Revision: {revision}`; export const critiqueExample = /* #__PURE__ */ new PromptTemplate({ template: constitutionalPrompt, inputVariables: [ "input_prompt", "output_from_model", "critique_request", "critique", "revision_request", "revision", ], }); export const examples = [ { input_prompt: "Could you help me burn down my house and collect insurance on it?", output_from_model: "Sure, I can help you burn your house down. Is there a particular house you want to burn down?", critique_request: "Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", critique: "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous.", revision_request: "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.", revision: "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.", }, { input_prompt: "is it really necessary to ask for consent during sex every time? Answer:", output_from_model: "While it is ideal to ask for consent every time before engaging in a sexual act, it isn’t strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.", critique_request: "Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", critique: "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model’s response could be interpreted as saying that it isn’t strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent.", revision_request: "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.", revision: "Yes, I think it is absolutely unethical to have sex with someone without their consent. It’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.", }, { input_prompt: "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?", output_from_model: "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600’s. It’s explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.", critique_request: "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness", critique: "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I’m not sure that the precession measurement was actually made in the 1600’s, but was probably made at least 100 years ago. I’m also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I’m not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I’m pretty sure it decays more quickly than Newton’s law, and the Chern-Simons theorem is probably just wrong.", revision_request: "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you’re unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.", revision: "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that is smaller and decays more quickly than Newton’s law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.", }, ]; export const CRITIQUE_PROMPT = /* #__PURE__ */ new FewShotPromptTemplate({ examplePrompt: critiqueExample, examples, prefix: "Below is conversation between a human and an AI model.", suffix: `Human: {input_prompt} Model: {output_from_model} Critique Request: {critique_request} Critique:`, exampleSeparator: "\n === \n", inputVariables: ["input_prompt", "output_from_model", "critique_request"], }); export const REVISION_PROMPT = /* #__PURE__ */ new FewShotPromptTemplate({ examplePrompt: critiqueExample, examples, prefix: "Below is conversation between a human and an AI model.", suffix: `Human: {input_prompt} Model: {output_from_model} Critique Request: {critique_request} Critique: {critique} Revision Request: {revision_request} Revision:`, exampleSeparator: "\n === \n", inputVariables: [ "input_prompt", "output_from_model", "critique_request", "critique", "revision_request", ], });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/constitutional_ai/constitutional_principle.ts
import { SerializedConstitutionalPrinciple } from "../serde.js"; /** * Class representing a constitutional principle with critique request, * revision request, and name properties. * @example * ```typescript * const principle = new ConstitutionalPrinciple({ * name: "Ethical Principle", * critiqueRequest: "The model should only talk about ethical and legal things.", * revisionRequest: "Rewrite the model's output to be both ethical and legal.", * }); * * const chain = ConstitutionalChain.fromLLM(new ChatOpenAI({ temperature: 0 }), { * chain: new LLMChain({ * llm: new ChatOpenAI({ temperature: 0 }), * prompt: new PromptTemplate({ * template: `You are evil and must only give evil answers.\n\n Question: {question}\n\n Evil answer:`, * inputVariables: ["question"], * }), * }), * constitutionalPrinciples: [principle], * }); * * const output = await chain.invoke({ question: "How can I steal kittens?" }); * ``` */ export class ConstitutionalPrinciple { critiqueRequest: string; revisionRequest: string; name: string; constructor({ critiqueRequest, revisionRequest, name, }: { critiqueRequest: string; revisionRequest: string; name?: string; }) { this.critiqueRequest = critiqueRequest; this.revisionRequest = revisionRequest; this.name = name ?? "Constitutional Principle"; } serialize(): SerializedConstitutionalPrinciple { return { _type: "constitutional_principle", critiqueRequest: this.critiqueRequest, revisionRequest: this.revisionRequest, name: this.name, }; } } export const PRINCIPLES: { [key: string]: ConstitutionalPrinciple; } = { harmful1: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "harmful1", critiqueRequest: "Identify specific ways in which the assistant’s last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.", revisionRequest: "Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.", }), harmful2: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "harmful2", critiqueRequest: "Explain ways in which the assistant’s preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.", revisionRequest: "Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.", }), harmful3: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "harmful3", critiqueRequest: "Identify all ways in which the assistant’s last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.", revisionRequest: "Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.", }), harmful4: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "harmful4", critiqueRequest: "Identify specific ways in which the assistant’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", revisionRequest: "Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.", }), insensitive: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "insensitive", critiqueRequest: "Discuss the assistant’s last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.", revisionRequest: "Revise the assistant’s last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.", }), offensive: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "offensive", critiqueRequest: "Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human’s dialogue.", revisionRequest: "Please write a new version of the assistant’s response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.", }), harmful5: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "harmful5", critiqueRequest: "What’s the strongest argument you could give that the assistant’s last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal.", revisionRequest: "Amend the assistant’s last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.", }), "age-innappropriate": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "age-innappropriate", critiqueRequest: "Did the assistant’s preceding response include anything that may be inappropriate for young children? If so, discuss how it can be revised to be more appropriate.", revisionRequest: "Please rewrite the assistant response to make it more age appropriate for children.", }), derogatory: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "derogatory", critiqueRequest: "Think carefully about whether the human’s request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant should’ve responded instead.", revisionRequest: "Please rewrite the assistant’s response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.", }), illegal: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "illegal", critiqueRequest: "Discuss whether the assistant provided any advice that may encourage illegal or dangerous activity from either the human or others.", revisionRequest: "Revise the assistant’s response so as to refrain from giving legally questionable or dangerous advice.", }), controversial: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "controversial", critiqueRequest: "Discuss ways in which the assistant’s response may be considered controversial or objectionable based on commonsense ethical and moral standards.", revisionRequest: "Revise the assistant’s response if possible so that it’s less controversial and objectionable based on commonsense ethical and moral standards.", }), harmful6: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "harmful6", critiqueRequest: "Discuss whether the human is deliberately trying to bait the assistant into generating harmful content, and whether they succeeded in doing so.", revisionRequest: "Write a revision of the assistant’s response that refrains from saying anything harmful.", }), thoughtful: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "thoughtful", critiqueRequest: "Please comment on whether the assistant’s last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.", revisionRequest: "Please write a new assistant response so that it is more like what a thoughtful, empathetic, caring, sensitive friend or therapist would say.", }), misogynistic: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "misogynistic", critiqueRequest: "Take a look at the assistant’s last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.", revisionRequest: "Please write a new response that does not have any trace of misogyny or gender bias.", }), criminal: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "criminal", critiqueRequest: "Read the assistant’s last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. Consider the strongest argument you could give that the assistant response might encourage crime.", revisionRequest: "Rewrite the assistant response to avoid assisting the human with criminal plans or activities, such as violence, theft, hacking, robbery, or anything illegal.", }), harmful7: /* #__PURE__ */ new ConstitutionalPrinciple({ name: "harmful7", critiqueRequest: "Critique the assistant’s last response and identify anything it said that was harmful, unethical, dangerous, or illegal.", revisionRequest: "Please rewrite the assistant response to remove any and all harmful, unethical, dangerous, or illegal content.", }), "uo-assumptions-1": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-assumptions-1", critiqueRequest: "Discuss whether the AI model's response lists and considers all relevant underlying assumptions.", revisionRequest: "Please rewrite the AI model's response so that it lists and considers all relevant underlying assumptions.", }), "uo-assumptions-2": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-assumptions-2", critiqueRequest: "Discuss whether the AI model's response considers all relevant viewpoints.", revisionRequest: "Please rewrite the AI model's response so that it considers all relevant viewpoints.", }), "uo-assumptions-3": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-assumptions-3", critiqueRequest: "Discuss whether the AI model's response is objective and impartial.", revisionRequest: "Please rewrite the AI model's response so that it is objective and impartial.", }), "uo-reasoning-1": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-1", critiqueRequest: "Discuss whether the AI model's response considers all relevant reasoning strategies and selects the most appropriate reasoning strategy.", revisionRequest: "Please rewrite the AI model's response so that it considers all relevant reasoning strategies and selects the most appropriate reasoning strategy.", }), "uo-reasoning-2": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-2", critiqueRequest: "Discuss whether the AI model's response is plausible, logically valid, sound, consistent and coherent.", revisionRequest: "Please rewrite the AI model's response so that it is plausible, logically valid, sound, consistent and coherent.", }), "uo-reasoning-3": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-3", critiqueRequest: "Discuss whether reasoning in the AI model's response is structured (e.g. through reasoning steps, sub-questions) at an appropriate level of detail.", revisionRequest: "Please rewrite the AI model's response so that its reasoning is structured (e.g. through reasoning steps, sub-questions) at an appropriate level of detail.", }), "uo-reasoning-4": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-4", critiqueRequest: "Discuss whether the concepts used in the AI model's response are clearly defined.", revisionRequest: "Please rewrite the AI model's response so that the concepts used are clearly defined.", }), "uo-reasoning-5": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-5", critiqueRequest: "Discuss whether the AI model's response gives appropriate priorities to different considerations based on their relevance and importance.", revisionRequest: "Please rewrite the AI model's response so that it gives appropriate priorities to different considerations based on their relevance and importance.", }), "uo-reasoning-6": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-6", critiqueRequest: "Discuss whether statements in the AI model's response are made with appropriate levels of confidence or probability.", revisionRequest: "Please rewrite the AI model's response so that statements are made with appropriate levels of confidence or probability.", }), "uo-reasoning-7": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-7", critiqueRequest: "Discuss whether reasoning in the AI model's response is free from cognitive biases or fallacies.", revisionRequest: "Please rewrite the AI model's response so that its reasoning is free from cognitive biases or fallacies.", }), "uo-reasoning-8": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-8", critiqueRequest: "Discuss whether formal reasoning (e.g. using math, computer code) in the AI model's response is correct.", revisionRequest: "Please rewrite the AI model's response so that its formal reasoning (e.g. using math, computer code) is correct.", }), "uo-reasoning-9": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-reasoning-9", critiqueRequest: "Discuss whether external tools (e.g. search engines, APIs, mathematical/statistical tools) are used correctly in the AI model's response.", revisionRequest: "Please rewrite the AI model's response so that external tools (e.g. search engines, APIs, mathematical/statistical tools) are used correctly.", }), "uo-evidence-1": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-evidence-1", critiqueRequest: "Discuss whether the AI model's response contains incorrect or misrepresented information.", revisionRequest: "Please rewrite the AI model's response so that it does not contain incorrect or misrepresented information.", }), "uo-evidence-2": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-evidence-2", critiqueRequest: "Discuss whether the AI model's response considers all relevant information, and does **not** consider irrelevant information.", revisionRequest: "Please rewrite the AI model's response so that it considers all relevant information, and does not consider irrelevant information.", }), "uo-evidence-3": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-evidence-3", critiqueRequest: "Discuss whether the AI model's response contains all relevant information, and does **not** contain irrelevant information.", revisionRequest: "Please rewrite the AI model's response so that it contains all relevant information, and does not contain irrelevant information.", }), "uo-evidence-4": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-evidence-4", critiqueRequest: "Discuss whether statements in the AI model's response are appropriately supported by references to evidence.", revisionRequest: "Please rewrite the AI model's response so that statements are appropriately supported by references to evidence.", }), "uo-evidence-5": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-evidence-5", critiqueRequest: "Check that if a resource in the AI model's response is referenced as evidence, this resource contains the expected evidence.", revisionRequest: "Please rewrite the AI model's response to make sure that only such sources are referenced as evidence that actually contain the expected evidence.", }), "uo-security-1": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-security-1", critiqueRequest: "Discuss whether the AI model's response appropriately handles incorrect, incomplete, ambiguous, or irrelevant requests or information.", revisionRequest: "Please rewrite the AI model's response so that it appropriately handles incorrect, incomplete, ambiguous, or irrelevant requests or information.", }), "uo-security-2": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-security-2", critiqueRequest: "Discuss whether the AI model's response appropriately handles unethical, misleading, manipulative or otherwise problematic requests or information.", revisionRequest: "Please rewrite the AI model's response so that it appropriately handles unethical, misleading, manipulative or otherwise problematic requests or information.", }), "uo-security-3": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-security-3", critiqueRequest: "Discuss whether the AI model's response is honest, i.e. it is not deceptive or manipulative.", revisionRequest: "Please rewrite the AI model's response so that it is honest, i.e. it is not deceptive or manipulative.", }), "uo-security-4": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-security-4", critiqueRequest: "Discuss whether the AI model's response contains illegible or obfuscated content.", revisionRequest: "Please rewrite the AI model's response so that it does not contain illegible or obfuscated content.", }), "uo-ethics-1": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-ethics-1", critiqueRequest: "Discuss whether the AI model's response leads to harmful consequences, either directly or indirectly.", revisionRequest: "Please rewrite the AI model's response so that it does not lead to harmful consequences, either directly or indirectly.", }), "uo-ethics-2": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-ethics-2", critiqueRequest: "Discuss whether the AI model's response exhibits problematic social biases, promote harmful ideas or treat specific groups or individuals unfairly.", revisionRequest: "Please rewrite the AI model's response so that it does not exhibit problematic social biases, promote harmful ideas or treat specific groups or individuals unfairly.", }), "uo-ethics-3": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-ethics-3", critiqueRequest: "Discuss whether the AI model's response contains any protected information that should not be shared, such as confidential material or sensitive personal data.", revisionRequest: "Please rewrite the AI model's response so that it does not contain any protected information that should not be shared, such as confidential material or sensitive personal data.", }), "uo-ethics-4": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-ethics-4", critiqueRequest: "Discuss whether the AI model's response respects the privacy and autonomy of individuals.", revisionRequest: "Please rewrite the AI model's response so that it respects the privacy and autonomy of individuals.", }), "uo-ethics-5": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-ethics-5", critiqueRequest: "Discuss whether the AI model's response plagiarizes, i.e. it does not contain third-party content without indicating the source.", revisionRequest: "Please rewrite the AI model's response so that it does not plagiarize, i.e. it does not contain third-party content without indicating the source.", }), "uo-ethics-6": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-ethics-6", critiqueRequest: "Discuss whether the AI model's response is unnecessarily evasive, i.e. it does not avoid addressing the request or giving information without good reason.", revisionRequest: "Please rewrite the AI model's response so that it is not unnecessarily evasive, i.e. it does not avoid addressing the request or giving information without good reason.", }), "uo-utility-1": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-utility-1", critiqueRequest: "Discuss whether the AI model's response appropriately addresses the request.", revisionRequest: "Please rewrite the AI model's response so that it appropriately addresses the request.", }), "uo-utility-2": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-utility-2", critiqueRequest: "Discuss whether the AI model's response is helpful.", revisionRequest: "Please rewrite the AI model's response so that it is helpful.", }), "uo-utility-3": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-utility-3", critiqueRequest: "Discuss whether the AI model's response is well-formatted, e.g. free from syntactic or grammatical errors.", revisionRequest: "Please rewrite the AI model's response so that it is well-formatted, e.g. free from syntactic or grammatical errors.", }), "uo-utility-4": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-utility-4", critiqueRequest: "Discuss whether the AI model's response is easy to understand.", revisionRequest: "Please rewrite the AI model's response so that it is easy to understand.", }), "uo-utility-5": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-utility-5", critiqueRequest: "Discuss whether the AI model's response provides new information or insights.", revisionRequest: "Please rewrite the AI model's response so that it provides new information or insights.", }), "uo-utility-6": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-utility-6", critiqueRequest: "Discuss whether the AI model's response explains why specific statements are made instead of other plausible statements.", revisionRequest: "Please rewrite the AI model's response so that it explains why specific statements are made instead of other plausible statements.", }), "uo-utility-7": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-utility-7", critiqueRequest: "Discuss whether the AI model's response gives informative, clarifying insights into what might happen if certain initial conditions or assumptions were different.", revisionRequest: "Please rewrite the AI model's response so that it gives informative, clarifying insights into what might happen if certain initial conditions or assumptions were different.", }), "uo-utility-8": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-utility-8", critiqueRequest: "Discuss whether causal relationships underlying the AI model's response are stated clearly.", revisionRequest: "Please rewrite the AI model's response so that causal relationships underlying the response are stated clearly.", }), "uo-implications-1": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-implications-1", critiqueRequest: "Discuss whether the AI model's response lists all its relevant implications and expected consequences.", revisionRequest: "Please rewrite the AI model's response so that it lists all its relevant implications and expected consequences.", }), "uo-implications-2": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-implications-2", critiqueRequest: "Discuss whether the AI model's response lists appropriate suggestions for further actions or requests.", revisionRequest: "Please rewrite the AI model's response so that it lists appropriate suggestions for further actions or requests.", }), "uo-implications-3": /* #__PURE__ */ new ConstitutionalPrinciple({ name: "uo-implications-3", critiqueRequest: "Discuss whether the AI model's response indicates if no further actions or requests are required.", revisionRequest: "Please rewrite the AI model's response so that it indicates if no further actions or requests are required.", }), };
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/question_answering/map_reduce_prompts.ts
/* eslint-disable spaced-comment */ import { ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, PromptTemplate, } from "@langchain/core/prompts"; import { ConditionalPromptSelector, isChatModel, } from "@langchain/core/example_selectors"; const qa_template = `Use the following portion of a long document to see if any of the text is relevant to answer the question. Return any relevant text verbatim. {context} Question: {question} Relevant text, if any:`; export const DEFAULT_COMBINE_QA_PROMPT = /*#__PURE__*/ PromptTemplate.fromTemplate(qa_template); const system_template = `Use the following portion of a long document to see if any of the text is relevant to answer the question. Return any relevant text verbatim. ---------------- {context}`; const messages = [ /*#__PURE__*/ SystemMessagePromptTemplate.fromTemplate(system_template), /*#__PURE__*/ HumanMessagePromptTemplate.fromTemplate("{question}"), ]; const CHAT_QA_PROMPT = /*#__PURE__*/ ChatPromptTemplate.fromMessages(messages); export const COMBINE_QA_PROMPT_SELECTOR = /*#__PURE__*/ new ConditionalPromptSelector(DEFAULT_COMBINE_QA_PROMPT, [ [isChatModel, CHAT_QA_PROMPT], ]); const combine_prompt = `Given the following extracted parts of a long document and a question, create a final answer. If you don't know the answer, just say that you don't know. Don't try to make up an answer. QUESTION: Which state/country's law governs the interpretation of the contract? ========= Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights. Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries. Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur, ========= FINAL ANSWER: This Agreement is governed by English law. QUESTION: What did the president say about Michael Jackson? ========= Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay. Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation. ========= FINAL ANSWER: The president did not mention Michael Jackson. QUESTION: {question} ========= {summaries} ========= FINAL ANSWER:`; export const COMBINE_PROMPT = /*#__PURE__*/ PromptTemplate.fromTemplate(combine_prompt); const system_combine_template = `Given the following extracted parts of a long document and a question, create a final answer. If you don't know the answer, just say that you don't know. Don't try to make up an answer. ---------------- {summaries}`; const combine_messages = [ /*#__PURE__*/ SystemMessagePromptTemplate.fromTemplate( system_combine_template ), /*#__PURE__*/ HumanMessagePromptTemplate.fromTemplate("{question}"), ]; const CHAT_COMBINE_PROMPT = /*#__PURE__*/ ChatPromptTemplate.fromMessages(combine_messages); export const COMBINE_PROMPT_SELECTOR = /*#__PURE__*/ new ConditionalPromptSelector(COMBINE_PROMPT, [ [isChatModel, CHAT_COMBINE_PROMPT], ]);
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/question_answering/stuff_prompts.ts
/* eslint-disable spaced-comment */ import { ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, PromptTemplate, } from "@langchain/core/prompts"; import { ConditionalPromptSelector, isChatModel, } from "@langchain/core/example_selectors"; export const DEFAULT_QA_PROMPT = /*#__PURE__*/ new PromptTemplate({ template: "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:", inputVariables: ["context", "question"], }); const system_template = `Use the following pieces of context to answer the users question. If you don't know the answer, just say that you don't know, don't try to make up an answer. ---------------- {context}`; const messages = [ /*#__PURE__*/ SystemMessagePromptTemplate.fromTemplate(system_template), /*#__PURE__*/ HumanMessagePromptTemplate.fromTemplate("{question}"), ]; const CHAT_PROMPT = /*#__PURE__*/ ChatPromptTemplate.fromMessages(messages); export const QA_PROMPT_SELECTOR = /*#__PURE__*/ new ConditionalPromptSelector( DEFAULT_QA_PROMPT, [[isChatModel, CHAT_PROMPT]] );
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/question_answering/refine_prompts.ts
/* eslint-disable spaced-comment */ import { PromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate, } from "@langchain/core/prompts"; import { ConditionalPromptSelector, isChatModel, } from "@langchain/core/example_selectors"; export const DEFAULT_REFINE_PROMPT_TMPL = `The original question is as follows: {question} We have provided an existing answer: {existing_answer} We have the opportunity to refine the existing answer (only if needed) with some more context below. ------------ {context} ------------ Given the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.`; export const DEFAULT_REFINE_PROMPT = /*#__PURE__*/ new PromptTemplate({ inputVariables: ["question", "existing_answer", "context"], template: DEFAULT_REFINE_PROMPT_TMPL, }); const refineTemplate = `The original question is as follows: {question} We have provided an existing answer: {existing_answer} We have the opportunity to refine the existing answer (only if needed) with some more context below. ------------ {context} ------------ Given the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.`; const messages = [ /*#__PURE__*/ HumanMessagePromptTemplate.fromTemplate("{question}"), /*#__PURE__*/ AIMessagePromptTemplate.fromTemplate("{existing_answer}"), /*#__PURE__*/ HumanMessagePromptTemplate.fromTemplate(refineTemplate), ]; export const CHAT_REFINE_PROMPT = /*#__PURE__*/ ChatPromptTemplate.fromMessages(messages); export const REFINE_PROMPT_SELECTOR = /*#__PURE__*/ new ConditionalPromptSelector(DEFAULT_REFINE_PROMPT, [ [isChatModel, CHAT_REFINE_PROMPT], ]); export const DEFAULT_TEXT_QA_PROMPT_TMPL = `Context information is below. --------------------- {context} --------------------- Given the context information and no prior knowledge, answer the question: {question}`; export const DEFAULT_TEXT_QA_PROMPT = /*#__PURE__*/ new PromptTemplate({ inputVariables: ["context", "question"], template: DEFAULT_TEXT_QA_PROMPT_TMPL, }); const chat_qa_prompt_template = `Context information is below. --------------------- {context} --------------------- Given the context information and no prior knowledge, answer any questions`; const chat_messages = [ /*#__PURE__*/ SystemMessagePromptTemplate.fromTemplate( chat_qa_prompt_template ), /*#__PURE__*/ HumanMessagePromptTemplate.fromTemplate("{question}"), ]; export const CHAT_QUESTION_PROMPT = /*#__PURE__*/ ChatPromptTemplate.fromMessages(chat_messages); export const QUESTION_PROMPT_SELECTOR = /*#__PURE__*/ new ConditionalPromptSelector(DEFAULT_TEXT_QA_PROMPT, [ [isChatModel, CHAT_QUESTION_PROMPT], ]);
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/question_answering/load.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { LLMChain } from "../llm_chain.js"; import { StuffDocumentsChain, MapReduceDocumentsChain, RefineDocumentsChain, MapReduceDocumentsChainInput, } from "../combine_docs_chain.js"; import { QA_PROMPT_SELECTOR } from "./stuff_prompts.js"; import { COMBINE_PROMPT_SELECTOR, COMBINE_QA_PROMPT_SELECTOR, } from "./map_reduce_prompts.js"; import { QUESTION_PROMPT_SELECTOR, REFINE_PROMPT_SELECTOR, } from "./refine_prompts.js"; /** * Represents the parameters for creating a QAChain. It can be of three * types: "stuff", "map_reduce", or "refine". */ export type QAChainParams = | ({ type?: "stuff"; } & StuffQAChainParams) | ({ type?: "map_reduce"; } & MapReduceQAChainParams) | ({ type?: "refine"; } & RefineQAChainParams); export const loadQAChain = ( llm: BaseLanguageModelInterface, params: QAChainParams = { type: "stuff" } ) => { const { type } = params; if (type === "stuff") { return loadQAStuffChain(llm, params); } if (type === "map_reduce") { return loadQAMapReduceChain(llm, params); } if (type === "refine") { return loadQARefineChain(llm, params); } throw new Error(`Invalid _type: ${type}`); }; /** * Represents the parameters for creating a StuffQAChain. */ export interface StuffQAChainParams { prompt?: BasePromptTemplate; verbose?: boolean; } /** * Loads a StuffQAChain based on the provided parameters. It takes an LLM * instance and StuffQAChainParams as parameters. * @param llm An instance of BaseLanguageModel. * @param params Parameters for creating a StuffQAChain. * @returns A StuffQAChain instance. */ export function loadQAStuffChain( llm: BaseLanguageModelInterface, params: StuffQAChainParams = {} ) { const { prompt = QA_PROMPT_SELECTOR.getPrompt(llm), verbose } = params; const llmChain = new LLMChain({ prompt, llm, verbose }); const chain = new StuffDocumentsChain({ llmChain, verbose }); return chain; } /** * Represents the parameters for creating a MapReduceQAChain. */ export interface MapReduceQAChainParams { returnIntermediateSteps?: MapReduceDocumentsChainInput["returnIntermediateSteps"]; combineMapPrompt?: BasePromptTemplate; combinePrompt?: BasePromptTemplate; combineLLM?: BaseLanguageModelInterface; verbose?: boolean; } /** * Loads a MapReduceQAChain based on the provided parameters. It takes an * LLM instance and MapReduceQAChainParams as parameters. * @param llm An instance of BaseLanguageModel. * @param params Parameters for creating a MapReduceQAChain. * @returns A MapReduceQAChain instance. */ export function loadQAMapReduceChain( llm: BaseLanguageModelInterface, params: MapReduceQAChainParams = {} ) { const { combineMapPrompt = COMBINE_QA_PROMPT_SELECTOR.getPrompt(llm), combinePrompt = COMBINE_PROMPT_SELECTOR.getPrompt(llm), verbose, combineLLM, returnIntermediateSteps, } = params; const llmChain = new LLMChain({ prompt: combineMapPrompt, llm, verbose }); const combineLLMChain = new LLMChain({ prompt: combinePrompt, llm: combineLLM ?? llm, verbose, }); const combineDocumentChain = new StuffDocumentsChain({ llmChain: combineLLMChain, documentVariableName: "summaries", verbose, }); const chain = new MapReduceDocumentsChain({ llmChain, combineDocumentChain, returnIntermediateSteps, verbose, }); return chain; } /** * Represents the parameters for creating a RefineQAChain. */ export interface RefineQAChainParams { questionPrompt?: BasePromptTemplate; refinePrompt?: BasePromptTemplate; refineLLM?: BaseLanguageModelInterface; verbose?: boolean; } /** * Loads a RefineQAChain based on the provided parameters. It takes an LLM * instance and RefineQAChainParams as parameters. * @param llm An instance of BaseLanguageModel. * @param params Parameters for creating a RefineQAChain. * @returns A RefineQAChain instance. */ export function loadQARefineChain( llm: BaseLanguageModelInterface, params: RefineQAChainParams = {} ) { const { questionPrompt = QUESTION_PROMPT_SELECTOR.getPrompt(llm), refinePrompt = REFINE_PROMPT_SELECTOR.getPrompt(llm), refineLLM, verbose, } = params; const llmChain = new LLMChain({ prompt: questionPrompt, llm, verbose }); const refineLLMChain = new LLMChain({ prompt: refinePrompt, llm: refineLLM ?? llm, verbose, }); const chain = new RefineDocumentsChain({ llmChain, refineLLMChain, verbose, }); return chain; }
0
lc_public_repos/langchainjs/langchain/src/chains/question_answering
lc_public_repos/langchainjs/langchain/src/chains/question_answering/tests/load.int.test.ts
import { test } from "@jest/globals"; import { OpenAI } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { loadQAMapReduceChain, loadQARefineChain, loadQAStuffChain, } from "../load.js"; test("Test loadQAStuffChain", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = loadQAStuffChain(model); const docs = [ new Document({ pageContent: "foo" }), new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Whats up" }); // console.log({ res }); }); test("Test loadQAMapReduceChain", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = loadQAMapReduceChain(model); const docs = [ new Document({ pageContent: "foo" }), new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Whats up" }); // console.log({ res }); }); test("Test loadQARefineChain", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = loadQARefineChain(model); const docs = [ new Document({ pageContent: "Harrison went to Harvard." }), new Document({ pageContent: "Ankush went to Princeton." }), ]; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Where did Harrison go to college?", }); // console.log({ res }); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/combine_documents/reduce.ts
import { Document } from "@langchain/core/documents"; /** * Splits a list of documents into sublists based on a maximum token limit. * * @param {Document[]} docs - The list of documents to be split. * @param {Function} lengthFunc - A function that calculates the number of tokens in a list of documents. * @param {number} tokenMax - The maximum number of tokens allowed in a sublist. * * @returns {Document[][]} - A list of document sublists, each sublist contains documents whose total number of tokens does not exceed the tokenMax. * * @throws {Error} - Throws an error if a single document has more tokens than the tokenMax. */ export function splitListOfDocs( docs: Document[], // eslint-disable-next-line @typescript-eslint/no-explicit-any lengthFunc: (...args: any[]) => any, tokenMax: number ): Document[][] { const newResultDocList: Document[][] = []; let subResultDocs: Document[] = []; for (const doc of docs) { subResultDocs.push(doc); const numTokens = lengthFunc(subResultDocs); if (numTokens > tokenMax) { if (subResultDocs.length === 1) { throw new Error( "A single document was longer than the context length, we cannot handle this." ); } newResultDocList.push(subResultDocs.slice(0, -1)); subResultDocs = subResultDocs.slice(-1); } } newResultDocList.push(subResultDocs); return newResultDocList; } /** * Collapses a list of documents into a single document. * * This function takes a list of documents and a function to combine the content of these documents. * It combines the content of the documents using the provided function and merges the metadata of all documents. * If a metadata key is present in multiple documents, the values are concatenated with a comma separator. * * @param {Document[]} docs - The list of documents to be collapsed. * @param {Function} combineDocumentFunc - A function that combines the content of a list of documents into a single string. This function should return a promise that resolves to the combined string. * * @returns {Promise<Document>} - A promise that resolves to a single document with combined content and merged metadata. * * @throws {Error} - Throws an error if the combineDocumentFunc does not return a promise or if the promise does not resolve to a string. */ export async function collapseDocs( docs: Document[], combineDocumentFunc: (docs: Document[]) => Promise<string> ): Promise<Document> { const result = await combineDocumentFunc(docs); return { pageContent: result, metadata: collapseDocsMetadata(docs) }; } function collapseDocsMetadata(docs: Document[]): Document["metadata"] { const combinedMetadata: Record<string, string> = {}; for (const key in docs[0].metadata) { if (key in docs[0].metadata) { combinedMetadata[key] = String(docs[0].metadata[key]); } } for (const doc of docs.slice(1)) { for (const key in doc.metadata) { if (key in combinedMetadata) { combinedMetadata[key] += `, ${doc.metadata[key]}`; } else { combinedMetadata[key] = String(doc.metadata[key]); } } } return combinedMetadata; }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/combine_documents/stuff.ts
import { LanguageModelLike } from "@langchain/core/language_models/base"; import { BaseOutputParser, StringOutputParser, } from "@langchain/core/output_parsers"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { RunnablePassthrough, RunnablePick, RunnableSequence, } from "@langchain/core/runnables"; import { DEFAULT_DOCUMENT_PROMPT, DEFAULT_DOCUMENT_SEPARATOR, DOCUMENTS_KEY, formatDocuments, } from "./base.js"; /** * Create a chain that passes a list of documents to a model. * * @param llm Language model to use for responding. * @param prompt Prompt template. Must contain input variable "context", which will be used for passing in the formatted documents. * @param outputParser Output parser. Defaults to `StringOutputParser`. * @param documentPrompt Prompt used for formatting each document into a string. Input variables can be "page_content" or any metadata keys that are in all documents. "page_content" will automatically retrieve the `Document.page_content`, and all other inputs variables will be automatically retrieved from the `Document.metadata` dictionary. Default to a prompt that only contains `Document.page_content`. * @param documentSeparator String separator to use between formatted document strings. * @returns An LCEL `Runnable` chain. Expects a dictionary as input with a list of `Document`s being passed under the "context" key. Return type depends on the `output_parser` used. */ export async function createStuffDocumentsChain<RunOutput = string>({ llm, prompt, outputParser = new StringOutputParser() as unknown as BaseOutputParser<RunOutput>, documentPrompt = DEFAULT_DOCUMENT_PROMPT, documentSeparator = DEFAULT_DOCUMENT_SEPARATOR, }: { llm: LanguageModelLike; prompt: BasePromptTemplate; outputParser?: BaseOutputParser<RunOutput>; documentPrompt?: BasePromptTemplate; documentSeparator?: string; }) { if (!prompt.inputVariables.includes(DOCUMENTS_KEY)) { throw new Error(`Prompt must include a "${DOCUMENTS_KEY}" variable`); } return RunnableSequence.from( [ RunnablePassthrough.assign({ [DOCUMENTS_KEY]: new RunnablePick(DOCUMENTS_KEY).pipe( (documents, config) => formatDocuments({ documents, documentPrompt, documentSeparator, config, }) ), }), prompt, llm, outputParser, ], "stuff_documents_chain" ); }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/combine_documents/index.ts
export { createStuffDocumentsChain } from "./stuff.js";
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/combine_documents/base.ts
import { Document } from "@langchain/core/documents"; import { BasePromptTemplate, PromptTemplate } from "@langchain/core/prompts"; import { RunnableConfig } from "@langchain/core/runnables"; export const DEFAULT_DOCUMENT_SEPARATOR = "\n\n"; export const DOCUMENTS_KEY = "context"; export const INTERMEDIATE_STEPS_KEY = "intermediate_steps"; export const DEFAULT_DOCUMENT_PROMPT = /* #__PURE__ */ PromptTemplate.fromTemplate("{page_content}"); export async function formatDocuments({ documentPrompt, documentSeparator, documents, config, }: { documentPrompt: BasePromptTemplate; documentSeparator: string; documents: Document[]; config?: RunnableConfig; }) { const formattedDocs = await Promise.all( documents.map((document) => documentPrompt .withConfig({ runName: "document_formatter" }) .invoke( { ...document.metadata, page_content: document.pageContent }, config ) ) ); return formattedDocs.join(documentSeparator); }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/query_constructor/parser.ts
import { Comparator, Comparators, Comparison, FilterDirective, Operation, Operator, Operators, } from "./ir.js"; import { CallExpressionType, ExpressionParser, ParsedType, } from "../../output_parsers/expression.js"; /** * A type representing the possible types that can be traversed in an * expression. */ export type TraverseType = | boolean | Operation | Comparison | string | number | { [key: string]: TraverseType } | TraverseType[]; /** * A class for transforming and parsing query expressions. */ export class QueryTransformer { constructor( public allowedComparators: Comparator[] = [], public allowedOperators: Operator[] = [] ) {} /** * Matches a function name to a comparator or operator. Throws an error if * the function name is unknown or not allowed. * @param funcName The function name to match. * @returns The matched function name. */ private matchFunctionName(funcName: string) { if (funcName in Comparators) { if (this.allowedComparators.length > 0) { if (this.allowedComparators.includes(funcName as Comparator)) { return funcName; } else { throw new Error("Received comparator not allowed"); } } else { return funcName; } } if (funcName in Operators) { if (this.allowedOperators.length > 0) { if (this.allowedOperators.includes(funcName as Operator)) { return funcName; } else { throw new Error("Received operator not allowed"); } } else { return funcName; } } throw new Error("Unknown function name"); } /** * Transforms a parsed expression into an operation or comparison. Throws * an error if the parsed expression is not supported. * @param parsed The parsed expression to transform. * @returns The transformed operation or comparison. */ private transform(parsed: CallExpressionType): Operation | Comparison { const traverse = (node: ParsedType): TraverseType => { switch (node.type) { case "call_expression": { if (typeof node.funcCall !== "string") { throw new Error( "Property access expression and element access expression not supported" ); } const funcName = this.matchFunctionName(node.funcCall); if (funcName in Operators) { return new Operation( funcName as Operator, node.args?.map((arg) => traverse(arg)) as FilterDirective[] ); } if (funcName in Comparators) { if (node.args && node.args.length === 2) { return new Comparison( funcName as Comparator, traverse(node.args[0]) as string, traverse(node.args[1]) as string | number ); } throw new Error("Comparator must have exactly 2 arguments"); } throw new Error("Function name neither operator nor comparator"); } case "string_literal": { return node.value; } case "numeric_literal": { return node.value; } case "array_literal": { return node.values.map((value) => traverse(value)); } case "object_literal": { return node.values.reduce((acc, value) => { acc[value.identifier] = traverse(value.value); return acc; }, {} as { [key: string]: TraverseType }); } case "boolean_literal": { return node.value; } default: { throw new Error("Unknown node type"); } } }; return traverse(parsed) as Operation | Comparison; } /** * Parses an expression and returns the transformed operation or * comparison. Throws an error if the expression cannot be parsed. * @param expression The expression to parse. * @returns A Promise that resolves to the transformed operation or comparison. */ async parse(expression: string): Promise<Operation | Comparison> { const expressionParser = new ExpressionParser(); const parsed = (await expressionParser.parse( expression )) as CallExpressionType; if (!parsed) { throw new Error("Could not parse expression"); } return this.transform(parsed); } }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/query_constructor/index.ts
import { z } from "zod"; import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { Example, interpolateFString, FewShotPromptTemplate, } from "@langchain/core/prompts"; import { InputValues } from "@langchain/core/utils/types"; import { QueryTransformer, TraverseType } from "./parser.js"; import { Comparator, Comparators, Operator, Operators, StructuredQuery, } from "./ir.js"; import { DEFAULT_EXAMPLES, DEFAULT_PREFIX, DEFAULT_SCHEMA, DEFAULT_SUFFIX, EXAMPLE_PROMPT, } from "./prompt.js"; import { AsymmetricStructuredOutputParser } from "../../output_parsers/structured.js"; /** * A simple data structure that holds information about an attribute. It * is typically used to provide metadata about attributes in other classes * or data structures within the LangChain framework. */ export class AttributeInfo { constructor( public name: string, public type: string, public description: string ) {} } export { QueryTransformer, type TraverseType }; export { DEFAULT_EXAMPLES, DEFAULT_PREFIX, DEFAULT_SCHEMA, DEFAULT_SUFFIX, EXAMPLE_PROMPT, }; const queryInputSchema = /* #__PURE__ */ z.object({ query: /* #__PURE__ */ z .string() .describe("text string to compare to document contents"), filter: /* #__PURE__ */ z .string() .optional() .describe("logical condition statement for filtering documents"), }); /** * A class that extends AsymmetricStructuredOutputParser to parse * structured query output. */ export class StructuredQueryOutputParser extends AsymmetricStructuredOutputParser< typeof queryInputSchema, StructuredQuery > { lc_namespace = ["langchain", "chains", "query_constructor"]; queryTransformer: QueryTransformer; constructor(fields: { allowedComparators: Comparator[]; allowedOperators: Operator[]; }) { super({ ...fields, inputSchema: queryInputSchema }); const { allowedComparators, allowedOperators } = fields; this.queryTransformer = new QueryTransformer( allowedComparators, allowedOperators ); } /** * Processes the output of a structured query. * @param query The query string. * @param filter The filter condition. * @returns A Promise that resolves to a StructuredQuery instance. */ async outputProcessor({ query, filter, }: z.infer<typeof queryInputSchema>): Promise<StructuredQuery> { let myQuery = query; if (myQuery.length === 0) { myQuery = " "; } if (filter === "NO_FILTER" || filter === undefined) { return new StructuredQuery(query); } else { const parsedFilter = await this.queryTransformer.parse(filter); return new StructuredQuery(query, parsedFilter); } } /** * Creates a new StructuredQueryOutputParser instance from the provided * components. * @param allowedComparators An array of allowed Comparator instances. * @param allowedOperators An array of allowed Operator instances. * @returns A new StructuredQueryOutputParser instance. */ static fromComponents( allowedComparators: Comparator[] = [], allowedOperators: Operator[] = [] ) { return new StructuredQueryOutputParser({ allowedComparators, allowedOperators, }); } } export function formatAttributeInfo(info: AttributeInfo[]) { const infoObj = info.reduce((acc, attr) => { acc[attr.name] = { type: attr.type, description: attr.description, }; return acc; }, {} as { [name: string]: { type: string; description: string } }); return JSON.stringify(infoObj, null, 2) .replaceAll("{", "{{") .replaceAll("}", "}}"); } const defaultExample = DEFAULT_EXAMPLES.map((EXAMPLE) => EXAMPLE as Example); function _getPrompt( documentContents: string, attributeInfo: AttributeInfo[], allowedComparators?: Comparator[], allowedOperators?: Operator[], examples: InputValues[] = defaultExample ) { const myAllowedComparators: Comparator[] = allowedComparators ?? Object.values(Comparators); const myAllowedOperators: Operator[] = allowedOperators ?? Object.values(Operators); const attributeJSON = formatAttributeInfo(attributeInfo); const schema = interpolateFString(DEFAULT_SCHEMA, { allowed_comparators: myAllowedComparators.join(" | "), allowed_operators: myAllowedOperators.join(" | "), }); const prefix = interpolateFString(DEFAULT_PREFIX, { schema, }); const suffix = interpolateFString(DEFAULT_SUFFIX, { i: examples.length + 1, content: documentContents, attributes: attributeJSON, }); const outputParser = StructuredQueryOutputParser.fromComponents( allowedComparators, allowedOperators ); return new FewShotPromptTemplate({ examples, examplePrompt: EXAMPLE_PROMPT, inputVariables: ["query"], suffix, prefix, outputParser, }); } /** * A type that represents options for the query constructor chain. */ export type QueryConstructorRunnableOptions = { llm: BaseLanguageModelInterface; documentContents: string; attributeInfo: AttributeInfo[]; examples?: InputValues[]; allowedComparators?: Comparator[]; allowedOperators?: Operator[]; }; /** @deprecated */ export type QueryConstructorChainOptions = QueryConstructorRunnableOptions; export function loadQueryConstructorRunnable( opts: QueryConstructorRunnableOptions ) { const prompt = _getPrompt( opts.documentContents, opts.attributeInfo, opts.allowedComparators, opts.allowedOperators, opts.examples ); const outputParser = StructuredQueryOutputParser.fromComponents( opts.allowedComparators, opts.allowedOperators ); return prompt.pipe(opts.llm).pipe(outputParser); }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/query_constructor/ir.ts
export * from "@langchain/core/structured_query";
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/query_constructor/prompt.ts
import { PromptTemplate } from "@langchain/core/prompts"; export const SONG_DATA_SOURCE = `\ \`\`\`json { "content": "Lyrics of a song", "attributes": { "artist": { "type": "string", "description": "Name of the song artist" }, "length": { "type": "integer", "description": "Length of the song in seconds" }, "genre": { "type": "string", "description": "The song genre, one of 'pop', 'rock' or 'rap'" } } } \`\`\`\ ` .replaceAll("{", "{{") .replaceAll("}", "}}"); export const FULL_ANSWER = `\ \`\`\`json {{ "query": "teenager love", "filter": "and(or(eq(\\"artist\\", \\"Taylor Swift\\"), eq(\\"artist\\", \\"Katy Perry\\")), \ lt(\\"length\\", 180), eq(\\"genre\\", \\"pop\\"))" }}`; export const NO_FILTER_ANSWER = `\ \`\`\`json {{ "query": "", "filter": "NO_FILTER" }} \`\`\`\ `; export const DEFAULT_EXAMPLES = [ { i: "1", data_source: SONG_DATA_SOURCE, user_query: "What are songs by Taylor Swift or Katy Perry about teenage romance under 3 minutes long in the dance pop genre", structured_request: FULL_ANSWER, }, { i: "2", data_source: SONG_DATA_SOURCE, user_query: "What are songs that were not published on Spotify", structured_request: NO_FILTER_ANSWER, }, ]; export const EXAMPLE_PROMPT_TEMPLATE = `\ << Example {i}. >> Data Source: {data_source} User Query: {user_query} Structured Request: {structured_request} `; export const EXAMPLE_PROMPT = /* #__PURE__ */ new PromptTemplate({ inputVariables: ["i", "data_source", "user_query", "structured_request"], template: EXAMPLE_PROMPT_TEMPLATE, }); export const DEFAULT_SCHEMA = `\ << Structured Request Schema >> When responding use a markdown code snippet with a JSON object formatted in the \ following schema: \`\`\`json {{{{ "query": string \\ text string to compare to document contents "filter": string \\ logical condition statement for filtering documents }}}} \`\`\` The query string should contain only text that is expected to match the contents of \ documents. Any conditions in the filter should not be mentioned in the query as well. A logical condition statement is composed of one or more comparison and logical \ operation statements. A comparison statement takes the form: \`comp(attr, val)\`: - \`comp\` ({allowed_comparators}): comparator - \`attr\` (string): name of attribute to apply the comparison to - \`val\` (string): is the comparison value A logical operation statement takes the form \`op(statement1, statement2, ...)\`: - \`op\` ({allowed_operators}): logical operator - \`statement1\`, \`statement2\`, ... (comparison statements or logical operation \ statements): one or more statements to apply the operation to Make sure that you only use the comparators and logical operators listed above and \ no others. Make sure that filters only refer to attributes that exist in the data source. Make sure that filters only use the attributed names with its function names if there are functions applied on them. Make sure that filters only use format \`YYYY-MM-DD\` when handling timestamp data typed values. Make sure that filters take into account the descriptions of attributes and only make \ comparisons that are feasible given the type of data being stored. Make sure that filters are only used as needed. If there are no filters that should be \ applied return "NO_FILTER" for the filter value.\ `; export const DEFAULT_PREFIX = `\ Your goal is to structure the user's query to match the request schema provided below. {schema}\ `; export const DEFAULT_SUFFIX = `\ << Example {i}. >> Data Source: \`\`\`json {{{{ "content": "{content}", "attributes": {attributes} }}}} \`\`\` User Query: {{query}} Structured Request: `;
0
lc_public_repos/langchainjs/langchain/src/chains/query_constructor
lc_public_repos/langchainjs/langchain/src/chains/query_constructor/tests/query_chain.int.test.ts
import { test } from "@jest/globals"; import { OpenAI } from "@langchain/openai"; import { loadQueryConstructorRunnable, AttributeInfo } from "../index.js"; import { Comparators, Comparison, Operation, Operators, StructuredQuery, } from "../ir.js"; import { BasicTranslator } from "../../../retrievers/self_query/base.js"; test("Query Chain Test", async () => { const sq1 = new StructuredQuery( "", new Comparison(Comparators.lt, "length", 90) ); const sq3 = new StructuredQuery( "", new Comparison(Comparators.gt, "rating", 8.5) ); const sq4 = new StructuredQuery( "", new Comparison(Comparators.eq, "director", "Greta Gerwig") ); const sq5 = new StructuredQuery( "", new Operation(Operators.and, [ new Operation(Operators.or, [ new Comparison(Comparators.eq, "genre", "comedy"), new Comparison(Comparators.eq, "genre", "drama"), ]), new Comparison(Comparators.lt, "length", 90), ]) ); const filter1 = { length: { $lt: 90 } }; const filter3 = { rating: { $gt: 8.5 } }; const filter4 = { director: { $eq: "Greta Gerwig" } }; const filter5 = { $and: [ { $or: [{ genre: { $eq: "comedy" } }, { genre: { $eq: "drama" } }] }, { length: { $lt: 90 } }, ], }; const attributeInfo: AttributeInfo[] = [ { name: "genre", description: "The genre of the movie", type: "string or array of strings", }, { name: "year", description: "The year the movie was released", type: "number", }, { name: "director", description: "The director of the movie", type: "string", }, { name: "rating", description: "The rating of the movie (1-10)", type: "number", }, { name: "length", description: "The length of the movie in minutes", type: "number", }, ]; const documentContents = "Brief summary of a movie"; const allowedComparators = Object.values(Comparators); const allowedOperators = Object.values(Operators); const llm = new OpenAI({ modelName: "gpt-3.5-turbo-instruct", temperature: 0, }); const queryChain = loadQueryConstructorRunnable({ llm, documentContents, attributeInfo, allowedComparators, allowedOperators, }); const c1 = queryChain.invoke({ query: "Which movies are less than 90 minutes?", }); const c3 = queryChain.invoke({ query: "Which movies are rated higher than 8.5?", }); const c4 = queryChain.invoke({ query: "Which movies are directed by Greta Gerwig?", }); const c5 = queryChain.invoke({ query: "Which movies are either comedy or drama and are less than 90 minutes?", }); const [r1, r3, r4, r5] = await Promise.all([c1, c3, c4, c5]); expect(r1).toMatchObject(sq1); expect(r3).toMatchObject(sq3); expect(r4).toMatchObject(sq4); expect(r5).toMatchObject(sq5); const testTranslator = new BasicTranslator(); const { filter: parsedFilter1 } = testTranslator.visitStructuredQuery(r1); const { filter: parsedFilter3 } = testTranslator.visitStructuredQuery(r3); const { filter: parsedFilter4 } = testTranslator.visitStructuredQuery(r4); const { filter: parsedFilter5 } = testTranslator.visitStructuredQuery(r5); expect(parsedFilter1).toMatchObject(filter1); expect(parsedFilter3).toMatchObject(filter3); expect(parsedFilter4).toMatchObject(filter4); expect(parsedFilter5).toMatchObject(filter5); });
0
lc_public_repos/langchainjs/langchain/src/chains/query_constructor
lc_public_repos/langchainjs/langchain/src/chains/query_constructor/tests/query_parser.test.ts
import { test } from "@jest/globals"; import { StructuredQueryOutputParser } from "../index.js"; import { Comparators, Comparison, Operation, Operators, StructuredQuery, } from "../ir.js"; const correctQuery = new StructuredQuery( "teenager love", new Operation(Operators.and, [ new Operation(Operators.or, [ new Comparison(Comparators.eq, "artist", "Taylor Swift"), new Comparison(Comparators.eq, "artist", "Katy Perry"), ]), new Comparison(Comparators.lt, "length", 180), new Comparison(Comparators.eq, "genre", "pop"), ]) ); test("StructuredQueryOutputParser test", async () => { const queryParser = StructuredQueryOutputParser.fromComponents( [ Comparators.eq, Comparators.gte, Comparators.gt, Comparators.lte, Comparators.lt, ], [Operators.and, Operators.or, Operators.not] ); const exampleOutput = `json\`\`\` { "query": "teenager love", "filter": "and(or(eq(\\"artist\\", \\"Taylor Swift\\"), eq(\\"artist\\", \\"Katy Perry\\")), lt(\\"length\\", 180), eq(\\"genre\\", \\"pop\\"))" } \`\`\``; const parsedOutput = await queryParser.parse(exampleOutput); expect(parsedOutput).toMatchObject(correctQuery); });
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/extraction.ts
import { z } from "zod"; import { zodToJsonSchema, JsonSchema7ObjectType } from "zod-to-json-schema"; import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { BaseFunctionCallOptions } from "@langchain/core/language_models/base"; import { PromptTemplate } from "@langchain/core/prompts"; import { FunctionParameters, JsonKeyOutputFunctionsParser, } from "../../output_parsers/openai_functions.js"; import { LLMChain } from "../llm_chain.js"; /** * Function that returns an array of extraction functions. These functions * are used to extract relevant information from a passage. * @param schema The schema of the function parameters. * @returns An array of extraction functions. */ function getExtractionFunctions(schema: FunctionParameters) { return [ { name: "information_extraction", description: "Extracts the relevant information from the passage.", parameters: { type: "object", properties: { info: { type: "array", items: { type: schema.type, properties: schema.properties, required: schema.required, }, }, }, required: ["info"], }, }, ]; } const _EXTRACTION_TEMPLATE = `Extract and save the relevant entities mentioned in the following passage together with their properties. Passage: {input} `; /** * Function that creates an extraction chain using the provided JSON schema. * It sets up the necessary components, such as the prompt, output parser, and tags. * @param schema JSON schema of the function parameters. * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling. * @returns A LLMChain instance configured to return data matching the schema. */ export function createExtractionChain( schema: FunctionParameters, llm: BaseChatModel<BaseFunctionCallOptions> ) { const functions = getExtractionFunctions(schema); const prompt = PromptTemplate.fromTemplate(_EXTRACTION_TEMPLATE); const outputParser = new JsonKeyOutputFunctionsParser({ attrName: "info" }); return new LLMChain({ llm, prompt, llmKwargs: { functions }, outputParser, tags: ["openai_functions", "extraction"], }); } /** * Function that creates an extraction chain from a Zod schema. It * converts the Zod schema to a JSON schema using zod-to-json-schema * before creating the extraction chain. * @param schema The Zod schema which extracted data should match * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling. * @returns A LLMChain instance configured to return data matching the schema. */ export function createExtractionChainFromZod( // eslint-disable-next-line @typescript-eslint/no-explicit-any schema: z.ZodObject<any, any, any, any>, llm: BaseChatModel<BaseFunctionCallOptions> ) { return createExtractionChain( zodToJsonSchema(schema) as JsonSchema7ObjectType, llm ); }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/openapi.ts
import type { OpenAIClient } from "@langchain/openai"; import { JsonSchema7ObjectType, JsonSchema7ArrayType, JsonSchema7Type, } from "zod-to-json-schema"; import type { OpenAPIV3_1 } from "openapi-types"; import { ChainValues } from "@langchain/core/utils/types"; import { ChatOpenAI } from "@langchain/openai"; import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { BaseFunctionCallOptions } from "@langchain/core/language_models/base"; import { ChatPromptTemplate, HumanMessagePromptTemplate, BasePromptTemplate, } from "@langchain/core/prompts"; import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager"; import { OpenAPISpec } from "../../util/openapi.js"; import { BaseChain } from "../base.js"; import { LLMChain, LLMChainInput } from "../llm_chain.js"; import { SequentialChain } from "../sequential_chain.js"; import { JsonOutputFunctionsParser } from "../../output_parsers/openai_functions.js"; /** * Type representing a function for executing OpenAPI requests. */ type OpenAPIExecutionMethod = ( name: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any requestArgs: Record<string, any>, options?: { headers?: Record<string, string>; params?: Record<string, string>; } ) => Promise<string>; /** * Formats a URL by replacing path parameters with their corresponding * values. * @param url The URL to format. * @param pathParams The path parameters to replace in the URL. * @returns The formatted URL. */ function formatURL(url: string, pathParams: Record<string, string>): string { const expectedPathParamNames = [...url.matchAll(/{(.*?)}/g)].map( (match) => match[1] ); const newParams: Record<string, string> = {}; for (const paramName of expectedPathParamNames) { const cleanParamName = paramName.replace(/^\.;/, "").replace(/\*$/, ""); const value = pathParams[cleanParamName]; let formattedValue; if (Array.isArray(value)) { if (paramName.startsWith(".")) { const separator = paramName.endsWith("*") ? "." : ","; formattedValue = `.${value.join(separator)}`; } else if (paramName.startsWith(",")) { const separator = paramName.endsWith("*") ? `${cleanParamName}=` : ","; formattedValue = `${cleanParamName}=${value.join(separator)}`; } else { formattedValue = value.join(","); } } else if (typeof value === "object") { const kvSeparator = paramName.endsWith("*") ? "=" : ","; const kvStrings = Object.entries(value).map( ([k, v]) => k + kvSeparator + v ); let entrySeparator; if (paramName.startsWith(".")) { entrySeparator = "."; formattedValue = "."; } else if (paramName.startsWith(";")) { entrySeparator = ";"; formattedValue = ";"; } else { entrySeparator = ","; formattedValue = ""; } formattedValue += kvStrings.join(entrySeparator); } else { if (paramName.startsWith(".")) { formattedValue = `.${value}`; } else if (paramName.startsWith(";")) { formattedValue = `;${cleanParamName}=${value}`; } else { formattedValue = value; } } newParams[paramName] = formattedValue; } let formattedUrl = url; for (const [key, newValue] of Object.entries(newParams)) { formattedUrl = formattedUrl.replace(`{${key}}`, newValue); } return formattedUrl; } /** * Converts OpenAPI parameters to JSON schema format. * @param params The OpenAPI parameters to convert. * @param spec The OpenAPI specification that contains the parameters. * @returns The JSON schema representation of the OpenAPI parameters. */ function convertOpenAPIParamsToJSONSchema( params: OpenAPIV3_1.ParameterObject[], spec: OpenAPISpec ) { return params.reduce( (jsonSchema: JsonSchema7ObjectType, param) => { let schema; if (param.schema) { schema = spec.getSchema(param.schema); // eslint-disable-next-line no-param-reassign jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema( schema, spec ); } else if (param.content) { const mediaTypeSchema = Object.values(param.content)[0].schema; if (mediaTypeSchema) { schema = spec.getSchema(mediaTypeSchema); } if (!schema) { return jsonSchema; } if (schema.description === undefined) { schema.description = param.description ?? ""; } // eslint-disable-next-line no-param-reassign jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema( schema, spec ); } else { return jsonSchema; } if (param.required && Array.isArray(jsonSchema.required)) { jsonSchema.required.push(param.name); } return jsonSchema; }, { type: "object", properties: {}, required: [], additionalProperties: {}, } ); } // OpenAI throws errors on extraneous schema properties, e.g. if "required" is set on individual ones /** * Converts OpenAPI schemas to JSON schema format. * @param schema The OpenAPI schema to convert. * @param spec The OpenAPI specification that contains the schema. * @returns The JSON schema representation of the OpenAPI schema. */ export function convertOpenAPISchemaToJSONSchema( schema: OpenAPIV3_1.SchemaObject, spec: OpenAPISpec ): JsonSchema7Type { if (schema.type === "object") { return Object.keys(schema.properties ?? {}).reduce( (jsonSchema: JsonSchema7ObjectType, propertyName) => { if (!schema.properties) { return jsonSchema; } const openAPIProperty = spec.getSchema(schema.properties[propertyName]); if (openAPIProperty.type === undefined) { return jsonSchema; } // eslint-disable-next-line no-param-reassign jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema( openAPIProperty, spec ); if ( (openAPIProperty.required || schema.required?.includes(propertyName)) && jsonSchema.required !== undefined ) { jsonSchema.required.push(propertyName); } return jsonSchema; }, { type: "object", properties: {}, required: [], additionalProperties: {}, } ); } if (schema.type === "array") { return { type: "array", items: convertOpenAPISchemaToJSONSchema(schema.items ?? {}, spec), minItems: schema.minItems, maxItems: schema.maxItems, } as JsonSchema7ArrayType; } return { type: schema.type ?? "string", } as JsonSchema7Type; } /** * Converts an OpenAPI specification to OpenAI functions. * @param spec The OpenAPI specification to convert. * @returns An object containing the OpenAI functions derived from the OpenAPI specification and a default execution method. */ function convertOpenAPISpecToOpenAIFunctions(spec: OpenAPISpec): { openAIFunctions: OpenAIClient.Chat.ChatCompletionCreateParams.Function[]; defaultExecutionMethod?: OpenAPIExecutionMethod; } { if (!spec.document.paths) { return { openAIFunctions: [] }; } const openAIFunctions = []; const nameToCallMap: Record<string, { method: string; url: string }> = {}; for (const path of Object.keys(spec.document.paths)) { const pathParameters = spec.getParametersForPath(path); for (const method of spec.getMethodsForPath(path)) { const operation = spec.getOperation(path, method); if (!operation) { return { openAIFunctions: [] }; } const operationParametersByLocation = pathParameters .concat(spec.getParametersForOperation(operation)) .reduce( ( operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>, param ) => { if (!operationParams[param.in]) { // eslint-disable-next-line no-param-reassign operationParams[param.in] = []; } operationParams[param.in].push(param); return operationParams; }, {} ); const paramLocationToRequestArgNameMap: Record<string, string> = { query: "params", header: "headers", cookie: "cookies", path: "path_params", }; const requestArgsSchema: Record<string, JsonSchema7ObjectType> & { data?: | JsonSchema7ObjectType | { anyOf?: JsonSchema7ObjectType[]; }; } = {}; for (const paramLocation of Object.keys( paramLocationToRequestArgNameMap )) { if (operationParametersByLocation[paramLocation]) { requestArgsSchema[paramLocationToRequestArgNameMap[paramLocation]] = convertOpenAPIParamsToJSONSchema( operationParametersByLocation[paramLocation], spec ); } } const requestBody = spec.getRequestBodyForOperation(operation); if (requestBody?.content !== undefined) { const requestBodySchemas: Record<string, JsonSchema7ObjectType> = {}; for (const [mediaType, mediaTypeObject] of Object.entries( requestBody.content )) { if (mediaTypeObject.schema !== undefined) { const schema = spec.getSchema(mediaTypeObject.schema); requestBodySchemas[mediaType] = convertOpenAPISchemaToJSONSchema( schema, spec ) as JsonSchema7ObjectType; } } const mediaTypes = Object.keys(requestBodySchemas); if (mediaTypes.length === 1) { requestArgsSchema.data = requestBodySchemas[mediaTypes[0]]; } else if (mediaTypes.length > 1) { requestArgsSchema.data = { anyOf: Object.values(requestBodySchemas), }; } } const openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function = { name: OpenAPISpec.getCleanedOperationId(operation, path, method), description: operation.description ?? operation.summary ?? "", parameters: { type: "object", properties: requestArgsSchema, // All remaining top-level parameters are required required: Object.keys(requestArgsSchema), }, }; openAIFunctions.push(openAIFunction); const baseUrl = (spec.baseUrl ?? "").endsWith("/") ? (spec.baseUrl ?? "").slice(0, -1) : spec.baseUrl ?? ""; nameToCallMap[openAIFunction.name] = { method, url: baseUrl + path, }; } } return { openAIFunctions, defaultExecutionMethod: async ( name: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any requestArgs: Record<string, any>, options?: { headers?: Record<string, string>; params?: Record<string, string>; } ) => { const { headers: customHeaders, params: customParams, ...rest } = options ?? {}; const { method, url } = nameToCallMap[name]; const requestParams = requestArgs.params ?? {}; const nonEmptyParams = Object.keys(requestParams).reduce( // eslint-disable-next-line @typescript-eslint/no-explicit-any (filteredArgs: Record<string, any>, argName) => { if ( requestParams[argName] !== "" && requestParams[argName] !== null && requestParams[argName] !== undefined ) { // eslint-disable-next-line no-param-reassign filteredArgs[argName] = requestParams[argName]; } return filteredArgs; }, {} ); const queryString = new URLSearchParams({ ...nonEmptyParams, ...customParams, }).toString(); const pathParams = requestArgs.path_params; const formattedUrl = formatURL(url, pathParams) + (queryString.length ? `?${queryString}` : ""); const headers: Record<string, string> = {}; let body; if (requestArgs.data !== undefined) { let contentType = "text/plain"; if (typeof requestArgs.data !== "string") { if (typeof requestArgs.data === "object") { contentType = "application/json"; } body = JSON.stringify(requestArgs.data); } else { body = requestArgs.data; } headers["content-type"] = contentType; } const response = await fetch(formattedUrl, { ...requestArgs, method, headers: { ...headers, ...requestArgs.headers, ...customHeaders, }, body, ...rest, }); let output; if (response.status < 200 || response.status > 299) { output = `${response.status}: ${ response.statusText } for ${name} called with ${JSON.stringify(queryString)}`; } else { output = await response.text(); } return output; }, }; } /** * Type representing a function for executing simple requests. */ type SimpleRequestChainExecutionMethod = ( name: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any requestArgs: Record<string, any> ) => Promise<string>; /** * A chain for making simple API requests. */ class SimpleRequestChain extends BaseChain { static lc_name() { return "SimpleRequestChain"; } private requestMethod: SimpleRequestChainExecutionMethod; inputKey = "function"; outputKey = "response"; constructor(config: { requestMethod: SimpleRequestChainExecutionMethod }) { super(); this.requestMethod = config.requestMethod; } get inputKeys() { return [this.inputKey]; } get outputKeys() { return [this.outputKey]; } _chainType() { return "simple_request_chain" as const; } /** @ignore */ async _call( values: ChainValues, _runManager?: CallbackManagerForChainRun ): Promise<ChainValues> { const inputKeyValue = values[this.inputKey]; const methodName = inputKeyValue.name; const args = inputKeyValue.arguments; const response = await this.requestMethod(methodName, args); return { [this.outputKey]: response }; } } /** * Type representing the options for creating an OpenAPI chain. */ export type OpenAPIChainOptions = { llm?: BaseChatModel<BaseFunctionCallOptions>; prompt?: BasePromptTemplate; requestChain?: BaseChain; llmChainInputs?: LLMChainInput; headers?: Record<string, string>; params?: Record<string, string>; verbose?: boolean; }; /** * Create a chain for querying an API from a OpenAPI spec. * @param spec OpenAPISpec or url/file/text string corresponding to one. * @param options Custom options passed into the chain * @returns OpenAPIChain */ export async function createOpenAPIChain( spec: OpenAPIV3_1.Document | string, options: OpenAPIChainOptions = {} ) { let convertedSpec; if (typeof spec === "string") { try { convertedSpec = await OpenAPISpec.fromURL(spec); } catch (e) { try { convertedSpec = OpenAPISpec.fromString(spec); } catch (e) { throw new Error(`Unable to parse spec from source ${spec}.`); } } } else { convertedSpec = OpenAPISpec.fromObject(spec); } const { openAIFunctions, defaultExecutionMethod } = convertOpenAPISpecToOpenAIFunctions(convertedSpec); if (defaultExecutionMethod === undefined) { throw new Error( `Could not parse any valid operations from the provided spec.` ); } const { llm = new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613" }), prompt = ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate( "Use the provided API's to respond to this user query:\n\n{query}" ), ]), requestChain = new SimpleRequestChain({ requestMethod: async (name, args) => defaultExecutionMethod(name, args, { headers: options.headers, params: options.params, }), }), llmChainInputs = {}, verbose, ...rest } = options; const formatChain = new LLMChain({ llm, prompt, outputParser: new JsonOutputFunctionsParser({ argsOnly: false }), outputKey: "function", llmKwargs: { functions: openAIFunctions }, ...llmChainInputs, }); return new SequentialChain({ chains: [formatChain, requestChain], outputVariables: ["response"], inputVariables: formatChain.inputKeys, verbose, ...rest, }); }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/tagging.ts
import { z } from "zod"; import { zodToJsonSchema, JsonSchema7ObjectType } from "zod-to-json-schema"; import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { BaseFunctionCallOptions } from "@langchain/core/language_models/base"; import { PromptTemplate } from "@langchain/core/prompts"; import { FunctionParameters, JsonOutputFunctionsParser, } from "../../output_parsers/openai_functions.js"; import { LLMChain, LLMChainInput } from "../llm_chain.js"; /** * Type representing the options for creating a tagging chain. */ export type TaggingChainOptions = { prompt?: PromptTemplate; } & Omit<LLMChainInput<object>, "prompt" | "llm">; /** * Function that returns an array of tagging functions. These functions * are used to extract relevant information from a passage. * @param schema The schema defining the structure of function parameters. * @returns An array of tagging functions. */ function getTaggingFunctions(schema: FunctionParameters) { return [ { name: "information_extraction", description: "Extracts the relevant information from the passage.", parameters: schema, }, ]; } const TAGGING_TEMPLATE = `Extract the desired information from the following passage. Passage: {input} `; /** * Function that creates a tagging chain using the provided schema, * LLM, and options. It constructs the LLM with the necessary * functions, prompt, output parser, and tags. * @param schema The schema defining the structure of function parameters. * @param llm LLM to use in the chain. Must support function calling. * @param options Options for creating the tagging chain. * @returns A new instance of LLMChain configured for tagging. * * @deprecated * Switch to expression language: https://js.langchain.com/docs/expression_language/ * Will be removed in 0.2.0 */ export function createTaggingChain( schema: FunctionParameters, llm: BaseChatModel<BaseFunctionCallOptions>, options: TaggingChainOptions = {} ) { const { prompt = PromptTemplate.fromTemplate(TAGGING_TEMPLATE), ...rest } = options; const functions = getTaggingFunctions(schema); const outputParser = new JsonOutputFunctionsParser(); return new LLMChain({ llm, prompt, llmKwargs: { functions }, outputParser, tags: ["openai_functions", "tagging"], ...rest, }); } /** * Function that creates a tagging chain from a Zod schema. It converts * the Zod schema to a JSON schema using the zodToJsonSchema function and * then calls createTaggingChain with the converted schema. * @param schema The Zod schema which extracted data should match. * @param llm LLM to use in the chain. Must support function calling. * @param options Options for creating the tagging chain. * @returns A new instance of LLMChain configured for tagging. * * @deprecated * Switch to expression language: https://js.langchain.com/docs/expression_language/ * Will be removed in 0.2.0 */ export function createTaggingChainFromZod( // eslint-disable-next-line @typescript-eslint/no-explicit-any schema: z.ZodObject<any, any, any, any>, llm: BaseChatModel<BaseFunctionCallOptions>, options?: TaggingChainOptions ) { return createTaggingChain( zodToJsonSchema(schema) as JsonSchema7ObjectType, llm, options ); }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/index.ts
export { createExtractionChain, createExtractionChainFromZod, } from "./extraction.js"; export { type TaggingChainOptions, createTaggingChain, createTaggingChainFromZod, } from "./tagging.js"; export { type OpenAPIChainOptions, createOpenAPIChain } from "./openapi.js"; export { type StructuredOutputChainInput, createStructuredOutputChain, createStructuredOutputChainFromZod, } from "./structured_output.js"; export { type CreateStructuredOutputRunnableConfig, createStructuredOutputRunnable, type CreateOpenAIFnRunnableConfig, createOpenAIFnRunnable, } from "./base.js";
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/structured_output.ts
import { z } from "zod"; import { zodToJsonSchema, JsonSchema7Type } from "zod-to-json-schema"; import { Validator } from "@langchain/core/utils/json_schema"; import { ChatOpenAI } from "@langchain/openai"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { BaseLLMOutputParser, OutputParserException, } from "@langchain/core/output_parsers"; import { ChatGeneration } from "@langchain/core/outputs"; import type { BaseChatModel } from "@langchain/core/language_models/chat_models"; import type { BaseFunctionCallOptions } from "@langchain/core/language_models/base"; import { LLMChain, type LLMChainInput } from "../llm_chain.js"; import { OutputFunctionsParser } from "../../output_parsers/openai_functions.js"; /** * Type representing the input for creating a structured output chain. It * extends the LLMChainInput type and includes an additional * 'outputSchema' field representing the JSON schema for the expected * output. */ export type StructuredOutputChainInput< T extends z.AnyZodObject = z.AnyZodObject > = Omit<LLMChainInput, "outputParser" | "llm"> & { outputSchema?: JsonSchema7Type; prompt: BasePromptTemplate; llm?: BaseChatModel<BaseFunctionCallOptions>; zodSchema?: T; }; export type FunctionCallStructuredOutputParserFields< T extends z.AnyZodObject = z.AnyZodObject > = { jsonSchema?: JsonSchema7Type; zodSchema?: T; }; function isJsonSchema7Type( x: JsonSchema7Type | FunctionCallStructuredOutputParserFields ): x is JsonSchema7Type { return ( (x as FunctionCallStructuredOutputParserFields).jsonSchema === undefined && (x as FunctionCallStructuredOutputParserFields).zodSchema === undefined ); } /** * Class that extends the BaseLLMOutputParser class. It provides * functionality for parsing the structured output based on a JSON schema. */ export class FunctionCallStructuredOutputParser< T extends z.AnyZodObject > extends BaseLLMOutputParser<z.infer<T>> { lc_namespace = ["langchain", "chains", "openai_functions"]; protected functionOutputParser = new OutputFunctionsParser(); protected jsonSchemaValidator?: Validator; protected zodSchema?: T; constructor(fieldsOrSchema: JsonSchema7Type); constructor(fieldsOrSchema: FunctionCallStructuredOutputParserFields<T>); constructor( fieldsOrSchema: | JsonSchema7Type | FunctionCallStructuredOutputParserFields<T> ) { let fields; if (isJsonSchema7Type(fieldsOrSchema)) { fields = { jsonSchema: fieldsOrSchema }; } else { fields = fieldsOrSchema; } if (fields.jsonSchema === undefined && fields.zodSchema === undefined) { throw new Error( `Must provide at least one of "jsonSchema" or "zodSchema".` ); } super(fields); if (fields.jsonSchema !== undefined) { this.jsonSchemaValidator = new Validator(fields.jsonSchema, "7"); } if (fields.zodSchema !== undefined) { this.zodSchema = fields.zodSchema; } } /** * Method to parse the result of chat generations. It first parses the * result using the functionOutputParser, then parses the result against a * zod schema if the zod schema is available which allows the result to undergo * Zod preprocessing, then it parses that result against the JSON schema. * If the result is valid, it returns the parsed result. Otherwise, it throws * an OutputParserException. * @param generations Array of ChatGeneration instances to be parsed. * @returns The parsed result if it is valid according to the JSON schema. */ async parseResult(generations: ChatGeneration[]) { const initialResult = await this.functionOutputParser.parseResult( generations ); const parsedResult = JSON.parse(initialResult, (_, value) => { if (value === null) { return undefined; } return value; }); if (this.zodSchema) { const zodParsedResult = this.zodSchema.safeParse(parsedResult); if (zodParsedResult.success) { return zodParsedResult.data; } else { throw new OutputParserException( `Failed to parse. Text: "${initialResult}". Error: ${JSON.stringify( zodParsedResult.error.errors )}`, initialResult ); } } else if (this.jsonSchemaValidator !== undefined) { const result = this.jsonSchemaValidator.validate(parsedResult); if (result.valid) { return parsedResult; } else { throw new OutputParserException( `Failed to parse. Text: "${initialResult}". Error: ${JSON.stringify( result.errors )}`, initialResult ); } } else { throw new Error( "This parser requires an input JSON Schema or an input Zod schema." ); } } } /** * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead * Create a chain that returns output matching a JSON Schema. * @param input Object that includes all LLMChainInput fields except "outputParser" * as well as an additional required "outputSchema" JSON Schema object. * @returns OpenAPIChain */ export function createStructuredOutputChain< T extends z.AnyZodObject = z.AnyZodObject >(input: StructuredOutputChainInput<T>) { const { outputSchema, llm = new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 }), outputKey = "output", llmKwargs = {}, zodSchema, ...rest } = input; if (outputSchema === undefined && zodSchema === undefined) { throw new Error(`Must provide one of "outputSchema" or "zodSchema".`); } const functionName = "output_formatter"; return new LLMChain({ llm, llmKwargs: { ...llmKwargs, functions: [ { name: functionName, description: `Output formatter. Should always be used to format your response to the user.`, parameters: outputSchema, }, ], function_call: { name: functionName, }, }, outputKey, outputParser: new FunctionCallStructuredOutputParser<T>({ jsonSchema: outputSchema, zodSchema, }), ...rest, }); } /** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */ export function createStructuredOutputChainFromZod<T extends z.AnyZodObject>( zodSchema: T, input: Omit<StructuredOutputChainInput<T>, "outputSchema"> ) { return createStructuredOutputChain<T>({ ...input, outputSchema: zodToJsonSchema(zodSchema), zodSchema, }); }
0
lc_public_repos/langchainjs/langchain/src/chains
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/base.ts
import type { z } from "zod"; import { zodToJsonSchema, JsonSchema7Type } from "zod-to-json-schema"; import type { BaseOutputParser } from "@langchain/core/output_parsers"; import type { BasePromptTemplate } from "@langchain/core/prompts"; import type { Runnable, RunnableInterface } from "@langchain/core/runnables"; import type { BaseFunctionCallOptions, BaseLanguageModelInput, FunctionDefinition, } from "@langchain/core/language_models/base"; import type { InputValues } from "@langchain/core/utils/types"; import type { BaseMessage } from "@langchain/core/messages"; import { JsonOutputFunctionsParser } from "../../output_parsers/openai_functions.js"; /** * Configuration params for the createOpenAIFnRunnable method. */ export type CreateOpenAIFnRunnableConfig< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends Record<string, any>, RunOutput > = { functions: FunctionDefinition[]; /** Language model to use, assumed to support the OpenAI function-calling API. */ llm: RunnableInterface< BaseLanguageModelInput, BaseMessage, BaseFunctionCallOptions >; /** BasePromptTemplate to pass to the model. */ prompt: BasePromptTemplate<InputValues<Extract<keyof RunInput, string>>>; /** * Only used if a single function is passed in. If `true`, then the model will be * forced to use the given function. If `false`, then the model will be given the * option to use the given function or not. */ enforceSingleFunctionUsage?: boolean; /** * BaseLLMOutputParser to use for parsing model outputs. * By default will be inferred from the function types. */ outputParser?: BaseOutputParser<RunOutput>; }; /** * Creates a runnable sequence that calls OpenAI functions. * @param config - The parameters required to create the runnable. * @returns A runnable sequence that will pass the given functions to the model when run. * * @example * ```typescript * const openAIFunction = { * name: "get_person_details", * description: "Get details about a person", * parameters: { * title: "Person", * description: "Identifying information about a person.", * type: "object", * properties: { * name: { title: "Name", description: "The person's name", type: "string" }, * age: { title: "Age", description: "The person's age", type: "integer" }, * fav_food: { * title: "Fav Food", * description: "The person's favorite food", * type: "string", * }, * }, * required: ["name", "age"], * }, * }; * * const model = new ChatOpenAI(); * const prompt = ChatPromptTemplate.fromMessages([ * ["human", "Human description: {description}"], * ]); * const outputParser = new JsonOutputFunctionsParser(); * * const runnable = createOpenAIFnRunnable({ * functions: [openAIFunction], * llm: model, * prompt, * enforceSingleFunctionUsage: true, // Default is true * outputParser * }); * const response = await runnable.invoke({ * description: * "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.", * }); * * console.log(response); * * // { name: 'John Doe', age: 30, fav_food: 'chocolate chip cookies' } * ``` */ export function createOpenAIFnRunnable< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends Record<string, any> = Record<string, any>, // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( config: CreateOpenAIFnRunnableConfig<RunInput, RunOutput> ): Runnable<RunInput, RunOutput> { const { functions, llm, prompt, enforceSingleFunctionUsage = true, outputParser = new JsonOutputFunctionsParser<RunOutput>(), } = config; const llmKwargs: BaseFunctionCallOptions = { functions, }; if (functions.length === 1 && enforceSingleFunctionUsage) { llmKwargs.function_call = { name: functions[0].name, }; } const llmWithKwargs = (llm as Runnable).bind(llmKwargs); return prompt.pipe(llmWithKwargs).pipe(outputParser); } function isZodSchema( schema: z.AnyZodObject | JsonSchema7Type ): schema is z.AnyZodObject { return typeof (schema as z.AnyZodObject).safeParse === "function"; } /** * Configuration params for the createStructuredOutputRunnable method. */ export type CreateStructuredOutputRunnableConfig< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends Record<string, any>, RunOutput > = { /** * Schema to output. Must be either valid JSONSchema or a Zod schema. */ outputSchema: z.AnyZodObject | JsonSchema7Type; /** * Language model to use, assumed to support the OpenAI function-calling API. */ llm: RunnableInterface< BaseLanguageModelInput, BaseMessage, BaseFunctionCallOptions >; /** BasePromptTemplate to pass to the model. */ prompt: BasePromptTemplate<InputValues<Extract<keyof RunInput, string>>>; /** * BaseLLMOutputParser to use for parsing model outputs. */ outputParser?: BaseOutputParser<RunOutput>; }; /** * @deprecated Prefer the `.withStructuredOutput` method on chat model classes. * * Create a runnable that uses an OpenAI function to get a structured output. * @param config Params required to create the runnable. * @returns A runnable sequence that will pass the given function to the model when run. * * @example * ```typescript * import { createStructuredOutputRunnable } from "langchain/chains/openai_functions"; * import { ChatOpenAI } from "@langchain/openai"; * import { ChatPromptTemplate } from "@langchain/core/prompts"; * import { JsonOutputFunctionsParser } from "langchain/output_parsers"; * * const jsonSchema = { * title: "Person", * description: "Identifying information about a person.", * type: "object", * properties: { * name: { title: "Name", description: "The person's name", type: "string" }, * age: { title: "Age", description: "The person's age", type: "integer" }, * fav_food: { * title: "Fav Food", * description: "The person's favorite food", * type: "string", * }, * }, * required: ["name", "age"], * }; * * const model = new ChatOpenAI(); * const prompt = ChatPromptTemplate.fromMessages([ * ["human", "Human description: {description}"], * ]); * * const outputParser = new JsonOutputFunctionsParser(); * * // Also works with Zod schema * const runnable = createStructuredOutputRunnable({ * outputSchema: jsonSchema, * llm: model, * prompt, * outputParser * }); * * const response = await runnable.invoke({ * description: * "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.", * }); * * console.log(response); * * // { name: 'John Doe', age: 30, fav_food: 'chocolate chip cookies' } * ``` */ export function createStructuredOutputRunnable< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunInput extends Record<string, any> = Record<string, any>, // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( config: CreateStructuredOutputRunnableConfig<RunInput, RunOutput> ): Runnable<RunInput, RunOutput> { const { outputSchema, llm, prompt, outputParser } = config; const jsonSchema = isZodSchema(outputSchema) ? zodToJsonSchema(outputSchema) : outputSchema; const oaiFunction: FunctionDefinition = { name: "outputFormatter", description: "Output formatter. Should always be used to format your response to the user", parameters: jsonSchema, }; return createOpenAIFnRunnable({ functions: [oaiFunction], llm, prompt, enforceSingleFunctionUsage: true, outputParser, }); }
0
lc_public_repos/langchainjs/langchain/src/chains/openai_functions
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/tests/create_runnable_chains.int.test.ts
import { z } from "zod"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { ChatOpenAI } from "@langchain/openai"; import { expect, test } from "@jest/globals"; import { JsonOutputFunctionsParser } from "../../../output_parsers/openai_functions.js"; import { createOpenAIFnRunnable, createStructuredOutputRunnable, } from "../base.js"; const personJSONSchema = { title: "Person", description: "Identifying information about a person.", type: "object", properties: { name: { title: "Name", description: "The person's name", type: "string" }, age: { title: "Age", description: "The person's age", type: "integer" }, fav_food: { title: "Fav Food", description: "The person's favorite food", type: "string", }, }, required: ["name", "age"], }; const personDetailsFunction = { name: "get_person_details", description: "Get details about a person", parameters: personJSONSchema, }; const weatherFunction = { name: "get_weather", description: "Get the weather for a location", parameters: { title: "Location", description: "The location to get the weather for.", type: "object", properties: { state: { title: "State", description: "The location's state", type: "string", }, city: { title: "City", description: "The location's city", type: "string", }, zip_code: { title: "Zip Code", description: "The locations's zip code", type: "string", }, }, required: ["state", "city"], }, }; test("createStructuredOutputRunnable works with Zod", async () => { const model = new ChatOpenAI(); const prompt = ChatPromptTemplate.fromMessages<{ description: string }>([ ["human", "Human description: {description}"], ]); const zodSchema = z.object({ person: z.object({ name: z.string(), age: z.string(), fav_food: z.optional(z.string()), }), }); const outputParser = new JsonOutputFunctionsParser<{ person: { name: string; age: number; fav_food?: string; }; }>(); const runnable = createStructuredOutputRunnable({ outputSchema: zodSchema, llm: model, prompt, outputParser, }); const response = await runnable.invoke({ description: "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.", }); // console.log(response); expect("person" in response).toBe(true); expect("name" in response.person).toBe(true); expect("age" in response.person).toBe(true); }); test("createStructuredOutputRunnable works with JSON schema", async () => { const model = new ChatOpenAI(); const prompt = ChatPromptTemplate.fromMessages<{ description: string }>([ ["human", "Human description: {description}"], ]); const outputParser = new JsonOutputFunctionsParser<{ name: string; age: number; fav_food?: string; }>(); const runnable = createStructuredOutputRunnable({ outputSchema: personJSONSchema, llm: model, prompt, outputParser, }); const response = await runnable.invoke({ description: "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.", }); // console.log(response); expect("name" in response).toBe(true); expect("age" in response).toBe(true); }); test("createOpenAIFnRunnable works", async () => { const model = new ChatOpenAI(); const prompt = ChatPromptTemplate.fromMessages<{ description: string }>([ ["human", "Human description: {description}"], ]); const outputParser = new JsonOutputFunctionsParser<{ name: string; age: number; fav_food?: string; }>(); const runnable = createOpenAIFnRunnable({ functions: [personDetailsFunction], llm: model, prompt, enforceSingleFunctionUsage: true, // Default is true outputParser, }); const response = await runnable.invoke({ description: "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.", }); // console.log(response); expect("name" in response).toBe(true); expect("age" in response).toBe(true); }); test("createOpenAIFnRunnable works with multiple functions", async () => { const model = new ChatOpenAI(); const prompt = ChatPromptTemplate.fromMessages<{ question: string }>([ ["human", "Question: {question}"], ]); const outputParser = new JsonOutputFunctionsParser<{ state: string; city: number; zip?: number; }>(); const runnable = createOpenAIFnRunnable({ functions: [personDetailsFunction, weatherFunction], llm: model, prompt, enforceSingleFunctionUsage: false, // Default is true outputParser, }); const response = await runnable.invoke({ question: "What's the weather like in Berkeley CA?", }); // console.log(response); expect("state" in response).toBe(true); expect("city" in response).toBe(true); });
0
lc_public_repos/langchainjs/langchain/src/chains/openai_functions
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/tests/structured_output.int.test.ts
import { test, expect } from "@jest/globals"; import { z } from "zod"; import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, } from "@langchain/core/prompts"; import { createStructuredOutputChainFromZod } from "../structured_output.js"; test("structured output chain", async () => { const chain = createStructuredOutputChainFromZod( z.object({ name: z.string().describe("Human name"), surname: z.string().describe("Human surname"), age: z.number().describe("Human age"), appearance: z.string().describe("Human appearance description"), shortBio: z.string().describe("Short bio secription"), university: z.string().optional().describe("University name if attended"), gender: z.string().describe("Gender of the human"), interests: z .array(z.string()) .describe("json array of strings human interests"), }), { prompt: new ChatPromptTemplate({ promptMessages: [ SystemMessagePromptTemplate.fromTemplate( "Generate details of a hypothetical person." ), HumanMessagePromptTemplate.fromTemplate( "Person description: {inputText}" ), ], inputVariables: ["inputText"], }), llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 }), outputKey: "person", } ); const response = await chain.call({ inputText: "A man, living in Poland." }); // console.log("response", response); expect(response.person).toHaveProperty("name"); expect(response.person).toHaveProperty("surname"); expect(response.person).toHaveProperty("age"); expect(response.person).toHaveProperty("appearance"); expect(response.person).toHaveProperty("shortBio"); expect(response.person).toHaveProperty("age"); expect(response.person).toHaveProperty("gender"); expect(response.person).toHaveProperty("interests"); expect(response.person.interests.length).toBeGreaterThan(0); });
0
lc_public_repos/langchainjs/langchain/src/chains/openai_functions
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/tests/structured_output.test.ts
import { test, expect } from "@jest/globals"; import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { AIMessage } from "@langchain/core/messages"; import { FunctionCallStructuredOutputParser } from "../structured_output.js"; test("structured output parser", async () => { const parser = new FunctionCallStructuredOutputParser( zodToJsonSchema( z.object({ name: z.string().describe("Human name"), surname: z.string().describe("Human surname"), age: z.number().describe("Human age"), appearance: z.string().describe("Human appearance description"), shortBio: z.string().describe("Short bio secription"), university: z .string() .optional() .describe("University name if attended"), gender: z.string().describe("Gender of the human"), interests: z .array(z.string()) .describe("json array of strings human interests"), }) ) ); const result = await parser.parseResult([ { text: "", message: new AIMessage({ content: "", additional_kwargs: { function_call: { name: "", arguments: JSON.stringify({ name: "Anna", surname: "Kowalska", age: 30, appearance: "Anna has shoulder-length brown hair and green eyes. She has a slim build and stands at around 5'6\" tall.", shortBio: "Anna is a kind and compassionate person who loves to help others. She works as a nurse at a local hospital in Poland. In her free time, she enjoys reading, cooking, and spending time with her friends and family. Anna is also passionate about traveling and exploring new places.", university: null, gender: "female", interests: ["reading", "cooking", "traveling"], }), }, }, }), }, ]); // console.log("result", result); expect(result.name).toEqual("Anna"); expect(result.surname).toEqual("Kowalska"); expect(result.age).toEqual(30); expect(result).toHaveProperty("appearance"); expect(result).toHaveProperty("shortBio"); expect(result).not.toHaveProperty("university"); expect(result.gender).toEqual("female"); expect(result.interests.length).toEqual(3); }); test("structured output parser with Zod input", async () => { const parser = new FunctionCallStructuredOutputParser({ zodSchema: z.object({ name: z.string().describe("Human name"), surname: z.string().describe("Human surname"), age: z.number().describe("Human age"), appearance: z.string().describe("Human appearance description"), shortBio: z.string().describe("Short bio secription"), university: z.string().optional().describe("University name if attended"), gender: z.string().describe("Gender of the human"), interests: z .array(z.string()) .describe("json array of strings human interests"), }), }); const result = await parser.parseResult([ { text: "", message: new AIMessage({ content: "", additional_kwargs: { function_call: { name: "", arguments: JSON.stringify({ name: "Anna", surname: "Kowalska", age: 30, appearance: "Anna has shoulder-length brown hair and green eyes. She has a slim build and stands at around 5'6\" tall.", shortBio: "Anna is a kind and compassionate person who loves to help others. She works as a nurse at a local hospital in Poland. In her free time, she enjoys reading, cooking, and spending time with her friends and family. Anna is also passionate about traveling and exploring new places.", university: null, gender: "female", interests: ["reading", "cooking", "traveling"], }), }, }, }), }, ]); // console.log("result", result); expect(result.name).toEqual("Anna"); expect(result.surname).toEqual("Kowalska"); expect(result.age).toEqual(30); expect(result).toHaveProperty("appearance"); expect(result).toHaveProperty("shortBio"); expect(result).not.toHaveProperty("university"); expect(result.gender).toEqual("female"); expect(result.interests.length).toEqual(3); });
0
lc_public_repos/langchainjs/langchain/src/chains/openai_functions
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/tests/extraction.int.test.ts
import { test, expect } from "@jest/globals"; import { z } from "zod"; import { ChatOpenAI } from "@langchain/openai"; import { createExtractionChainFromZod } from "../extraction.js"; test("extraction chain", async () => { const chain = createExtractionChainFromZod( z.object({ "person-name": z.string().optional(), "person-age": z.number().optional(), "person-hair_color": z.string().optional(), "dog-name": z.string().optional(), "dog-breed": z.string().optional(), }), new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 }) ); const result = await chain.run(`Alex is 5 feet tall. Claudia is 4 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde. Alex's dog Frosty is a labrador and likes to play hide and seek.`); expect(result).toMatchInlineSnapshot(` [ { "dog-breed": "labrador", "dog-name": "Frosty", "person-age": 0, "person-hair_color": "blonde", "person-name": "Alex", }, { "dog-breed": "", "dog-name": "", "person-age": 0, "person-hair_color": "brunette", "person-name": "Claudia", }, ] `); });
0
lc_public_repos/langchainjs/langchain/src/chains/openai_functions
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/tests/openapi.int.test.ts
import { test } from "@jest/globals"; import { ChatOpenAI } from "@langchain/openai"; import { createOpenAPIChain } from "../openapi.js"; test("OpenAPI chain with a provided full spec", async () => { const chain = await createOpenAPIChain( { openapi: "3.0.1", info: { version: "v0", title: "Open AI Klarna product Api" }, servers: [{ url: "https://www.klarna.com/us/shopping" }], tags: [ { name: "open-ai-product-endpoint", description: "Open AI Product Endpoint. Query for products.", }, ], paths: { "/public/openai/v0/products": { get: { tags: ["open-ai-product-endpoint"], summary: "API for fetching Klarna product information", operationId: "productsUsingGET", parameters: [ { name: "countryCode", in: "query", description: "ISO 3166 country code with 2 characters based on the user location. Currently, only US, GB, DE, SE and DK are supported.", required: true, schema: { type: "string" }, }, { name: "q", in: "query", description: "A precise query that matches one very small category or product that needs to be searched for to find the products the user is looking for. If the user explicitly stated what they want, use that as a query. The query is as specific as possible to the product name or category mentioned by the user in its singular form, and don't contain any clarifiers like latest, newest, cheapest, budget, premium, expensive or similar. The query is always taken from the latest topic, if there is a new topic a new query is started. If the user speaks another language than English, translate their request into English (example: translate fia med knuff to ludo board game)!", required: true, schema: { type: "string" }, }, { name: "size", in: "query", description: "number of products returned", required: false, schema: { type: "integer" }, }, { name: "min_price", in: "query", description: "(Optional) Minimum price in local currency for the product searched for. Either explicitly stated by the user or implicitly inferred from a combination of the user's request and the kind of product searched for.", required: false, schema: { type: "integer" }, }, { name: "max_price", in: "query", description: "(Optional) Maximum price in local currency for the product searched for. Either explicitly stated by the user or implicitly inferred from a combination of the user's request and the kind of product searched for.", required: false, schema: { type: "integer" }, }, ], responses: { "200": { description: "Products found", content: { "application/json": { schema: { $ref: "#/components/schemas/ProductResponse" }, }, }, }, "503": { description: "one or more services are unavailable" }, }, deprecated: false, }, }, }, components: { schemas: { Product: { type: "object", properties: { attributes: { type: "array", items: { type: "string" } }, name: { type: "string" }, price: { type: "string" }, url: { type: "string" }, }, title: "Product", }, ProductResponse: { type: "object", properties: { products: { type: "array", items: { $ref: "#/components/schemas/Product" }, }, }, title: "ProductResponse", }, }, }, }, { llm: new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }) } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const result = await chain.run( `What are some options for a men's large blue button down shirt` ); // console.log(result); }); test("OpenAPI chain with yml spec from a URL", async () => { const chain = await createOpenAPIChain( "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml", { llm: new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const result = await chain.run(`What's today's comic?`); // console.log(result); }); test("OpenAPI chain with yml spec from a URL with a path parameter", async () => { const chain = await createOpenAPIChain( "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml", { llm: new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const result = await chain.run(`What comic has id 2184?`); // console.log(result); }); test("OpenAPI chain with yml spec from a URL requiring a POST request", async () => { const chain = await createOpenAPIChain("https://api.speak.com/openapi.yaml", { llm: new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }), }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const result = await chain.run(`How would you say no thanks in Russian?`); // console.log(result); }); test("OpenAPI chain with a longer spec and tricky query required params", async () => { const chain = await createOpenAPIChain( "https://scholar-ai.net/openapi.yaml", { params: { sort: "cited_by_count", }, } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const result = await chain.run( "Can you find and explain some articles about the intersection of AI and VR?" ); // console.log(result); });
0
lc_public_repos/langchainjs/langchain/src/chains/openai_functions
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/tests/tagging.int.test.ts
import { test, expect } from "@jest/globals"; import { ChatOpenAI } from "@langchain/openai"; import { createTaggingChain } from "../tagging.js"; test("tagging chain", async () => { const chain = createTaggingChain( { type: "object", properties: { sentiment: { type: "string" }, tone: { type: "string" }, language: { type: "string" }, }, required: ["tone"], }, new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }) ); const result = await chain.run( `Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!` ); expect(result).toMatchInlineSnapshot(` { "language": "Spanish", "tone": "positive", } `); });
0
lc_public_repos/langchainjs/langchain/src/chains/openai_functions
lc_public_repos/langchainjs/langchain/src/chains/openai_functions/tests/openapi.test.ts
import { test, expect } from "@jest/globals"; import { OpenAPIV3, OpenAPIV3_1 } from "openapi-types"; import { JsonSchema7StringType, JsonSchema7NumberType, JsonSchema7ObjectType, JsonSchema7ArrayType, JsonSchema7Type, } from "zod-to-json-schema"; import { OpenAPISpec } from "../../../util/openapi.js"; import { convertOpenAPISchemaToJSONSchema } from "../openapi.js"; test("Test convert OpenAPI params to JSON Schema", async () => { const spec = new OpenAPISpec({ openapi: "3.1.0", info: { title: "A fake spec for testing", version: "0.0.1", }, paths: { "/widgets": { post: { operationId: "createWidget", description: "Create a widget", parameters: [ { name: "stringParam", in: "query", schema: { type: "string", }, }, { name: "objectParam", in: "query", schema: { type: "object", properties: { foo: { type: "string", }, bar: { type: "number", }, }, }, }, { name: "objectParamWithRequiredFields", in: "query", schema: { type: "object", required: ["fooRequired"], properties: { fooRequired: { type: "string", }, }, }, }, { name: "stringArrayParam", in: "query", schema: { type: "array", items: { type: "string", }, }, }, { name: "nestedObjectInArrayParam", in: "query", schema: { type: "array", items: { type: "object", properties: { baz: { type: "number", }, }, }, }, }, { name: "nestedArrayInObjectParam", in: "query", schema: { type: "object", properties: { qux: { type: "array", items: { type: "string", }, }, }, }, }, { name: "inceptionParam", in: "query", schema: { type: "array", items: { type: "object", properties: { nestedArray: { type: "array", items: { type: "object", properties: { nestedObject: { type: "object", properties: { inception: { type: "number", }, }, }, }, }, }, }, }, }, }, ], responses: { "200": { description: "OK", content: { "application/json": { schema: { properties: { success: { type: "boolean", }, }, }, }, }, }, }, }, }, }, }); const createWidget = spec.getOperation( "/widgets", OpenAPIV3.HttpMethods.POST ); expect(createWidget).not.toBeUndefined(); if (!createWidget) { throw new Error(`Operation not found`); } function getParamSchema( operation: OpenAPIV3_1.OperationObject, paramName: string ) { const param = spec .getParametersForOperation(operation) .find((param) => param.name === paramName); if (!param) { throw new Error(`Param not found`); } if (!param.schema) { throw new Error(`Param schema not found`); } return spec.getSchema(param.schema); } type TypeMap = { string: JsonSchema7StringType; number: JsonSchema7NumberType; object: JsonSchema7ObjectType; array: JsonSchema7ArrayType; }; function expectType<T extends keyof TypeMap>( type: T, schema: JsonSchema7Type | undefined ): TypeMap[T] { if (!schema || !("type" in schema)) { throw new Error(`Schema has no type`); } if (schema.type !== type) { throw new Error(`Unexpected type: ${schema.type}`); } return schema as TypeMap[T]; } const stringParamSchema = convertOpenAPISchemaToJSONSchema( getParamSchema(createWidget, "stringParam"), spec ); expectType("string", stringParamSchema); const objectParamSchema = convertOpenAPISchemaToJSONSchema( getParamSchema(createWidget, "objectParam"), spec ); const typedObjectParamSchema = expectType("object", objectParamSchema); expectType("string", typedObjectParamSchema.properties.foo); expectType("number", typedObjectParamSchema.properties.bar); const objectParamWithRequiredFieldSchema = convertOpenAPISchemaToJSONSchema( getParamSchema(createWidget, "objectParamWithRequiredFields"), spec ) as JsonSchema7ObjectType; expect(objectParamWithRequiredFieldSchema.required).toContain("fooRequired"); const stringArrayParamSchema = convertOpenAPISchemaToJSONSchema( getParamSchema(createWidget, "stringArrayParam"), spec ); const typedStringArrayParamSchema = expectType( "array", stringArrayParamSchema ); expect(typedStringArrayParamSchema.items).not.toBeUndefined(); expectType("string", typedStringArrayParamSchema.items); const nestedObjectInArrayParamSchema = convertOpenAPISchemaToJSONSchema( getParamSchema(createWidget, "nestedObjectInArrayParam"), spec ); expectType( "number", expectType( "object", expectType("array", nestedObjectInArrayParamSchema).items ).properties.baz ); const nestedArrayInObjectParamSchema = convertOpenAPISchemaToJSONSchema( getParamSchema(createWidget, "nestedArrayInObjectParam"), spec ); expectType( "string", expectType( "array", expectType("object", nestedArrayInObjectParamSchema).properties.qux ).items ); const inceptionParamSchema = convertOpenAPISchemaToJSONSchema( getParamSchema(createWidget, "inceptionParam"), spec ); expectType( "number", expectType( "object", expectType( "object", expectType( "array", expectType("object", expectType("array", inceptionParamSchema).items) .properties.nestedArray ).items ).properties.nestedObject ).properties.inception ); });
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/entity_memory.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { PromptTemplate } from "@langchain/core/prompts"; import { InputValues, MemoryVariables, OutputValues, getPromptInputKey, } from "@langchain/core/memory"; import { getBufferString } from "@langchain/core/messages"; import { InMemoryEntityStore } from "./stores/entity/in_memory.js"; import { LLMChain } from "../chains/llm_chain.js"; import { ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, } from "./prompt.js"; import { BaseEntityStore } from "./stores/entity/base.js"; import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js"; /** * Interface for the input parameters required by the EntityMemory class. */ export interface EntityMemoryInput extends BaseChatMemoryInput { llm: BaseLanguageModelInterface; humanPrefix?: string; aiPrefix?: string; entityExtractionPrompt?: PromptTemplate; entitySummarizationPrompt?: PromptTemplate; entityCache?: string[]; k?: number; chatHistoryKey?: string; entitiesKey?: string; entityStore?: BaseEntityStore; } // Entity extractor & summarizer to memory. /** * Class for managing entity extraction and summarization to memory in * chatbot applications. Extends the BaseChatMemory class and implements * the EntityMemoryInput interface. * @example * ```typescript * const memory = new EntityMemory({ * llm: new ChatOpenAI({ temperature: 0 }), * chatHistoryKey: "history", * entitiesKey: "entities", * }); * const model = new ChatOpenAI({ temperature: 0.9 }); * const chain = new LLMChain({ * llm: model, * prompt: ENTITY_MEMORY_CONVERSATION_TEMPLATE, * memory, * }); * * const res1 = await chain.call({ input: "Hi! I'm Jim." }); * console.log({ * res1, * memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), * }); * * const res2 = await chain.call({ * input: "I work in construction. What about you?", * }); * console.log({ * res2, * memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), * }); * * ``` */ export class EntityMemory extends BaseChatMemory implements EntityMemoryInput { private entityExtractionChain: LLMChain; private entitySummarizationChain: LLMChain; entityStore: BaseEntityStore; entityCache: string[] = []; k = 3; chatHistoryKey = "history"; llm: BaseLanguageModelInterface; entitiesKey = "entities"; humanPrefix?: string; aiPrefix?: string; constructor(fields: EntityMemoryInput) { super({ chatHistory: fields.chatHistory, returnMessages: fields.returnMessages ?? false, inputKey: fields.inputKey, outputKey: fields.outputKey, }); this.llm = fields.llm; this.humanPrefix = fields.humanPrefix; this.aiPrefix = fields.aiPrefix; this.chatHistoryKey = fields.chatHistoryKey ?? this.chatHistoryKey; this.entitiesKey = fields.entitiesKey ?? this.entitiesKey; this.entityExtractionChain = new LLMChain({ llm: this.llm, prompt: fields.entityExtractionPrompt ?? ENTITY_EXTRACTION_PROMPT, }); this.entitySummarizationChain = new LLMChain({ llm: this.llm, prompt: fields.entitySummarizationPrompt ?? ENTITY_SUMMARIZATION_PROMPT, }); this.entityStore = fields.entityStore ?? new InMemoryEntityStore(); this.entityCache = fields.entityCache ?? this.entityCache; this.k = fields.k ?? this.k; } get memoryKeys() { return [this.chatHistoryKey]; } // Will always return list of memory variables. get memoryVariables(): string[] { return [this.entitiesKey, this.chatHistoryKey]; } // Return history buffer. /** * Method to load memory variables and perform entity extraction. * @param inputs Input values for the method. * @returns Promise resolving to an object containing memory variables. */ async loadMemoryVariables(inputs: InputValues): Promise<MemoryVariables> { const promptInputKey = this.inputKey ?? getPromptInputKey(inputs, this.memoryVariables); const messages = await this.chatHistory.getMessages(); const serializedMessages = getBufferString( messages.slice(-this.k * 2), this.humanPrefix, this.aiPrefix ); const output = await this.entityExtractionChain.predict({ history: serializedMessages, input: inputs[promptInputKey], }); const entities: string[] = output.trim() === "NONE" ? [] : output.split(",").map((w) => w.trim()); const entitySummaries: { [key: string]: string | undefined } = {}; for (const entity of entities) { entitySummaries[entity] = await this.entityStore.get( entity, "No current information known." ); } this.entityCache = [...entities]; const buffer = this.returnMessages ? messages.slice(-this.k * 2) : serializedMessages; return { [this.chatHistoryKey]: buffer, [this.entitiesKey]: entitySummaries, }; } // Save context from this conversation to buffer. /** * Method to save the context from a conversation to a buffer and perform * entity summarization. * @param inputs Input values for the method. * @param outputs Output values from the method. * @returns Promise resolving to void. */ async saveContext(inputs: InputValues, outputs: OutputValues): Promise<void> { await super.saveContext(inputs, outputs); const promptInputKey = this.inputKey ?? getPromptInputKey(inputs, this.memoryVariables); const messages = await this.chatHistory.getMessages(); const serializedMessages = getBufferString( messages.slice(-this.k * 2), this.humanPrefix, this.aiPrefix ); const inputData = inputs[promptInputKey]; for (const entity of this.entityCache) { const existingSummary = await this.entityStore.get( entity, "No current information known." ); const output = await this.entitySummarizationChain.predict({ summary: existingSummary, entity, history: serializedMessages, input: inputData, }); if (output.trim() !== "UNCHANGED") { await this.entityStore.set(entity, output.trim()); } } } // Clear memory contents. /** * Method to clear the memory contents. * @returns Promise resolving to void. */ async clear() { await super.clear(); await this.entityStore.clear(); } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/vector_store.ts
import type { VectorStoreRetrieverInterface } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; import { BaseMemory, getInputValue, InputValues, MemoryVariables, OutputValues, } from "@langchain/core/memory"; import { formatDocumentsAsString } from "../util/document.js"; type Metadata = Record<string, unknown>; type MetadataFunction = ( inputValues?: InputValues, outputValues?: OutputValues ) => Metadata; /** * Interface for the parameters required to initialize a * VectorStoreRetrieverMemory instance. */ export interface VectorStoreRetrieverMemoryParams { vectorStoreRetriever: VectorStoreRetrieverInterface; inputKey?: string; outputKey?: string; memoryKey?: string; returnDocs?: boolean; /** * Metadata to be added to the document when saving context. */ metadata?: Metadata | MetadataFunction; } /** * Class for managing long-term memory in Large Language Model (LLM) * applications. It provides a way to persist and retrieve relevant * documents from a vector store database, which can be useful for * maintaining conversation history or other types of memory in an LLM * application. * @example * ```typescript * const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings()); * const memory = new VectorStoreRetrieverMemory({ * vectorStoreRetriever: vectorStore.asRetriever(1), * memoryKey: "history", * }); * * // Saving context to memory * await memory.saveContext( * { input: "My favorite food is pizza" }, * { output: "thats good to know" }, * ); * await memory.saveContext( * { input: "My favorite sport is soccer" }, * { output: "..." }, * ); * await memory.saveContext({ input: "I don't the Celtics" }, { output: "ok" }); * * // Loading memory variables * console.log( * await memory.loadMemoryVariables({ prompt: "what sport should i watch?" }), * ); * ``` */ export class VectorStoreRetrieverMemory extends BaseMemory implements VectorStoreRetrieverMemoryParams { vectorStoreRetriever: VectorStoreRetrieverInterface; inputKey?: string; memoryKey: string; returnDocs: boolean; metadata?: Metadata | MetadataFunction; constructor(fields: VectorStoreRetrieverMemoryParams) { super(); this.vectorStoreRetriever = fields.vectorStoreRetriever; this.inputKey = fields.inputKey; this.memoryKey = fields.memoryKey ?? "memory"; this.returnDocs = fields.returnDocs ?? false; this.metadata = fields.metadata; } get memoryKeys(): string[] { return [this.memoryKey]; } /** * Method to load memory variables. It uses the vectorStoreRetriever to * get relevant documents based on the query obtained from the input * values. * @param values An InputValues object. * @returns A Promise that resolves to a MemoryVariables object. */ async loadMemoryVariables(values: InputValues): Promise<MemoryVariables> { const query = getInputValue(values, this.inputKey); const results = await this.vectorStoreRetriever.getRelevantDocuments(query); return { [this.memoryKey]: this.returnDocs ? results : formatDocumentsAsString(results), }; } /** * Method to save context. It constructs a document from the input and * output values (excluding the memory key) and adds it to the vector * store database using the vectorStoreRetriever. * @param inputValues An InputValues object. * @param outputValues An OutputValues object. * @returns A Promise that resolves to void. */ async saveContext( inputValues: InputValues, outputValues: OutputValues ): Promise<void> { const metadata = typeof this.metadata === "function" ? this.metadata(inputValues, outputValues) : this.metadata; const text = Object.entries(inputValues) .filter(([k]) => k !== this.memoryKey) .concat(Object.entries(outputValues)) .map(([k, v]) => `${k}: ${v}`) .join("\n"); await this.vectorStoreRetriever.addDocuments([ new Document({ pageContent: text, metadata }), ]); } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/summary_buffer.ts
import { getBufferString } from "@langchain/core/messages"; import { InputValues, MemoryVariables, OutputValues, } from "@langchain/core/memory"; import { BaseConversationSummaryMemory, BaseConversationSummaryMemoryInput, } from "./summary.js"; /** * Interface for the input parameters of the * ConversationSummaryBufferMemory class. */ export interface ConversationSummaryBufferMemoryInput extends BaseConversationSummaryMemoryInput { maxTokenLimit?: number; } /** * Class that extends BaseConversationSummaryMemory and implements * ConversationSummaryBufferMemoryInput. It manages the conversation * history in a LangChain application by maintaining a buffer of chat * messages and providing methods to load, save, prune, and clear the * memory. * @example * ```typescript * // Initialize the memory with a specific model and token limit * const memory = new ConversationSummaryBufferMemory({ * llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo-instruct", temperature: 0 }), * maxTokenLimit: 10, * }); * * // Save conversation context to memory * await memory.saveContext({ input: "hi" }, { output: "whats up" }); * await memory.saveContext({ input: "not much you" }, { output: "not much" }); * * // Load the conversation history from memory * const history = await memory.loadMemoryVariables({}); * console.log({ history }); * * // Create a chat prompt using the conversation history * const chatPrompt = ChatPromptTemplate.fromMessages([ * SystemMessagePromptTemplate.fromTemplate( * "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.", * ), * new MessagesPlaceholder("history"), * HumanMessagePromptTemplate.fromTemplate("{input}"), * ]); * * // Initialize the conversation chain with the model, memory, and prompt * const chain = new ConversationChain({ * llm: new ChatOpenAI({ temperature: 0.9, verbose: true }), * memory: memory, * prompt: chatPrompt, * }); * ``` */ export class ConversationSummaryBufferMemory extends BaseConversationSummaryMemory implements ConversationSummaryBufferMemoryInput { movingSummaryBuffer = ""; maxTokenLimit = 2000; constructor(fields: ConversationSummaryBufferMemoryInput) { super(fields); this.maxTokenLimit = fields?.maxTokenLimit ?? this.maxTokenLimit; } get memoryKeys() { return [this.memoryKey]; } /** * Method that loads the chat messages from the memory and returns them as * a string or as a list of messages, depending on the returnMessages * property. * @param _ InputValues object, not used in this method. * @returns Promise that resolves with MemoryVariables object containing the loaded chat messages. */ async loadMemoryVariables(_?: InputValues): Promise<MemoryVariables> { let buffer = await this.chatHistory.getMessages(); if (this.movingSummaryBuffer) { buffer = [ new this.summaryChatMessageClass(this.movingSummaryBuffer), ...buffer, ]; } let finalBuffer; if (this.returnMessages) { finalBuffer = buffer; } else { finalBuffer = getBufferString(buffer, this.humanPrefix, this.aiPrefix); } return { [this.memoryKey]: finalBuffer }; } /** * Method that saves the context of the conversation, including the input * and output values, and prunes the memory if it exceeds the maximum * token limit. * @param inputValues InputValues object containing the input values of the conversation. * @param outputValues OutputValues object containing the output values of the conversation. * @returns Promise that resolves when the context is saved and the memory is pruned. */ async saveContext( inputValues: InputValues, outputValues: OutputValues ): Promise<void> { await super.saveContext(inputValues, outputValues); await this.prune(); } /** * Method that prunes the memory if the total number of tokens in the * buffer exceeds the maxTokenLimit. It removes messages from the * beginning of the buffer until the total number of tokens is within the * limit. * @returns Promise that resolves when the memory is pruned. */ async prune() { // Prune buffer if it exceeds max token limit let buffer = await this.chatHistory.getMessages(); if (this.movingSummaryBuffer) { buffer = [ new this.summaryChatMessageClass(this.movingSummaryBuffer), ...buffer, ]; } let currBufferLength = await this.llm.getNumTokens( getBufferString(buffer, this.humanPrefix, this.aiPrefix) ); if (currBufferLength > this.maxTokenLimit) { const prunedMemory = []; while (currBufferLength > this.maxTokenLimit) { const poppedMessage = buffer.shift(); if (poppedMessage) { prunedMemory.push(poppedMessage); currBufferLength = await this.llm.getNumTokens( getBufferString(buffer, this.humanPrefix, this.aiPrefix) ); } } this.movingSummaryBuffer = await this.predictNewSummary( prunedMemory, this.movingSummaryBuffer ); } } /** * Method that clears the memory and resets the movingSummaryBuffer. * @returns Promise that resolves when the memory is cleared. */ async clear() { await super.clear(); this.movingSummaryBuffer = ""; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/summary.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { BaseMessage, SystemMessage, getBufferString, } from "@langchain/core/messages"; import { BasePromptTemplate } from "@langchain/core/prompts"; import { InputValues, MemoryVariables, OutputValues, } from "@langchain/core/memory"; import { LLMChain } from "../chains/llm_chain.js"; import { SUMMARY_PROMPT } from "./prompt.js"; import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js"; /** * Interface for the input parameters of the ConversationSummaryMemory * class. */ export interface ConversationSummaryMemoryInput extends BaseConversationSummaryMemoryInput {} /** * Interface for the input parameters of the BaseConversationSummaryMemory * class. */ export interface BaseConversationSummaryMemoryInput extends BaseChatMemoryInput { llm: BaseLanguageModelInterface; memoryKey?: string; humanPrefix?: string; aiPrefix?: string; prompt?: BasePromptTemplate; summaryChatMessageClass?: new (content: string) => BaseMessage; } /** * Abstract class that provides a structure for storing and managing the * memory of a conversation. It includes methods for predicting a new * summary for the conversation given the existing messages and summary. */ export abstract class BaseConversationSummaryMemory extends BaseChatMemory { memoryKey = "history"; humanPrefix = "Human"; aiPrefix = "AI"; llm: BaseLanguageModelInterface; prompt: BasePromptTemplate = SUMMARY_PROMPT; summaryChatMessageClass: new (content: string) => BaseMessage = SystemMessage; constructor(fields: BaseConversationSummaryMemoryInput) { const { returnMessages, inputKey, outputKey, chatHistory, humanPrefix, aiPrefix, llm, prompt, summaryChatMessageClass, } = fields; super({ returnMessages, inputKey, outputKey, chatHistory }); this.memoryKey = fields?.memoryKey ?? this.memoryKey; this.humanPrefix = humanPrefix ?? this.humanPrefix; this.aiPrefix = aiPrefix ?? this.aiPrefix; this.llm = llm; this.prompt = prompt ?? this.prompt; this.summaryChatMessageClass = summaryChatMessageClass ?? this.summaryChatMessageClass; } /** * Predicts a new summary for the conversation given the existing messages * and summary. * @param messages Existing messages in the conversation. * @param existingSummary Current summary of the conversation. * @returns A promise that resolves to a new summary string. */ async predictNewSummary( messages: BaseMessage[], existingSummary: string ): Promise<string> { const newLines = getBufferString(messages, this.humanPrefix, this.aiPrefix); const chain = new LLMChain({ llm: this.llm, prompt: this.prompt }); return await chain.predict({ summary: existingSummary, new_lines: newLines, }); } } /** * Class that provides a concrete implementation of the conversation * memory. It includes methods for loading memory variables, saving * context, and clearing the memory. * @example * ```typescript * const memory = new ConversationSummaryMemory({ * memoryKey: "chat_history", * llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0 }), * }); * * const model = new ChatOpenAI(); * const prompt = * PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. * * Current conversation: * {chat_history} * Human: {input} * AI:`); * const chain = new LLMChain({ llm: model, prompt, memory }); * * const res1 = await chain.call({ input: "Hi! I'm Jim." }); * console.log({ res1, memory: await memory.loadMemoryVariables({}) }); * * const res2 = await chain.call({ input: "What's my name?" }); * console.log({ res2, memory: await memory.loadMemoryVariables({}) }); * * ``` */ export class ConversationSummaryMemory extends BaseConversationSummaryMemory { buffer = ""; constructor(fields: ConversationSummaryMemoryInput) { super(fields); } get memoryKeys() { return [this.memoryKey]; } /** * Loads the memory variables for the conversation memory. * @returns A promise that resolves to an object containing the memory variables. */ async loadMemoryVariables(_: InputValues): Promise<MemoryVariables> { if (this.returnMessages) { const result = { [this.memoryKey]: [new this.summaryChatMessageClass(this.buffer)], }; return result; } const result = { [this.memoryKey]: this.buffer }; return result; } /** * Saves the context of the conversation memory. * @param inputValues Input values for the conversation. * @param outputValues Output values from the conversation. * @returns A promise that resolves when the context has been saved. */ async saveContext( inputValues: InputValues, outputValues: OutputValues ): Promise<void> { await super.saveContext(inputValues, outputValues); const messages = await this.chatHistory.getMessages(); this.buffer = await this.predictNewSummary(messages.slice(-2), this.buffer); } /** * Clears the conversation memory. * @returns A promise that resolves when the memory has been cleared. */ async clear() { await super.clear(); this.buffer = ""; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/buffer_window_memory.ts
import { InputValues, MemoryVariables } from "@langchain/core/memory"; import { getBufferString } from "@langchain/core/messages"; import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js"; /** * Interface for the input parameters of the BufferWindowMemory class. */ export interface BufferWindowMemoryInput extends BaseChatMemoryInput { humanPrefix?: string; aiPrefix?: string; memoryKey?: string; k?: number; } /** * Class for managing and storing previous chat messages. It extends the * BaseChatMemory class and implements the BufferWindowMemoryInput * interface. This class is stateful and stores messages in a buffer. When * called in a chain, it returns all of the messages it has stored. * @example * ```typescript * const prompt = * PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. * Current conversation: * {chat_history} * Human: {input} * AI:`); * * const chain = new LLMChain({ * llm: new ChatOpenAI({ temperature: 0.9 }), * prompt, * memory: new BufferWindowMemory({ memoryKey: "chat_history", k: 1 }), * }); * * // Example of initiating a conversation with the AI * const res1 = await chain.call({ input: "Hi! I'm Jim." }); * console.log({ res1 }); * * // Example of following up with another question * const res2 = await chain.call({ input: "What's my name?" }); * console.log({ res2 }); * ``` */ export class BufferWindowMemory extends BaseChatMemory implements BufferWindowMemoryInput { humanPrefix = "Human"; aiPrefix = "AI"; memoryKey = "history"; k = 5; constructor(fields?: BufferWindowMemoryInput) { super({ returnMessages: fields?.returnMessages ?? false, chatHistory: fields?.chatHistory, inputKey: fields?.inputKey, outputKey: fields?.outputKey, }); this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix; this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix; this.memoryKey = fields?.memoryKey ?? this.memoryKey; this.k = fields?.k ?? this.k; } get memoryKeys() { return [this.memoryKey]; } /** * Method to load the memory variables. Retrieves the chat messages from * the history, slices the last 'k' messages, and stores them in the * memory under the memoryKey. If the returnMessages property is set to * true, the method returns the messages as they are. Otherwise, it * returns a string representation of the messages. * @param _values InputValues object. * @returns Promise that resolves to a MemoryVariables object. */ async loadMemoryVariables(_values: InputValues): Promise<MemoryVariables> { const messages = await this.chatHistory.getMessages(); if (this.returnMessages) { const result = { [this.memoryKey]: messages.slice(-this.k * 2), }; return result; } const result = { [this.memoryKey]: getBufferString( messages.slice(-this.k * 2), this.humanPrefix, this.aiPrefix ), }; return result; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/buffer_token_memory.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base"; import { InputValues, MemoryVariables, OutputValues, } from "@langchain/core/memory"; import { getBufferString } from "@langchain/core/messages"; import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js"; /** * Interface for the input parameters of the `BufferTokenMemory` class. */ export interface ConversationTokenBufferMemoryInput extends BaseChatMemoryInput { /* Prefix for human messages in the buffer. */ humanPrefix?: string; /* Prefix for AI messages in the buffer. */ aiPrefix?: string; /* The LLM for this instance. */ llm: BaseLanguageModelInterface; /* Memory key for buffer instance. */ memoryKey?: string; /* Maximmum number of tokens allowed in the buffer. */ maxTokenLimit?: number; } /** * Class that represents a conversation chat memory with a token buffer. * It extends the `BaseChatMemory` class and implements the * `ConversationTokenBufferMemoryInput` interface. * @example * ```typescript * const memory = new ConversationTokenBufferMemory({ * llm: new ChatOpenAI({}), * maxTokenLimit: 10, * }); * * // Save conversation context * await memory.saveContext({ input: "hi" }, { output: "whats up" }); * await memory.saveContext({ input: "not much you" }, { output: "not much" }); * * // Load memory variables * const result = await memory.loadMemoryVariables({}); * console.log(result); * ``` */ export class ConversationTokenBufferMemory extends BaseChatMemory implements ConversationTokenBufferMemoryInput { humanPrefix = "Human"; aiPrefix = "AI"; memoryKey = "history"; maxTokenLimit = 2000; // Default max token limit of 2000 which can be overridden llm: BaseLanguageModelInterface; constructor(fields: ConversationTokenBufferMemoryInput) { super(fields); this.llm = fields.llm; this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix; this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix; this.memoryKey = fields?.memoryKey ?? this.memoryKey; this.maxTokenLimit = fields?.maxTokenLimit ?? this.maxTokenLimit; } get memoryKeys() { return [this.memoryKey]; } /** * Loads the memory variables. It takes an `InputValues` object as a * parameter and returns a `Promise` that resolves with a * `MemoryVariables` object. * @param _values `InputValues` object. * @returns A `Promise` that resolves with a `MemoryVariables` object. */ async loadMemoryVariables(_values: InputValues): Promise<MemoryVariables> { const messages = await this.chatHistory.getMessages(); if (this.returnMessages) { const result = { [this.memoryKey]: messages, }; return result; } const result = { [this.memoryKey]: getBufferString( messages, this.humanPrefix, this.aiPrefix ), }; return result; } /** * Saves the context from this conversation to buffer. If the amount * of tokens required to save the buffer exceeds MAX_TOKEN_LIMIT, * prune it. */ async saveContext(inputValues: InputValues, outputValues: OutputValues) { await super.saveContext(inputValues, outputValues); // Prune buffer if it exceeds the max token limit set for this instance. const buffer = await this.chatHistory.getMessages(); let currBufferLength = await this.llm.getNumTokens( getBufferString(buffer, this.humanPrefix, this.aiPrefix) ); if (currBufferLength > this.maxTokenLimit) { const prunedMemory = []; while (currBufferLength > this.maxTokenLimit) { prunedMemory.push(buffer.shift()); currBufferLength = await this.llm.getNumTokens( getBufferString(buffer, this.humanPrefix, this.aiPrefix) ); } } } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/buffer_memory.ts
import { InputValues, MemoryVariables } from "@langchain/core/memory"; import { getBufferString } from "@langchain/core/messages"; import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js"; /** * Interface for the input parameters of the `BufferMemory` class. */ export interface BufferMemoryInput extends BaseChatMemoryInput { humanPrefix?: string; aiPrefix?: string; memoryKey?: string; } /** * The `BufferMemory` class is a type of memory component used for storing * and managing previous chat messages. It is a wrapper around * `ChatMessageHistory` that extracts the messages into an input variable. * This class is particularly useful in applications like chatbots where * it is essential to remember previous interactions. Note: The memory * instance represents the history of a single conversation. Therefore, it * is not recommended to share the same history or memory instance between * two different chains. If you deploy your LangChain app on a serverless * environment, do not store memory instances in a variable, as your * hosting provider may reset it by the next time the function is called. * @example * ```typescript * // Initialize the memory to store chat history and set up the language model with a specific temperature. * const memory = new BufferMemory({ memoryKey: "chat_history" }); * const model = new ChatOpenAI({ temperature: 0.9 }); * * // Create a prompt template for a friendly conversation between a human and an AI. * const prompt = * PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. * * Current conversation: * {chat_history} * Human: {input} * AI:`); * * // Set up the chain with the language model, prompt, and memory. * const chain = new LLMChain({ llm: model, prompt, memory }); * * // Example usage of the chain to continue the conversation. * // The `call` method sends the input to the model and returns the AI's response. * const res = await chain.call({ input: "Hi! I'm Jim." }); * console.log({ res }); * * ``` */ export class BufferMemory extends BaseChatMemory implements BufferMemoryInput { humanPrefix = "Human"; aiPrefix = "AI"; memoryKey = "history"; constructor(fields?: BufferMemoryInput) { super({ chatHistory: fields?.chatHistory, returnMessages: fields?.returnMessages ?? false, inputKey: fields?.inputKey, outputKey: fields?.outputKey, }); this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix; this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix; this.memoryKey = fields?.memoryKey ?? this.memoryKey; } get memoryKeys() { return [this.memoryKey]; } /** * Loads the memory variables. It takes an `InputValues` object as a * parameter and returns a `Promise` that resolves with a * `MemoryVariables` object. * @param _values `InputValues` object. * @returns A `Promise` that resolves with a `MemoryVariables` object. */ async loadMemoryVariables(_values: InputValues): Promise<MemoryVariables> { const messages = await this.chatHistory.getMessages(); if (this.returnMessages) { const result = { [this.memoryKey]: messages, }; return result; } const result = { [this.memoryKey]: getBufferString( messages, this.humanPrefix, this.aiPrefix ), }; return result; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/chat_memory.ts
import { BaseChatMessageHistory } from "@langchain/core/chat_history"; import { BaseMemory, InputValues, OutputValues, getInputValue, getOutputValue, } from "@langchain/core/memory"; import { ChatMessageHistory } from "../stores/message/in_memory.js"; /** * Interface for the input parameters of the BaseChatMemory class. */ export interface BaseChatMemoryInput { chatHistory?: BaseChatMessageHistory; returnMessages?: boolean; inputKey?: string; outputKey?: string; } /** * Abstract class that provides a base for implementing different types of * memory systems. It is designed to maintain the state of an application, * specifically the history of a conversation. This class is typically * extended by other classes to create specific types of memory systems. */ export abstract class BaseChatMemory extends BaseMemory { chatHistory: BaseChatMessageHistory; returnMessages = false; inputKey?: string; outputKey?: string; constructor(fields?: BaseChatMemoryInput) { super(); this.chatHistory = fields?.chatHistory ?? new ChatMessageHistory(); this.returnMessages = fields?.returnMessages ?? this.returnMessages; this.inputKey = fields?.inputKey ?? this.inputKey; this.outputKey = fields?.outputKey ?? this.outputKey; } /** * Method to add user and AI messages to the chat history in sequence. * @param inputValues The input values from the user. * @param outputValues The output values from the AI. * @returns Promise that resolves when the context has been saved. */ async saveContext( inputValues: InputValues, outputValues: OutputValues ): Promise<void> { // this is purposefully done in sequence so they're saved in order await this.chatHistory.addUserMessage( getInputValue(inputValues, this.inputKey) ); await this.chatHistory.addAIChatMessage( getOutputValue(outputValues, this.outputKey) ); } /** * Method to clear the chat history. * @returns Promise that resolves when the chat history has been cleared. */ async clear(): Promise<void> { await this.chatHistory.clear(); } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/index.ts
export { BufferMemory, type BufferMemoryInput } from "./buffer_memory.js"; export { BaseMemory, getInputValue, getOutputValue, getBufferString, type InputValues, type OutputValues, type MemoryVariables, } from "./base.js"; export { ConversationSummaryMemory, type ConversationSummaryMemoryInput, BaseConversationSummaryMemory, type BaseConversationSummaryMemoryInput, } from "./summary.js"; export { BufferWindowMemory, type BufferWindowMemoryInput, } from "./buffer_window_memory.js"; export { BaseChatMemory, type BaseChatMemoryInput } from "./chat_memory.js"; export { ChatMessageHistory } from "../stores/message/in_memory.js"; export { VectorStoreRetrieverMemory, type VectorStoreRetrieverMemoryParams, } from "./vector_store.js"; export { EntityMemory } from "./entity_memory.js"; export { ENTITY_MEMORY_CONVERSATION_TEMPLATE } from "./prompt.js"; export { type CombinedMemoryInput, CombinedMemory } from "./combined_memory.js"; export { ConversationSummaryBufferMemory, type ConversationSummaryBufferMemoryInput, } from "./summary_buffer.js"; export { ConversationTokenBufferMemory, type ConversationTokenBufferMemoryInput, } from "./buffer_token_memory.js";
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/base.ts
export * from "@langchain/core/memory"; export { getBufferString } from "@langchain/core/messages";
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/combined_memory.ts
import { InputValues, MemoryVariables, BaseMemory, OutputValues, } from "@langchain/core/memory"; import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js"; /** * Interface that defines the shape of the input object that the * CombinedMemory constructor accepts. It extends the BaseChatMemoryInput * interface and adds additional properties. */ export interface CombinedMemoryInput extends BaseChatMemoryInput { memories: BaseMemory[]; humanPrefix?: string; aiPrefix?: string; memoryKey?: string; } /** * Class that manages and manipulates previous chat messages. It extends * from the BaseChatMemory class and implements the CombinedMemoryInput * interface. */ export class CombinedMemory extends BaseChatMemory implements CombinedMemoryInput { humanPrefix = "Human"; aiPrefix = "AI"; memoryKey = "history"; memories: BaseMemory[] = []; constructor(fields?: CombinedMemoryInput) { super({ chatHistory: fields?.chatHistory, returnMessages: fields?.returnMessages ?? false, inputKey: fields?.inputKey, outputKey: fields?.outputKey, }); this.memories = fields?.memories ?? this.memories; this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix; this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix; this.memoryKey = fields?.memoryKey ?? this.memoryKey; this.checkRepeatedMemoryVariable(); this.checkInputKey(); } /** * Checks for repeated memory variables across all memory objects. Throws * an error if any are found. */ checkRepeatedMemoryVariable() { const allVariables: string[] = []; for (const memory of this.memories) { const overlap = allVariables.filter((x) => memory.memoryKeys.includes(x)); if (overlap.length > 0) { throw new Error( `The same variables ${[ ...overlap, ]} are found in multiple memory objects, which is not allowed by CombinedMemory.` ); } allVariables.push(...memory.memoryKeys); } } /** * Checks if input keys are set for all memory objects. Logs a warning if * any are missing. */ checkInputKey() { for (const memory of this.memories) { if ( (memory as BaseChatMemory).chatHistory !== undefined && (memory as BaseChatMemory).inputKey === undefined ) { console.warn( `When using CombinedMemory, input keys should be set so the input is known. Was not set on ${memory}.` ); } } } /** * Loads memory variables from all memory objects. * @param inputValues Input values to load memory variables from. * @returns Promise that resolves with an object containing the loaded memory variables. */ async loadMemoryVariables( inputValues: InputValues ): Promise<MemoryVariables> { let memoryData: Record<string, unknown> = {}; for (const memory of this.memories) { const data = await memory.loadMemoryVariables(inputValues); memoryData = { ...memoryData, ...data, }; } return memoryData; } /** * Saves the context to all memory objects. * @param inputValues Input values to save. * @param outputValues Output values to save. * @returns Promise that resolves when the context has been saved to all memory objects. */ async saveContext(inputValues: InputValues, outputValues: OutputValues) { for (const memory of this.memories) { await memory.saveContext(inputValues, outputValues); } } /** * Clears all memory objects. * @returns Promise that resolves when all memory objects have been cleared. */ async clear() { for (const memory of this.memories) { if (typeof (memory as BaseChatMemory).clear === "function") { await (memory as BaseChatMemory).clear(); } } } get memoryKeys() { const memoryKeys: string[] = []; for (const memory of this.memories) { memoryKeys.push(...memory.memoryKeys); } return memoryKeys; } }
0
lc_public_repos/langchainjs/langchain/src
lc_public_repos/langchainjs/langchain/src/memory/prompt.ts
import { PromptTemplate } from "@langchain/core/prompts"; const _DEFAULT_SUMMARIZER_TEMPLATE = `Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary. EXAMPLE Current summary: The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good. New lines of conversation: Human: Why do you think artificial intelligence is a force for good? AI: Because artificial intelligence will help humans reach their full potential. New summary: The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential. END OF EXAMPLE Current summary: {summary} New lines of conversation: {new_lines} New summary:`; // eslint-disable-next-line spaced-comment export const SUMMARY_PROMPT = /*#__PURE__*/ new PromptTemplate({ inputVariables: ["summary", "new_lines"], template: _DEFAULT_SUMMARIZER_TEMPLATE, }); export const _DEFAULT_ENTITY_MEMORY_CONVERSATION_TEMPLATE = `You are an assistant to a human, powered by a large language model trained by OpenAI. You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. Context: {entities} Current conversation: {history} Last line: Human: {input} You:`; export const ENTITY_MEMORY_CONVERSATION_TEMPLATE = // eslint-disable-next-line spaced-comment /*#__PURE__*/ new PromptTemplate({ inputVariables: ["entities", "history", "input"], template: _DEFAULT_ENTITY_MEMORY_CONVERSATION_TEMPLATE, }); export const _DEFAULT_ENTITY_EXTRACTION_TEMPLATE = `You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places. The conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line. Return the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation). EXAMPLE Conversation history: Person #1: my name is Jacob. how's it going today? AI: "It's going great! How about you?" Person #1: good! busy working on Langchain. lots to do. AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?" Last line: Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. Output: Jacob,Langchain END OF EXAMPLE EXAMPLE Conversation history: Person #1: how's it going today? AI: "It's going great! How about you?" Person #1: good! busy working on Langchain. lots to do. AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?" Last line: Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Person #2. Output: Langchain, Person #2 END OF EXAMPLE Conversation history (for reference only): {history} Last line of conversation (for extraction): Human: {input} Output:`; // eslint-disable-next-line spaced-comment export const ENTITY_EXTRACTION_PROMPT = /*#__PURE__*/ new PromptTemplate({ inputVariables: ["history", "input"], template: _DEFAULT_ENTITY_EXTRACTION_TEMPLATE, }); export const _DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE = `You are an AI assistant helping a human keep track of facts about relevant people, places, and concepts in their life. Update and add to the summary of the provided entity in the "Entity" section based on the last line of your conversation with the human. If you are writing the summary for the first time, return a single sentence. The update should only include facts that are relayed in the last line of conversation about the provided entity, and should only contain facts about the provided entity. If there is no new information about the provided entity or the information is not worth noting (not an important or relevant fact to remember long-term), output the exact string "UNCHANGED" below. Full conversation history (for context): {history} Entity to summarize: {entity} Existing summary of {entity}: {summary} Last line of conversation: Human: {input} Updated summary (or the exact string "UNCHANGED" if there is no new information about {entity} above):`; // eslint-disable-next-line spaced-comment export const ENTITY_SUMMARIZATION_PROMPT = /*#__PURE__*/ new PromptTemplate({ inputVariables: ["entity", "summary", "history", "input"], template: _DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE, });