index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/format_scratchpad/openai_tools.ts | import { formatToToolMessages as formatToOpenAIToolMessages } from "./tool_calling.js";
export { formatToOpenAIToolMessages };
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/toolkits/index.ts | export { JsonToolkit, createJsonAgent } from "./json/json.js";
// HTTP-request and OpenAPI toolkits plus the (deprecated) OpenAPI agent factory.
export {
  RequestsToolkit,
  OpenApiToolkit,
  createOpenApiAgent,
} from "./openapi/openapi.js";
// Vector-store toolkits (single store and multi-store router) and their agent factories.
export {
  type VectorStoreInfo,
  VectorStoreToolkit,
  VectorStoreRouterToolkit,
  createVectorStoreAgent,
  createVectorStoreRouterAgent,
} from "./vectorstore/vectorstore.js";
// Conversational-retrieval helpers: retriever tool, agent factory, and token-buffer memory.
export { createRetrieverTool } from "./conversational_retrieval/tool.js";
export {
  createConversationalRetrievalAgent,
  type ConversationalRetrievalAgentOptions,
} from "./conversational_retrieval/openai_functions.js";
export { OpenAIAgentTokenBufferMemory } from "./conversational_retrieval/token_buffer_memory.js";
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/toolkits/base.ts | export { BaseToolkit as Toolkit } from "@langchain/core/tools";
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/sql/sql.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { type ToolInterface, BaseToolkit } from "@langchain/core/tools";
import { renderTemplate } from "@langchain/core/prompts";
import {
InfoSqlTool,
ListTablesSqlTool,
QueryCheckerTool,
QuerySqlTool,
} from "../../../tools/sql.js";
import { SQL_PREFIX, SQL_SUFFIX } from "./prompt.js";
import { LLMChain } from "../../../chains/llm_chain.js";
import { ZeroShotAgent, ZeroShotCreatePromptArgs } from "../../mrkl/index.js";
import { AgentExecutor } from "../../executor.js";
import { SqlDatabase } from "../../../sql_db.js";
/**
 * Interface that extends ZeroShotCreatePromptArgs and adds an optional
 * topK parameter for specifying the number of results to return.
 */
export interface SqlCreatePromptArgs extends ZeroShotCreatePromptArgs {
  /**
   * Number of results to return. Substituted into the prompt as the
   * `top_k` variable; `createSqlAgent` defaults it to 10.
   */
  topK?: number;
}
/**
 * Class that represents a toolkit for working with SQL databases. It
 * initializes SQL tools based on the provided SQL database.
 * @example
 * ```typescript
 * const model = new ChatOpenAI({});
 * const toolkit = new SqlToolkit(sqlDb, model);
 * const executor = createSqlAgent(model, toolkit);
 * const result = await executor.invoke({
 *   input:
 *     "List the total sales per country. Which country's customers spent the most?",
 * });
 * console.log(`Got output ${result.output}`);
 * ```
 */
export class SqlToolkit extends BaseToolkit {
  // Tools exposed to the agent: run a query, inspect table schemas,
  // list tables, and sanity-check a query before execution.
  tools: ToolInterface[];

  // The database connection wrapper the tools operate on.
  db: SqlDatabase;

  // SQL dialect substituted into the agent prompt; defaults to "sqlite".
  dialect = "sqlite";

  /**
   * @param db Database the SQL tools will run against.
   * @param llm Optional model forwarded to the query-checker tool.
   */
  constructor(db: SqlDatabase, llm?: BaseLanguageModelInterface) {
    super();
    this.db = db;
    this.tools = [
      new QuerySqlTool(db),
      new InfoSqlTool(db),
      new ListTablesSqlTool(db),
      new QueryCheckerTool({ llm }),
    ];
  }
}
/**
 * Builds an AgentExecutor that answers questions by issuing SQL queries
 * through the tools of the given {@link SqlToolkit}.
 *
 * @param llm Language model driving the agent.
 * @param toolkit Toolkit supplying the SQL tools.
 * @param args Optional prompt customization (prefix, suffix, input variables, topK).
 * @returns An AgentExecutor that also returns intermediate steps.
 */
export function createSqlAgent(
  llm: BaseLanguageModelInterface,
  toolkit: SqlToolkit,
  args?: SqlCreatePromptArgs
) {
  const options = args ?? {};
  const prefix = options.prefix ?? SQL_PREFIX;
  const suffix = options.suffix ?? SQL_SUFFIX;
  const inputVariables = options.inputVariables ?? [
    "input",
    "agent_scratchpad",
  ];
  const topK = options.topK ?? 10;
  const { tools } = toolkit;
  // Fill the dialect and row-limit placeholders before building the prompt.
  const formattedPrefix = renderTemplate(prefix, "f-string", {
    dialect: toolkit.dialect,
    top_k: topK,
  });
  const prompt = ZeroShotAgent.createPrompt(tools, {
    prefix: formattedPrefix,
    suffix,
    inputVariables,
  });
  const agent = new ZeroShotAgent({
    llmChain: new LLMChain({ prompt, llm }),
    allowedTools: tools.map((tool) => tool.name),
  });
  return AgentExecutor.fromAgentAndTools({
    agent,
    tools,
    returnIntermediateSteps: true,
  });
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/sql/index.ts | export { SqlCreatePromptArgs, SqlToolkit, createSqlAgent } from "./sql.js";
export { SQL_PREFIX, SQL_SUFFIX } from "./prompt.js";
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/sql/prompt.ts | export const SQL_PREFIX = `You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results using the LIMIT clause.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for a the few relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.`;
/**
 * Suffix appended to the SQL agent prompt: supplies the `{input}` question,
 * seeds the first thought, and exposes the `{agent_scratchpad}` slot for
 * intermediate steps.
 */
export const SQL_SUFFIX = `Begin!
Question: {input}
Thought: I should look at the tables in the database to see what I can query.
{agent_scratchpad}`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/vectorstore/vectorstore.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { VectorStoreInterface } from "@langchain/core/vectorstores";
import { ToolInterface, BaseToolkit } from "@langchain/core/tools";
import { VectorStoreQATool } from "../../../tools/vectorstore.js";
import { ZeroShotCreatePromptArgs, ZeroShotAgent } from "../../mrkl/index.js";
import { VECTOR_PREFIX, VECTOR_ROUTER_PREFIX } from "./prompt.js";
import { SUFFIX } from "../../mrkl/prompt.js";
import { LLMChain } from "../../../chains/llm_chain.js";
import { AgentExecutor } from "../../executor.js";
/**
 * Interface that defines the information about a vector store, including
 * the vector store itself, its name, and description.
 */
export interface VectorStoreInfo {
  /** The vector store to expose through a QA tool. */
  vectorStore: VectorStoreInterface;
  /** Name used to identify the generated QA tool. */
  name: string;
  /** Human-readable description of the store's contents, used in the tool description. */
  description: string;
}
/**
 * Class representing a toolkit for working with a single vector store. It
 * initializes the vector store QA tool based on the provided vector store
 * information and language model.
 * @example
 * ```typescript
 * const toolkit = new VectorStoreToolkit(
 *   {
 *     name: "state_of_union_address",
 *     description: "the most recent state of the Union address",
 *     vectorStore: new HNSWLib(),
 *   },
 *   new ChatOpenAI({ temperature: 0 }),
 * );
 * // Toolkits are not directly invokable; wire the tools into an agent first:
 * const executor = createVectorStoreAgent(
 *   new ChatOpenAI({ temperature: 0 }),
 *   toolkit,
 * );
 * const result = await executor.invoke({
 *   input:
 *     "What did biden say about Ketanji Brown Jackson in the state of the union address?",
 * });
 * console.log(`Got output ${result.output}`);
 * ```
 */
export class VectorStoreToolkit extends BaseToolkit {
  /** A single QA tool wrapping the provided vector store. */
  tools: ToolInterface[];

  /** Model handed to the QA tool for answering questions over retrieved documents. */
  llm: BaseLanguageModelInterface;

  constructor(
    vectorStoreInfo: VectorStoreInfo,
    llm: BaseLanguageModelInterface
  ) {
    super();
    const description = VectorStoreQATool.getDescription(
      vectorStoreInfo.name,
      vectorStoreInfo.description
    );
    this.llm = llm;
    this.tools = [
      new VectorStoreQATool(vectorStoreInfo.name, description, {
        vectorStore: vectorStoreInfo.vectorStore,
        llm: this.llm,
      }),
    ];
  }
}
/**
 * Class representing a toolkit for working with multiple vector stores.
 * It initializes multiple vector store QA tools based on the provided
 * vector store information and language model.
 */
export class VectorStoreRouterToolkit extends BaseToolkit {
  /** One QA tool per configured vector store. */
  tools: ToolInterface[];

  /** The vector store descriptors this router toolkit was built from. */
  vectorStoreInfos: VectorStoreInfo[];

  /** Model handed to every QA tool. */
  llm: BaseLanguageModelInterface;

  constructor(
    vectorStoreInfos: VectorStoreInfo[],
    llm: BaseLanguageModelInterface
  ) {
    super();
    this.llm = llm;
    this.vectorStoreInfos = vectorStoreInfos;
    // Build one QA tool per store, each described from its info record.
    const qaTools: ToolInterface[] = [];
    for (const info of vectorStoreInfos) {
      const toolDescription = VectorStoreQATool.getDescription(
        info.name,
        info.description
      );
      qaTools.push(
        new VectorStoreQATool(info.name, toolDescription, {
          vectorStore: info.vectorStore,
          llm: this.llm,
        })
      );
    }
    this.tools = qaTools;
  }
}
/** @deprecated Create a specific agent with a custom tool instead. */
export function createVectorStoreAgent(
llm: BaseLanguageModelInterface,
toolkit: VectorStoreToolkit,
args?: ZeroShotCreatePromptArgs
) {
const {
prefix = VECTOR_PREFIX,
suffix = SUFFIX,
inputVariables = ["input", "agent_scratchpad"],
} = args ?? {};
const { tools } = toolkit;
const prompt = ZeroShotAgent.createPrompt(tools, {
prefix,
suffix,
inputVariables,
});
const chain = new LLMChain({ prompt, llm });
const agent = new ZeroShotAgent({
llmChain: chain,
allowedTools: tools.map((t) => t.name),
});
return AgentExecutor.fromAgentAndTools({
agent,
tools,
returnIntermediateSteps: true,
});
}
/** @deprecated Create a specific agent with a custom tool instead. */
export function createVectorStoreRouterAgent(
llm: BaseLanguageModelInterface,
toolkit: VectorStoreRouterToolkit,
args?: ZeroShotCreatePromptArgs
) {
const {
prefix = VECTOR_ROUTER_PREFIX,
suffix = SUFFIX,
inputVariables = ["input", "agent_scratchpad"],
} = args ?? {};
const { tools } = toolkit;
const prompt = ZeroShotAgent.createPrompt(tools, {
prefix,
suffix,
inputVariables,
});
const chain = new LLMChain({ prompt, llm });
const agent = new ZeroShotAgent({
llmChain: chain,
allowedTools: tools.map((t) => t.name),
});
return AgentExecutor.fromAgentAndTools({
agent,
tools,
returnIntermediateSteps: true,
});
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/vectorstore/prompt.ts | export const VECTOR_PREFIX = `You are an agent designed to answer questions about sets of documents.
You have access to tools for interacting with the documents, and the inputs to the tools are questions.
Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.
If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer.`;
/**
 * Prompt prefix for the router agent that chooses among multiple
 * vector-store QA tools. (Fixes two grammar errors in the prompt text:
 * "answering question at hand" and "use tools to answers".)
 */
export const VECTOR_ROUTER_PREFIX = `You are an agent designed to answer questions.
You have access to tools for interacting with different sources, and the inputs to the tools are questions.
Your main task is to decide which of the tools is relevant for answering the question at hand.
For complex questions, you can break the question down into sub questions and use tools to answer the sub questions.`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/tests/conversational_retrieval.int.test.ts | import { test } from "@jest/globals";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { MemoryVectorStore } from "../../../vectorstores/memory.js";
import { createConversationalRetrievalAgent } from "../conversational_retrieval/openai_functions.js";
import { createRetrieverTool } from "../conversational_retrieval/tool.js";
// Integration test: builds a conversational retrieval agent over an in-memory
// vector store and runs a short multi-turn conversation against it.
// Requires OpenAI credentials; outputs are nondeterministic so results are not asserted.
test("Test ConversationalRetrievalAgent", async () => {
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "LangCo made $10000 in July",
      "LangCo made $20 in August",
      "Foo is red",
      "Bar is red",
      "Buildings are made out of brick",
      "Mitochondria is the powerhouse of the cell",
    ],
    // One metadata entry per text above (the original list was one short,
    // leaving the last document without metadata).
    [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }, { id: 6 }],
    new OpenAIEmbeddings()
  );
  const llm = new ChatOpenAI({});
  const tools = [
    createRetrieverTool(vectorStore.asRetriever(), {
      name: "search_LangCo_knowledge",
      description: "Searches for and returns documents regarding LangCo",
    }),
  ];
  const executor = await createConversationalRetrievalAgent(llm, tools, {
    verbose: true,
  });
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await executor.invoke({
    input: "Hi, I'm Bob!",
  });
  // console.log(result);
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result2 = await executor.invoke({
    input: "What's my name?",
  });
  // console.log(result2);
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result3 = await executor.invoke({
    input: "How much money did LangCo make in July?",
  });
  // console.log(result3);
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result4 = await executor.invoke({
    input: "How about in August?",
  });
  // console.log(result4);
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/conversational_retrieval/openai_functions.ts | import { StructuredToolInterface } from "@langchain/core/tools";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationSummaryBufferMemory } from "../../../memory/summary_buffer.js";
import { initializeAgentExecutorWithOptions } from "../../initialize.js";
import { OpenAIAgentTokenBufferMemory } from "./token_buffer_memory.js";
/** Options accepted by `createConversationalRetrievalAgent`. */
export type ConversationalRetrievalAgentOptions = {
  /**
   * When true (the default), intermediate tool steps are stored in memory via
   * OpenAIAgentTokenBufferMemory; otherwise a ConversationSummaryBufferMemory is used.
   */
  rememberIntermediateSteps?: boolean;
  /** Memory key under which chat history is stored. Defaults to "chat_history". */
  memoryKey?: string;
  /** Key under which the agent's final output is read. Defaults to "output". */
  outputKey?: string;
  /** Key carrying the user's message. Defaults to "input". */
  inputKey?: string;
  /** Optional prompt prefix; a generic instruction is used when omitted. */
  prefix?: string;
  /** Enables verbose executor logging. */
  verbose?: boolean;
};
/**
 * Asynchronous function that creates a conversational retrieval agent
 * using a language model, tools, and options. It initializes the buffer
 * memory based on the provided options and initializes the AgentExecutor
 * with the tools, language model, and memory.
 * @param llm Instance of ChatOpenAI used as the language model for the agent.
 * @param tools Array of StructuredTool instances used by the agent.
 * @param options Optional ConversationalRetrievalAgentOptions to customize the agent.
 * @returns A Promise that resolves to an initialized AgentExecutor.
 */
export async function createConversationalRetrievalAgent(
  llm: ChatOpenAI,
  tools: StructuredToolInterface[],
  options?: ConversationalRetrievalAgentOptions
) {
  const {
    rememberIntermediateSteps = true,
    memoryKey = "chat_history",
    outputKey = "output",
    inputKey = "input",
    prefix,
    verbose,
  } = options ?? {};
  // Token-buffer memory also records intermediate agent steps; the
  // summary-buffer variant keeps only the conversation itself.
  const memory = rememberIntermediateSteps
    ? new OpenAIAgentTokenBufferMemory({
        memoryKey,
        llm,
        outputKey,
        inputKey,
      })
    : new ConversationSummaryBufferMemory({
        memoryKey,
        llm,
        maxTokenLimit: 12000,
        returnMessages: true,
        outputKey,
        inputKey,
      });
  return initializeAgentExecutorWithOptions(tools, llm, {
    agentType: "openai-functions",
    memory,
    verbose,
    returnIntermediateSteps: rememberIntermediateSteps,
    agentArgs: {
      prefix:
        prefix ??
        `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`,
    },
  });
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/conversational_retrieval/tool.ts | import type { BaseRetrieverInterface } from "@langchain/core/retrievers";
import { z } from "zod";
import { CallbackManagerForToolRun } from "@langchain/core/callbacks/manager";
import {
DynamicStructuredTool,
DynamicStructuredToolInput,
} from "@langchain/core/tools";
import { formatDocumentsAsString } from "../../../util/document.js";
/**
 * Wraps a retriever in a structured tool that takes a natural-language
 * query and returns the matching documents as a single string.
 *
 * @deprecated Use "langchain/tools/retriever" instead.
 */
export function createRetrieverTool(
  retriever: BaseRetrieverInterface,
  input: Omit<DynamicStructuredToolInput, "func" | "schema">
) {
  // Single-field schema: the query forwarded verbatim to the retriever.
  const schema = z.object({
    input: z
      .string()
      .describe("Natural language query used as input to the retriever"),
  });
  return new DynamicStructuredTool({
    ...input,
    schema,
    func: async (
      { input: query }: { input: string },
      runManager?: CallbackManagerForToolRun
    ) => {
      const docs = await retriever.getRelevantDocuments(
        query,
        runManager?.getChild("retriever")
      );
      return formatDocumentsAsString(docs);
    },
  });
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/conversational_retrieval/token_buffer_memory.ts | import { ChatOpenAI } from "@langchain/openai";
import {
InputValues,
MemoryVariables,
OutputValues,
getInputValue,
getOutputValue,
} from "@langchain/core/memory";
import { getBufferString } from "@langchain/core/messages";
import {
BaseChatMemory,
BaseChatMemoryInput,
} from "../../../memory/chat_memory.js";
import { _formatIntermediateSteps } from "../../openai_functions/index.js";
/**
 * Type definition for the fields required to initialize an instance of
 * OpenAIAgentTokenBufferMemory.
 */
export type OpenAIAgentTokenBufferMemoryFields = BaseChatMemoryInput & {
  /** Model used to count tokens when deciding whether to prune history. */
  llm: ChatOpenAI;
  /** Prefix for human messages when history is rendered as a string. Defaults to "Human". */
  humanPrefix?: string;
  /** Prefix for AI messages when history is rendered as a string. Defaults to "AI". */
  aiPrefix?: string;
  /** Key under which the memory is exposed. Defaults to "history". */
  memoryKey?: string;
  /** Token budget for the stored history. Defaults to 12000. */
  maxTokenLimit?: number;
  /** Whether to return message objects rather than a formatted string. Defaults to true. */
  returnMessages?: boolean;
  /** Key under which the agent's final output is found. Defaults to "output". */
  outputKey?: string;
  /** Key under which intermediate steps appear in output values. Defaults to "intermediateSteps". */
  intermediateStepsKey?: string;
};
/**
 * Memory used to save agent output and intermediate steps. Stores the
 * conversation (including tool-call messages) in chat history and prunes
 * the oldest messages once the history exceeds `maxTokenLimit` tokens.
 */
export class OpenAIAgentTokenBufferMemory extends BaseChatMemory {
  // Prefix for human messages when history is rendered as a string.
  humanPrefix = "Human";

  // Prefix for AI messages when history is rendered as a string.
  aiPrefix = "AI";

  // Model used to count tokens when deciding whether to prune.
  llm: ChatOpenAI;

  memoryKey = "history";

  // Token budget for stored history; oldest messages are dropped beyond it.
  maxTokenLimit = 12000;

  returnMessages = true;

  outputKey = "output";

  // Key under which intermediate steps appear in the output values.
  intermediateStepsKey = "intermediateSteps";

  constructor(fields: OpenAIAgentTokenBufferMemoryFields) {
    super(fields);
    this.humanPrefix = fields.humanPrefix ?? this.humanPrefix;
    this.aiPrefix = fields.aiPrefix ?? this.aiPrefix;
    this.llm = fields.llm;
    this.memoryKey = fields.memoryKey ?? this.memoryKey;
    this.maxTokenLimit = fields.maxTokenLimit ?? this.maxTokenLimit;
    this.returnMessages = fields.returnMessages ?? this.returnMessages;
    this.outputKey = fields.outputKey ?? this.outputKey;
    this.intermediateStepsKey =
      fields.intermediateStepsKey ?? this.intermediateStepsKey;
  }

  get memoryKeys(): string[] {
    return [this.memoryKey];
  }

  /**
   * Retrieves the messages from the chat history.
   * @returns Promise that resolves with the messages from the chat history.
   */
  async getMessages() {
    return this.chatHistory.getMessages();
  }

  /**
   * Loads memory variables from the input values.
   * @param _values Input values (unused; history is read from chat storage).
   * @returns Promise that resolves with the loaded memory variables.
   */
  async loadMemoryVariables(_values: InputValues): Promise<MemoryVariables> {
    const buffer = await this.getMessages();
    if (this.returnMessages) {
      return { [this.memoryKey]: buffer };
    } else {
      const bufferString = getBufferString(
        buffer,
        this.humanPrefix,
        this.aiPrefix
      );
      return { [this.memoryKey]: bufferString };
    }
  }

  /**
   * Saves the context of the chat, including user input, AI output, and
   * intermediate steps. Prunes the chat history if the total token count
   * exceeds the maximum limit.
   * @param inputValues Input values.
   * @param outputValues Output values.
   * @returns Promise that resolves when the context has been saved.
   */
  async saveContext(
    inputValues: InputValues,
    outputValues: OutputValues
  ): Promise<void> {
    const inputValue = getInputValue(inputValues, this.inputKey);
    const outputValue = getOutputValue(outputValues, this.outputKey);
    await this.chatHistory.addUserMessage(inputValue);
    // Persist the function/tool messages produced during this turn.
    const intermediateStepMessages = _formatIntermediateSteps(
      outputValues[this.intermediateStepsKey]
    );
    for (const message of intermediateStepMessages) {
      await this.chatHistory.addMessage(message);
    }
    await this.chatHistory.addAIChatMessage(outputValue);
    const currentMessages = await this.chatHistory.getMessages();
    let tokenInfo = await this.llm.getNumTokensFromMessages(currentMessages);
    if (tokenInfo.totalCount > this.maxTokenLimit) {
      // Drop the OLDEST messages until the retained (most recent) suffix fits
      // within the token budget, preserving chronological order.
      // Fixes the previous behavior, which popped the newest messages into a
      // side list (checking the budget against the discarded remainder) and
      // then re-added only those popped messages in reversed order.
      while (tokenInfo.totalCount > this.maxTokenLimit) {
        const droppedMessage = currentMessages.shift();
        if (!droppedMessage) {
          console.warn(
            `Could not prune enough messages from chat history to stay under ${this.maxTokenLimit} tokens.`
          );
          break;
        }
        tokenInfo = await this.llm.getNumTokensFromMessages(currentMessages);
      }
      await this.chatHistory.clear();
      for (const message of currentMessages) {
        await this.chatHistory.addMessage(message);
      }
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/openapi/openapi.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { ToolInterface } from "@langchain/core/tools";
import { DynamicTool, BaseToolkit } from "@langchain/core/tools";
import { JsonSpec } from "../../../tools/json.js";
import { AgentExecutor } from "../../executor.js";
import {
OPENAPI_PREFIX,
OPENAPI_SUFFIX,
JSON_EXPLORER_DESCRIPTION,
} from "./prompt.js";
import { LLMChain } from "../../../chains/llm_chain.js";
import { ZeroShotCreatePromptArgs, ZeroShotAgent } from "../../mrkl/index.js";
import {
Headers,
RequestsGetTool,
RequestsPostTool,
} from "../../../tools/requests.js";
import { createJsonAgent, JsonToolkit } from "../json/json.js";
/**
 * Represents a toolkit for making HTTP requests. It initializes the
 * request tools based on the provided headers.
 */
export class RequestsToolkit extends BaseToolkit {
  /** GET and POST request tools, both constructed with the same headers. */
  tools: ToolInterface[];

  /**
   * @param headers Optional headers forwarded to both request tools.
   */
  constructor(headers?: Headers) {
    super();
    this.tools = [new RequestsGetTool(headers), new RequestsPostTool(headers)];
  }
}
/**
 * Extends the `RequestsToolkit` class and adds a dynamic tool for
 * exploring JSON data. It creates a JSON agent using the `JsonToolkit`
 * and the provided language model, and adds the JSON explorer tool to the
 * toolkit.
 * @example
 * ```typescript
 * const toolkit = new OpenApiToolkit(
 *   new JsonSpec({
 *   }),
 *   new ChatOpenAI({ temperature: 0 }),
 *   {
 *     "Content-Type": "application/json",
 *     Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
 *   },
 * );
 * // Toolkits are not directly invokable; wire the tools into an agent first:
 * const executor = createOpenApiAgent(new ChatOpenAI({ temperature: 0 }), toolkit);
 * const result = await executor.invoke({
 *   input:
 *     "Make a POST request to openai /completions. The prompt should be 'tell me a joke.'",
 * });
 * console.log(`Got output ${result.output}`);
 * ```
 */
export class OpenApiToolkit extends RequestsToolkit {
  /**
   * @param jsonSpec The OpenAPI spec, wrapped in a JsonSpec, to explore.
   * @param llm Model driving the nested JSON-exploration agent.
   * @param headers Optional headers forwarded to the request tools.
   */
  constructor(
    jsonSpec: JsonSpec,
    llm: BaseLanguageModelInterface,
    headers?: Headers
  ) {
    super(headers);
    // Nested agent that answers questions about the OpenAPI spec itself.
    const jsonAgent = createJsonAgent(llm, new JsonToolkit(jsonSpec));
    this.tools = [
      ...this.tools,
      new DynamicTool({
        name: "json_explorer",
        func: async (input: string) => {
          const result = await jsonAgent.call({ input });
          return result.output as string;
        },
        description: JSON_EXPLORER_DESCRIPTION,
      }),
    ];
  }
}
/**
 * @deprecated Create a specific agent with a custom tool instead.
 *
 * Builds a ZeroShotAgent, wrapped in an AgentExecutor, that answers
 * questions by exploring an OpenAPI spec (via the `json_explorer` tool)
 * and issuing HTTP requests with the toolkit's request tools.
 * @param llm The language model to use.
 * @param openApiToolkit The OpenAPI toolkit to use.
 * @param args Optional arguments for creating the prompt.
 * @returns An AgentExecutor for executing the agent with the tools.
 *
 * @security **Security Notice** This agent provides access to external APIs.
 * Use with caution as this agent can make API calls with arbitrary headers.
 * Exposing this agent to users could lead to security vulnerabilities. Consider
 * limiting access to what endpoints it can hit, what actions can be taken, and
 * more.
 *
 * @link See https://js.langchain.com/docs/security for more information.
 */
export function createOpenApiAgent(
  llm: BaseLanguageModelInterface,
  openApiToolkit: OpenApiToolkit,
  args?: ZeroShotCreatePromptArgs
) {
  const promptArgs = args ?? {};
  const { tools } = openApiToolkit;
  const prompt = ZeroShotAgent.createPrompt(tools, {
    prefix: promptArgs.prefix ?? OPENAPI_PREFIX,
    suffix: promptArgs.suffix ?? OPENAPI_SUFFIX,
    inputVariables: promptArgs.inputVariables ?? ["input", "agent_scratchpad"],
  });
  const agent = new ZeroShotAgent({
    llmChain: new LLMChain({ prompt, llm }),
    allowedTools: tools.map((tool) => tool.name),
  });
  return AgentExecutor.fromAgentAndTools({
    agent,
    tools,
    returnIntermediateSteps: true,
  });
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/openapi/prompt.ts | export const OPENAPI_PREFIX = `You are an agent designed to answer questions by making web requests to an API given the OpenAPI spec.
If the question does not seem related to the API, return I don't know. Do not make up an answer.
Only use information provided by the tools to construct your response.
To find information in the OpenAPI spec, use the 'json_explorer' tool. The input to this tool is a question about the API.
Take the following steps:
First, find the base URL needed to make the request.
Second, find the relevant paths needed to answer the question. Take note that, sometimes, you might need to make more than one request to more than one path to answer the question.
Third, find the required parameters needed to make the request. For GET requests, these are usually URL parameters and for POST requests, these are request body parameters.
Fourth, make the requests needed to answer the question. Ensure that you are sending the correct parameters to the request by checking which parameters are required. For parameters with a fixed set of values, please use the spec to look at which values are allowed.
Use the exact parameter names as listed in the spec, do not make up any names or abbreviate the names of parameters.
If you get a not found error, ensure that you are using a path that actually exists in the spec.`;
/**
 * Suffix for the OpenAPI agent prompt: supplies the `{input}` question,
 * seeds the first thought, and exposes the `{agent_scratchpad}` slot.
 * NOTE(review): the stray '"' after "Begin!" looks unintentional — confirm
 * before changing, since this text is part of the live prompt.
 */
export const OPENAPI_SUFFIX = `Begin!"
Question: {input}
Thought: I should explore the spec to find the base url for the API.
{agent_scratchpad}`;
/**
 * Description attached to the `json_explorer` tool that lets the OpenAPI
 * agent ask questions about the spec before making requests.
 */
export const JSON_EXPLORER_DESCRIPTION = `
Can be used to answer questions about the openapi spec for the API. Always use this tool before trying to make a request. 
Example inputs to this tool: 
'What are the required query parameters for a GET request to the /bar endpoint?'
'What are the required parameters in the request body for a POST request to the /foo endpoint?'
Always give this tool a specific question.`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/json/json.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { type ToolInterface, BaseToolkit } from "@langchain/core/tools";
import {
JsonGetValueTool,
JsonListKeysTool,
JsonSpec,
} from "../../../tools/json.js";
import { JSON_PREFIX, JSON_SUFFIX } from "./prompt.js";
import { LLMChain } from "../../../chains/llm_chain.js";
import { ZeroShotCreatePromptArgs, ZeroShotAgent } from "../../mrkl/index.js";
import { AgentExecutor } from "../../executor.js";
/**
 * Represents a toolkit for working with JSON data. It initializes the
 * JSON tools based on the provided JSON specification.
 * @example
 * ```typescript
 * const toolkit = new JsonToolkit(new JsonSpec());
 * const executor = createJsonAgent(model, toolkit);
 * const result = await executor.invoke({
 *   input: 'What are the required parameters in the request body to the /completions endpoint?'
 * });
 * ```
 */
export class JsonToolkit extends BaseToolkit {
  /** Key-listing and value-reading tools over the wrapped JSON spec. */
  tools: ToolInterface[];

  /**
   * @param jsonSpec The JSON document, wrapped in a JsonSpec, the tools operate on.
   */
  constructor(public jsonSpec: JsonSpec) {
    super();
    this.tools = [
      new JsonListKeysTool(jsonSpec),
      new JsonGetValueTool(jsonSpec),
    ];
  }
}
/**
 * @deprecated Create a specific agent with a custom tool instead.
 *
 * Builds a ZeroShotAgent, wrapped in an AgentExecutor, that answers
 * questions about a JSON document using the key-listing and value-reading
 * tools of the given JsonToolkit.
 * @param llm The language model used to create the JSON agent.
 * @param toolkit The JSON toolkit used to create the JSON agent.
 * @param args Optional prompt arguments used to create the JSON agent.
 * @returns An AgentExecutor for executing the created JSON agent with the tools.
 */
export function createJsonAgent(
  llm: BaseLanguageModelInterface,
  toolkit: JsonToolkit,
  args?: ZeroShotCreatePromptArgs
) {
  const promptArgs = args ?? {};
  const { tools } = toolkit;
  const prompt = ZeroShotAgent.createPrompt(tools, {
    prefix: promptArgs.prefix ?? JSON_PREFIX,
    suffix: promptArgs.suffix ?? JSON_SUFFIX,
    inputVariables: promptArgs.inputVariables ?? ["input", "agent_scratchpad"],
  });
  const toolNames = tools.map((tool) => tool.name);
  const agent = new ZeroShotAgent({
    llmChain: new LLMChain({ prompt, llm }),
    allowedTools: toolNames,
  });
  return AgentExecutor.fromAgentAndTools({
    agent,
    tools,
    returnIntermediateSteps: true,
  });
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents/toolkits | lc_public_repos/langchainjs/langchain/src/agents/toolkits/json/prompt.ts | export const JSON_PREFIX = `You are an agent designed to interact with JSON.
Your goal is to return a final answer by interacting with the JSON.
You have access to the following tools which help you learn more about the JSON you are interacting with.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
Do not make up any information that is not contained in the JSON.
Your input to the tools should be in the form of in json pointer syntax (e.g. /key1/0/key2).
You must escape a slash in a key with a ~1, and escape a tilde with a ~0.
For example, to access the key /foo, you would use /~1foo
You should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling 'json_list_keys'.
If you have not seen a key in one of those responses, you cannot use it.
You should only add one key at a time to the path. You cannot add multiple keys at once.
If you encounter a null or undefined value, go back to the previous key, look at the available keys, and try again.
If the question does not seem to be related to the JSON, just return "I don't know" as the answer.
Always begin your interaction with the 'json_list_keys' with an empty string as the input to see what keys exist in the JSON.
Note that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".
In this case, you should ALWAYS follow up by using the 'json_list_keys' tool to see what keys exist at that path.
Do not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.`;
/**
 * Suffix for the JSON agent prompt: supplies the `{input}` question, seeds
 * the first thought (start by listing top-level keys), and exposes the
 * `{agent_scratchpad}` slot.
 * NOTE(review): the stray '"' after "Begin!" looks unintentional — confirm
 * before changing, since this text is part of the live prompt.
 */
export const JSON_SUFFIX = `Begin!"
Question: {input}
Thought: I should look at the keys that exist to see what I can query. I should use the 'json_list_keys' tool with an empty string as the input.
{agent_scratchpad}`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/xml/index.ts | import type {
BaseLanguageModel,
BaseLanguageModelInterface,
} from "@langchain/core/language_models/base";
import type { ToolInterface } from "@langchain/core/tools";
import { RunnablePassthrough } from "@langchain/core/runnables";
import type { BasePromptTemplate } from "@langchain/core/prompts";
import { AgentStep, AgentAction, AgentFinish } from "@langchain/core/agents";
import { ChainValues } from "@langchain/core/utils/types";
import {
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
} from "@langchain/core/prompts";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { LLMChain } from "../../chains/llm_chain.js";
import {
AgentArgs,
AgentRunnableSequence,
BaseSingleActionAgent,
} from "../agent.js";
import { AGENT_INSTRUCTIONS } from "./prompt.js";
import { XMLAgentOutputParser } from "./output_parser.js";
import { renderTextDescription } from "../../tools/render.js";
import { formatXml } from "../format_scratchpad/xml.js";
/**
 * Interface for the input to the XMLAgent class.
 */
export interface XMLAgentInput {
  /** Tools the agent may call; rendered into the prompt as "name: description" lines. */
  tools: ToolInterface[];
  /** Chain that produces the agent's next XML-formatted step. */
  llmChain: LLMChain;
}
/**
* Class that represents an agent that uses XML tags.
*
* @deprecated Use the {@link https://api.js.langchain.com/functions/langchain.agents.createXmlAgent.html | createXmlAgent method instead}.
*/
export class XMLAgent extends BaseSingleActionAgent implements XMLAgentInput {
static lc_name() {
return "XMLAgent";
}
lc_namespace = ["langchain", "agents", "xml"];
tools: ToolInterface[];
llmChain: LLMChain;
outputParser: XMLAgentOutputParser = new XMLAgentOutputParser();
_agentType() {
return "xml" as const;
}
constructor(fields: XMLAgentInput) {
super(fields);
this.tools = fields.tools;
this.llmChain = fields.llmChain;
}
get inputKeys() {
return ["input"];
}
static createPrompt() {
return ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(AGENT_INSTRUCTIONS),
AIMessagePromptTemplate.fromTemplate("{intermediate_steps}"),
]);
}
/**
* Plans the next action or finish state of the agent based on the
* provided steps, inputs, and optional callback manager.
* @param steps The steps to consider in planning.
* @param inputs The inputs to consider in planning.
* @param callbackManager Optional CallbackManager to use in planning.
* @returns A Promise that resolves to an AgentAction or AgentFinish object representing the planned action or finish state.
*/
async plan(
steps: AgentStep[],
inputs: ChainValues,
callbackManager?: CallbackManager
): Promise<AgentAction | AgentFinish> {
let log = "";
for (const { action, observation } of steps) {
log += `<tool>${action.tool}</tool><tool_input>${action.toolInput}</tool_input><observation>${observation}</observation>`;
}
let tools = "";
for (const tool of this.tools) {
tools += `${tool.name}: ${tool.description}\n`;
}
const _inputs = {
intermediate_steps: log,
tools,
question: inputs.input,
stop: ["</tool_input>", "</final_answer>"],
};
const response = await this.llmChain.call(_inputs, callbackManager);
return this.outputParser.parse(response[this.llmChain.outputKey]);
}
/**
* Creates an XMLAgent from a BaseLanguageModel and a list of tools.
* @param llm The BaseLanguageModel to use.
* @param tools The tools to be used by the agent.
* @param args Optional arguments for creating the agent.
* @returns An instance of XMLAgent.
*/
static fromLLMAndTools(
llm: BaseLanguageModelInterface,
tools: ToolInterface[],
args?: XMLAgentInput & Pick<AgentArgs, "callbacks">
) {
const prompt = XMLAgent.createPrompt();
const chain = new LLMChain({
prompt,
llm,
callbacks: args?.callbacks,
});
return new XMLAgent({
llmChain: chain,
tools,
});
}
}
/**
* Params used by the createXmlAgent function.
*/
export type CreateXmlAgentParams = {
/** LLM to use for the agent. */
llm: BaseLanguageModelInterface;
/** Tools this agent has access to. */
tools: ToolInterface[];
/**
* The prompt to use. Must have input keys for
* `tools` and `agent_scratchpad`.
*/
prompt: BasePromptTemplate;
/**
* Whether to invoke the underlying model in streaming mode,
* allowing streaming of intermediate steps. Defaults to true.
*/
streamRunnable?: boolean;
};
/**
* Create an agent that uses XML to format its logic.
* @param params Params required to create the agent. Includes an LLM, tools, and prompt.
* @returns A runnable sequence representing an agent. It takes as input all the same input
* variables as the prompt passed in does. It returns as output either an
* AgentAction or AgentFinish.
*
* @example
* ```typescript
* import { AgentExecutor, createXmlAgent } from "langchain/agents";
* import { pull } from "langchain/hub";
* import type { PromptTemplate } from "@langchain/core/prompts";
*
* import { ChatAnthropic } from "@langchain/anthropic";
*
* // Define the tools the agent will have access to.
* const tools = [...];
*
* // Get the prompt to use - you can modify this!
* // If you want to see the prompt in full, you can at:
* // https://smith.langchain.com/hub/hwchase17/xml-agent-convo
* const prompt = await pull<PromptTemplate>("hwchase17/xml-agent-convo");
*
* const llm = new ChatAnthropic({
* temperature: 0,
* });
*
* const agent = await createXmlAgent({
* llm,
* tools,
* prompt,
* });
*
* const agentExecutor = new AgentExecutor({
* agent,
* tools,
* });
*
* const result = await agentExecutor.invoke({
* input: "what is LangChain?",
* });
*
* // With chat history
* const result2 = await agentExecutor.invoke({
* input: "what's my name?",
* // Notice that chat_history is a string, since this prompt is aimed at LLMs, not chat models
* chat_history: "Human: Hi! My name is Cob\nAI: Hello Cob! Nice to meet you",
* });
* ```
*/
export async function createXmlAgent({
llm,
tools,
prompt,
streamRunnable,
}: CreateXmlAgentParams) {
const missingVariables = ["tools", "agent_scratchpad"].filter(
(v) => !prompt.inputVariables.includes(v)
);
if (missingVariables.length > 0) {
throw new Error(
`Provided prompt is missing required input variables: ${JSON.stringify(
missingVariables
)}`
);
}
const partialedPrompt = await prompt.partial({
tools: renderTextDescription(tools),
});
// TODO: Add .bind to core runnable interface.
const llmWithStop = (llm as BaseLanguageModel).bind({
stop: ["</tool_input>", "</final_answer>"],
});
const agent = AgentRunnableSequence.fromRunnables(
[
RunnablePassthrough.assign({
agent_scratchpad: (input: { steps: AgentStep[] }) =>
formatXml(input.steps),
}),
partialedPrompt,
llmWithStop,
new XMLAgentOutputParser(),
],
{
name: "XMLAgent",
streamRunnable,
singleAction: true,
}
);
return agent;
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/xml/output_parser.ts | import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { OutputParserException } from "@langchain/core/output_parsers";
import { AgentActionOutputParser } from "../types.js";
/**
* @example
* ```typescript
* const prompt = ChatPromptTemplate.fromMessages([
* HumanMessagePromptTemplate.fromTemplate(AGENT_INSTRUCTIONS),
* new MessagesPlaceholder("agent_scratchpad"),
* ]);
* const runnableAgent = RunnableSequence.from([
* ...rest of runnable
* prompt,
* new ChatAnthropic({ modelName: "claude-2", temperature: 0 }).bind({
* stop: ["</tool_input>", "</final_answer>"],
* }),
* new XMLAgentOutputParser(),
* ]);
* const result = await executor.invoke({
* input: "What is the weather in Honolulu?",
* tools: [],
* });
* ```
*/
export class XMLAgentOutputParser extends AgentActionOutputParser {
lc_namespace = ["langchain", "agents", "xml"];
static lc_name() {
return "XMLAgentOutputParser";
}
/**
* Parses the output text from the agent and returns an AgentAction or
* AgentFinish object.
* @param text The output text from the agent.
* @returns An AgentAction or AgentFinish object.
*/
async parse(text: string): Promise<AgentAction | AgentFinish> {
if (text.includes("</tool>")) {
const [tool, toolInput] = text.split("</tool>");
const _tool = tool.split("<tool>")[1];
const _toolInput = toolInput.split("<tool_input>")[1];
return { tool: _tool, toolInput: _toolInput, log: text };
} else if (text.includes("<final_answer>")) {
const [, answer] = text.split("<final_answer>");
return { returnValues: { output: answer }, log: text };
} else {
throw new OutputParserException(`Could not parse LLM output: ${text}`);
}
}
getFormatInstructions(): string {
throw new Error(
"getFormatInstructions not implemented inside OpenAIFunctionsAgentOutputParser."
);
}
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/xml/prompt.ts | export const AGENT_INSTRUCTIONS = `You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags.
You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<final_answer>The weather in SF is 64 degrees</final_answer>
Begin!
Question: {question}`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/react/index.ts | import type { ToolInterface } from "@langchain/core/tools";
import { BasePromptTemplate } from "@langchain/core/prompts";
import type {
BaseLanguageModel,
BaseLanguageModelInterface,
} from "@langchain/core/language_models/base";
import { RunnablePassthrough } from "@langchain/core/runnables";
import { AgentStep } from "@langchain/core/agents";
import { renderTextDescription } from "../../tools/render.js";
import { formatLogToString } from "../format_scratchpad/log.js";
import { ReActSingleInputOutputParser } from "./output_parser.js";
import { AgentRunnableSequence } from "../agent.js";
/**
* Params used by the createXmlAgent function.
*/
export type CreateReactAgentParams = {
/** LLM to use for the agent. */
llm: BaseLanguageModelInterface;
/** Tools this agent has access to. */
tools: ToolInterface[];
/**
* The prompt to use. Must have input keys for
* `tools`, `tool_names`, and `agent_scratchpad`.
*/
prompt: BasePromptTemplate;
/**
* Whether to invoke the underlying model in streaming mode,
* allowing streaming of intermediate steps. Defaults to true.
*/
streamRunnable?: boolean;
};
/**
* Create an agent that uses ReAct prompting.
* @param params Params required to create the agent. Includes an LLM, tools, and prompt.
* @returns A runnable sequence representing an agent. It takes as input all the same input
* variables as the prompt passed in does. It returns as output either an
* AgentAction or AgentFinish.
*
* @example
* ```typescript
* import { AgentExecutor, createReactAgent } from "langchain/agents";
* import { pull } from "langchain/hub";
* import type { PromptTemplate } from "@langchain/core/prompts";
*
* import { OpenAI } from "@langchain/openai";
*
* // Define the tools the agent will have access to.
* const tools = [...];
*
* // Get the prompt to use - you can modify this!
* // If you want to see the prompt in full, you can at:
* // https://smith.langchain.com/hub/hwchase17/react
* const prompt = await pull<PromptTemplate>("hwchase17/react");
*
* const llm = new OpenAI({
* temperature: 0,
* });
*
* const agent = await createReactAgent({
* llm,
* tools,
* prompt,
* });
*
* const agentExecutor = new AgentExecutor({
* agent,
* tools,
* });
*
* const result = await agentExecutor.invoke({
* input: "what is LangChain?",
* });
* ```
*/
export async function createReactAgent({
llm,
tools,
prompt,
streamRunnable,
}: CreateReactAgentParams) {
const missingVariables = ["tools", "tool_names", "agent_scratchpad"].filter(
(v) => !prompt.inputVariables.includes(v)
);
if (missingVariables.length > 0) {
throw new Error(
`Provided prompt is missing required input variables: ${JSON.stringify(
missingVariables
)}`
);
}
const toolNames = tools.map((tool) => tool.name);
const partialedPrompt = await prompt.partial({
tools: renderTextDescription(tools),
tool_names: toolNames.join(", "),
});
// TODO: Add .bind to core runnable interface.
const llmWithStop = (llm as BaseLanguageModel).bind({
stop: ["\nObservation:"],
});
const agent = AgentRunnableSequence.fromRunnables(
[
RunnablePassthrough.assign({
agent_scratchpad: (input: { steps: AgentStep[] }) =>
formatLogToString(input.steps),
}),
partialedPrompt,
llmWithStop,
new ReActSingleInputOutputParser({
toolNames,
}),
],
{
name: "ReactAgent",
streamRunnable,
singleAction: true,
}
);
return agent;
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/react/output_parser.ts | import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { renderTemplate } from "@langchain/core/prompts";
import { OutputParserException } from "@langchain/core/output_parsers";
import { AgentActionOutputParser } from "../types.js";
import { FORMAT_INSTRUCTIONS } from "./prompt.js";
const FINAL_ANSWER_ACTION = "Final Answer:";
const FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE =
"Parsing LLM output produced both a final answer and a parse-able action:";
/**
* Parses ReAct-style LLM calls that have a single tool input.
*
* Expects output to be in one of two formats.
*
* If the output signals that an action should be taken,
* should be in the below format. This will result in an AgentAction
* being returned.
*
* ```
* Thought: agent thought here
* Action: search
* Action Input: what is the temperature in SF?
* ```
*
* If the output signals that a final answer should be given,
* should be in the below format. This will result in an AgentFinish
* being returned.
*
* ```
* Thought: agent thought here
* Final Answer: The temperature is 100 degrees
* ```
* @example
* ```typescript
*
* const runnableAgent = RunnableSequence.from([
* ...rest of runnable
* new ReActSingleInputOutputParser({ toolNames: ["SerpAPI", "Calculator"] }),
* ]);
* const agent = AgentExecutor.fromAgentAndTools({
* agent: runnableAgent,
* tools: [new SerpAPI(), new Calculator()],
* });
* const result = await agent.invoke({
* input: "whats the weather in pomfret?",
* });
* ```
*/
export class ReActSingleInputOutputParser extends AgentActionOutputParser {
lc_namespace = ["langchain", "agents", "react"];
private toolNames: string[];
constructor(fields: { toolNames: string[] }) {
super(...arguments);
this.toolNames = fields.toolNames;
}
/**
* Parses the given text into an AgentAction or AgentFinish object. If an
* output fixing parser is defined, uses it to parse the text.
* @param text Text to parse.
* @returns Promise that resolves to an AgentAction or AgentFinish object.
*/
async parse(text: string): Promise<AgentAction | AgentFinish> {
const includesAnswer = text.includes(FINAL_ANSWER_ACTION);
const regex =
/Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)/;
const actionMatch = text.match(regex);
if (actionMatch) {
if (includesAnswer) {
throw new OutputParserException(
`${FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: ${text}`
);
}
const action = actionMatch[1];
const actionInput = actionMatch[2];
const toolInput = actionInput.trim().replace(/"/g, "");
return {
tool: action,
toolInput,
log: text,
};
}
if (includesAnswer) {
const finalAnswerText = text.split(FINAL_ANSWER_ACTION)[1].trim();
return {
returnValues: {
output: finalAnswerText,
},
log: text,
};
}
throw new OutputParserException(`Could not parse LLM output: ${text}`);
}
/**
* Returns the format instructions as a string. If the 'raw' option is
* true, returns the raw FORMAT_INSTRUCTIONS.
* @param options Options for getting the format instructions.
* @returns Format instructions as a string.
*/
getFormatInstructions(): string {
return renderTemplate(FORMAT_INSTRUCTIONS, "f-string", {
tool_names: this.toolNames.join(", "),
});
}
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/react/prompt.ts | export const FORMAT_INSTRUCTIONS = `Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/chat_convo/outputParser.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { AgentAction, AgentFinish } from "@langchain/core/agents";
import {
FormatInstructionsOptions,
OutputParserException,
} from "@langchain/core/output_parsers";
import { renderTemplate } from "@langchain/core/prompts";
import { AgentActionOutputParser } from "../types.js";
import { FORMAT_INSTRUCTIONS } from "./prompt.js";
import { OutputFixingParser } from "../../output_parsers/fix.js";
export type ChatConversationalAgentOutputParserFormatInstructionsOptions =
FormatInstructionsOptions & {
toolNames: string[];
raw?: boolean;
};
/**
* Class that represents an output parser for the ChatConversationalAgent
* class. It extends the AgentActionOutputParser class and provides
* methods for parsing the output of the MRKL chain into agent actions.
*/
export class ChatConversationalAgentOutputParser extends AgentActionOutputParser {
lc_namespace = ["langchain", "agents", "chat_convo"];
private toolNames: string[];
constructor(fields: { toolNames: string[] }) {
super(...arguments);
this.toolNames = fields.toolNames;
}
/**
* Parses the given text into an AgentAction or AgentFinish object. If an
* output fixing parser is defined, uses it to parse the text.
* @param text Text to parse.
* @returns Promise that resolves to an AgentAction or AgentFinish object.
*/
async parse(text: string): Promise<AgentAction | AgentFinish> {
let jsonOutput = text.trim();
if (jsonOutput.includes("```json") || jsonOutput.includes("```")) {
const testString = jsonOutput.includes("```json") ? "```json" : "```";
const firstIndex = jsonOutput.indexOf(testString);
const actionInputIndex = jsonOutput.indexOf("action_input");
if (actionInputIndex > firstIndex) {
jsonOutput = jsonOutput
.slice(firstIndex + testString.length)
.trimStart();
const lastIndex = jsonOutput.lastIndexOf("```");
if (lastIndex !== -1) {
jsonOutput = jsonOutput.slice(0, lastIndex).trimEnd();
}
}
}
try {
const response = JSON.parse(jsonOutput);
const { action, action_input } = response;
if (action === "Final Answer") {
return { returnValues: { output: action_input }, log: text };
}
return { tool: action, toolInput: action_input, log: text };
} catch (e) {
throw new OutputParserException(
`Failed to parse. Text: "${text}". Error: ${e}`
);
}
}
/**
* Returns the format instructions as a string. If the 'raw' option is
* true, returns the raw FORMAT_INSTRUCTIONS.
* @param options Options for getting the format instructions.
* @returns Format instructions as a string.
*/
getFormatInstructions(): string {
return renderTemplate(FORMAT_INSTRUCTIONS, "f-string", {
tool_names: this.toolNames.join(", "),
});
}
}
export type ChatConversationalAgentOutputParserArgs = {
baseParser?: ChatConversationalAgentOutputParser;
outputFixingParser?: OutputFixingParser<AgentAction | AgentFinish>;
toolNames?: string[];
};
/**
* Class that represents an output parser with retries for the
* ChatConversationalAgent class. It extends the AgentActionOutputParser
* class and provides methods for parsing the output of the MRKL chain
* into agent actions with retry functionality.
*/
export class ChatConversationalAgentOutputParserWithRetries extends AgentActionOutputParser {
lc_namespace = ["langchain", "agents", "chat_convo"];
private baseParser: ChatConversationalAgentOutputParser;
private outputFixingParser?: OutputFixingParser<AgentAction | AgentFinish>;
private toolNames: string[] = [];
constructor(fields: ChatConversationalAgentOutputParserArgs) {
super(fields);
this.toolNames = fields.toolNames ?? this.toolNames;
this.baseParser =
fields?.baseParser ??
new ChatConversationalAgentOutputParser({ toolNames: this.toolNames });
this.outputFixingParser = fields?.outputFixingParser;
}
/**
* Returns the format instructions as a string.
* @returns Format instructions as a string.
*/
getFormatInstructions(
options: ChatConversationalAgentOutputParserFormatInstructionsOptions
): string {
if (options.raw) {
return FORMAT_INSTRUCTIONS;
}
return renderTemplate(FORMAT_INSTRUCTIONS, "f-string", {
tool_names: options.toolNames.join(", "),
});
}
/**
* Parses the given text into an AgentAction or AgentFinish object.
* @param text Text to parse.
* @returns Promise that resolves to an AgentAction or AgentFinish object.
*/
async parse(text: string): Promise<AgentAction | AgentFinish> {
if (this.outputFixingParser !== undefined) {
return this.outputFixingParser.parse(text);
}
return this.baseParser.parse(text);
}
/**
* Static method to create a new
* ChatConversationalAgentOutputParserWithRetries from a BaseLanguageModelInterface
* and options. If no base parser is provided in the options, a new
* ChatConversationalAgentOutputParser is created. An OutputFixingParser
* is also created from the BaseLanguageModelInterface and the base parser.
* @param llm BaseLanguageModelInterface instance used to create the OutputFixingParser.
* @param options Options for creating the ChatConversationalAgentOutputParserWithRetries instance.
* @returns A new instance of ChatConversationalAgentOutputParserWithRetries.
*/
static fromLLM(
llm: BaseLanguageModelInterface,
options: Omit<ChatConversationalAgentOutputParserArgs, "outputFixingParser">
): ChatConversationalAgentOutputParserWithRetries {
const baseParser =
options.baseParser ??
new ChatConversationalAgentOutputParser({
toolNames: options.toolNames ?? [],
});
const outputFixingParser = OutputFixingParser.fromLLM(llm, baseParser);
return new ChatConversationalAgentOutputParserWithRetries({
baseParser,
outputFixingParser,
toolNames: options.toolNames,
});
}
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/chat_convo/index.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { ToolInterface } from "@langchain/core/tools";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
renderTemplate,
} from "@langchain/core/prompts";
import type { AgentStep } from "@langchain/core/agents";
import {
type BaseMessage,
HumanMessage,
AIMessage,
} from "@langchain/core/messages";
import { LLMChain } from "../../chains/llm_chain.js";
import { Optional } from "../../types/type-utils.js";
import { Agent, AgentArgs, OutputParserArgs } from "../agent.js";
import { AgentActionOutputParser, AgentInput } from "../types.js";
import { ChatConversationalAgentOutputParserWithRetries } from "./outputParser.js";
import {
PREFIX_END,
DEFAULT_PREFIX,
DEFAULT_SUFFIX,
TEMPLATE_TOOL_RESPONSE,
} from "./prompt.js";
/**
* Interface defining the structure of arguments used to create a prompt
* for the ChatConversationalAgent class.
*/
export interface ChatConversationalCreatePromptArgs {
/** String to put after the list of tools. */
systemMessage?: string;
/** String to put before the list of tools. */
humanMessage?: string;
/** List of input variables the final prompt will expect. */
inputVariables?: string[];
/** Output parser to use for formatting. */
outputParser?: AgentActionOutputParser;
}
/**
* Type that extends the AgentInput interface for the
* ChatConversationalAgent class, making the outputParser property
* optional.
*/
export type ChatConversationalAgentInput = Optional<AgentInput, "outputParser">;
/**
* Agent for the MRKL chain.
* @augments Agent
*
* @deprecated Use the {@link https://api.js.langchain.com/functions/langchain.agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}.
*/
export class ChatConversationalAgent extends Agent {
static lc_name() {
return "ChatConversationalAgent";
}
lc_namespace = ["langchain", "agents", "chat_convo"];
declare ToolType: ToolInterface;
constructor(input: ChatConversationalAgentInput) {
const outputParser =
input.outputParser ?? ChatConversationalAgent.getDefaultOutputParser();
super({ ...input, outputParser });
}
_agentType() {
return "chat-conversational-react-description" as const;
}
observationPrefix() {
return "Observation: ";
}
llmPrefix() {
return "Thought:";
}
_stop(): string[] {
return ["Observation:"];
}
static validateTools(tools: ToolInterface[]) {
const descriptionlessTool = tools.find((tool) => !tool.description);
if (descriptionlessTool) {
const msg =
`Got a tool ${descriptionlessTool.name} without a description.` +
` This agent requires descriptions for all tools.`;
throw new Error(msg);
}
}
/**
* Constructs the agent scratchpad based on the agent steps. It returns an
* array of base messages representing the thoughts of the agent.
* @param steps The agent steps to construct the scratchpad from.
* @returns An array of base messages representing the thoughts of the agent.
*/
async constructScratchPad(steps: AgentStep[]): Promise<BaseMessage[]> {
const thoughts: BaseMessage[] = [];
for (const step of steps) {
thoughts.push(new AIMessage(step.action.log));
thoughts.push(
new HumanMessage(
renderTemplate(TEMPLATE_TOOL_RESPONSE, "f-string", {
observation: step.observation,
})
)
);
}
return thoughts;
}
/**
* Returns the default output parser for the ChatConversationalAgent
* class. It takes optional fields as arguments to customize the output
* parser.
* @param fields Optional fields to customize the output parser.
* @returns The default output parser for the ChatConversationalAgent class.
*/
static getDefaultOutputParser(
fields?: OutputParserArgs & {
toolNames: string[];
}
): AgentActionOutputParser {
if (fields?.llm) {
return ChatConversationalAgentOutputParserWithRetries.fromLLM(
fields.llm,
{
toolNames: fields.toolNames,
}
);
}
return new ChatConversationalAgentOutputParserWithRetries({
toolNames: fields?.toolNames,
});
}
/**
* Create prompt in the style of the ChatConversationAgent.
*
* @param tools - List of tools the agent will have access to, used to format the prompt.
* @param args - Arguments to create the prompt with.
* @param args.systemMessage - String to put before the list of tools.
* @param args.humanMessage - String to put after the list of tools.
* @param args.outputParser - Output parser to use for formatting.
*/
static createPrompt(
tools: ToolInterface[],
args?: ChatConversationalCreatePromptArgs
) {
const systemMessage = (args?.systemMessage ?? DEFAULT_PREFIX) + PREFIX_END;
const humanMessage = args?.humanMessage ?? DEFAULT_SUFFIX;
const toolStrings = tools
.map((tool) => `${tool.name}: ${tool.description}`)
.join("\n");
const toolNames = tools.map((tool) => tool.name);
const outputParser =
args?.outputParser ??
ChatConversationalAgent.getDefaultOutputParser({ toolNames });
const formatInstructions = outputParser.getFormatInstructions({
toolNames,
});
const renderedHumanMessage = renderTemplate(humanMessage, "f-string", {
format_instructions: formatInstructions,
tools: toolStrings,
});
const messages = [
SystemMessagePromptTemplate.fromTemplate(systemMessage),
new MessagesPlaceholder("chat_history"),
HumanMessagePromptTemplate.fromTemplate(renderedHumanMessage),
new MessagesPlaceholder("agent_scratchpad"),
];
return ChatPromptTemplate.fromMessages(messages);
}
/**
* Creates an instance of the ChatConversationalAgent class from a
* BaseLanguageModel and a set of tools. It takes optional arguments to
* customize the agent.
* @param llm The BaseLanguageModel to create the agent from.
* @param tools The set of tools to create the agent from.
* @param args Optional arguments to customize the agent.
* @returns An instance of the ChatConversationalAgent class.
*/
static fromLLMAndTools(
llm: BaseLanguageModelInterface,
tools: ToolInterface[],
args?: ChatConversationalCreatePromptArgs & AgentArgs
) {
ChatConversationalAgent.validateTools(tools);
const outputParser =
args?.outputParser ??
ChatConversationalAgent.getDefaultOutputParser({
llm,
toolNames: tools.map((tool) => tool.name),
});
const prompt = ChatConversationalAgent.createPrompt(tools, {
...args,
outputParser,
});
const chain = new LLMChain({
prompt,
llm,
callbacks: args?.callbacks ?? args?.callbackManager,
});
return new ChatConversationalAgent({
llmChain: chain,
outputParser,
allowedTools: tools.map((t) => t.name),
});
}
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/chat_convo/prompt.ts | export const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.`;
export const PREFIX_END = ` However, above all else, all responses must adhere to the format of RESPONSE FORMAT INSTRUCTIONS.`;
export const FORMAT_INSTRUCTIONS = `RESPONSE FORMAT INSTRUCTIONS
----------------------------
Output a JSON markdown code snippet containing a valid JSON object in one of two formats:
**Option 1:**
Use this if you want the human to use a tool.
Markdown code snippet formatted in the following schema:
\`\`\`json
{{{{
"action": string, // The action to take. Must be one of [{tool_names}]
"action_input": string // The input to the action. May be a stringified object.
}}}}
\`\`\`
**Option #2:**
Use this if you want to respond directly and conversationally to the human. Markdown code snippet formatted in the following schema:
\`\`\`json
{{{{
"action": "Final Answer",
"action_input": string // You should put what you want to return to user here and make sure to use valid json newline characters.
}}}}
\`\`\`
For both options, remember to always include the surrounding markdown code snippet delimiters (begin with "\`\`\`json" and end with "\`\`\`")!
`;
export const DEFAULT_SUFFIX = `TOOLS
------
Assistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:
{tools}
{format_instructions}
USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
{{input}}`;
export const TEMPLATE_TOOL_RESPONSE = `TOOL RESPONSE:
---------------------
{observation}
USER'S INPUT
--------------------
Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else.`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/mrkl/outputParser.ts | import { OutputParserException } from "@langchain/core/output_parsers";
import { OutputParserArgs } from "../agent.js";
import { AgentActionOutputParser } from "../types.js";
import { FORMAT_INSTRUCTIONS } from "./prompt.js";
export const FINAL_ANSWER_ACTION = "Final Answer:";
/**
 * Output parser for the ZeroShotAgent (MRKL / ReAct style). Turns the raw LLM
 * text into either a finish payload (when the final-answer marker appears) or
 * a tool action extracted from the "Action:" / "Action Input:" lines.
 */
export class ZeroShotAgentOutputParser extends AgentActionOutputParser {
  lc_namespace = ["langchain", "agents", "mrkl"];

  // Marker that signals the agent is done; defaults to FINAL_ANSWER_ACTION.
  finishToolName: string;

  constructor(fields?: OutputParserArgs) {
    super(fields);
    this.finishToolName = fields?.finishToolName || FINAL_ANSWER_ACTION;
  }

  /**
   * Parses the text output of an agent action.
   *
   * If the finish marker occurs anywhere in the text, everything after its
   * last occurrence becomes the final output. Otherwise the tool name and
   * tool input are pulled from the trailing "Action:" / "Action Input:" pair.
   * @param text The text output of an agent action.
   * @returns A finish object ({ returnValues, log }) or an action object
   *   ({ tool, toolInput, log }); the full text is always preserved in `log`.
   * @throws OutputParserException when no action can be extracted.
   */
  async parse(text: string) {
    const finishIndex = text.lastIndexOf(this.finishToolName);
    if (finishIndex !== -1) {
      // Take everything after the last occurrence of the finish marker.
      const output = text
        .slice(finishIndex + this.finishToolName.length)
        .trim();
      return {
        returnValues: { output },
        log: text,
      };
    }
    const actionMatch = /Action:([\s\S]*?)(?:\nAction Input:([\s\S]*?))?$/.exec(
      text
    );
    if (actionMatch === null) {
      throw new OutputParserException(`Could not parse LLM output: ${text}`);
    }
    const [, rawTool, rawInput] = actionMatch;
    // Strip one layer of symmetric surrounding double quotes from the input.
    const toolInput = rawInput
      ? rawInput.trim().replace(/^("+)(.*?)(\1)$/, "$2")
      : "";
    return {
      tool: rawTool.trim(),
      toolInput,
      log: text,
    };
  }

  /**
   * Returns the ReAct format instructions describing the expected response
   * layout for this parser.
   * @returns The format instructions for parsing the output.
   */
  getFormatInstructions(): string {
    return FORMAT_INSTRUCTIONS;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/mrkl/index.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { ToolInterface } from "@langchain/core/tools";
import { PromptTemplate, renderTemplate } from "@langchain/core/prompts";
import { LLMChain } from "../../chains/llm_chain.js";
import { Optional } from "../../types/type-utils.js";
import { Agent, AgentArgs, OutputParserArgs } from "../agent.js";
import { deserializeHelper } from "../helpers.js";
import {
AgentInput,
SerializedFromLLMAndTools,
SerializedZeroShotAgent,
} from "../types.js";
import { ZeroShotAgentOutputParser } from "./outputParser.js";
import { FORMAT_INSTRUCTIONS, PREFIX, SUFFIX } from "./prompt.js";
/**
 * Options for creating a prompt for the ZeroShotAgent. All fields are
 * optional; unset fields fall back to the module defaults.
 */
export interface ZeroShotCreatePromptArgs {
  /** String to put after the list of tools. Defaults to {@link SUFFIX}. */
  suffix?: string;
  /** String to put before the list of tools. Defaults to {@link PREFIX}. */
  prefix?: string;
  /**
   * List of input variables the final prompt will expect.
   * Defaults to ["input", "agent_scratchpad"].
   */
  inputVariables?: string[];
}
/**
 * Type for the input to the ZeroShotAgent, with the 'outputParser'
 * property made optional (a default parser is supplied by the constructor).
 */
export type ZeroShotAgentInput = Optional<AgentInput, "outputParser">;
/**
 * Agent for the MRKL chain.
 * @augments Agent
 * @example
 * ```typescript
 *
 * const agent = new ZeroShotAgent({
 * llmChain: new LLMChain({
 * llm: new ChatOpenAI({ temperature: 0 }),
 * prompt: ZeroShotAgent.createPrompt([new SerpAPI(), new Calculator()], {
 * prefix: `Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:`,
 * suffix: `Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Args"
 * Question: {input}
 * {agent_scratchpad}`,
 * inputVariables: ["input", "agent_scratchpad"],
 * }),
 * }),
 * allowedTools: ["search", "calculator"],
 * });
 *
 * const result = await agent.invoke({
 * input: `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`,
 * });
 * ```
 *
 * @deprecated Use the {@link https://api.js.langchain.com/functions/langchain.agents.createReactAgent.html | createReactAgent method instead}.
 */
export class ZeroShotAgent extends Agent {
  static lc_name() {
    return "ZeroShotAgent";
  }

  lc_namespace = ["langchain", "agents", "mrkl"];

  declare ToolType: ToolInterface;

  constructor(input: ZeroShotAgentInput) {
    // Fall back to the default ReAct parser when none is supplied.
    const outputParser =
      input?.outputParser ?? ZeroShotAgent.getDefaultOutputParser();
    super({ ...input, outputParser });
  }

  /** Identifier used for (de)serialization and initialize-by-type lookup. */
  _agentType() {
    return "zero-shot-react-description" as const;
  }

  /** Prefix prepended to tool results when building the scratchpad. */
  observationPrefix() {
    return "Observation: ";
  }

  /** Prefix prepended to the LLM's next turn in the scratchpad. */
  llmPrefix() {
    return "Thought:";
  }

  /**
   * Returns the default output parser for the ZeroShotAgent.
   * @param fields Optional arguments for the output parser.
   * @returns An instance of ZeroShotAgentOutputParser.
   */
  static getDefaultOutputParser(fields?: OutputParserArgs) {
    return new ZeroShotAgentOutputParser(fields);
  }

  /**
   * Validates the tools for the ZeroShotAgent. Throws an error if any tool
   * does not have a description, since the prompt relies on descriptions to
   * tell the model what each tool does.
   * @param tools List of tools to validate.
   */
  static validateTools(tools: ToolInterface[]) {
    const descriptionlessTool = tools.find((tool) => !tool.description);
    if (descriptionlessTool) {
      const msg =
        `Got a tool ${descriptionlessTool.name} without a description.` +
        ` This agent requires descriptions for all tools.`;
      throw new Error(msg);
    }
  }

  /**
   * Create prompt in the style of the zero shot agent.
   *
   * @param tools - List of tools the agent will have access to, used to format the prompt.
   * @param args - Arguments to create the prompt with.
   * @param args.suffix - String to put after the list of tools.
   * @param args.prefix - String to put before the list of tools.
   * @param args.inputVariables - List of input variables the final prompt will expect.
   */
  static createPrompt(tools: ToolInterface[], args?: ZeroShotCreatePromptArgs) {
    const {
      prefix = PREFIX,
      suffix = SUFFIX,
      inputVariables = ["input", "agent_scratchpad"],
    } = args ?? {};
    // One "name: description" line per tool.
    const toolStrings = tools
      .map((tool) => `${tool.name}: ${tool.description}`)
      .join("\n");
    const toolNames = tools.map((tool) => tool.name);
    const formatInstructions = renderTemplate(FORMAT_INSTRUCTIONS, "f-string", {
      tool_names: toolNames,
    });
    const template = [prefix, toolStrings, formatInstructions, suffix].join(
      "\n\n"
    );
    return new PromptTemplate({
      template,
      inputVariables,
    });
  }

  /**
   * Creates a ZeroShotAgent from a Large Language Model and a set of tools.
   * @param llm The Large Language Model to use.
   * @param tools The tools for the agent to use.
   * @param args Optional arguments for creating the agent.
   * @returns A new instance of ZeroShotAgent.
   */
  static fromLLMAndTools(
    llm: BaseLanguageModelInterface,
    tools: ToolInterface[],
    args?: ZeroShotCreatePromptArgs & AgentArgs
  ) {
    ZeroShotAgent.validateTools(tools);
    const prompt = ZeroShotAgent.createPrompt(tools, args);
    const outputParser =
      args?.outputParser ?? ZeroShotAgent.getDefaultOutputParser();
    const chain = new LLMChain({
      prompt,
      llm,
      callbacks: args?.callbacks ?? args?.callbackManager,
    });
    return new ZeroShotAgent({
      llmChain: chain,
      allowedTools: tools.map((t) => t.name),
      outputParser,
    });
  }

  /**
   * Reconstructs a ZeroShotAgent from serialized data, optionally rebuilding
   * it from a provided LLM and tool list via `fromLLMAndTools`.
   * @param data Serialized agent, plus optional `llm` and `tools` overrides.
   * @returns A promise resolving to the deserialized ZeroShotAgent.
   */
  static async deserialize(
    data: SerializedZeroShotAgent & {
      llm?: BaseLanguageModelInterface;
      tools?: ToolInterface[];
    }
  ): Promise<ZeroShotAgent> {
    const { llm, tools, ...rest } = data;
    return deserializeHelper(
      llm,
      tools,
      rest,
      (
        llm: BaseLanguageModelInterface,
        tools: ToolInterface[],
        args: SerializedFromLLMAndTools
      ) =>
        ZeroShotAgent.fromLLMAndTools(llm, tools, {
          prefix: args.prefix,
          suffix: args.suffix,
          inputVariables: args.input_variables,
        }),
      (args) => new ZeroShotAgent(args)
    );
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/mrkl/prompt.ts | export const PREFIX = `Answer the following questions as best you can. You have access to the following tools:`;
// ReAct format instructions; {tool_names} is interpolated at
// prompt-construction time with the list of available tool names.
export const FORMAT_INSTRUCTIONS = `Use the following format in your response:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question`;
// Prompt tail; {input} is the user question and {agent_scratchpad} carries the
// accumulated Thought/Action/Observation history.
export const SUFFIX = `Begin!
Question: {input}
Thought:{agent_scratchpad}`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/agent.int.test.ts | /* eslint-disable no-process-env */
import { expect, test } from "@jest/globals";
import { OpenAI, OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { Tool } from "@langchain/core/tools";
import { RunnableSequence } from "@langchain/core/runnables";
import { OutputParserException } from "@langchain/core/output_parsers";
import { AIMessage } from "@langchain/core/messages";
import { AgentStep } from "@langchain/core/agents";
import { ChatMessageHistory } from "../../stores/message/in_memory.js";
import { AgentExecutor, ZeroShotAgent } from "../index.js";
import { SerpAPI } from "../../util/testing/tools/serpapi.js";
import { Calculator } from "../../util/testing/tools/calculator.js";
import { initializeAgentExecutorWithOptions } from "../initialize.js";
import { WebBrowser } from "../../tools/webbrowser.js";
import { BufferMemory } from "../../memory/buffer_memory.js";
// Integration test (requires OpenAI + SerpAPI keys): verifies that a plain
// RunnableSequence can be passed directly as the agent to AgentExecutor.
test("Pass runnable to agent executor", async () => {
  const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo" });
  const tools: Tool[] = [
    new SerpAPI(undefined, {
      location: "Austin,Texas,United States",
      hl: "en",
      gl: "us",
    }),
    new Calculator(),
  ];
  const prompt = ZeroShotAgent.createPrompt(tools);
  const outputParser = ZeroShotAgent.getDefaultOutputParser();
  const runnable = RunnableSequence.from([
    {
      input: (i: { input: string }) => i.input,
      // NOTE(review): scratchpad is mapped to the raw input here rather than
      // to formatted intermediate steps — intentional for this smoke test.
      agent_scratchpad: (i: { input: string }) => i.input,
    },
    prompt,
    model,
    outputParser,
  ]);
  const executor = AgentExecutor.fromAgentAndTools({
    agent: runnable,
    tools,
  });
  const res = await executor.invoke({
    input:
      "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?",
  });
  // console.log(
  //   {
  //     res,
  //   },
  //   "Pass runnable to agent executor"
  // );
  expect(res.output).not.toEqual("");
  expect(res.output).not.toEqual("Agent stopped due to max iterations.");
});
// Integration test: a hand-written parser function (instead of an
// OutputParser instance) can terminate the run with a custom final value.
test("Custom output parser", async () => {
  const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo" });
  const tools: Tool[] = [
    new SerpAPI(undefined, {
      location: "Austin,Texas,United States",
      hl: "en",
      gl: "us",
    }),
    new Calculator(),
  ];
  const parser = (output: AIMessage) => {
    const text = output.content;
    if (typeof text !== "string") {
      throw new Error("Cannot parse non-string output.");
    }
    if (text.includes("Final Answer:")) {
      return {
        returnValues: {
          output: "We did it!",
        },
        log: text,
      };
    }
    const match = /Action:([\s\S]*?)(?:\nAction Input:([\s\S]*?))?$/.exec(text);
    if (!match) {
      throw new OutputParserException(`Could not parse LLM output: ${text}`);
    }
    return {
      tool: match[1].trim(),
      toolInput: match[2]
        ? match[2].trim().replace(/^("+)(.*?)(\1)$/, "$2")
        : "",
      log: text,
    };
  };
  const prompt = ZeroShotAgent.createPrompt(tools);
  const runnable = RunnableSequence.from([
    {
      input: (i: { input: string }) => i.input,
      agent_scratchpad: (i: { input: string }) => i.input,
    },
    prompt,
    model,
    parser,
  ]);
  const executor = AgentExecutor.fromAgentAndTools({
    agent: runnable,
    tools,
  });
  const res = await executor.invoke({
    input:
      "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?",
  });
  // console.log(
  //   {
  //     res,
  //   },
  //   "Custom output parser"
  // );
  expect(res.output).toEqual("We did it!");
});
// Integration test: a model with fallbacks should recover when the primary
// model (deliberately misconfigured) fails.
test("Add a fallback method", async () => {
  // Model should always fail since the model name passed does not exist.
  const modelBase = new ChatOpenAI({
    modelName: "fake-model",
    temperature: 10,
  });
  const modelLarge = new ChatOpenAI({
    modelName: "gpt-3.5-turbo-16k",
    temperature: 0.6,
  });
  const model = modelBase.withFallbacks({
    fallbacks: [modelLarge],
  });
  const prompt = ZeroShotAgent.createPrompt([]);
  const outputParser = ZeroShotAgent.getDefaultOutputParser();
  const runnable = RunnableSequence.from([
    {
      input: (i: { input: string }) => i.input,
      agent_scratchpad: (i: { input: string }) => i.input,
    },
    prompt,
    model,
    outputParser,
  ]);
  const executor = AgentExecutor.fromAgentAndTools({
    agent: runnable,
    tools: [],
  });
  const res = await executor.invoke({
    input: "Is the sky blue? Response with a concise answer",
  });
  // console.log(
  //   {
  //     res,
  //   },
  //   "Pass runnable to agent executor"
  // );
  expect(res.output).not.toEqual("");
  expect(res.output).not.toEqual("Agent stopped due to max iterations.");
});
// Integration test: aborting the signal mid-run must reject the call.
test("Run agent with an abort signal", async () => {
  const model = new OpenAI({ temperature: 0, modelName: "text-babbage-001" });
  const tools = [new Calculator()];
  const executor = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "zero-shot-react-description",
  });
  // console.log("Loaded agent.");
  const input = `What is 3 to the fourth power?`;
  // console.log(`Executing with input "${input}"...`);
  const controller = new AbortController();
  await expect(() => {
    // Abort immediately after starting; the pending call should reject.
    const result = executor.call({ input, signal: controller.signal });
    controller.abort();
    return result;
  }).rejects.toThrow();
});
// Integration test: the executor must surface the same auth error the bare
// model throws when the API key is invalid.
test("Run agent with incorrect api key should throw error", async () => {
  const model = new OpenAI({
    temperature: 0,
    modelName: "text-babbage-001",
    openAIApiKey: "invalid",
  });
  const tools = [
    new SerpAPI(undefined, {
      location: "Austin,Texas,United States",
      hl: "en",
      gl: "us",
    }),
    new Calculator(),
  ];
  const executor = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "zero-shot-react-description",
  });
  // console.log("Loaded agent.");
  const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`;
  let error;
  // Test that the model throws an error
  await expect(async () => {
    try {
      await model.invoke(input);
    } catch (e) {
      // Capture the error so the agent's error message can be compared to it.
      error = e;
      throw e;
    }
  }).rejects.toThrowError();
  // Test that the agent throws the same error
  await expect(() => executor.call({ input })).rejects.toThrowError(
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    (error as any).message
  );
}, 10000);
// Integration test: the web-browser tool is used after an initial search.
// NOTE(review): asserts intermediateSteps[1] exists after only checking
// length >= 1 — flaky if the agent answers in a single step.
test("Run tool web-browser", async () => {
  const model = new OpenAI({ temperature: 0 });
  const tools = [
    new SerpAPI(process.env.SERPAPI_API_KEY, {
      location: "Austin,Texas,United States",
      hl: "en",
      gl: "us",
    }),
    new Calculator(),
    new WebBrowser({ model, embeddings: new OpenAIEmbeddings() }),
  ];
  const executor = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "zero-shot-react-description",
    returnIntermediateSteps: true,
  });
  // console.log("Loaded agent.");
  const input = `What is the word of the day on merriam webster`;
  // console.log(`Executing with input "${input}"...`);
  const result = await executor.call({ input });
  // console.log(
  //   {
  //     result,
  //   },
  //   "Run tool web-browser"
  // );
  expect(result.intermediateSteps.length).toBeGreaterThanOrEqual(1);
  expect(result.intermediateSteps[0].action.tool).toEqual("search");
  expect(result.intermediateSteps[1].action.tool).toEqual("web-browser");
  expect(result.output).not.toEqual("");
  expect(result.output).not.toEqual("Agent stopped due to max iterations.");
});
// Integration test: streaming without intermediate steps — each chunk is
// independent, so chunks are accumulated into an array.
test("Agent can stream", async () => {
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-4-1106-preview",
    streaming: true,
  });
  const tools = [
    new Calculator(),
    new WebBrowser({ model, embeddings: new OpenAIEmbeddings() }),
  ];
  const executor = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "zero-shot-react-description",
    returnIntermediateSteps: false,
  });
  // console.log("Loaded agent.");
  const input = `What is the word of the day on merriam webster`;
  // console.log(`Executing with input "${input}"...`);
  const result = await executor.stream({ input });
  let streamIters = 0;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const finalResponse: any = [];
  for await (const item of result) {
    streamIters += 1;
    // console.log("Stream item:", item);
    // each stream does NOT contain the previous steps,
    // because returnIntermediateSteps is false so we
    // push each new stream item to the array.
    finalResponse.push(item);
  }
  // The last item should contain "output"
  expect("output" in finalResponse[finalResponse.length - 1]).toBeTruthy();
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const intermediateSteps = finalResponse.flatMap((item: any) => {
    if ("intermediateSteps" in item) {
      return item.intermediateSteps;
    }
    return [];
  });
  expect(streamIters).toBeGreaterThan(1);
  const toolsUsed: Array<string> = intermediateSteps.map(
    (step: AgentStep) => step.action.tool
  );
  // the last tool used should be the web-browser
  expect(toolsUsed?.[toolsUsed.length - 1]).toEqual("web-browser");
});
// Integration test: streaming WITH intermediate steps and conversational
// memory — each chunk contains all prior steps, so later chunks overwrite.
test("Agent can stream with chat messages", async () => {
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-4-1106-preview",
    streaming: true,
  });
  const tools = [
    new Calculator(),
    new WebBrowser({ model, embeddings: new OpenAIEmbeddings() }),
  ];
  const memory = new BufferMemory({
    chatHistory: new ChatMessageHistory([]),
    memoryKey: "chat_history",
    inputKey: "input",
    outputKey: "output",
    returnMessages: true,
  });
  const executor = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "chat-conversational-react-description",
    returnIntermediateSteps: true,
    memory,
  });
  // console.log("Loaded agent.");
  const input = `What is the word of the day on merriam webster, and what is the sum of all letter indices (relative to the english alphabet) in the word?`;
  // console.log(`Executing with input "${input}"...`);
  const result = await executor.stream({ input, chat_history: [] });
  let streamIters = 0;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  let finalResponse: any;
  for await (const item of result) {
    streamIters += 1;
    // console.log("Stream item:", item);
    // each stream contains the previous steps
    // because returnIntermediateSteps is true),
    // so we can overwrite on each stream.
    finalResponse = item;
  }
  // console.log("__finalResponse__", finalResponse);
  expect("intermediateSteps" in finalResponse).toBeTruthy();
  expect("output" in finalResponse).toBeTruthy();
  expect(streamIters).toBeGreaterThan(1);
  const toolsUsed: Array<string> = finalResponse.intermediateSteps.map(
    (step: AgentStep) => step.action.tool
  );
  // the first tool used should be web-browser, and last should be calculator.
  // This can be flaky so if the test is failing, inspect these conditions first.
  expect(toolsUsed?.[toolsUsed.length - 1]).toEqual("calculator");
  expect(toolsUsed?.[0]).toEqual("web-browser");
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/structured_output_runnables.int.test.ts | import { zodToJsonSchema } from "zod-to-json-schema";
import fs from "fs";
import { z } from "zod";
import { AgentAction, AgentFinish, AgentStep } from "@langchain/core/agents";
import { AIMessage } from "@langchain/core/messages";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { convertToOpenAIFunction } from "@langchain/core/utils/function_calling";
import { RunnableSequence } from "@langchain/core/runnables";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import { createRetrieverTool } from "../toolkits/index.js";
import { RecursiveCharacterTextSplitter } from "../../text_splitter.js";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { AgentExecutor } from "../executor.js";
import { formatForOpenAIFunctions } from "../format_scratchpad/openai_functions.js";
/**
 * Custom structured output parser: routes an OpenAI function call either to a
 * tool invocation (AgentAction) or — when the special "response" function is
 * called — to a structured final answer (AgentFinish). Plain text output with
 * no function call is treated as the final answer verbatim.
 */
const structuredOutputParser = (
  output: AIMessage
): AgentAction | AgentFinish => {
  const { content } = output;
  if (typeof content !== "string") {
    throw new Error("Cannot parse non-string output.");
  }
  const functionCall = output.additional_kwargs.function_call;
  if (functionCall === undefined) {
    // No function call: the content itself is the final answer.
    return { returnValues: { output: content }, log: content };
  }
  const toolName = functionCall?.name as string;
  const parsedArgs = JSON.parse(functionCall?.arguments as string);
  return toolName === "response"
    ? // "response" carries the structured final answer fields directly.
      { returnValues: { ...parsedArgs }, log: content }
    : { tool: toolName, toolInput: parsedArgs, log: content };
};
// Integration test: end-to-end retrieval agent whose final answer is a
// structured object ({ answer, sources }) produced via a "response" function.
test("Pass custom structured output parsers", async () => {
  /** Read text file & embed documents */
  const text = fs.readFileSync("../examples/state_of_the_union.txt", "utf8");
  const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
  let docs = await textSplitter.createDocuments([text]);
  // Add fake source information
  docs = docs.map((doc, i) => ({
    ...doc,
    metadata: {
      page_chunk: i,
    },
  }));
  /** Initialize docs & create retriever */
  const vectorStore = await MemoryVectorStore.fromDocuments(
    docs,
    new OpenAIEmbeddings()
  );
  const retriever = vectorStore.asRetriever();
  /** Instantiate the LLM */
  const llm = new ChatOpenAI({});
  /** Define the prompt template */
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    new MessagesPlaceholder("agent_scratchpad"),
    ["user", "{input}"],
  ]);
  /** Define the response schema */
  const responseSchema = z.object({
    answer: z.string().describe("The final answer to respond to the user"),
    sources: z
      .array(z.string())
      .describe(
        "List of page chunks that contain answer to the question. Only include a page chunk if it contains relevant information"
      ),
  });
  /** Create the response function */
  const responseOpenAIFunction = {
    name: "response",
    description: "Return the response to the user",
    parameters: zodToJsonSchema(responseSchema),
  };
  /** Convert retriever into a tool */
  const retrieverTool = createRetrieverTool(retriever, {
    name: "state-of-union-retriever",
    description:
      "Query a retriever to get information about state of the union address",
  });
  /** Bind both retriever and response functions to LLM */
  const llmWithTools = llm.bind({
    functions: [convertToOpenAIFunction(retrieverTool), responseOpenAIFunction],
  });
  /** Create the runnable */
  const runnableAgent = RunnableSequence.from([
    {
      input: (i: { input: string; steps: Array<AgentStep> }) => i.input,
      agent_scratchpad: (i: { input: string; steps: Array<AgentStep> }) =>
        formatForOpenAIFunctions(i.steps),
    },
    prompt,
    llmWithTools,
    structuredOutputParser,
  ]);
  /** Create the agent by passing in the runnable & tools */
  const executor = AgentExecutor.fromAgentAndTools({
    agent: runnableAgent,
    tools: [retrieverTool],
  });
  /** Call invoke on the agent */
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await executor.invoke({
    input: "what did the president say about kentaji brown jackson",
  });
  // console.log({
  //   res,
  // });
  /**
  {
    res: {
      answer: 'President mentioned that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. He described her as one of our nation’s top legal minds and stated that she will continue Justice Breyer’s legacy of excellence.',
      sources: [
        'And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.'
      ]
    }
  }
  */
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/runnable.int.test.ts | /* eslint-disable no-process-env */
import { test } from "@jest/globals";
import { ChatOpenAI } from "@langchain/openai";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import {
AIMessage,
BaseMessage,
FunctionMessage,
} from "@langchain/core/messages";
import { convertToOpenAIFunction } from "@langchain/core/utils/function_calling";
import { AgentStep } from "@langchain/core/agents";
import { RunnableSequence } from "@langchain/core/runnables";
import { AgentExecutor } from "../executor.js";
import { SerpAPI } from "../../util/testing/tools/serpapi.js";
import { Calculator } from "../../util/testing/tools/calculator.js";
import { OpenAIFunctionsAgentOutputParser } from "../openai/output_parser.js";
// Shared by both tests below (previously duplicated verbatim inside each):
// converts intermediate agent steps into the chat messages expected by the
// "agent_scratchpad" placeholder, preserving the function-call message log
// when present and falling back to a plain AI message otherwise.
const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
  steps.flatMap(({ action, observation }) => {
    if ("messageLog" in action && action.messageLog !== undefined) {
      const log = action.messageLog as BaseMessage[];
      return log.concat(new FunctionMessage(observation, action.tool));
    } else {
      return [new AIMessage(action.log)];
    }
  });
// Integration test: OpenAI-functions agent assembled as a raw RunnableSequence.
test("Runnable variant", async () => {
  const tools = [new Calculator(), new SerpAPI()];
  const model = new ChatOpenAI({ modelName: "gpt-4", temperature: 0 });
  const prompt = ChatPromptTemplate.fromMessages([
    ["ai", "You are a helpful assistant"],
    ["human", "{input}"],
    new MessagesPlaceholder("agent_scratchpad"),
  ]);
  const modelWithTools = model.bind({
    functions: [...tools.map((tool) => convertToOpenAIFunction(tool))],
  });
  const runnableAgent = RunnableSequence.from([
    {
      input: (i: { input: string; steps: AgentStep[] }) => i.input,
      agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>
        formatAgentSteps(i.steps),
    },
    prompt,
    modelWithTools,
    new OpenAIFunctionsAgentOutputParser(),
  ]);
  const executor = AgentExecutor.fromAgentAndTools({
    agent: runnableAgent,
    tools,
  });
  // console.log("Loaded agent executor");
  const query = "What is the weather in New York?";
  // console.log(`Calling agent executor with query: ${query}`);
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await executor.invoke({
    input: query,
  });
  // console.log(result);
});
// Integration test: same runnable agent, but consumed via streamLog; asserts
// that at least one log patch originates from the ChatOpenAI run.
test("Runnable variant executor astream log", async () => {
  const tools = [new Calculator(), new SerpAPI()];
  const model = new ChatOpenAI({
    modelName: "gpt-4",
    temperature: 0,
    streaming: true,
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["ai", "You are a helpful assistant"],
    ["human", "{input}"],
    new MessagesPlaceholder("agent_scratchpad"),
  ]);
  const modelWithTools = model.bind({
    functions: [...tools.map((tool) => convertToOpenAIFunction(tool))],
  });
  const runnableAgent = RunnableSequence.from([
    {
      input: (i: { input: string; steps: AgentStep[] }) => i.input,
      agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>
        formatAgentSteps(i.steps),
    },
    prompt,
    modelWithTools,
    new OpenAIFunctionsAgentOutputParser(),
  ]);
  const executor = AgentExecutor.fromAgentAndTools({
    agent: runnableAgent,
    tools,
  });
  // console.log("Loaded agent executor");
  const query = "What is the weather in New York?";
  // console.log(`Calling agent executor with query: ${query}`);
  const stream = await executor.streamLog({
    input: query,
  });
  let hasSeenLLMLogPatch = false;
  for await (const chunk of stream) {
    // console.log(JSON.stringify(chunk));
    if (chunk.ops[0].path.includes("ChatOpenAI")) {
      hasSeenLLMLogPatch = true;
    }
  }
  expect(hasSeenLLMLogPatch).toBe(true);
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/structured_chat_output_parser_with_retries.int.test.ts | import { test, expect } from "@jest/globals";
import { ChatOpenAI } from "@langchain/openai";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { StructuredChatOutputParserWithRetries } from "../structured_chat/outputParser.js";
// Integration test: the retrying structured-chat parser should repair
// malformed outputs (missing markdown fences, trailing noise) via the LLM.
test("Can parse JSON with text in front of it", async () => {
  const testCases = [
    {
      input:
        'Here we have an invalid format (missing markdown block) that the parser should retry and fix: {\n \t\r\n"action": "blogpost",\n\t\r "action_input": "```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"\n\t\r}\n\n\n\t\r and at the end there is more nonsense',
      tool: "blogpost",
      toolInput:
        "```sql\nSELECT * FROM orders\nJOIN users ON users.id = orders.user_id\nWHERE users.email = 'bud'```",
    },
    {
      input:
        'Here we have an invalid format (missing markdown block) with a structured tool that the parser should retry and fix: {\n \t\r\n"action": "blogpost",\n\t\r "action_input": {"query": "SELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = $1",\n\t"parameters": ["bud"]\n\t}\n\t\r}\n\n\n\t\r and at the end there is more nonsense',
      tool: "blogpost",
      toolInput: {
        query:
          "SELECT * FROM orders\nJOIN users ON users.id = orders.user_id\nWHERE users.email = $1",
        parameters: ["bud"],
      },
    },
    {
      // No action at all: should be treated as a final answer.
      input: `I don't know the answer.`,
      tool: "Final Answer",
      toolInput: "I don't know the answer.",
    },
  ];
  const p = StructuredChatOutputParserWithRetries.fromLLM(
    new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo" }),
    {
      toolNames: ["blogpost"],
    }
  );
  for (const message of testCases) {
    const parsed = await p.parse(message.input);
    expect(parsed).toBeDefined();
    if (message.tool === "Final Answer") {
      expect((parsed as AgentFinish).returnValues).toBeDefined();
    } else {
      expect((parsed as AgentAction).tool).toEqual(message.tool);
      if (typeof message.toolInput === "object") {
        expect(message.toolInput).toEqual((parsed as AgentAction).toolInput);
      }
      if (typeof message.toolInput === "string") {
        expect(message.toolInput).toContain((parsed as AgentAction).toolInput);
      }
    }
  }
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/create_react_agent.int.test.ts | import { test, expect } from "@jest/globals";
import { OpenAI } from "@langchain/openai";
import type { PromptTemplate } from "@langchain/core/prompts";
import { TavilySearchResults } from "../../util/testing/tools/tavily_search.js";
import { pull } from "../../hub.js";
import { AgentExecutor, createReactAgent } from "../index.js";
// Shared tool list for this file; TavilySearchResults requires an API key.
const tools = [new TavilySearchResults({ maxResults: 1 })];
// Integration test: smoke-tests the modern createReactAgent factory with a
// hub-pulled ReAct prompt.
test("createReactAgent works", async () => {
  const prompt = await pull<PromptTemplate>("hwchase17/react");
  const llm = new OpenAI({
    modelName: "gpt-3.5-turbo-instruct",
    temperature: 0,
  });
  const agent = await createReactAgent({
    llm,
    tools,
    prompt,
  });
  const agentExecutor = new AgentExecutor({
    agent,
    tools,
  });
  const input = "what is LangChain?";
  const result = await agentExecutor.invoke({
    input,
  });
  // console.log(result);
  expect(result.input).toBe(input);
  expect(typeof result.output).toBe("string");
  // Length greater than 10 because any less than that would warrant
  // an investigation into why such a short generation was returned.
  expect(result.output.length).toBeGreaterThan(10);
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/chat_convo_output_parser.test.ts | import { test, expect } from "@jest/globals";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { ChatConversationalAgentOutputParser } from "../chat_convo/outputParser.js";
// Verifies that ChatConversationalAgentOutputParser isolates the JSON
// action payload from raw LLM output that may wrap it in prose and/or
// fenced code blocks — including nested fences inside action_input and
// whitespace noise (\t, \r) inside the JSON itself.
test("Can parse JSON with text in front of it", async () => {
  // Each case: raw LLM text (`input`), the JSON the parser should isolate
  // (`output`, informational — not asserted on directly), and the expected
  // tool name / tool input.
  const testCases = [
    {
      input: `Based on the information from the search, I can provide you with a query to get all the orders for the email \`example@gmail.com\`. Here's the query:\n\n\`\`\`sql\nSELECT * FROM orders\nJOIN users ON users.id = orders.user_id\nWHERE users.email = 'example@gmail.com'\n\`\`\`\n\nPlease make any necessary modifications depending on your database schema and table structures. Run this query on your database to retrieve the orders made by the specified user.\n\n\`\`\`json\n{\n "action": "Final Answer",\n "action_input": "To get all the orders for a user with the email \`example@gmail.com\`, you can use the following query:\\n\\n\`\`\`\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = 'example@gmail.com'\\n\`\`\`\\n\\nPlease make any necessary modifications depending on your database schema and table structures. Run this query on your database to retrieve the orders made by the specified user."\n}\n\`\`\``,
      output: `{\n "action": "Final Answer",\n "action_input": "To get all the orders for a user with the email \`example@gmail.com\`, you can use the following query:\\n\\n\`\`\`\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = 'example@gmail.com'\\n\`\`\`\\n\\nPlease make any necessary modifications depending on your database schema and table structures. Run this query on your database to retrieve the made by the specifsredroied user."\n}`,
      tool: "Final Answer",
      toolInput: "To get all the orders for a user with the email ",
    },
    {
      input:
        'Here is an example of a valid JSON object matching the provided spec:\n\n```json\n{\n "action": "metabase",\n "action_input": ["GET", "/api/table/1"]\n}\n```\n\nIn this example, the "action" key has a string value of "metabase", and the "action_input" key has an array value containing two elements: a string value of "GET" and a string value of "/api/table/1". This JSON object could be used to make a request to a Metabase API endpoint with the specified method and arguments.',
      output: `{ "action": "metabase", "action_input": ["GET", "/api/table/1"] } `,
      tool: "metabase",
      toolInput: ["GET", "/api/table/1"],
    },
    {
      input:
        '```\n{\n "action": "metabase",\n "action_input": ["GET", "/api/table/1"]\n}\n```',
      output: `{ "action": "metabase", "action_input": ["GET", "/api/table/1"] } `,
      tool: "metabase",
      toolInput: ["GET", "/api/table/1"],
    },
    {
      input:
        'Here we have some boilerplate nonsense```\n{\n "action": "blogpost",\n "action_input": "```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"\n}\n``` and at the end there is more nonsense',
      output:
        '{"action":"blogpost","action_input":"```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"}',
      tool: "blogpost",
      toolInput:
        "```sql\nSELECT * FROM orders\nJOIN users ON users.id = orders.user_id\nWHERE users.email = 'bud'```",
    },
    {
      input:
        'Here we have some boilerplate nonsense```json\n{\n \t\r\n"action": "blogpost",\n\t\r "action_input": "```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"\n\t\r}\n\n\n\t\r``` and at the end there is more nonsense',
      output:
        '{"action":"blogpost","action_input":"```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"}',
      tool: "blogpost",
      toolInput:
        "```sql\nSELECT * FROM orders\nJOIN users ON users.id = orders.user_id\nWHERE users.email = 'bud'```",
    },
    {
      input:
        '{\n \t\r\n"action":"Final Answer",\n\t\r "action_input":"The tool input ```json\\n{\\"yes\\":true}\\n```"\n\t\r}',
      output:
        '{"action":"Final Answer","action_input":"The tool input ```json\\n{\\"yes\\":true}\\n```"}',
      tool: "Final Answer",
      toolInput: 'The tool input ```json\\n{\\"yes\\":true}\\n```',
    },
    {
      input:
        '```json\n{\n \t\r\n"action":"Final Answer",\n\t\r "action_input":"The tool input ```json\\n{\\"yes\\":true}\\n```"\n\t\r}\n\n\n\t\r```',
      output:
        '{"action":"Final Answer","action_input":"The tool input ```json\\n{\\"yes\\":true}\\n```"}',
      tool: "Final Answer",
      toolInput: 'The tool input ```json\\n{\\"yes\\":true}\\n```',
    },
    {
      input:
        'Here we have some boilerplate nonsense```json\n{\n \t\r\n"action":"Final Answer",\n\t\r "action_input":"The tool input ```json\\n{\\"yes\\":true}\\n```"\n\t\r}\n\n\n\t\r``` and at the end there is more nonsense',
      output:
        '{"action":"Final Answer","action_input":"The tool input ```json\\n{\\"yes\\":true}\\n```"}',
      tool: "Final Answer",
      toolInput: 'The tool input ```json\\n{\\"yes\\":true}\\n```',
    },
    {
      input:
        'Here we have some boilerplate nonsense```\n{\n \t\r\n"action":"Final Answer",\n\t\r "action_input":"The tool input ```javascript\\n{\\"yes\\":true}\\n```"\n\t\r}\n\n\n\t\r``` and at the end there is more nonsense',
      output:
        '{"action":"Final Answer","action_input":"The tool input ```javascript\\n{\\"yes\\":true}\\n```"}',
      tool: "Final Answer",
      toolInput: 'The tool input ```javascript\\n{\\"yes\\":true}\\n```',
    },
    {
      input:
        '{\n \t\r\n"action":"Final Answer",\n\t\r "action_input":"The tool input ```javascript\\n{\\"yes\\":true}\\n```"\n\t\r}',
      output:
        '{"action":"Final Answer","action_input":"The tool input ```javascript\\n{\\"yes\\":true}\\n```"}',
      tool: "Final Answer",
      toolInput: 'The tool input ```javascript\\n{\\"yes\\":true}\\n```',
    },
    {
      input:
        '{\n \t\r\n"action":"Final Answer",\n\t\r "action_input":"this is a regular text response"\n\t\r}',
      output:
        '{"action":"Final Answer","action_input":"this is a regular text response"}',
      tool: "Final Answer",
      toolInput: "this is a regular text response",
    },
  ];
  const p = new ChatConversationalAgentOutputParser({
    toolNames: ["blogpost", "metabase", "ToolWithJson"],
  });
  for (const message of testCases) {
    const parsed = await p.parse(message.input);
    expect(parsed).toBeDefined();
    if (message.tool === "Final Answer") {
      // "Final Answer" actions parse to an AgentFinish, not an AgentAction.
      expect((parsed as AgentFinish).returnValues).toBeDefined();
    } else {
      expect((parsed as AgentAction).tool).toEqual(message.tool);
      // Object tool inputs must match exactly; string inputs only need the
      // parsed value to be contained in the expected text.
      if (typeof message.toolInput === "object") {
        expect(message.toolInput).toEqual((parsed as AgentAction).toolInput);
      }
      if (typeof message.toolInput === "string") {
        expect(message.toolInput).toContain((parsed as AgentAction).toolInput);
      }
    }
  }
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/create_xml_agent.int.test.ts | import { test, expect } from "@jest/globals";
import type { PromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { TavilySearchResults } from "../../util/testing/tools/tavily_search.js";
import { pull } from "../../hub.js";
import { AgentExecutor, createXmlAgent } from "../index.js";
const tools = [new TavilySearchResults({ maxResults: 1 })];
test("createXmlAgent works", async () => {
const prompt = await pull<PromptTemplate>("hwchase17/xml-agent-convo");
const llm = new ChatOpenAI({
modelName: "gpt-4-turbo",
temperature: 0,
});
const agent = await createXmlAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "what is LangChain?";
const result = await agentExecutor.invoke({
input,
});
// console.log(result);
expect(result.input).toBe(input);
expect(typeof result.output).toBe("string");
// Length greater than 10 because any less than that would warrant
// an investigation into why such a short generation was returned.
expect(result.output.length).toBeGreaterThan(10);
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/json.test.ts | import { test, expect } from "@jest/globals";
import {
JsonListKeysTool,
JsonSpec,
JsonGetValueTool,
} from "../../tools/json.js";
test("JsonListKeysTool", async () => {
const jsonSpec = new JsonSpec({
foo: "bar",
baz: { test: { foo: [1, 2, 3], qux: [{ x: 1, y: 2, z: 3 }, { a: 1 }] } },
});
const jsonListKeysTool = new JsonListKeysTool(jsonSpec);
expect(await jsonListKeysTool.invoke("")).toBe("foo, baz");
expect(await jsonListKeysTool.invoke("/foo")).toContain("not a dictionary");
expect(await jsonListKeysTool.invoke("/baz")).toBe("test");
expect(await jsonListKeysTool.invoke("/baz/test")).toBe("foo, qux");
expect(await jsonListKeysTool.invoke("/baz/test/foo")).toContain(
"not a dictionary"
);
expect(await jsonListKeysTool.invoke("/baz/test/foo/0")).toContain(
"not a dictionary"
);
expect(await jsonListKeysTool.invoke("/baz/test/qux")).toContain(
"not a dictionary"
);
expect(await jsonListKeysTool.invoke("/baz/test/qux/0")).toBe("x, y, z");
expect(await jsonListKeysTool.invoke("/baz/test/qux/1")).toBe("a");
expect(await jsonListKeysTool.invoke("/bar")).toContain("not a dictionary");
});
test("JsonListKeysTool, paths containing escaped characters", async () => {
const jsonSpec = new JsonSpec({
paths: {
"a~b": 1,
"a/b": 2,
"a~/b": 3,
"a//~b": 4,
},
});
const jsonListKeyTool = new JsonListKeysTool(jsonSpec);
expect(await jsonListKeyTool.invoke("/paths")).toBe(
"a~0b, a~1b, a~0~1b, a~1~1~0b"
);
});
test("JsonGetValueTool", async () => {
const jsonSpec = new JsonSpec({
foo: "bar",
baz: { test: { foo: [1, 2, 3], qux: [{ x: 1, y: 2, z: 3 }, { a: 1 }] } },
});
const jsonGetValueTool = new JsonGetValueTool(jsonSpec);
expect(await jsonGetValueTool.invoke("")).toBe(
`{"foo":"bar","baz":{"test":{"foo":[1,2,3],"qux":[{"x":1,"y":2,"z":3},{"a":1}]}}}`
);
expect(await jsonGetValueTool.invoke("/foo")).toBe("bar");
expect(await jsonGetValueTool.invoke("/baz")).toBe(
`{"test":{"foo":[1,2,3],"qux":[{"x":1,"y":2,"z":3},{"a":1}]}}`
);
expect(await jsonGetValueTool.invoke("/baz/test")).toBe(
`{"foo":[1,2,3],"qux":[{"x":1,"y":2,"z":3},{"a":1}]}`
);
expect(await jsonGetValueTool.invoke("/baz/test/foo")).toBe("[1,2,3]");
expect(await jsonGetValueTool.invoke("/baz/test/foo/0")).toBe("1");
expect(await jsonGetValueTool.invoke("/baz/test/qux")).toBe(
`[{"x":1,"y":2,"z":3},{"a":1}]`
);
expect(await jsonGetValueTool.invoke("/baz/test/qux/0")).toBe(
`{"x":1,"y":2,"z":3}`
);
expect(await jsonGetValueTool.invoke("/baz/test/qux/0/x")).toBe("1");
expect(await jsonGetValueTool.invoke("/baz/test/qux/1")).toBe(`{"a":1}`);
expect(await jsonGetValueTool.invoke("/bar")).toContain(`null`);
});
test("JsonGetValueTool, large values", async () => {
const jsonSpec = new JsonSpec(
{ foo: "bar", baz: { test: { foo: [1, 2, 3, 4] } } },
5
);
const jsonGetValueTool = new JsonGetValueTool(jsonSpec);
expect(await jsonGetValueTool.invoke("")).toContain("large dictionary");
expect(await jsonGetValueTool.invoke("/foo")).toBe("bar");
expect(await jsonGetValueTool.invoke("/baz")).toContain("large dictionary");
expect(await jsonGetValueTool.invoke("/baz/test")).toContain(
"large dictionary"
);
expect(await jsonGetValueTool.invoke("/baz/test/foo")).toBe("[1,2,...");
expect(await jsonGetValueTool.invoke("/baz/test/foo/0")).toBe("1");
});
test("JsonGetValueTool, paths containing escaped characters", async () => {
const jsonSpec = new JsonSpec({
paths: {
"~IDSGenericFXCrossRate": 1,
"/IDSGenericFXCrossRate": 2,
"~/IDSGenericFXCrossRate": 3,
"/~IDSGenericFXCrossRate": 4,
},
});
const jsonGetValueTool = new JsonGetValueTool(jsonSpec);
expect(await jsonGetValueTool.invoke("/paths/~0IDSGenericFXCrossRate")).toBe(
"1"
);
expect(await jsonGetValueTool.invoke("/paths/~1IDSGenericFXCrossRate")).toBe(
"2"
);
expect(
await jsonGetValueTool.invoke("/paths/~0~1IDSGenericFXCrossRate")
).toBe("3");
expect(
await jsonGetValueTool.invoke("/paths/~1~0IDSGenericFXCrossRate")
).toBe("4");
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/create_openai_functions_agent.int.test.ts | /* eslint-disable @typescript-eslint/no-non-null-assertion */
import { test, expect } from "@jest/globals";
import { ChatOpenAI } from "@langchain/openai";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
import { TavilySearchResults } from "../../util/testing/tools/tavily_search.js";
import { pull } from "../../hub.js";
import { AgentExecutor, createOpenAIFunctionsAgent } from "../index.js";
const tools = [new TavilySearchResults({ maxResults: 1 })];
test("createOpenAIFunctionsAgent works", async () => {
const prompt = await pull<ChatPromptTemplate>(
"hwchase17/openai-functions-agent"
);
const llm = new ChatOpenAI({
modelName: "gpt-3.5-turbo-1106",
temperature: 0,
});
const agent = await createOpenAIFunctionsAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "what is LangChain?";
const result = await agentExecutor.invoke({
input,
});
// console.log(result);
expect(result.input).toBe(input);
expect(typeof result.output).toBe("string");
// Length greater than 10 because any less than that would warrant
// an investigation into why such a short generation was returned.
expect(result.output.length).toBeGreaterThan(10);
});
test("createOpenAIFunctionsAgent can stream log", async () => {
const prompt = await pull<ChatPromptTemplate>(
"hwchase17/openai-functions-agent"
);
const llm = new ChatOpenAI({
modelName: "gpt-3.5-turbo-1106",
temperature: 0,
streaming: true,
});
const agent = await createOpenAIFunctionsAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "tell me a short story.";
const logStream = await agentExecutor.streamLog({
input,
});
const chunks = [];
let firstChunkTime;
for await (const chunk of logStream) {
if (!firstChunkTime) {
firstChunkTime = new Date().getTime();
}
// console.log(chunk);
chunks.push(chunk);
}
if (!firstChunkTime) {
throw new Error("firstChunkTime was not set.");
}
// console.log(chunks.length);
// console.log();
// console.log(
// "Time to complete after first chunk:",
// new Date().getTime() - firstChunkTime
// );
// console.log(chunks.length);
expect(chunks.length).toBeGreaterThan(1);
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/create_structured_chat_agent.int.test.ts | import { test, expect } from "@jest/globals";
import { ChatOpenAI } from "@langchain/openai";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
import { TavilySearchResults } from "../../util/testing/tools/tavily_search.js";
import { pull } from "../../hub.js";
import { AgentExecutor, createStructuredChatAgent } from "../index.js";
const tools = [new TavilySearchResults({ maxResults: 1 })];
test("createStructuredChatAgent works", async () => {
const prompt = await pull<ChatPromptTemplate>(
"hwchase17/structured-chat-agent"
);
const llm = new ChatOpenAI({
modelName: "gpt-3.5-turbo-1106",
temperature: 0,
});
const agent = await createStructuredChatAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "what is LangChain?";
const result = await agentExecutor.invoke({
input,
});
// console.log(result);
expect(result.input).toBe(input);
expect(typeof result.output).toBe("string");
// Length greater than 10 because any less than that would warrant
// an investigation into why such a short generation was returned.
expect(result.output.length).toBeGreaterThan(10);
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/react.test.ts | import { ReActSingleInputOutputParser } from "../react/output_parser.js";
// A trailing "Final Answer:" section should parse into an AgentFinish
// whose returnValues.output carries the answer text.
test("ReActSingleInputOutputParser identifies final answer", async () => {
  const llmOutput = `Observation: 2.169459462491557
Thought: I now know the final answer
Final Answer: Harry Styles, Olivia Wilde's boyfriend, is 29 years old and his age raised to the 0.23 power is 2.169459462491557.`;
  const parser = new ReActSingleInputOutputParser({
    toolNames: [],
  });
  const parsed = await parser.parse(llmOutput);
  expect(parsed).toHaveProperty("returnValues");
  expect("returnValues" in parsed && parsed.returnValues.output).toEqual(
    "Harry Styles, Olivia Wilde's boyfriend, is 29 years old and his age raised to the 0.23 power is 2.169459462491557."
  );
});
// "Action:"/"Action Input:" sections should parse into an AgentAction
// carrying the tool name and its input.
test("ReActSingleInputOutputParser identifies agent actions", async () => {
  const llmOutput = `Observation: 29 years
Thought: I need to calculate 29 raised to the 0.23 power
Action: calculator
Action Input: 29^0.23`;
  const parser = new ReActSingleInputOutputParser({
    toolNames: [],
  });
  const parsed = await parser.parse(llmOutput);
  expect(parsed).toHaveProperty("toolInput");
  expect(parsed).toHaveProperty("tool");
});
// Free-form text with neither an action nor a final answer must be
// rejected with a parse error.
test("ReActSingleInputOutputParser throws if no agent finish/action is passed", async () => {
  const llmOutput = `Who is Harry Styles' girlfriend?`;
  const parser = new ReActSingleInputOutputParser({
    toolNames: [],
  });
  await expect(parser.parse(llmOutput)).rejects.toThrow();
});
// Output that contains BOTH an action and a final answer is ambiguous and
// must be rejected.
test("ReActSingleInputOutputParser throws if agent finish and action are passed", async () => {
  const llmOutput = `Observation: 29 years
Thought: I need to calculate 29 raised to the 0.23 power
Action: calculator
Action Input: 29^0.23
Final Answer: Harry Styles, Olivia Wilde's boyfriend, is 29 years old and his age raised to the 0.23 power is 2.169459462491557.`;
  const parser = new ReActSingleInputOutputParser({
    toolNames: [],
  });
  await expect(parser.parse(llmOutput)).rejects.toThrow();
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/sql.test.ts | /* eslint-disable no-process-env */
import { test, expect, beforeEach, afterEach } from "@jest/globals";
import { DataSource } from "typeorm";
import {
InfoSqlTool,
QuerySqlTool,
ListTablesSqlTool,
QueryCheckerTool,
} from "../../tools/sql.js";
import { SqlDatabase } from "../../sql_db.js";
// Snapshot of the process environment so each test can run with a stubbed
// OPENAI_API_KEY and the original env can be restored afterwards.
const previousEnv = process.env;
// Shared in-memory SQLite database handle, rebuilt before every test.
let db: SqlDatabase;

beforeEach(async () => {
  // Fresh in-memory SQLite datasource per test; `synchronize: true` lets
  // TypeORM manage schema creation for registered entities.
  const datasource = new DataSource({
    type: "sqlite",
    database: ":memory:",
    synchronize: true,
  });
  await datasource.initialize();
  // Seed a small `products` table (3 rows)...
  await datasource.query(`
    CREATE TABLE products (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, price INTEGER);
  `);
  await datasource.query(`
    INSERT INTO products (name, price) VALUES ('Apple', 100);
  `);
  await datasource.query(`
    INSERT INTO products (name, price) VALUES ('Banana', 200);
  `);
  await datasource.query(`
    INSERT INTO products (name, price) VALUES ('Orange', 300);
  `);
  // ...and a small `users` table (3 rows).
  await datasource.query(`
    CREATE TABLE users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, age INTEGER);
  `);
  await datasource.query(`
    INSERT INTO users (name, age) VALUES ('Alice', 20);
  `);
  await datasource.query(`
    INSERT INTO users (name, age) VALUES ('Bob', 21);
  `);
  await datasource.query(`
    INSERT INTO users (name, age) VALUES ('Charlie', 22);
  `);
  // Wrap the raw datasource in the LangChain SqlDatabase abstraction.
  db = await SqlDatabase.fromDataSourceParams({
    appDataSource: datasource,
  });
  // Stub the key so tools that construct an LLM (e.g. QueryCheckerTool)
  // can be instantiated without a real credential.
  process.env = { ...previousEnv, OPENAI_API_KEY: "test" };
});

afterEach(async () => {
  // Restore the environment and release the in-memory database.
  process.env = previousEnv;
  await db.appDataSource.destroy();
});
// NOTE(review): all tests below are `test.skip` in the original source —
// presumably because they need the sqlite driver in CI; confirm before
// re-enabling.

// QuerySqlTool returns query results serialized as a JSON string.
test.skip("QuerySqlTool", async () => {
  const querySqlTool = new QuerySqlTool(db);
  const result = await querySqlTool.invoke("SELECT * FROM users");
  expect(result).toBe(
    `[{"id":1,"name":"Alice","age":20},{"id":2,"name":"Bob","age":21},{"id":3,"name":"Charlie","age":22}]`
  );
});

// A failing query surfaces the driver error as a string rather than throwing.
test.skip("QuerySqlTool with error", async () => {
  const querySqlTool = new QuerySqlTool(db);
  const result = await querySqlTool.invoke("SELECT * FROM userss");
  expect(result).toBe(`QueryFailedError: SQLITE_ERROR: no such table: userss`);
});

// InfoSqlTool emits CREATE TABLE DDL plus a 3-row sample for each table.
test.skip("InfoSqlTool", async () => {
  const infoSqlTool = new InfoSqlTool(db);
  const result = await infoSqlTool.invoke("users, products");
  const expectStr = `
CREATE TABLE products (
id INTEGER , name TEXT , price INTEGER )
SELECT * FROM "products" LIMIT 3;
id name price
1 Apple 100
2 Banana 200
3 Orange 300
CREATE TABLE users (
id INTEGER , name TEXT , age INTEGER )
SELECT * FROM "users" LIMIT 3;
id name age
1 Alice 20
2 Bob 21
3 Charlie 22`;
  expect(result.trim()).toBe(expectStr.trim());
});

// Custom descriptions are printed above each table; keys that do not match
// a real table ("userss") are silently ignored.
test.skip("InfoSqlTool with customDescription", async () => {
  db.customDescription = {
    products: "Custom Description for Products Table",
    users: "Custom Description for Users Table",
    userss: "Should not appear",
  };
  const infoSqlTool = new InfoSqlTool(db);
  const result = await infoSqlTool.invoke("users, products");
  const expectStr = `
Custom Description for Products Table
CREATE TABLE products (
id INTEGER , name TEXT , price INTEGER )
SELECT * FROM "products" LIMIT 3;
id name price
1 Apple 100
2 Banana 200
3 Orange 300
Custom Description for Users Table
CREATE TABLE users (
id INTEGER , name TEXT , age INTEGER )
SELECT * FROM "users" LIMIT 3;
id name age
1 Alice 20
2 Bob 21
3 Charlie 22`;
  expect(result.trim()).toBe(expectStr.trim());
});

// Requesting info for an unknown table yields an explicit error string.
test.skip("InfoSqlTool with error", async () => {
  const infoSqlTool = new InfoSqlTool(db);
  const result = await infoSqlTool.invoke("userss, products");
  expect(result).toBe(
    `Error: Wrong target table name: the table userss was not found in the database`
  );
});

// ListTablesSqlTool lists every table, comma-separated.
test.skip("ListTablesSqlTool", async () => {
  const listSqlTool = new ListTablesSqlTool(db);
  const result = await listSqlTool.invoke("");
  expect(result).toBe(`products, users`);
});

// QueryCheckerTool builds an internal LLMChain keyed on "query"
// (the OPENAI_API_KEY stub from beforeEach makes construction possible).
test.skip("QueryCheckerTool", async () => {
  const queryCheckerTool = new QueryCheckerTool();
  expect(queryCheckerTool.llmChain).not.toBeNull();
  expect(queryCheckerTool.llmChain.inputKeys).toEqual(["query"]);
});

// includesTables acts as an allow-list for table listing.
test.skip("ListTablesSqlTool with include tables", async () => {
  const includesTables = ["users"];
  db.includesTables = includesTables;
  const listSqlTool = new ListTablesSqlTool(db);
  const result = await listSqlTool.invoke("");
  expect(result).toBe("users");
});

// ignoreTables acts as a deny-list for table listing.
test.skip("ListTablesSqlTool with ignore tables", async () => {
  const ignoreTables = ["products"];
  db.ignoreTables = ignoreTables;
  const listSqlTool = new ListTablesSqlTool(db);
  const result = await listSqlTool.invoke("");
  expect(result).toBe("users");
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/structured_chat_output_parser.test.ts | import { test, expect } from "@jest/globals";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { StructuredChatOutputParser } from "../structured_chat/outputParser.js";
// Verifies that StructuredChatOutputParser extracts the fenced JSON action
// from LLM output that surrounds it with prose, including whitespace noise
// (\t, \r) inside the JSON and nested fences inside action_input.
test("Can parse JSON with text in front of it", async () => {
  // Each case: raw LLM text (`input`), the JSON the parser should isolate
  // (`output`, informational), and the expected tool name / tool input.
  const testCases = [
    {
      input:
        'Here we have some boilerplate nonsense```json\n{\n "action": "blogpost",\n "action_input": "```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"\n}\n``` and at the end there is more nonsense',
      output:
        '{"action":"blogpost","action_input":"```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"}',
      tool: "blogpost",
      toolInput:
        "```sql\nSELECT * FROM orders\nJOIN users ON users.id = orders.user_id\nWHERE users.email = 'bud'```",
    },
    {
      input:
        'Here we have some boilerplate nonsense```json\n{\n \t\r\n"action": "blogpost",\n\t\r "action_input": "```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"\n\t\r}\n\n\n\t\r``` and at the end there is more nonsense',
      output:
        '{"action":"blogpost","action_input":"```sql\\nSELECT * FROM orders\\nJOIN users ON users.id = orders.user_id\\nWHERE users.email = \'bud\'```"}',
      tool: "blogpost",
      toolInput:
        "```sql\nSELECT * FROM orders\nJOIN users ON users.id = orders.user_id\nWHERE users.email = 'bud'```",
    },
  ];
  const p = new StructuredChatOutputParser({ toolNames: ["blogpost"] });
  for (const message of testCases) {
    const parsed = await p.parse(message.input);
    expect(parsed).toBeDefined();
    if (message.tool === "Final Answer") {
      // Not exercised by the cases above; kept for parity with the
      // conversational-parser test: finals parse to AgentFinish.
      expect((parsed as AgentFinish).returnValues).toBeDefined();
    } else {
      expect((parsed as AgentAction).tool).toEqual(message.tool);
      // Object inputs must match exactly; string inputs only need the
      // parsed value to be contained in the expected text.
      if (typeof message.toolInput === "object") {
        expect(message.toolInput).toEqual((parsed as AgentAction).toolInput);
      }
      if (typeof message.toolInput === "string") {
        expect(message.toolInput).toContain((parsed as AgentAction).toolInput);
      }
    }
  }
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/create_openai_tools_agent.int.test.ts | import { test, expect } from "@jest/globals";
import { ChatOpenAI } from "@langchain/openai";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
import { RunnableLambda } from "@langchain/core/runnables";
import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain";
import { AsyncLocalStorageProviderSingleton } from "@langchain/core/singletons";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { AsyncLocalStorage } from "async_hooks";
import { TavilySearchResults } from "../../util/testing/tools/tavily_search.js";
import { pull } from "../../hub.js";
import { AgentExecutor, createOpenAIToolsAgent } from "../index.js";
const tools = [new TavilySearchResults({ maxResults: 1 })];
test("createOpenAIToolsAgent works", async () => {
const prompt = await pull<ChatPromptTemplate>("hwchase17/openai-tools-agent");
const llm = new ChatOpenAI({
modelName: "gpt-3.5-turbo-1106",
temperature: 0,
});
const agent = await createOpenAIToolsAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "what is LangChain?";
const result = await agentExecutor.invoke({
input,
});
// console.log(result);
expect(result.input).toBe(input);
expect(typeof result.output).toBe("string");
// Length greater than 10 because any less than that would warrant
// an investigation into why such a short generation was returned.
expect(result.output.length).toBeGreaterThan(10);
});
test("createOpenAIToolsAgent handles errors", async () => {
const errorTools = [
tool(
async () => {
const error = new Error("Error getting search results");
throw error;
},
{
name: "search-results",
schema: z.object({
query: z.string(),
}),
description: "Searches the web",
}
),
];
const prompt = await pull<ChatPromptTemplate>("hwchase17/openai-tools-agent");
const llm = new ChatOpenAI({
modelName: "gpt-3.5-turbo-1106",
temperature: 0,
});
const agent = await createOpenAIToolsAgent({
llm,
tools: errorTools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools: errorTools,
handleToolRuntimeErrors: (e) => {
throw e;
},
});
const input = "what is LangChain?";
await expect(agentExecutor.invoke({ input })).rejects.toThrowError(
"Error getting search results"
);
});
// NOTE(review): skipped in source — presumably requires a live LangSmith
// project ("langchainjs-tracing-2"); confirm before re-enabling. Verifies
// tracing still works when the executor is invoked from inside a
// RunnableLambda with AsyncLocalStorage-based context propagation.
test.skip("createOpenAIToolsAgent tracing works when it is nested in a lambda", async () => {
  // Install AsyncLocalStorage so run context flows through nested lambdas.
  AsyncLocalStorageProviderSingleton.initializeGlobalInstance(
    new AsyncLocalStorage()
  );
  const prompt = await pull<ChatPromptTemplate>("hwchase17/openai-tools-agent");
  const llm = new ChatOpenAI({
    modelName: "gpt-3.5-turbo-1106",
    temperature: 0,
  });
  const agent = await createOpenAIToolsAgent({
    llm,
    tools,
    prompt,
  });
  const agentExecutor = new AgentExecutor({
    agent,
    tools,
  });
  const outer = RunnableLambda.from(async (input) => {
    // A no-op nested runnable to make sure sibling runs are traced too.
    const noop = RunnableLambda.from(() => "hi").withConfig({
      runName: "nested_testing",
    });
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const noopRes = await noop.invoke({ nested: "nested" });
    // console.log(noopRes);
    const res = await agentExecutor.invoke({
      input,
    });
    return res;
  });
  const input = "what is LangChain?";
  const result = await outer.invoke(input, {
    tags: ["test"],
    callbacks: [new LangChainTracer({ projectName: "langchainjs-tracing-2" })],
  });
  // console.log(result);
  expect(result.input).toBe(input);
  expect(typeof result.output).toBe("string");
  // Length greater than 10 because any less than that would warrant
  // an investigation into why such a short generation was returned.
  expect(result.output.length).toBeGreaterThan(10);
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tests/create_tool_calling_agent.int.test.ts | import { z } from "zod";
import { test, expect } from "@jest/globals";
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { DynamicStructuredTool } from "@langchain/core/tools";
import { TavilySearchResults } from "../../util/testing/tools/tavily_search.js";
import { AgentExecutor, createToolCallingAgent } from "../index.js";
// Stub tool that always returns a Redis FT.AGGREGATE syntax-error payload,
// used to exercise the agent's multi-turn retry behavior.
const syntaxErrorTool = new DynamicStructuredTool({
  name: "query",
  description:
    "use this tool to generate and execute a query from a question using the index.",
  schema: z.object({
    index_name: z.string().describe("The name of the index to query."),
    question: z.string().describe("The question to answer."),
  }),
  // Ignores its input and always reports a failed query so the agent must
  // attempt recovery on subsequent turns.
  func: async (_params) => {
    return JSON.stringify({
      result: "-ERR Syntax error at offset 19 near Bronx",
      query:
        'FT.AGGREGATE bites "@Borough:{The Bronx} @Gender:{M}" GROUPBY 0 REDUCE COUNT 0',
    });
  },
});

// Shared Tavily search tool (a single result keeps runs fast and cheap).
const tools = [new TavilySearchResults({ maxResults: 1 })];
test("createToolCallingAgent works", async () => {
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["placeholder", "{chat_history}"],
["human", "{input}"],
["placeholder", "{agent_scratchpad}"],
]);
const llm = new ChatOpenAI({
modelName: "gpt-4-turbo",
temperature: 0,
});
const agent = await createToolCallingAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "what is the current weather in SF?";
const result = await agentExecutor.invoke({
input,
});
// console.log(result);
expect(result.input).toBe(input);
expect(typeof result.output).toBe("string");
// Length greater than 10 because any less than that would warrant
// an investigation into why such a short generation was returned.
expect(result.output.length).toBeGreaterThan(10);
});
test("createToolCallingAgent stream events works", async () => {
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["placeholder", "{chat_history}"],
["human", "{input}"],
["placeholder", "{agent_scratchpad}"],
]);
const llm = new ChatOpenAI({
modelName: "gpt-4o",
temperature: 0,
});
const agent = await createToolCallingAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "what is the current weather in SF?";
const eventStream = agentExecutor.streamEvents(
{
input,
},
{
version: "v2",
}
);
for await (const event of eventStream) {
const eventType = event.event;
// console.log("Event type: ", eventType);
if (eventType === "on_chat_model_stream") {
// console.log("Content: ", event.data);
}
}
});
test("createToolCallingAgent stream events works for multiple turns", async () => {
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["placeholder", "{chat_history}"],
["human", "{input}"],
["placeholder", "{agent_scratchpad}"],
]);
const llm = new ChatOpenAI({
modelName: "gpt-4o",
temperature: 0,
});
const agent = await createToolCallingAgent({
llm,
tools: [syntaxErrorTool],
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools: [syntaxErrorTool],
maxIterations: 3,
});
const input =
"Generate a query that looks up how many animals have been bitten in the Bronx.";
const eventStream = agentExecutor.streamEvents(
{
input,
},
{
version: "v2",
}
);
for await (const event of eventStream) {
const eventType = event.event;
// console.log("Event type: ", eventType);
if (eventType === "on_chat_model_stream") {
// console.log("Content: ", event.data);
}
}
});
test("createToolCallingAgent accepts fallbacks", async () => {
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["placeholder", "{chat_history}"],
["human", "{input}"],
["placeholder", "{agent_scratchpad}"],
]);
const llm = new ChatOpenAI({
modelName: "gpt-4o",
temperature: 0,
})
.bindTools(tools)
.withFallbacks({
fallbacks: [
new ChatOpenAI({
modelName: "gpt-4o",
temperature: 0,
}).bindTools(tools),
],
});
const agent = await createToolCallingAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "what is the current weather in SF?";
const eventStream = agentExecutor.streamEvents(
{
input,
},
{
version: "v2",
}
);
for await (const event of eventStream) {
const eventType = event.event;
// console.log("Event type: ", eventType);
if (eventType === "on_chat_model_stream") {
// console.log("Content: ", event.data);
}
}
});
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/structured_chat/outputParser.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { Callbacks } from "@langchain/core/callbacks/manager";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { OutputParserException } from "@langchain/core/output_parsers";
import { renderTemplate } from "@langchain/core/prompts";
import { AgentActionOutputParser } from "../types.js";
import {
AGENT_ACTION_FORMAT_INSTRUCTIONS,
FORMAT_INSTRUCTIONS,
} from "./prompt.js";
import { OutputFixingParser } from "../../output_parsers/fix.js";
/**
 * Parses the raw text output of a StructuredChatAgent step. The model is
 * expected to emit a markdown code block containing a JSON blob with an
 * "action" key (a tool name or "Final Answer") and an "action_input" key.
 * Extends `AgentActionOutputParser` and returns either an `AgentAction`
 * (run a tool) or an `AgentFinish` (final response).
 */
export class StructuredChatOutputParser extends AgentActionOutputParser {
  lc_namespace = ["langchain", "agents", "structured_chat"];

  private toolNames: string[];

  constructor(fields: { toolNames: string[] }) {
    // Pass the typed fields object up explicitly; the original
    // `super(...arguments)` spread the untyped `arguments` object, which
    // defeats type checking and is discouraged in TypeScript.
    super(fields);
    this.toolNames = fields.toolNames;
  }

  /**
   * Parses the given text and returns an `AgentAction` or `AgentFinish`
   * object.
   * @param text The raw model output to parse.
   * @returns A Promise that resolves to an `AgentAction` or `AgentFinish` object.
   * @throws OutputParserException if no markdown code block is found or the
   *   contained JSON cannot be parsed.
   */
  async parse(text: string): Promise<AgentAction | AgentFinish> {
    try {
      // Grab the contents of the (optionally ```json-tagged) code block.
      const regex = /```(?:json)?(.*)(```)/gs;
      const actionMatch = regex.exec(text);
      if (actionMatch === null) {
        // NOTE(review): this throw is caught and re-wrapped by the catch
        // below, so callers see the "Failed to parse" message — confirm
        // this is intended before changing.
        throw new OutputParserException(
          `Could not parse an action. The agent action must be within a markdown code block, and "action" must be a provided tool or "Final Answer"`
        );
      }
      const response = JSON.parse(actionMatch[1].trim());
      const { action, action_input } = response;
      if (action === "Final Answer") {
        return { returnValues: { output: action_input }, log: text };
      }
      // Any other action is treated as a tool invocation.
      return { tool: action, toolInput: action_input || {}, log: text };
    } catch (e) {
      throw new OutputParserException(
        `Failed to parse. Text: "${text}". Error: ${e}`
      );
    }
  }

  /**
   * Returns the format instructions for parsing the output of an agent
   * action in the style of the StructuredChatAgent.
   * @returns A string representing the format instructions.
   */
  getFormatInstructions(): string {
    return renderTemplate(AGENT_ACTION_FORMAT_INSTRUCTIONS, "f-string", {
      tool_names: this.toolNames.join(", "),
    });
  }
}
/**
 * An interface for the arguments used to construct a
 * `StructuredChatOutputParserWithRetries` instance.
 */
export interface StructuredChatOutputParserArgs {
  /** Parser used directly when no output-fixing parser is provided. */
  baseParser?: StructuredChatOutputParser;
  /** LLM-backed parser that can retry and repair malformed model output. */
  outputFixingParser?: OutputFixingParser<AgentAction | AgentFinish>;
  /** Names of the available tools; interpolated into the format instructions. */
  toolNames?: string[];
}
/**
 * Wraps a `StructuredChatOutputParser` together with an optional
 * `OutputFixingParser`. When an output-fixing parser is configured,
 * parsing is delegated to it so malformed model output can be repaired
 * by an LLM; otherwise the base parser is used directly.
 * @example
 * ```typescript
 * const outputParser = StructuredChatOutputParserWithRetries.fromLLM(
 *   new ChatOpenAI({ temperature: 0 }),
 *   {
 *     toolNames: ["calculator", "random-number-generator"],
 *   },
 * );
 * const result = await outputParser.parse(
 *   "What is a random number between 5 and 10 raised to the second power?"
 * );
 * ```
 */
export class StructuredChatOutputParserWithRetries extends AgentActionOutputParser {
  lc_namespace = ["langchain", "agents", "structured_chat"];

  private baseParser: StructuredChatOutputParser;

  private outputFixingParser?: OutputFixingParser<AgentAction | AgentFinish>;

  private toolNames: string[] = [];

  constructor(fields: StructuredChatOutputParserArgs) {
    super(fields);
    if (fields.toolNames != null) {
      this.toolNames = fields.toolNames;
    }
    this.outputFixingParser = fields?.outputFixingParser;
    this.baseParser =
      fields?.baseParser ??
      new StructuredChatOutputParser({ toolNames: this.toolNames });
  }

  /**
   * Parses the given text into an `AgentAction` or `AgentFinish`.
   * Delegates to the output-fixing parser when one is configured.
   * @param text The raw model output to parse.
   * @param callbacks Optional callbacks forwarded to the fixing parser.
   * @returns A Promise that resolves to an `AgentAction` or `AgentFinish` object.
   */
  async parse(
    text: string,
    callbacks?: Callbacks
  ): Promise<AgentAction | AgentFinish> {
    const fixer = this.outputFixingParser;
    return fixer !== undefined
      ? fixer.parse(text, callbacks)
      : this.baseParser.parse(text);
  }

  /**
   * Returns the format instructions for parsing the output of an agent
   * action in the style of the StructuredChatAgent.
   * @returns A string representing the format instructions.
   */
  getFormatInstructions(): string {
    const toolNameList = this.toolNames.join(", ");
    return renderTemplate(FORMAT_INSTRUCTIONS, "f-string", {
      tool_names: toolNameList,
    });
  }

  /**
   * Builds an instance whose base parser is wrapped in an LLM-backed
   * `OutputFixingParser`, so parse failures are retried via the model.
   * @param llm A `BaseLanguageModel` instance used for output fixing.
   * @param options Base parser and/or tool names for the new instance.
   * @returns A new `StructuredChatOutputParserWithRetries` instance.
   */
  static fromLLM(
    llm: BaseLanguageModelInterface,
    options: Omit<StructuredChatOutputParserArgs, "outputFixingParser">
  ): StructuredChatOutputParserWithRetries {
    const { toolNames = [], baseParser: providedParser } = options;
    const baseParser =
      providedParser ?? new StructuredChatOutputParser({ toolNames });
    return new StructuredChatOutputParserWithRetries({
      baseParser,
      outputFixingParser: OutputFixingParser.fromLLM(llm, baseParser),
      toolNames: options.toolNames,
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/structured_chat/index.ts | import { zodToJsonSchema, JsonSchema7ObjectType } from "zod-to-json-schema";
import type { StructuredToolInterface } from "@langchain/core/tools";
import {
isOpenAITool,
type BaseLanguageModel,
type BaseLanguageModelInterface,
type ToolDefinition,
} from "@langchain/core/language_models/base";
import { RunnablePassthrough } from "@langchain/core/runnables";
import type { BasePromptTemplate } from "@langchain/core/prompts";
import {
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
PromptTemplate,
} from "@langchain/core/prompts";
import { AgentStep } from "@langchain/core/agents";
import { isStructuredTool } from "@langchain/core/utils/function_calling";
import { LLMChain } from "../../chains/llm_chain.js";
import { Optional } from "../../types/type-utils.js";
import {
Agent,
AgentArgs,
AgentRunnableSequence,
OutputParserArgs,
} from "../agent.js";
import { AgentInput } from "../types.js";
import { StructuredChatOutputParserWithRetries } from "./outputParser.js";
import { FORMAT_INSTRUCTIONS, PREFIX, SUFFIX } from "./prompt.js";
import { renderTextDescriptionAndArgs } from "../../tools/render.js";
import { formatLogToString } from "../format_scratchpad/log.js";
/**
 * Interface for arguments used to create a prompt for a
 * StructuredChatAgent.
 */
export interface StructuredChatCreatePromptArgs {
  /** String to put after the list of tools. Defaults to `SUFFIX`. */
  suffix?: string;
  /** String to put before the list of tools. Defaults to `PREFIX`. */
  prefix?: string;
  /** String to use directly as the human message template. */
  humanMessageTemplate?: string;
  /** List of input variables the final prompt will expect. */
  inputVariables?: string[];
  /** List of historical prompts from memory. */
  memoryPrompts?: BaseMessagePromptTemplate[];
}

/**
 * Type for input data for creating a StructuredChatAgent, with the
 * 'outputParser' property made optional (a default parser is constructed
 * by the agent when it is omitted).
 */
export type StructuredChatAgentInput = Optional<AgentInput, "outputParser">;
/**
 * Agent that interoperates with Structured Tools using React logic.
 * @augments Agent
 * @deprecated Use the {@link https://api.js.langchain.com/functions/langchain.agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}.
 */
export class StructuredChatAgent extends Agent {
  static lc_name() {
    return "StructuredChatAgent";
  }

  lc_namespace = ["langchain", "agents", "structured_chat"];

  constructor(input: StructuredChatAgentInput) {
    // Fall back to the default output parser when the caller omits one.
    const outputParser =
      input?.outputParser ?? StructuredChatAgent.getDefaultOutputParser();
    super({ ...input, outputParser });
  }

  _agentType() {
    return "structured-chat-zero-shot-react-description" as const;
  }

  observationPrefix() {
    return "Observation: ";
  }

  llmPrefix() {
    return "Thought:";
  }

  // Stop sequence: cut generation before the model fabricates an observation.
  _stop(): string[] {
    return ["Observation:"];
  }

  /**
   * Validates that all provided tools have a description. Throws an error
   * if any tool lacks a description.
   * @param tools Array of StructuredTool instances to validate.
   */
  static validateTools(tools: StructuredToolInterface[]) {
    const descriptionlessTool = tools.find((tool) => !tool.description);
    if (descriptionlessTool) {
      const msg =
        `Got a tool ${descriptionlessTool.name} without a description.` +
        ` This agent requires descriptions for all tools.`;
      throw new Error(msg);
    }
  }

  /**
   * Returns a default output parser for the StructuredChatAgent. If an LLM
   * is provided, it creates an output parser with retry logic from the LLM.
   * @param fields Optional fields to customize the output parser. Can include an LLM and a list of tool names.
   * @returns An instance of StructuredChatOutputParserWithRetries.
   */
  static getDefaultOutputParser(
    fields?: OutputParserArgs & {
      toolNames: string[];
    }
  ) {
    if (fields?.llm) {
      // With an LLM available, malformed output can be repaired via retries.
      return StructuredChatOutputParserWithRetries.fromLLM(fields.llm, {
        toolNames: fields.toolNames,
      });
    }
    return new StructuredChatOutputParserWithRetries({
      toolNames: fields?.toolNames,
    });
  }

  /**
   * Constructs the agent's scratchpad from a list of steps. If the agent's
   * scratchpad is not empty, it prepends a message indicating that the
   * agent has not seen any previous work.
   * @param steps Array of AgentStep instances to construct the scratchpad from.
   * @returns A Promise that resolves to a string representing the agent's scratchpad.
   */
  async constructScratchPad(steps: AgentStep[]): Promise<string> {
    const agentScratchpad = await super.constructScratchPad(steps);
    if (agentScratchpad) {
      return `This was your previous work (but I haven't seen any of it! I only see what you return as final answer):\n${agentScratchpad}`;
    }
    return agentScratchpad;
  }

  /**
   * Creates a string representation of the schemas of the provided tools.
   * Each line is "name: description, args: {json-schema properties}".
   * @param tools Array of StructuredTool instances to create the schemas string from.
   * @returns A string representing the schemas of the provided tools.
   */
  static createToolSchemasString(tools: StructuredToolInterface[]) {
    return tools
      .map(
        (tool) =>
          `${tool.name}: ${tool.description}, args: ${JSON.stringify(
            (zodToJsonSchema(tool.schema) as JsonSchema7ObjectType).properties
          )}`
      )
      .join("\n");
  }

  /**
   * Create prompt in the style of the agent.
   *
   * @param tools - List of tools the agent will have access to, used to format the prompt.
   * @param args - Arguments to create the prompt with.
   * @param args.suffix - String to put after the list of tools.
   * @param args.prefix - String to put before the list of tools.
   * @param args.inputVariables List of input variables the final prompt will expect.
   * @param args.memoryPrompts List of historical prompts from memory.
   */
  static createPrompt(
    tools: StructuredToolInterface[],
    args?: StructuredChatCreatePromptArgs
  ) {
    const {
      prefix = PREFIX,
      suffix = SUFFIX,
      inputVariables = ["input", "agent_scratchpad"],
      humanMessageTemplate = "{input}\n\n{agent_scratchpad}",
      memoryPrompts = [],
    } = args ?? {};
    // System message = prefix + format instructions + suffix, with the tool
    // schemas and names baked in as partial variables.
    const template = [prefix, FORMAT_INSTRUCTIONS, suffix].join("\n\n");
    const messages = [
      new SystemMessagePromptTemplate(
        new PromptTemplate({
          template,
          inputVariables,
          partialVariables: {
            tool_schemas: StructuredChatAgent.createToolSchemasString(tools),
            tool_names: tools.map((tool) => tool.name).join(", "),
          },
        })
      ),
      ...memoryPrompts,
      new HumanMessagePromptTemplate(
        new PromptTemplate({
          template: humanMessageTemplate,
          inputVariables,
        })
      ),
    ];
    return ChatPromptTemplate.fromMessages(messages);
  }

  /**
   * Creates a StructuredChatAgent from an LLM and a list of tools.
   * Validates the tools, creates a prompt, and sets up an LLM chain for the
   * agent.
   * @param llm BaseLanguageModel instance to create the agent from.
   * @param tools Array of StructuredTool instances to create the agent from.
   * @param args Optional arguments to customize the creation of the agent. Can include arguments for creating the prompt and AgentArgs.
   * @returns A new instance of StructuredChatAgent.
   */
  static fromLLMAndTools(
    llm: BaseLanguageModelInterface,
    tools: StructuredToolInterface[],
    args?: StructuredChatCreatePromptArgs & AgentArgs
  ) {
    StructuredChatAgent.validateTools(tools);
    const prompt = StructuredChatAgent.createPrompt(tools, args);
    const outputParser =
      args?.outputParser ??
      StructuredChatAgent.getDefaultOutputParser({
        llm,
        toolNames: tools.map((tool) => tool.name),
      });
    const chain = new LLMChain({
      prompt,
      llm,
      callbacks: args?.callbacks,
    });
    return new StructuredChatAgent({
      llmChain: chain,
      outputParser,
      allowedTools: tools.map((t) => t.name),
    });
  }
}
/**
 * Params used by the createStructuredChatAgent function.
 */
export type CreateStructuredChatAgentParams = {
  /** LLM to use as the agent. */
  llm: BaseLanguageModelInterface;
  /**
   * Tools this agent has access to. May be LangChain structured tools or
   * OpenAI tool definitions, but not a mix of both.
   */
  tools: (StructuredToolInterface | ToolDefinition)[];
  /**
   * The prompt to use. Must have input keys for
   * `tools`, `tool_names`, and `agent_scratchpad`.
   */
  prompt: BasePromptTemplate;
  /**
   * Whether to invoke the underlying model in streaming mode,
   * allowing streaming of intermediate steps. Defaults to true.
   */
  streamRunnable?: boolean;
};
/**
 * Create an agent aimed at supporting tools with multiple inputs.
 * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
 * @returns A runnable sequence representing an agent. It takes as input all the same input
 *     variables as the prompt passed in does. It returns as output either an
 *     AgentAction or AgentFinish.
 *
 * @example
 * ```typescript
 * import { AgentExecutor, createStructuredChatAgent } from "langchain/agents";
 * import { pull } from "langchain/hub";
 * import type { ChatPromptTemplate } from "@langchain/core/prompts";
 * import { AIMessage, HumanMessage } from "@langchain/core/messages";
 *
 * import { ChatOpenAI } from "@langchain/openai";
 *
 * // Define the tools the agent will have access to.
 * const tools = [...];
 *
 * // Get the prompt to use - you can modify this!
 * // If you want to see the prompt in full, you can at:
 * // https://smith.langchain.com/hub/hwchase17/structured-chat-agent
 * const prompt = await pull<ChatPromptTemplate>(
 *   "hwchase17/structured-chat-agent"
 * );
 *
 * const llm = new ChatOpenAI({
 *   temperature: 0,
 *   modelName: "gpt-3.5-turbo-1106",
 * });
 *
 * const agent = await createStructuredChatAgent({
 *   llm,
 *   tools,
 *   prompt,
 * });
 *
 * const agentExecutor = new AgentExecutor({
 *   agent,
 *   tools,
 * });
 *
 * const result = await agentExecutor.invoke({
 *   input: "what is LangChain?",
 * });
 *
 * // With chat history
 * const result2 = await agentExecutor.invoke({
 *   input: "what's my name?",
 *   chat_history: [
 *     new HumanMessage("hi! my name is cob"),
 *     new AIMessage("Hello Cob! How can I assist you today?"),
 *   ],
 * });
 * ```
 */
export async function createStructuredChatAgent({
  llm,
  tools,
  prompt,
  streamRunnable,
}: CreateStructuredChatAgentParams) {
  // Fail fast if the prompt lacks any of the variables this agent fills in.
  const requiredVariables = ["tools", "tool_names", "agent_scratchpad"];
  const missingVariables = requiredVariables.filter(
    (variable) => !prompt.inputVariables.includes(variable)
  );
  if (missingVariables.length > 0) {
    throw new Error(
      `Provided prompt is missing required input variables: ${JSON.stringify(
        missingVariables
      )}`
    );
  }
  // Tool names are needed both for the prompt and the output parser, and the
  // extraction differs between OpenAI-style and structured tool definitions.
  let toolNames: string[];
  if (tools.every(isOpenAITool)) {
    toolNames = tools.map((tool) => tool.function.name);
  } else if (tools.every(isStructuredTool)) {
    toolNames = tools.map((tool) => tool.name);
  } else {
    throw new Error(
      "All tools must be either OpenAI or Structured tools, not a mix."
    );
  }
  const partialedPrompt = await prompt.partial({
    tools: renderTextDescriptionAndArgs(tools),
    tool_names: toolNames.join(", "),
  });
  // TODO: Add .bind to core runnable interface.
  const llmWithStop = (llm as BaseLanguageModel).bind({
    stop: ["Observation"],
  });
  return AgentRunnableSequence.fromRunnables(
    [
      RunnablePassthrough.assign({
        agent_scratchpad: (input: { steps: AgentStep[] }) =>
          formatLogToString(input.steps),
      }),
      partialedPrompt,
      llmWithStop,
      StructuredChatOutputParserWithRetries.fromLLM(llm, {
        toolNames,
      }),
    ],
    {
      name: "StructuredChatAgent",
      streamRunnable,
      singleAction: true,
    }
  );
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/structured_chat/prompt.ts | export const PREFIX = `Answer the following questions truthfully and as best you can.`;
export const AGENT_ACTION_FORMAT_INSTRUCTIONS = `Output a JSON markdown code snippet containing a valid JSON blob (denoted below by $JSON_BLOB).
This $JSON_BLOB must have a "action" key (with the name of the tool to use) and an "action_input" key (tool input).
Valid "action" values: "Final Answer" (which you must use when giving your final response to the user), or one of [{tool_names}].
The $JSON_BLOB must be valid, parseable JSON and only contain a SINGLE action. Here is an example of an acceptable output:
\`\`\`json
{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}
\`\`\`
Remember to include the surrounding markdown code snippet delimiters (begin with "\`\`\`" json and close with "\`\`\`")!
`;
export const FORMAT_INSTRUCTIONS = `You have access to the following tools.
You must format your inputs to these tools to match their "JSON schema" definitions below.
"JSON Schema" is a declarative language that allows you to annotate and validate JSON documents.
For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}}
would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings.
Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here are the JSON Schema instances for the tools you have access to:
{tool_schemas}
The way you use the tools is as follows:
------------------------
${AGENT_ACTION_FORMAT_INSTRUCTIONS}
If you are using a tool, "action_input" must adhere to the tool's input schema, given above.
------------------------
ALWAYS use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action:
\`\`\`json
$JSON_BLOB
\`\`\`
Observation: the result of the action
... (this Thought/Action/Observation can repeat N times)
Thought: I now know the final answer
Action:
\`\`\`json
{{
"action": "Final Answer",
"action_input": "Final response to human"
}}
\`\`\``;
export const SUFFIX = `Begin! Reminder to ALWAYS use the above format, and to use tools if appropriate.`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/openai_functions/index.ts | import type {
BaseLanguageModelInterface,
BaseLanguageModelInput,
BaseFunctionCallOptions,
} from "@langchain/core/language_models/base";
import type { StructuredToolInterface } from "@langchain/core/tools";
import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { Runnable, RunnablePassthrough } from "@langchain/core/runnables";
import { ChatOpenAI, ChatOpenAICallOptions } from "@langchain/openai";
import type {
AgentAction,
AgentFinish,
AgentStep,
} from "@langchain/core/agents";
import { convertToOpenAIFunction } from "@langchain/core/utils/function_calling";
import {
AIMessage,
BaseMessage,
FunctionMessage,
SystemMessage,
BaseMessageChunk,
} from "@langchain/core/messages";
import { ChainValues } from "@langchain/core/utils/types";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
BasePromptTemplate,
} from "@langchain/core/prompts";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { Agent, AgentArgs, AgentRunnableSequence } from "../agent.js";
import { AgentInput } from "../types.js";
import { PREFIX } from "./prompt.js";
import { LLMChain } from "../../chains/llm_chain.js";
import {
FunctionsAgentAction,
OpenAIFunctionsAgentOutputParser,
} from "../openai/output_parser.js";
import { formatToOpenAIFunctionMessages } from "../format_scratchpad/openai_functions.js";
// Surfaces a model's declared `CallOptions` type when present; falls back to
// `any` for models that do not declare one.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type CallOptionsIfAvailable<T> = T extends { CallOptions: infer CO } ? CO : any;
/**
 * Type guard: an action is a FunctionsAgentAction exactly when it carries
 * a defined `messageLog`.
 * @param action The action to check.
 * @returns True if the action is a FunctionsAgentAction, false otherwise.
 */
function isFunctionsAgentAction(
  action: AgentAction | FunctionsAgentAction
): action is FunctionsAgentAction {
  const candidate = action as FunctionsAgentAction;
  return candidate.messageLog !== undefined;
}
/**
 * Converts a single agent step into the messages to replay to the model:
 * the recorded model messages plus a FunctionMessage with the tool's
 * observation, or a plain AIMessage when no message log exists.
 * @param action The action taken at this step.
 * @param observation The tool output produced by executing the action.
 */
function _convertAgentStepToMessages(
  action: AgentAction | FunctionsAgentAction,
  observation: string
) {
  // The explicit `!== undefined` re-check narrows the optional property for TS.
  if (isFunctionsAgentAction(action) && action.messageLog !== undefined) {
    const toolResult = new FunctionMessage(observation, action.tool);
    return [...action.messageLog, toolResult];
  }
  return [new AIMessage(action.log)];
}
/**
 * Flattens a list of agent steps into the message history that is resent
 * to the model on the next call.
 * @param intermediateSteps Steps taken so far in this agent run.
 */
export function _formatIntermediateSteps(
  intermediateSteps: AgentStep[]
): BaseMessage[] {
  const messages: BaseMessage[] = [];
  for (const { action, observation } of intermediateSteps) {
    messages.push(..._convertAgentStepToMessages(action, observation));
  }
  return messages;
}
/**
 * Interface for the input data required to create an OpenAIAgent.
 */
export interface OpenAIAgentInput extends AgentInput {
  /** Tools the agent may call; converted to OpenAI function schemas at plan time. */
  tools: StructuredToolInterface[];
}

/**
 * Interface for the arguments required to create a prompt for an
 * OpenAIAgent.
 */
export interface OpenAIAgentCreatePromptArgs {
  /** System-prompt prefix; defaults to the package-level PREFIX. */
  prefix?: string;
  /** Optional system message. NOTE(review): `createPrompt` only reads `prefix` — confirm this field is consumed elsewhere. */
  systemMessage?: SystemMessage;
}
/**
 * Class representing an agent for the OpenAI chat model in LangChain. It
 * extends the Agent class and provides additional functionality specific
 * to the OpenAIAgent type.
 *
 * @deprecated Use the {@link https://api.js.langchain.com/functions/langchain.agents.createOpenAIFunctionsAgent.html | createOpenAIFunctionsAgent method instead}.
 */
export class OpenAIAgent extends Agent {
  static lc_name() {
    return "OpenAIAgent";
  }

  lc_namespace = ["langchain", "agents", "openai"];

  _agentType() {
    return "openai-functions" as const;
  }

  observationPrefix() {
    return "Observation: ";
  }

  llmPrefix() {
    return "Thought:";
  }

  // Stop sequence: cut generation before the model fabricates an observation.
  _stop(): string[] {
    return ["Observation:"];
  }

  // Tools exposed to the model as OpenAI function schemas in plan().
  tools: StructuredToolInterface[];

  outputParser: OpenAIFunctionsAgentOutputParser =
    new OpenAIFunctionsAgentOutputParser();

  constructor(input: Omit<OpenAIAgentInput, "outputParser">) {
    // The parser is fixed for this agent type, so any provided one is dropped.
    super({ ...input, outputParser: undefined });
    this.tools = input.tools;
  }

  /**
   * Creates a prompt for the OpenAIAgent using the provided tools and
   * fields.
   * @param _tools The tools to be used in the prompt (unused; functions are bound at call time).
   * @param fields Optional fields for creating the prompt.
   * @returns A BasePromptTemplate object representing the created prompt.
   */
  static createPrompt(
    _tools: StructuredToolInterface[],
    fields?: OpenAIAgentCreatePromptArgs
  ): BasePromptTemplate {
    const { prefix = PREFIX } = fields || {};
    return ChatPromptTemplate.fromMessages([
      SystemMessagePromptTemplate.fromTemplate(prefix),
      new MessagesPlaceholder("chat_history"),
      HumanMessagePromptTemplate.fromTemplate("{input}"),
      new MessagesPlaceholder("agent_scratchpad"),
    ]);
  }

  /**
   * Creates an OpenAIAgent from a BaseLanguageModel and a list of tools.
   * @param llm The BaseLanguageModel to use; must be an OpenAI chat model.
   * @param tools The tools to be used by the agent.
   * @param args Optional arguments for creating the agent.
   * @returns An instance of OpenAIAgent.
   */
  static fromLLMAndTools(
    llm: BaseLanguageModelInterface,
    tools: StructuredToolInterface[],
    args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, "callbacks">
  ) {
    OpenAIAgent.validateTools(tools);
    // Function calling requires an OpenAI chat model specifically.
    if (llm._modelType() !== "base_chat_model" || llm._llmType() !== "openai") {
      throw new Error("OpenAIAgent requires an OpenAI chat model");
    }
    const prompt = OpenAIAgent.createPrompt(tools, args);
    const chain = new LLMChain({
      prompt,
      llm,
      callbacks: args?.callbacks,
    });
    return new OpenAIAgent({
      llmChain: chain,
      allowedTools: tools.map((t) => t.name),
      tools,
    });
  }

  /**
   * Constructs a scratch pad from a list of agent steps.
   * @param steps The steps to include in the scratch pad.
   * @returns A string or a list of BaseMessages representing the constructed scratch pad.
   */
  async constructScratchPad(
    steps: AgentStep[]
  ): Promise<string | BaseMessage[]> {
    return _formatIntermediateSteps(steps);
  }

  /**
   * Plans the next action or finish state of the agent based on the
   * provided steps, inputs, and optional callback manager.
   * @param steps The steps to consider in planning.
   * @param inputs The inputs to consider in planning.
   * @param callbackManager Optional CallbackManager to use in planning.
   * @returns A Promise that resolves to an AgentAction or AgentFinish object representing the planned action or finish state.
   */
  async plan(
    steps: Array<AgentStep>,
    inputs: ChainValues,
    callbackManager?: CallbackManager
  ): Promise<AgentAction | AgentFinish> {
    // Add scratchpad and stop to inputs
    const thoughts = await this.constructScratchPad(steps);
    const newInputs: ChainValues = {
      ...inputs,
      agent_scratchpad: thoughts,
    };
    if (this._stop().length !== 0) {
      newInputs.stop = this._stop();
    }
    // Split inputs between prompt and llm
    const llm = this.llmChain.llm as
      | ChatOpenAI
      | Runnable<
          BaseLanguageModelInput,
          BaseMessageChunk,
          ChatOpenAICallOptions
        >;
    const valuesForPrompt = { ...newInputs };
    // Function schemas always go to the model call, not the prompt.
    const valuesForLLM: CallOptionsIfAvailable<typeof llm> = {
      functions: this.tools.map((tool) => convertToOpenAIFunction(tool)),
    };
    // Any input key the model recognizes as a call option is routed to the
    // model invocation and removed from the prompt variables.
    const callKeys =
      "callKeys" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];
    for (const key of callKeys) {
      if (key in inputs) {
        valuesForLLM[key as keyof CallOptionsIfAvailable<typeof llm>] =
          inputs[key];
        delete valuesForPrompt[key];
      }
    }
    const promptValue = await this.llmChain.prompt.formatPromptValue(
      valuesForPrompt
    );
    const message = await (
      llm as Runnable<
        BaseLanguageModelInput,
        BaseMessageChunk,
        ChatOpenAICallOptions
      >
    ).invoke(promptValue.toChatMessages(), {
      ...valuesForLLM,
      callbacks: callbackManager,
    });
    return this.outputParser.parseAIMessage(message);
  }
}
/**
 * Params used by the createOpenAIFunctionsAgent function.
 */
export type CreateOpenAIFunctionsAgentParams = {
  /**
   * LLM to use as the agent. Should work with OpenAI function calling,
   * so must either be an OpenAI model that supports that or a wrapper of
   * a different model that adds in equivalent support.
   */
  llm: BaseChatModel<BaseFunctionCallOptions>;
  /** Tools this agent has access to; each is converted to an OpenAI function schema. */
  tools: StructuredToolInterface[];
  /** The prompt to use, must have an input key for `agent_scratchpad`. */
  prompt: ChatPromptTemplate;
  /**
   * Whether to invoke the underlying model in streaming mode,
   * allowing streaming of intermediate steps. Defaults to true.
   */
  streamRunnable?: boolean;
};
/**
 * Create an agent that uses OpenAI-style function calling.
 * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
 * @returns A runnable sequence representing an agent. It takes as input all the same input
 *     variables as the prompt passed in does. It returns as output either an
 *     AgentAction or AgentFinish.
 *
 * @example
 * ```typescript
 * import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents";
 * import { pull } from "langchain/hub";
 * import type { ChatPromptTemplate } from "@langchain/core/prompts";
 * import { AIMessage, HumanMessage } from "@langchain/core/messages";
 *
 * import { ChatOpenAI } from "@langchain/openai";
 *
 * // Define the tools the agent will have access to.
 * const tools = [...];
 *
 * // Get the prompt to use - you can modify this!
 * // If you want to see the prompt in full, you can at:
 * // https://smith.langchain.com/hub/hwchase17/openai-functions-agent
 * const prompt = await pull<ChatPromptTemplate>(
 *   "hwchase17/openai-functions-agent"
 * );
 *
 * const llm = new ChatOpenAI({
 *   temperature: 0,
 * });
 *
 * const agent = await createOpenAIFunctionsAgent({
 *   llm,
 *   tools,
 *   prompt,
 * });
 *
 * const agentExecutor = new AgentExecutor({
 *   agent,
 *   tools,
 * });
 *
 * const result = await agentExecutor.invoke({
 *   input: "what is LangChain?",
 * });
 *
 * // With chat history
 * const result2 = await agentExecutor.invoke({
 *   input: "what's my name?",
 *   chat_history: [
 *     new HumanMessage("hi! my name is cob"),
 *     new AIMessage("Hello Cob! How can I assist you today?"),
 *   ],
 * });
 * ```
 */
export async function createOpenAIFunctionsAgent({
  llm,
  tools,
  prompt,
  streamRunnable,
}: CreateOpenAIFunctionsAgentParams) {
  // The scratchpad slot is mandatory: intermediate steps are injected there.
  if (!prompt.inputVariables.includes("agent_scratchpad")) {
    throw new Error(
      `Prompt must have an input variable named "agent_scratchpad".\n` +
        `Found ${JSON.stringify(prompt.inputVariables)} instead.`
    );
  }
  // Expose every tool to the model as an OpenAI function schema.
  const llmWithTools = llm.bind({
    functions: tools.map((tool) => convertToOpenAIFunction(tool)),
  });
  return AgentRunnableSequence.fromRunnables(
    [
      RunnablePassthrough.assign({
        agent_scratchpad: (input: { steps: AgentStep[] }) =>
          formatToOpenAIFunctionMessages(input.steps),
      }),
      prompt,
      llmWithTools,
      new OpenAIFunctionsAgentOutputParser(),
    ],
    {
      name: "OpenAIFunctionsAgent",
      streamRunnable,
      singleAction: true,
    }
  );
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/openai_functions/output_parser.ts | import type { OpenAIClient } from "@langchain/openai";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { BaseMessage, isBaseMessage } from "@langchain/core/messages";
import { ChatGeneration } from "@langchain/core/outputs";
import { OutputParserException } from "@langchain/core/output_parsers";
import { AgentActionOutputParser } from "../types.js";
/**
 * Type that represents an agent action with an optional message log.
 * `messageLog` preserves the exact model messages that produced the action
 * so they can be replayed to the model on the next turn.
 */
export type FunctionsAgentAction = AgentAction & {
  messageLog?: BaseMessage[];
};
/**
 * Output parser for OpenAI function-calling agents: turns a chat model
 * message into either a tool invocation (when the message carries a
 * `function_call`) or a final answer.
 *
 * @example
 * ```typescript
 *
 * const prompt = ChatPromptTemplate.fromMessages([
 *   ["ai", "You are a helpful assistant"],
 *   ["human", "{input}"],
 *   new MessagesPlaceholder("agent_scratchpad"),
 * ]);
 *
 * const modelWithFunctions = new ChatOpenAI({
 *   modelName: "gpt-4",
 *   temperature: 0,
 * }).bind({
 *   functions: tools.map((tool) => convertToOpenAIFunction(tool)),
 * });
 *
 * const runnableAgent = RunnableSequence.from([
 *   {
 *     input: (i) => i.input,
 *     agent_scratchpad: (i) => formatAgentSteps(i.steps),
 *   },
 *   prompt,
 *   modelWithFunctions,
 *   new OpenAIFunctionsAgentOutputParser(),
 * ]);
 *
 * const result = await runnableAgent.invoke({
 *   input: "What is the weather in New York?",
 *   steps: agentSteps,
 * });
 *
 * ```
 */
export class OpenAIFunctionsAgentOutputParser extends AgentActionOutputParser {
  lc_namespace = ["langchain", "agents", "openai"];

  static lc_name() {
    return "OpenAIFunctionsAgentOutputParser";
  }

  // This parser works on whole messages, so plain-text parsing is rejected.
  async parse(text: string): Promise<AgentAction | AgentFinish> {
    throw new Error(
      `OpenAIFunctionsAgentOutputParser can only parse messages.\nPassed input: ${text}`
    );
  }

  // Only the first generation is inspected; it must be a ChatGeneration.
  async parseResult(generations: ChatGeneration[]) {
    if ("message" in generations[0] && isBaseMessage(generations[0].message)) {
      return this.parseAIMessage(generations[0].message);
    }
    throw new Error(
      "parseResult on OpenAIFunctionsAgentOutputParser only works on ChatGeneration output"
    );
  }

  /**
   * Parses the output message into a FunctionsAgentAction or AgentFinish
   * object.
   * @param message The BaseMessage to parse.
   * @returns A FunctionsAgentAction (when the message has a `function_call`)
   *   or an AgentFinish (otherwise).
   * @throws OutputParserException if the function arguments are not valid JSON.
   */
  parseAIMessage(message: BaseMessage): FunctionsAgentAction | AgentFinish {
    if (message.content && typeof message.content !== "string") {
      throw new Error("This agent cannot parse non-string model responses.");
    }
    if (message.additional_kwargs.function_call) {
      // eslint-disable-next-line prefer-destructuring
      const function_call: OpenAIClient.Chat.ChatCompletionMessage.FunctionCall =
        message.additional_kwargs.function_call;
      try {
        // Absent/empty arguments default to an empty tool input.
        const toolInput = function_call.arguments
          ? JSON.parse(function_call.arguments)
          : {};
        return {
          tool: function_call.name as string,
          toolInput,
          log: `Invoking "${function_call.name}" with ${
            function_call.arguments ?? "{}"
          }\n${message.content}`,
          // Keep the originating message so it can be replayed next turn.
          messageLog: [message],
        };
      } catch (error) {
        throw new OutputParserException(
          `Failed to parse function arguments from chat model response. Text: "${function_call.arguments}". ${error}`
        );
      }
    } else {
      // No function call: the model's content is the final answer.
      return {
        returnValues: { output: message.content },
        log: message.content,
      };
    }
  }

  getFormatInstructions(): string {
    throw new Error(
      "getFormatInstructions not implemented inside OpenAIFunctionsAgentOutputParser."
    );
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/openai_functions/prompt.ts | export const PREFIX = `You are a helpful AI assistant.`;
export const SUFFIX = ``;
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/openai_tools/index.ts | import type { StructuredToolInterface } from "@langchain/core/tools";
import type {
BaseChatModel,
BaseChatModelCallOptions,
} from "@langchain/core/language_models/chat_models";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { RunnablePassthrough } from "@langchain/core/runnables";
import { OpenAIClient } from "@langchain/openai";
import { convertToOpenAITool } from "@langchain/core/utils/function_calling";
import { ToolDefinition } from "@langchain/core/language_models/base";
import { formatToOpenAIToolMessages } from "../format_scratchpad/openai_tools.js";
import {
OpenAIToolsAgentOutputParser,
type ToolsAgentStep,
} from "./output_parser.js";
import { AgentRunnableSequence } from "../agent.js";
export { OpenAIToolsAgentOutputParser, type ToolsAgentStep };
/**
* Params used by the createOpenAIToolsAgent function.
*/
export type CreateOpenAIToolsAgentParams = {
/**
* LLM to use as the agent. Should work with OpenAI tool calling,
* so must either be an OpenAI model that supports that or a wrapper of
* a different model that adds in equivalent support.
*/
llm: BaseChatModel<
BaseChatModelCallOptions & {
tools?:
| StructuredToolInterface[]
| OpenAIClient.ChatCompletionTool[]
// eslint-disable-next-line @typescript-eslint/no-explicit-any
| any[];
}
>;
/** Tools this agent has access to. */
tools: StructuredToolInterface[] | ToolDefinition[];
/** The prompt to use, must have an input key of `agent_scratchpad`. */
prompt: ChatPromptTemplate;
/**
* Whether to invoke the underlying model in streaming mode,
* allowing streaming of intermediate steps. Defaults to true.
*/
streamRunnable?: boolean;
};
/**
* Create an agent that uses OpenAI-style tool calling.
* @param params Params required to create the agent. Includes an LLM, tools, and prompt.
* @returns A runnable sequence representing an agent. It takes as input all the same input
* variables as the prompt passed in does. It returns as output either an
* AgentAction or AgentFinish.
*
* @example
* ```typescript
* import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents";
* import { pull } from "langchain/hub";
* import type { ChatPromptTemplate } from "@langchain/core/prompts";
* import { AIMessage, HumanMessage } from "@langchain/core/messages";
*
* import { ChatOpenAI } from "@langchain/openai";
*
* // Define the tools the agent will have access to.
* const tools = [...];
*
* // Get the prompt to use - you can modify this!
* // If you want to see the prompt in full, you can at:
* // https://smith.langchain.com/hub/hwchase17/openai-tools-agent
* const prompt = await pull<ChatPromptTemplate>(
* "hwchase17/openai-tools-agent"
* );
*
* const llm = new ChatOpenAI({
* temperature: 0,
* modelName: "gpt-3.5-turbo-1106",
* });
*
* const agent = await createOpenAIToolsAgent({
* llm,
* tools,
* prompt,
* });
*
* const agentExecutor = new AgentExecutor({
* agent,
* tools,
* });
*
* const result = await agentExecutor.invoke({
* input: "what is LangChain?",
* });
*
* // With chat history
* const result2 = await agentExecutor.invoke({
* input: "what's my name?",
* chat_history: [
* new HumanMessage("hi! my name is cob"),
* new AIMessage("Hello Cob! How can I assist you today?"),
* ],
* });
* ```
*/
export async function createOpenAIToolsAgent({
  llm,
  tools,
  prompt,
  streamRunnable,
}: CreateOpenAIToolsAgentParams) {
  // The scratchpad placeholder is where intermediate agent steps are
  // injected; without it the agent cannot see its prior tool invocations.
  const hasScratchpad = prompt.inputVariables.includes("agent_scratchpad");
  if (!hasScratchpad) {
    throw new Error(
      [
        `Prompt must have an input variable named "agent_scratchpad".`,
        `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
      ].join("\n")
    );
  }
  // Bind the converted tool schemas to the model so it can emit tool calls.
  const llmWithTools = llm.bind({
    tools: tools.map((tool) => convertToOpenAITool(tool)),
  });
  const agent = AgentRunnableSequence.fromRunnables(
    [
      RunnablePassthrough.assign({
        agent_scratchpad: (input: { steps: ToolsAgentStep[] }) =>
          formatToOpenAIToolMessages(input.steps),
      }),
      prompt,
      llmWithTools,
      new OpenAIToolsAgentOutputParser(),
    ],
    {
      name: "OpenAIToolsAgent",
      streamRunnable,
      // OpenAI tool calling may request several tool invocations per turn.
      singleAction: false,
    }
  );
  return agent;
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/openai_tools/output_parser.ts | import type { OpenAIClient } from "@langchain/openai";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { BaseMessage, isBaseMessage } from "@langchain/core/messages";
import { ChatGeneration } from "@langchain/core/outputs";
import { OutputParserException } from "@langchain/core/output_parsers";
import { AgentMultiActionOutputParser } from "../types.js";
import {
ToolsAgentAction,
ToolsAgentStep,
} from "../tool_calling/output_parser.js";
export type { ToolsAgentAction, ToolsAgentStep };
/**
* @example
* ```typescript
* const prompt = ChatPromptTemplate.fromMessages([
* ["ai", "You are a helpful assistant"],
* ["human", "{input}"],
* new MessagesPlaceholder("agent_scratchpad"),
* ]);
*
* const runnableAgent = RunnableSequence.from([
* {
* input: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
* agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) =>
* formatToOpenAIToolMessages(i.steps),
* },
* prompt,
* new ChatOpenAI({
* modelName: "gpt-3.5-turbo-1106",
* temperature: 0,
* }).bind({ tools: tools.map((tool) => convertToOpenAITool(tool)) }),
* new OpenAIToolsAgentOutputParser(),
* ]).withConfig({ runName: "OpenAIToolsAgent" });
*
* const result = await runnableAgent.invoke({
* input:
* "What is the sum of the current temperature in San Francisco, New York, and Tokyo?",
* });
* ```
*/
export class OpenAIToolsAgentOutputParser extends AgentMultiActionOutputParser {
  lc_namespace = ["langchain", "agents", "openai"];

  static lc_name() {
    return "OpenAIToolsAgentOutputParser";
  }

  /**
   * Always throws: this parser operates on chat messages, not raw text.
   * @param text Raw model output (unsupported).
   */
  async parse(text: string): Promise<AgentAction[] | AgentFinish> {
    throw new Error(
      // Fixed: message previously referenced OpenAIFunctionsAgentOutputParser.
      `OpenAIToolsAgentOutputParser can only parse messages.\nPassed input: ${text}`
    );
  }

  /**
   * Parses model generations; only ChatGeneration results (which carry a
   * message) are supported. Delegates to `parseAIMessage`.
   * @param generations Generations returned by the model.
   */
  async parseResult(generations: ChatGeneration[]) {
    if ("message" in generations[0] && isBaseMessage(generations[0].message)) {
      return this.parseAIMessage(generations[0].message);
    }
    throw new Error(
      // Fixed: message previously referenced OpenAIFunctionsAgentOutputParser.
      "parseResult on OpenAIToolsAgentOutputParser only works on ChatGeneration output"
    );
  }

  /**
   * Parses the output message into a ToolsAgentAction[] or AgentFinish
   * object.
   * @param message The BaseMessage to parse.
   * @returns A ToolsAgentAction[] or AgentFinish object.
   */
  parseAIMessage(message: BaseMessage): ToolsAgentAction[] | AgentFinish {
    if (message.content && typeof message.content !== "string") {
      throw new Error("This agent cannot parse non-string model responses.");
    }
    if (message.additional_kwargs.tool_calls) {
      const toolCalls: OpenAIClient.Chat.ChatCompletionMessageToolCall[] =
        message.additional_kwargs.tool_calls;
      try {
        return toolCalls.map((toolCall, i) => {
          // Tool arguments arrive as a JSON string; absence means "no args".
          const toolInput = toolCall.function.arguments
            ? JSON.parse(toolCall.function.arguments)
            : {};
          // Attach the source message only to the first action to avoid
          // duplicating it in the agent scratchpad.
          const messageLog = i === 0 ? [message] : [];
          return {
            tool: toolCall.function.name as string,
            toolInput,
            toolCallId: toolCall.id,
            log: `Invoking "${toolCall.function.name}" with ${
              toolCall.function.arguments ?? "{}"
            }\n${message.content}`,
            messageLog,
          };
        });
      } catch (error) {
        throw new OutputParserException(
          `Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(
            toolCalls
          )}". ${error}`
        );
      }
    } else {
      // No tool calls requested: the agent is finished.
      return {
        returnValues: { output: message.content },
        log: message.content,
      };
    }
  }

  getFormatInstructions(): string {
    throw new Error(
      "getFormatInstructions not implemented inside OpenAIToolsAgentOutputParser."
    );
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tool_calling/index.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StructuredToolInterface } from "@langchain/core/tools";
import { RunnablePassthrough } from "@langchain/core/runnables";
import {
LanguageModelLike,
ToolDefinition,
} from "@langchain/core/language_models/base";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { AgentRunnableSequence } from "../agent.js";
import {
ToolCallingAgentOutputParser,
ToolsAgentStep,
} from "./output_parser.js";
import { formatToToolMessages } from "../format_scratchpad/tool_calling.js";
/**
 * Runtime check for whether a language-model-like value is a chat model.
 * Uses the `_modelType()` marker method rather than `instanceof` so it
 * works across duplicate copies of the base class.
 */
function _isBaseChatModel(x: LanguageModelLike): x is BaseChatModel {
  const candidate = x as BaseChatModel;
  if (typeof candidate._modelType !== "function") {
    return false;
  }
  return candidate._modelType() === "base_chat_model";
}
/**
 * Params used by the createToolCallingAgent function.
 */
export type CreateToolCallingAgentParams = {
  /**
   * LLM to use as the agent. Must support tool calling: either a chat model
   * implementing `bindTools`, or a model that already has tools bound.
   */
  llm: LanguageModelLike;
  /** Tools this agent has access to. */
  tools: StructuredToolInterface[] | ToolDefinition[];
  /** The prompt to use, must have an input key of `agent_scratchpad`. */
  prompt: ChatPromptTemplate;
  /**
   * Whether to invoke the underlying model in streaming mode,
   * allowing streaming of intermediate steps. Defaults to true.
   */
  streamRunnable?: boolean;
};
/**
* Create an agent that uses tools.
* @param params Params required to create the agent. Includes an LLM, tools, and prompt.
* @returns A runnable sequence representing an agent. It takes as input all the same input
* variables as the prompt passed in does. It returns as output either an
* AgentAction or AgentFinish.
* @example
* ```typescript
* import { ChatAnthropic } from "@langchain/anthropic";
* import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
* import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
*
* const prompt = ChatPromptTemplate.fromMessages(
* [
* ["system", "You are a helpful assistant"],
* ["placeholder", "{chat_history}"],
* ["human", "{input}"],
* ["placeholder", "{agent_scratchpad}"],
* ]
* );
*
*
* const llm = new ChatAnthropic({
* modelName: "claude-3-opus-20240229",
* temperature: 0,
* });
*
* // Define the tools the agent will have access to.
* const tools = [...];
*
* const agent = createToolCallingAgent({ llm, tools, prompt });
*
* const agentExecutor = new AgentExecutor({ agent, tools });
*
* const result = await agentExecutor.invoke({input: "what is LangChain?"});
*
* // Using with chat history
* import { AIMessage, HumanMessage } from "@langchain/core/messages";
*
* const result2 = await agentExecutor.invoke(
* {
* input: "what's my name?",
* chat_history: [
* new HumanMessage({content: "hi! my name is bob"}),
* new AIMessage({content: "Hello Bob! How can I assist you today?"}),
* ],
* }
* );
* ```
*/
export function createToolCallingAgent({
  llm,
  tools,
  prompt,
  streamRunnable,
}: CreateToolCallingAgentParams) {
  // The scratchpad placeholder is where intermediate agent steps are
  // injected; without it the agent cannot see its prior tool invocations.
  if (!prompt.inputVariables.includes("agent_scratchpad")) {
    throw new Error(
      [
        `Prompt must have an input variable named "agent_scratchpad".`,
        `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
      ].join("\n")
    );
  }
  let modelWithTools;
  if (_isBaseChatModel(llm)) {
    if (llm.bindTools === undefined) {
      throw new Error(
        // Fixed: the JS/TS method is `bindTools()`; the old message used
        // Python's `bind_tools()` spelling.
        `This agent requires that the "bindTools()" method be implemented on the input model.`
      );
    }
    modelWithTools = llm.bindTools(tools);
  } else {
    // Non-chat models are assumed to already have tools bound by the caller.
    modelWithTools = llm;
  }
  const agent = AgentRunnableSequence.fromRunnables(
    [
      RunnablePassthrough.assign({
        agent_scratchpad: (input: { steps: ToolsAgentStep[] }) =>
          formatToToolMessages(input.steps),
      }),
      prompt,
      modelWithTools,
      new ToolCallingAgentOutputParser(),
    ],
    {
      name: "ToolCallingAgent",
      streamRunnable,
      // Tool-calling models may request several tool invocations per turn.
      singleAction: false,
    }
  );
  return agent;
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/tool_calling/output_parser.ts | import { AgentAction, AgentFinish, AgentStep } from "@langchain/core/agents";
import {
AIMessage,
BaseMessage,
isBaseMessage,
} from "@langchain/core/messages";
import { OutputParserException } from "@langchain/core/output_parsers";
import { ChatGeneration } from "@langchain/core/outputs";
import { ToolCall } from "@langchain/core/messages/tool";
import { AgentMultiActionOutputParser } from "../types.js";
/**
 * Type that represents an agent action with an optional message log.
 */
export type ToolsAgentAction = AgentAction & {
  // ID of the tool call this action answers; used to pair tool results
  // back to the originating call.
  toolCallId: string;
  messageLog?: BaseMessage[];
};

/** An agent step whose action is a tool-calling action. */
export type ToolsAgentStep = AgentStep & {
  action: ToolsAgentAction;
};
/**
 * Convert a model AIMessage into either a list of tool actions to execute
 * or an AgentFinish when no tool calls were requested.
 *
 * Prefers the structured `message.tool_calls` field; falls back to parsing
 * the raw provider payload in `additional_kwargs.tool_calls`.
 */
export function parseAIMessageToToolAction(
  message: AIMessage
): ToolsAgentAction[] | AgentFinish {
  const contentAsString =
    typeof message.content === "string"
      ? message.content
      : JSON.stringify(message.content);

  let calls: ToolCall[];
  if (message.tool_calls !== undefined && message.tool_calls.length > 0) {
    calls = message.tool_calls;
  } else {
    const rawCalls = message.additional_kwargs.tool_calls;
    if (rawCalls === undefined || rawCalls.length === 0) {
      // No tool calls at all: the agent is done; surface its final answer.
      return {
        returnValues: { output: message.content },
        log: contentAsString,
      };
    }
    // Best effort parsing of the raw provider payload.
    calls = [];
    for (const rawCall of rawCalls) {
      const functionName = rawCall.function?.name;
      try {
        const args = JSON.parse(rawCall.function.arguments);
        calls.push({ name: functionName, args, id: rawCall.id });
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
      } catch (e: any) {
        throw new OutputParserException(
          `Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(
            calls
          )}". ${e}`
        );
      }
    }
  }

  return calls.map((call, index) => ({
    tool: call.name as string,
    toolInput: call.args,
    toolCallId: call.id ?? "",
    log: `Invoking "${call.name}" with ${JSON.stringify(
      call.args ?? {}
    )}\n${contentAsString}`,
    // Only the first action carries the source message, to avoid
    // duplicating it in the scratchpad.
    messageLog: index === 0 ? [message] : [],
  }));
}
/**
 * Output parser for tool-calling agents. Delegates chat-message parsing to
 * `parseAIMessageToToolAction`; raw-text parsing is unsupported.
 */
export class ToolCallingAgentOutputParser extends AgentMultiActionOutputParser {
  lc_namespace = ["langchain", "agents", "tool_calling"];

  static lc_name() {
    return "ToolCallingAgentOutputParser";
  }

  /** Always throws: this parser operates on chat messages, not raw text. */
  async parse(text: string): Promise<AgentAction[] | AgentFinish> {
    throw new Error(
      `ToolCallingAgentOutputParser can only parse messages.\nPassed input: ${text}`
    );
  }

  /** Parses ChatGeneration results (which carry a message) into actions. */
  async parseResult(generations: ChatGeneration[]) {
    if ("message" in generations[0] && isBaseMessage(generations[0].message)) {
      return parseAIMessageToToolAction(generations[0].message);
    }
    throw new Error(
      "parseResult on ToolCallingAgentOutputParser only works on ChatGeneration output"
    );
  }

  getFormatInstructions(): string {
    throw new Error(
      "getFormatInstructions not implemented inside ToolCallingAgentOutputParser."
    );
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/axios-types.ts | import type { AxiosRequestConfig } from "axios";
import { EventSourceMessage } from "@langchain/core/utils/event_source_parse";
/** Axios request config for server-sent-event (SSE) streaming responses. */
export interface StreamingAxiosRequestConfig extends AxiosRequestConfig {
  responseType: "stream";

  /**
   * Called when a message is received. NOTE: Unlike the default browser
   * EventSource.onmessage, this callback is called for _all_ events,
   * even ones with a custom `event` field.
   */
  onmessage?: (ev: EventSourceMessage) => void;
}

/** Either a streaming config or a plain axios request config. */
export type StreamingAxiosConfiguration =
  | StreamingAxiosRequestConfig
  | AxiosRequestConfig;
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/axios-fetch-adapter.js | /* eslint-disable no-plusplus */
/* eslint-disable prefer-template */
/* eslint-disable prefer-arrow-callback */
/* eslint-disable no-var */
/* eslint-disable vars-on-top */
/* eslint-disable no-param-reassign */
/* eslint-disable import/no-extraneous-dependencies */
/**
* This is copied from @vespaiach/axios-fetch-adapter, which exposes an ESM
* module without setting the "type" field in package.json.
*/
import axios from "axios";
import {
EventStreamContentType,
getLines,
getBytes,
getMessages,
} from "./event-source-parse.js";
function tryJsonStringify(data) {
try {
return JSON.stringify(data);
} catch (e) {
return data;
}
}
/**
* In order to avoid import issues with axios 1.x, copying here the internal
* utility functions that we used to import directly from axios.
*/
// Copied from axios/lib/core/settle.js
// Resolves when `config.validateStatus` accepts the status (or when no
// status/validator is present); otherwise rejects with an axios-style
// error whose message includes the response body for easier debugging.
function settle(resolve, reject, response) {
  const { validateStatus } = response.config;
  if (!response.status || !validateStatus || validateStatus(response.status)) {
    resolve(response);
  } else {
    reject(
      createError(
        `Request failed with status code ${response.status} and body ${
          typeof response.data === "string"
            ? response.data
            : tryJsonStringify(response.data)
        }`,
        response.config,
        null,
        response.request,
        response
      )
    );
  }
}
// Copied from axios/lib/helpers/isAbsoluteURL.js
// A URL is considered absolute if it begins with "<scheme>://" or "//"
// (protocol-relative URL). RFC 3986 defines a scheme as a letter followed
// by any combination of letters, digits, plus, period, or hyphen.
function isAbsoluteURL(url) {
  const absolutePattern = /^([a-z][a-z\d+\-.]*:)?\/\//i;
  return absolutePattern.test(url);
}
// Copied from axios/lib/helpers/combineURLs.js
// Joins a base URL and a relative URL with exactly one "/" between them.
// An empty/falsy relative URL returns the base unchanged.
function combineURLs(baseURL, relativeURL) {
  if (!relativeURL) {
    return baseURL;
  }
  const trimmedBase = baseURL.replace(/\/+$/, "");
  const trimmedRelative = relativeURL.replace(/^\/+/, "");
  return trimmedBase + "/" + trimmedRelative;
}
// Copied from axios/lib/helpers/buildURL.js
// Percent-encodes a query component, then restores characters axios keeps
// readable in query strings (":", "$", ",", "[", "]") and encodes spaces
// as "+".
function encode(val) {
  let out = encodeURIComponent(val);
  out = out.replace(/%3A/gi, ":");
  out = out.replace(/%24/g, "$");
  out = out.replace(/%2C/gi, ",");
  out = out.replace(/%20/g, "+");
  out = out.replace(/%5B/gi, "[");
  out = out.replace(/%5D/gi, "]");
  return out;
}
// Builds a URL by appending serialized params. Serializer preference order:
// a custom `paramsSerializer`, URLSearchParams#toString, then axios's own
// key/value serialization (array values get "[]"-suffixed keys, dates become
// ISO strings, objects are JSON-stringified). Any "#" fragment is dropped
// before the query string is appended.
function buildURL(url, params, paramsSerializer) {
  if (!params) {
    return url;
  }
  var serializedParams;
  if (paramsSerializer) {
    serializedParams = paramsSerializer(params);
  } else if (isURLSearchParams(params)) {
    serializedParams = params.toString();
  } else {
    var parts = [];
    forEach(params, function serialize(val, key) {
      if (val === null || typeof val === "undefined") {
        return;
      }
      if (isArray(val)) {
        key = `${key}[]`;
      } else {
        // Normalize scalars to one-element arrays so both cases share the
        // loop below.
        val = [val];
      }
      forEach(val, function parseValue(v) {
        if (isDate(v)) {
          v = v.toISOString();
        } else if (isObject(v)) {
          v = JSON.stringify(v);
        }
        parts.push(`${encode(key)}=${encode(v)}`);
      });
    });
    serializedParams = parts.join("&");
  }
  if (serializedParams) {
    // Strip any hash fragment: fragments are never sent to servers.
    var hashmarkIndex = url.indexOf("#");
    if (hashmarkIndex !== -1) {
      url = url.slice(0, hashmarkIndex);
    }
    url += (url.indexOf("?") === -1 ? "?" : "&") + serializedParams;
  }
  return url;
}
// Copied from axios/lib/core/buildFullPath.js
// Prepends `baseURL` to `requestedURL` unless the requested URL is already
// absolute (has a scheme or is protocol-relative).
function buildFullPath(baseURL, requestedURL) {
  if (baseURL && !isAbsoluteURL(requestedURL)) {
    return combineURLs(baseURL, requestedURL);
  }
  return requestedURL;
}
// Copied from axios/lib/utils.js
function isUndefined(val) {
  return typeof val === "undefined";
}

function isObject(val) {
  return val !== null && typeof val === "object";
}

// Use Object.prototype.toString explicitly instead of the bare global
// `toString` binding, which only resolved via globalThis's prototype
// chain and breaks in stricter module environments.
function isDate(val) {
  return Object.prototype.toString.call(val) === "[object Date]";
}

function isURLSearchParams(val) {
  return Object.prototype.toString.call(val) === "[object URLSearchParams]";
}

function isArray(val) {
  return Array.isArray(val);
}
// Iterates arrays by index and objects by own enumerable key, invoking
// `fn(value, indexOrKey, target)`. Null/undefined are skipped entirely;
// primitives are wrapped so they are visited once like a one-element array.
function forEach(obj, fn) {
  // Don't bother if no value provided.
  if (obj === null || typeof obj === "undefined") {
    return;
  }
  // Force an array if not already something iterable.
  const target = typeof obj !== "object" ? [obj] : obj;
  if (Array.isArray(target)) {
    target.forEach((item, index) => {
      fn.call(null, item, index, target);
    });
  } else {
    Object.keys(target).forEach((key) => {
      fn.call(null, target[key], key, target);
    });
  }
}
// Use Object.prototype.toString explicitly instead of the bare global
// `toString` binding, which only resolved via globalThis's prototype chain.
function isFormData(val) {
  return Object.prototype.toString.call(val) === "[object FormData]";
}
// TODO this needs to be fixed to run in newer browser-like environments
// https://github.com/vespaiach/axios-fetch-adapter/issues/20#issue-1396365322
// Heuristic: React Native / NativeScript define `navigator` but are not
// standard browsers, so exclude them first; otherwise require both
// `window` and `document` globals.
function isStandardBrowserEnv() {
  if (
    typeof navigator !== "undefined" &&
    // eslint-disable-next-line no-undef
    (navigator.product === "ReactNative" ||
      // eslint-disable-next-line no-undef
      navigator.product === "NativeScript" ||
      // eslint-disable-next-line no-undef
      navigator.product === "NS")
  ) {
    return false;
  }
  return typeof window !== "undefined" && typeof document !== "undefined";
}
/**
 * Axios adapter built on the Fetch API:
 * - create a Request object from the axios config
 * - fetch and read the response body
 * - settle (resolve/reject) according to `validateStatus`
 */
export default async function fetchAdapter(config) {
  const request = createRequest(config);
  const data = await getResponse(request, config);
  return new Promise((resolve, reject) => {
    if (data instanceof Error) {
      // getResponse returns (rather than throws) network/abort/timeout errors.
      reject(data);
    } else {
      // Allow callers to supply a custom settle function via config.
      // eslint-disable-next-line no-unused-expressions
      Object.prototype.toString.call(config.settle) === "[object Function]"
        ? config.settle(resolve, reject, data)
        : settle(resolve, reject, data);
    }
  });
}
/**
 * Fetch API stage two: perform the fetch and retrieve the response body
 * based on the configured response type. Network, abort, and timeout
 * failures are returned (not thrown) as axios-style errors so the caller
 * can reject with them.
 */
async function getResponse(request, config) {
  let stageOne;
  try {
    stageOne = await fetch(request);
  } catch (e) {
    // AbortSignal.timeout() raises TimeoutError; manual aborts raise AbortError.
    if (e && e.name === "AbortError") {
      return createError("Request aborted", config, "ECONNABORTED", request);
    }
    if (e && e.name === "TimeoutError") {
      return createError("Request timeout", config, "ECONNABORTED", request);
    }
    return createError("Network Error", config, "ERR_NETWORK", request);
  }
  // Flatten the Headers object into a plain record for the axios response.
  const headers = {};
  stageOne.headers.forEach((value, key) => {
    headers[key] = value;
  });
  const response = {
    ok: stageOne.ok,
    status: stageOne.status,
    statusText: stageOne.statusText,
    headers,
    config,
    request,
  };
  // 204 No Content (and informational responses) carry no body to read.
  if (stageOne.status >= 200 && stageOne.status !== 204) {
    if (config.responseType === "stream") {
      const contentType = stageOne.headers.get("content-type");
      if (!contentType?.startsWith(EventStreamContentType)) {
        // If the content-type is not stream, response is most likely an error
        if (stageOne.status >= 400) {
          // If the error is a JSON, parse it. Otherwise, return as text
          if (contentType?.startsWith("application/json")) {
            response.data = await stageOne.json();
            return response;
          } else {
            response.data = await stageOne.text();
            return response;
          }
        }
        // If the non-stream response is also not an error, throw
        throw new Error(
          `Expected content-type to be ${EventStreamContentType}, Actual: ${contentType}`
        );
      }
      // Pump the SSE byte stream through the line/message parsers,
      // invoking config.onmessage for each parsed event.
      await getBytes(stageOne.body, getLines(getMessages(config.onmessage)));
    } else {
      switch (config.responseType) {
        case "arraybuffer":
          response.data = await stageOne.arrayBuffer();
          break;
        case "blob":
          response.data = await stageOne.blob();
          break;
        case "json":
          response.data = await stageOne.json();
          break;
        case "formData":
          response.data = await stageOne.formData();
          break;
        default:
          response.data = await stageOne.text();
          break;
      }
    }
  }
  return response;
}
/**
 * Creates a fetch Request object from an axios-style request configuration,
 * translating auth, body, fetch options, timeout signal, credentials, and
 * the final URL (base + path + serialized params).
 */
function createRequest(config) {
  const headers = new Headers(config.headers);

  // HTTP basic authentication
  if (config.auth) {
    const username = config.auth.username || "";
    // encodeURIComponent/decodeURI round-trip keeps multi-byte characters
    // intact when building the Basic credentials string.
    const password = config.auth.password
      ? decodeURI(encodeURIComponent(config.auth.password))
      : "";
    headers.set("Authorization", `Basic ${btoa(`${username}:${password}`)}`);
  }

  const method = config.method.toUpperCase();
  const options = {
    headers,
    method,
  };
  // GET/HEAD requests must not carry a body.
  if (method !== "GET" && method !== "HEAD") {
    options.body = config.data;
    // In these cases the browser will automatically set the correct Content-Type,
    // but only if that header hasn't been set yet. So that's why we're deleting it.
    if (isFormData(options.body) && isStandardBrowserEnv()) {
      headers.delete("Content-Type");
    }
  }
  // Some `fetch` implementations will override the Content-Type to text/plain
  // when body is a string.
  // See https://github.com/langchain-ai/langchainjs/issues/1010
  if (typeof options.body === "string") {
    options.body = new TextEncoder().encode(options.body);
  }
  if (config.mode) {
    options.mode = config.mode;
  }
  if (config.cache) {
    options.cache = config.cache;
  }
  if (config.integrity) {
    options.integrity = config.integrity;
  }
  if (config.redirect) {
    options.redirect = config.redirect;
  }
  if (config.referrer) {
    options.referrer = config.referrer;
  }
  if (config.timeout && config.timeout > 0) {
    options.signal = AbortSignal.timeout(config.timeout);
  }
  if (config.signal) {
    // this overrides the timeout signal if both are set
    options.signal = config.signal;
  }
  // This config is similar to XHR’s withCredentials flag, but with three available values instead of two.
  // So if withCredentials is not set, default value 'same-origin' will be used
  if (!isUndefined(config.withCredentials)) {
    options.credentials = config.withCredentials ? "include" : "omit";
  }
  // for streaming
  if (config.responseType === "stream") {
    options.headers.set("Accept", EventStreamContentType);
  }
  const fullPath = buildFullPath(config.baseURL, config.url);
  const url = buildURL(fullPath, config.params, config.paramsSerializer);
  // Expected browser to throw error if there is any wrong configuration value
  return new Request(url, options);
}
/**
 * Note:
 *
 * From version >= 0.27.0, the createError function was replaced by the
 * AxiosError class, so this copy of the old createError is kept for
 * backward compatibility.
 *
 * Create an Error with the specified message, config, error code, request and response.
 *
 * @param {string} message The error message.
 * @param {Object} config The config.
 * @param {string} [code] The error code (for example, 'ECONNABORTED').
 * @param {Object} [request] The request.
 * @param {Object} [response] The response.
 * @returns {Error} The created error.
 */
function createError(message, config, code, request, response) {
  if (axios.AxiosError && typeof axios.AxiosError === "function") {
    // NOTE(review): `axios.AxiosError[code]` looks up the static code
    // constant (e.g. AxiosError.ECONNABORTED === "ECONNABORTED"); for an
    // unknown code it yields undefined — confirm this matches the intended
    // `code` argument of the AxiosError constructor.
    return new axios.AxiosError(
      message,
      axios.AxiosError[code],
      config,
      request,
      response
    );
  }
  // Pre-0.27 axios: build a plain Error and decorate it.
  const error = new Error(message);
  return enhanceError(error, config, code, request, response);
}
/**
*
* Note:
*
* This function is for backward compatible.
*
*
* Update an Error with the specified config, error code, and response.
*
* @param {Error} error The error to update.
* @param {Object} config The config.
* @param {string} [code] The error code (for example, 'ECONNABORTED').
* @param {Object} [request] The request.
* @param {Object} [response] The response.
* @returns {Error} The error.
*/
function enhanceError(error, config, code, request, response) {
error.config = config;
if (code) {
error.code = code;
}
error.request = request;
error.response = response;
error.isAxiosError = true;
error.toJSON = function toJSON() {
return {
// Standard
message: this.message,
name: this.name,
// Microsoft
description: this.description,
number: this.number,
// Mozilla
fileName: this.fileName,
lineNumber: this.lineNumber,
columnNumber: this.columnNumber,
stack: this.stack,
// Axios
config: this.config,
code: this.code,
status:
this.response && this.response.status ? this.response.status : null,
};
};
return error;
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/tiktoken.ts | export * from "@langchain/core/utils/tiktoken";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/extname.ts | export const extname = (path: string) => `.${path.split(".").pop()}`;
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/prompt-layer.ts | import type { OpenAI as OpenAIClient } from "openai";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
// Reports a completed LLM request to the PromptLayer tracking API and
// returns the parsed JSON response.
// https://github.com/MagnivOrg/promptlayer-js-helper
export const promptLayerTrackRequest = async (
  callerFunc: AsyncCaller,
  functionName: string,
  kwargs:
    | OpenAIClient.CompletionCreateParams
    | OpenAIClient.Chat.CompletionCreateParams,
  plTags: string[] | undefined,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  requestResponse: any,
  startTime: number,
  endTime: number,
  apiKey: string | undefined
) => {
  // Route through the AsyncCaller so retries/concurrency limits apply.
  const promptLayerResp = await callerFunc.call(
    fetch,
    "https://api.promptlayer.com/track-request",
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Accept: "application/json",
      },
      body: JSON.stringify({
        function_name: functionName,
        provider: "langchain",
        kwargs,
        tags: plTags,
        request_response: requestResponse,
        // PromptLayer expects Unix timestamps in seconds; inputs are ms.
        request_start_time: Math.floor(startTime / 1000),
        request_end_time: Math.floor(endTime / 1000),
        api_key: apiKey,
      }),
    }
  );
  return promptLayerResp.json();
};
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/async_caller.ts | export * from "@langchain/core/utils/async_caller";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/sql_utils.ts | import type { DataSource, DataSourceOptions } from "typeorm";
import { PromptTemplate } from "@langchain/core/prompts";
import {
DEFAULT_SQL_DATABASE_PROMPT,
SQL_SAP_HANA_PROMPT,
SQL_MSSQL_PROMPT,
SQL_MYSQL_PROMPT,
SQL_POSTGRES_PROMPT,
SQL_SQLITE_PROMPT,
SQL_ORACLE_PROMPT,
} from "../chains/sql_db/sql_db_prompt.js";
// Shape of one row returned by the information-schema queries below:
// one row per (table, column) pair.
interface RawResultTableAndColumn {
  table_name: string;
  column_name: string;
  data_type: string | undefined;
  is_nullable: string; // "YES" | "NO" as reported by the database
}

/** Common options for constructing a SqlDatabase. */
export interface SqlDatabaseParams {
  includesTables?: Array<string>;
  ignoreTables?: Array<string>;
  sampleRowsInTableInfo?: number;
  customDescription?: Record<string, string>;
}

/** SqlDatabase params when the caller supplies TypeORM datasource options. */
export interface SqlDatabaseOptionsParams extends SqlDatabaseParams {
  appDataSourceOptions: DataSourceOptions;
}

/** SqlDatabase params when the caller supplies a ready TypeORM DataSource. */
export interface SqlDatabaseDataSourceParams extends SqlDatabaseParams {
  appDataSource: DataSource;
}

/** Serialized form of a SqlDatabase, tagged with its type. */
export type SerializedSqlDatabase = SqlDatabaseOptionsParams & {
  _type: string;
};

/** A table and its columns, as introspected from the database. */
export interface SqlTable {
  tableName: string;
  columns: SqlColumn[];
}

export interface SqlColumn {
  columnName: string;
  dataType?: string;
  isNullable?: boolean;
}
/**
 * Throws when any name in `listTables` does not correspond to a table
 * present in `tablesFromDatabase`. An empty list is always valid.
 * The error message starts with `errorPrefixMsg`.
 */
export const verifyListTablesExistInDatabase = (
  tablesFromDatabase: Array<SqlTable>,
  listTables: Array<string>,
  errorPrefixMsg: string
): void => {
  if (listTables.length === 0) {
    return;
  }
  const knownTableNames = new Set(
    tablesFromDatabase.map((table) => table.tableName)
  );
  // Report the first unknown table name, in the order given by the caller.
  const missing = listTables.find((name) => !knownTableNames.has(name));
  if (missing !== undefined) {
    throw new Error(
      `${errorPrefixMsg} the table ${missing} was not found in the database`
    );
  }
};
export const verifyIncludeTablesExistInDatabase = (
tablesFromDatabase: Array<SqlTable>,
includeTables: Array<string>
): void => {
verifyListTablesExistInDatabase(
tablesFromDatabase,
includeTables,
"Include tables not found in database:"
);
};
/**
 * Asserts that every table in `ignoreTables` is present in the database,
 * using the "Ignore tables" error prefix.
 */
export const verifyIgnoreTablesExistInDatabase = (
  tablesFromDatabase: Array<SqlTable>,
  ignoreTables: Array<string>
): void =>
  verifyListTablesExistInDatabase(
    tablesFromDatabase,
    ignoreTables,
    "Ignore tables not found in database:"
  );
/**
 * Groups flat (table, column) rows into an array of SqlTable objects.
 * Tables appear in first-seen order; columns keep the order of the rows.
 *
 * @param rawResultsTableAndColumn - Flat introspection rows, one per column.
 * @returns One SqlTable per distinct table_name.
 */
const formatToSqlTable = (
  rawResultsTableAndColumn: Array<RawResultTableAndColumn>
): Array<SqlTable> => {
  // Map keyed by table name gives O(1) grouping instead of an O(n) find per row,
  // and preserves insertion order like the original array did.
  const tablesByName = new Map<string, SqlTable>();
  for (const oneResult of rawResultsTableAndColumn) {
    const sqlColumn = {
      columnName: oneResult.column_name,
      dataType: oneResult.data_type,
      // Databases report nullability as "YES"/"NO".
      isNullable: oneResult.is_nullable === "YES",
    };
    const existingTable = tablesByName.get(oneResult.table_name);
    if (existingTable) {
      existingTable.columns.push(sqlColumn);
    } else {
      tablesByName.set(oneResult.table_name, {
        tableName: oneResult.table_name,
        columns: [sqlColumn],
      });
    }
  }
  return [...tablesByName.values()];
};
/**
 * Introspects the connected database and returns its tables and columns.
 * Dispatches on the TypeORM driver type; each branch issues the dialect's
 * information-schema (or equivalent) query and normalizes the rows via
 * formatToSqlTable.
 *
 * NOTE(review): schema/database names are interpolated directly into SQL;
 * this assumes they come from trusted configuration, not user input.
 *
 * @param appDataSource - An initialized TypeORM DataSource.
 * @returns Tables with their columns, nullability and data types.
 * @throws Error for driver types that are not implemented.
 */
export const getTableAndColumnsName = async (
  appDataSource: DataSource
): Promise<Array<SqlTable>> => {
  let sql;
  if (appDataSource.options.type === "postgres") {
    const schema = appDataSource.options?.schema ?? "public";
    sql = `SELECT
        t.table_name, 
        c.* 
      FROM 
        information_schema.tables t 
          JOIN information_schema.columns c 
            ON t.table_name = c.table_name 
      WHERE 
        t.table_schema = '${schema}' 
          AND c.table_schema = '${schema}' 
      ORDER BY 
        t.table_name,
        c.ordinal_position;`;
    const rep = await appDataSource.query(sql);
    return formatToSqlTable(rep);
  }
  if (
    appDataSource.options.type === "sqlite" ||
    appDataSource.options.type === "sqljs"
  ) {
    // sqlite has no information_schema; pragma_table_info provides columns.
    sql =
      "SELECT \n" +
      "   m.name AS table_name,\n" +
      "   p.name AS column_name,\n" +
      "   p.type AS data_type,\n" +
      "   CASE \n" +
      "      WHEN p.\"notnull\" = 0 THEN 'YES' \n" +
      "      ELSE 'NO' \n" +
      "   END AS is_nullable \n" +
      "FROM \n" +
      "   sqlite_master m \n" +
      "JOIN \n" +
      "   pragma_table_info(m.name) p \n" +
      "WHERE \n" +
      "   m.type = 'table' AND \n" +
      "   m.name NOT LIKE 'sqlite_%';\n";
    const rep = await appDataSource.query(sql);
    return formatToSqlTable(rep);
  }
  if (
    appDataSource.options.type === "mysql" ||
    appDataSource.options.type === "aurora-mysql"
  ) {
    sql =
      "SELECT " +
      "TABLE_NAME AS table_name, " +
      "COLUMN_NAME AS column_name, " +
      "DATA_TYPE AS data_type, " +
      "IS_NULLABLE AS is_nullable " +
      "FROM INFORMATION_SCHEMA.COLUMNS " +
      `WHERE TABLE_SCHEMA = '${appDataSource.options.database}';`;
    const rep = await appDataSource.query(sql);
    return formatToSqlTable(rep);
  }
  if (appDataSource.options.type === "mssql") {
    const schema = appDataSource.options?.schema;
    // BUG FIX: the previous `${schema && `WHERE …`}` interpolated the literal
    // string "undefined" into the SQL when no schema was configured; a ternary
    // emits an empty string instead.
    const sql = `SELECT
        TABLE_NAME AS table_name,
        COLUMN_NAME AS column_name,
        DATA_TYPE AS data_type,
        IS_NULLABLE AS is_nullable
      FROM INFORMATION_SCHEMA.COLUMNS
      ${schema ? `WHERE TABLE_SCHEMA = '${schema}'` : ""}
      ORDER BY TABLE_NAME, ORDINAL_POSITION;`;
    const rep = await appDataSource.query(sql);
    return formatToSqlTable(rep);
  }
  if (appDataSource.options.type === "sap") {
    const schema = appDataSource.options?.schema ?? "public";
    sql = `SELECT 
        TABLE_NAME,
        COLUMN_NAME,
        DATA_TYPE_NAME AS data_type,
        CASE WHEN IS_NULLABLE='TRUE' THEN 'YES' ELSE 'NO' END AS is_nullable
      FROM TABLE_COLUMNS 
      WHERE SCHEMA_NAME='${schema}'`;
    const rep: Array<{ [key: string]: string }> = await appDataSource.query(
      sql
    );
    // NOTE(review): HANA folds unquoted aliases to uppercase, so result rows
    // expose DATA_TYPE / IS_NULLABLE — confirm against actual driver output.
    const repLowerCase: Array<RawResultTableAndColumn> = [];
    rep.forEach((_rep) =>
      repLowerCase.push({
        table_name: _rep.TABLE_NAME,
        column_name: _rep.COLUMN_NAME,
        data_type: _rep.DATA_TYPE,
        is_nullable: _rep.IS_NULLABLE,
      })
    );
    return formatToSqlTable(repLowerCase);
  }
  if (appDataSource.options.type === "oracle") {
    const schemaName = appDataSource.options.schema;
    // Quoted aliases keep the lowercase keys formatToSqlTable expects.
    const sql = `
      SELECT 
        TABLE_NAME AS "table_name",
        COLUMN_NAME AS "column_name",
        DATA_TYPE AS "data_type",
        NULLABLE AS "is_nullable"
      FROM ALL_TAB_COLS 
      WHERE 
        OWNER = UPPER('${schemaName}')`;
    const rep = await appDataSource.query(sql);
    return formatToSqlTable(rep);
  }
  throw new Error("Database type not implemented yet");
};
/**
 * Flattens a raw driver result into plain text: one line per row, with each
 * column value preceded by a single space. Returns "" for null, non-array,
 * or empty input.
 */
const formatSqlResponseToSimpleTableString = (rawResult: unknown): string => {
  if (!rawResult || !Array.isArray(rawResult) || rawResult.length === 0) {
    return "";
  }
  return rawResult
    .map(
      (row) =>
        `${Object.values(row)
          .map((value) => ` ${value}`)
          .join("")}\n`
    )
    .join("");
};
/**
 * Builds a textual description of the given tables for prompting an LLM:
 * for each table, an optional custom description, a pseudo CREATE TABLE
 * statement, the SELECT used for sampling, the column names, and up to
 * `nbSampleRow` sample rows. Returns "" when `tables` is undefined.
 *
 * NOTE(review): table and schema names are interpolated directly into SQL;
 * assumes trusted introspection metadata, not user input.
 *
 * @param tables - Tables to describe (typically from getTableAndColumnsName).
 * @param appDataSource - Initialized TypeORM DataSource used for sampling.
 * @param nbSampleRow - Number of sample rows to fetch; 0 skips sampling.
 * @param customDescription - Optional per-table descriptions keyed by name.
 * @returns Concatenated info blocks for all tables.
 */
export const generateTableInfoFromTables = async (
  tables: Array<SqlTable> | undefined,
  appDataSource: DataSource,
  nbSampleRow: number,
  customDescription?: Record<string, string>
): Promise<string> => {
  if (!tables) {
    return "";
  }
  let globalString = "";
  for (const currentTable of tables) {
    // Add the custom info of the table
    const tableCustomDescription =
      customDescription &&
      Object.keys(customDescription).includes(currentTable.tableName)
        ? `${customDescription[currentTable.tableName]}\n`
        : "";
    // Add the creation of the table in SQL
    // Resolve the schema used to qualify the CREATE TABLE statement; only some
    // dialects carry a schema in their options.
    let schema = null;
    if (appDataSource.options.type === "postgres") {
      schema = appDataSource.options?.schema ?? "public";
    } else if (appDataSource.options.type === "mssql") {
      schema = appDataSource.options?.schema;
    } else if (appDataSource.options.type === "sap") {
      // SAP HANA defaults a user's objects to a schema named after the user.
      schema =
        appDataSource.options?.schema ??
        appDataSource.options?.username ??
        "public";
    } else if (appDataSource.options.type === "oracle") {
      schema = appDataSource.options.schema;
    }
    let sqlCreateTableQuery = schema
      ? `CREATE TABLE "${schema}"."${currentTable.tableName}" (\n`
      : `CREATE TABLE ${currentTable.tableName} (\n`;
    // entries() yields [index, column]; the index separates columns with ", ".
    for (const [key, currentColumn] of currentTable.columns.entries()) {
      if (key > 0) {
        sqlCreateTableQuery += ", ";
      }
      sqlCreateTableQuery += `${currentColumn.columnName} ${
        currentColumn.dataType
      } ${currentColumn.isNullable ? "" : "NOT NULL"}`;
    }
    sqlCreateTableQuery += ") \n";
    // Dialect-specific SELECT used to pull sample rows (LIMIT vs TOP vs ROWNUM).
    let sqlSelectInfoQuery;
    if (appDataSource.options.type === "mysql") {
      // We use backticks to quote the table names and thus allow for example spaces in table names
      sqlSelectInfoQuery = `SELECT * FROM \`${currentTable.tableName}\` LIMIT ${nbSampleRow};\n`;
    } else if (appDataSource.options.type === "postgres") {
      const schema = appDataSource.options?.schema ?? "public";
      sqlSelectInfoQuery = `SELECT * FROM "${schema}"."${currentTable.tableName}" LIMIT ${nbSampleRow};\n`;
    } else if (appDataSource.options.type === "mssql") {
      const schema = appDataSource.options?.schema;
      sqlSelectInfoQuery = schema
        ? `SELECT TOP ${nbSampleRow} * FROM ${schema}.[${currentTable.tableName}];\n`
        : `SELECT TOP ${nbSampleRow} * FROM [${currentTable.tableName}];\n`;
    } else if (appDataSource.options.type === "sap") {
      const schema =
        appDataSource.options?.schema ??
        appDataSource.options?.username ??
        "public";
      sqlSelectInfoQuery = `SELECT * FROM "${schema}"."${currentTable.tableName}" LIMIT ${nbSampleRow};\n`;
    } else if (appDataSource.options.type === "oracle") {
      // Uses the outer `schema` computed above; note this branch omits the
      // trailing newline the other branches append.
      sqlSelectInfoQuery = `SELECT * FROM "${schema}"."${currentTable.tableName}" WHERE ROWNUM <= '${nbSampleRow}'`;
    } else {
      sqlSelectInfoQuery = `SELECT * FROM "${currentTable.tableName}" LIMIT ${nbSampleRow};\n`;
    }
    // Space-separated column names on one line, mirroring the sample row format.
    const columnNamesConcatString = `${currentTable.columns.reduce(
      (completeString, column) => `${completeString} ${column.columnName}`,
      ""
    )}\n`;
    let sample = "";
    try {
      // nbSampleRow of 0 deliberately skips the query (falsy check).
      const infoObjectResult = nbSampleRow
        ? await appDataSource.query(sqlSelectInfoQuery)
        : null;
      sample = formatSqlResponseToSimpleTableString(infoObjectResult);
    } catch (error) {
      // If the request fails we catch it and only display a log message
      console.log(error);
    }
    // NOTE(review): sqlSelectInfoQuery is included in the output even when the
    // sampling query failed — the query text still documents the table.
    globalString = globalString.concat(
      tableCustomDescription +
        sqlCreateTableQuery +
        sqlSelectInfoQuery +
        columnNamesConcatString +
        sample
    );
  }
  return globalString;
};
/**
 * Selects the dialect-specific SQL prompt template for the given DataSource,
 * falling back to the generic default prompt for unrecognized driver types.
 */
export const getPromptTemplateFromDataSource = (
  appDataSource: DataSource
): PromptTemplate => {
  switch (appDataSource.options.type) {
    case "postgres":
      return SQL_POSTGRES_PROMPT;
    case "sqlite":
      return SQL_SQLITE_PROMPT;
    case "mysql":
      return SQL_MYSQL_PROMPT;
    case "mssql":
      return SQL_MSSQL_PROMPT;
    case "sap":
      return SQL_SAP_HANA_PROMPT;
    case "oracle":
      return SQL_ORACLE_PROMPT;
    default:
      return DEFAULT_SQL_DATABASE_PROMPT;
  }
};
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/hub.ts | import pRetry from "p-retry";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { FileLoader, LoadValues } from "./load.js";
import { extname } from "./extname.js";
/**
 * Performs a fetch that aborts automatically after `timeout` milliseconds,
 * using AbortSignal.timeout so no manual timer cleanup is needed.
 */
const fetchWithTimeout = async (
  url: string,
  init: Omit<RequestInit, "signal"> & { timeout: number }
) => {
  const { timeout, ...fetchInit } = init;
  return fetch(url, {
    ...fetchInit,
    signal: AbortSignal.timeout(timeout),
  });
};
// Matches "lc://<path>" or "lc@<ref>://<path>"; group 1 is "@<ref>", group 2 the path.
const HUB_PATH_REGEX = /lc(@[^:]+)?:\/\/(.*)/;
const URL_PATH_SEPARATOR = "/";
/**
 * Loads a resource addressed by an `lc://` hub URI from the LangChain hub.
 * Returns undefined when the URI is not a hub path or its first path segment
 * does not match `validPrefix`; throws for unsupported file suffixes or a
 * non-200 response after retries.
 */
export const loadFromHub = async <T>(
  uri: string,
  loader: FileLoader<T>,
  validPrefix: string,
  validSuffixes: Set<string>,
  values: LoadValues = {}
): Promise<T | undefined> => {
  const defaultRef =
    getEnvironmentVariable("LANGCHAIN_HUB_DEFAULT_REF") ?? "master";
  const urlBase =
    getEnvironmentVariable("LANGCHAIN_HUB_URL_BASE") ??
    "https://raw.githubusercontent.com/hwchase17/langchain-hub/";
  const match = uri.match(HUB_PATH_REGEX);
  if (!match) {
    // Not a hub URI at all.
    return undefined;
  }
  const [rawRef, remotePath] = match.slice(1);
  // rawRef includes the leading "@"; strip it, or fall back to the default ref.
  const ref = rawRef ? rawRef.slice(1) : defaultRef;
  const firstSegment = remotePath.split(URL_PATH_SEPARATOR)[0];
  if (firstSegment !== validPrefix) {
    return undefined;
  }
  if (!validSuffixes.has(extname(remotePath).slice(1))) {
    throw new Error("Unsupported file type.");
  }
  const url = [urlBase, ref, remotePath].join("/");
  // Retry transient failures; each attempt times out after 5 seconds.
  const res = await pRetry(() => fetchWithTimeout(url, { timeout: 5000 }), {
    retries: 6,
  });
  if (res.status !== 200) {
    throw new Error(`Could not find file at ${url}`);
  }
  return loader(await res.text(), remotePath, values);
};
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/types.ts | /**
* Represents a string value with autocompleted, but not required, suggestions.
*/
export type StringWithAutocomplete<T> = T | (string & Record<never, never>);
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/openapi.ts | import * as yaml from "js-yaml";
import { OpenAPIV3, OpenAPIV3_1 } from "openapi-types";
/**
 * Thin wrapper over an OpenAPI 3.1 document providing strict accessors
 * (throwing when a section is absent) and `$ref` resolution for parameters,
 * schemas, and request bodies.
 */
export class OpenAPISpec {
  constructor(public document: OpenAPIV3_1.Document) {}

  /** URL of the first declared server, or undefined when none are declared. */
  get baseUrl() {
    return this.document.servers ? this.document.servers[0].url : undefined;
  }

  /** Returns the paths object; throws when the spec has none. */
  getPathsStrict() {
    if (!this.document.paths) {
      throw new Error("No paths found in spec");
    }
    return this.document.paths;
  }

  /** Returns components.parameters; throws when absent. */
  getParametersStrict() {
    if (!this.document.components?.parameters) {
      throw new Error("No parameters found in spec");
    }
    return this.document.components.parameters;
  }

  /** Returns components.schemas; throws when absent. */
  getSchemasStrict() {
    if (!this.document.components?.schemas) {
      throw new Error("No schemas found in spec.");
    }
    return this.document.components.schemas;
  }

  /** Returns components.requestBodies; throws when absent. */
  getRequestBodiesStrict() {
    if (!this.document.components?.requestBodies) {
      throw new Error("No request body found in spec.");
    }
    return this.document.components.requestBodies;
  }

  /** Returns the path item for `path`; throws when the path is unknown. */
  getPathStrict(path: string) {
    const pathItem = this.getPathsStrict()[path];
    if (pathItem === undefined) {
      throw new Error(`No path found for "${path}".`);
    }
    return pathItem;
  }

  /** Resolves one level of a parameter `$ref` by its final path segment. */
  getReferencedParameter(ref: OpenAPIV3_1.ReferenceObject) {
    const refComponents = ref.$ref.split("/");
    const refName = refComponents[refComponents.length - 1];
    if (this.getParametersStrict()[refName] === undefined) {
      throw new Error(`No parameter found for "${refName}".`);
    }
    return this.getParametersStrict()[refName];
  }

  /** Follows parameter `$ref` chains until a concrete ParameterObject is reached. */
  getRootReferencedParameter(
    ref: OpenAPIV3_1.ReferenceObject
  ): OpenAPIV3_1.ParameterObject {
    let parameter = this.getReferencedParameter(ref);
    while ((parameter as OpenAPIV3_1.ReferenceObject).$ref !== undefined) {
      parameter = this.getReferencedParameter(
        parameter as OpenAPIV3_1.ReferenceObject
      );
    }
    return parameter as OpenAPIV3_1.ParameterObject;
  }

  /** Resolves one level of a schema `$ref` by its final path segment. */
  getReferencedSchema(
    ref: OpenAPIV3_1.ReferenceObject
  ): OpenAPIV3_1.SchemaObject {
    const refComponents = ref.$ref.split("/");
    const refName = refComponents[refComponents.length - 1];
    const schema = this.getSchemasStrict()[refName];
    if (schema === undefined) {
      throw new Error(`No schema found for "${refName}".`);
    }
    return schema;
  }

  /** Returns the schema itself, resolving a single `$ref` level if present. */
  getSchema(
    schema: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject
  ): OpenAPIV3_1.SchemaObject {
    if ((schema as OpenAPIV3_1.ReferenceObject).$ref !== undefined) {
      return this.getReferencedSchema(schema as OpenAPIV3_1.ReferenceObject);
    }
    return schema;
  }

  /** Follows schema `$ref` chains until a concrete schema is reached. */
  getRootReferencedSchema(ref: OpenAPIV3_1.ReferenceObject) {
    let schema = this.getReferencedSchema(ref);
    while ((schema as OpenAPIV3_1.ReferenceObject).$ref !== undefined) {
      schema = this.getReferencedSchema(schema as OpenAPIV3_1.ReferenceObject);
    }
    return schema as OpenAPIV3_1.ParameterObject;
  }

  /** Resolves one level of a request-body `$ref` by its final path segment. */
  getReferencedRequestBody(ref: OpenAPIV3_1.ReferenceObject) {
    const refComponents = ref.$ref.split("/");
    const refName = refComponents[refComponents.length - 1];
    const requestBodies = this.getRequestBodiesStrict();
    if (requestBodies[refName] === undefined) {
      throw new Error(`No request body found for "${refName}"`);
    }
    return requestBodies[refName];
  }

  /** Follows request-body `$ref` chains until a concrete RequestBodyObject is reached. */
  getRootReferencedRequestBody(ref: OpenAPIV3_1.ReferenceObject) {
    let requestBody = this.getReferencedRequestBody(ref);
    while ((requestBody as OpenAPIV3_1.ReferenceObject).$ref !== undefined) {
      requestBody = this.getReferencedRequestBody(
        requestBody as OpenAPIV3_1.ReferenceObject
      );
    }
    return requestBody as OpenAPIV3_1.RequestBodyObject;
  }

  /** Lists the HTTP methods defined on a path. */
  getMethodsForPath(path: string): OpenAPIV3.HttpMethods[] {
    const pathItem = this.getPathStrict(path);
    // This is an enum in the underlying package.
    // We restate it here to allow "import type" above and not cause warnings in certain envs.
    const possibleMethods = [
      "get",
      "put",
      "post",
      "delete",
      "options",
      "head",
      "patch",
      "trace",
    ];
    return possibleMethods.filter(
      (possibleMethod) =>
        pathItem[possibleMethod as OpenAPIV3.HttpMethods] !== undefined
    ) as OpenAPIV3.HttpMethods[];
  }

  /** Returns path-level parameters with `$ref`s fully resolved. */
  getParametersForPath(path: string) {
    const pathItem = this.getPathStrict(path);
    if (pathItem.parameters === undefined) {
      return [];
    }
    return pathItem.parameters.map((parameter) => {
      if ((parameter as OpenAPIV3_1.ReferenceObject).$ref !== undefined) {
        return this.getRootReferencedParameter(
          parameter as OpenAPIV3_1.ReferenceObject
        );
      }
      return parameter as OpenAPIV3_1.ParameterObject;
    });
  }

  /** Returns the operation object for a path/method pair; throws when missing. */
  getOperation(path: string, method: OpenAPIV3.HttpMethods) {
    const pathItem = this.getPathStrict(path);
    if (pathItem[method] === undefined) {
      // BUG FIX: `path` was previously a literal string in the message
      // instead of being interpolated.
      throw new Error(`No ${method} method found for "${path}".`);
    }
    return pathItem[method];
  }

  /** Returns operation-level parameters with `$ref`s fully resolved. */
  getParametersForOperation(operation: OpenAPIV3_1.OperationObject) {
    if (operation.parameters === undefined) {
      return [];
    }
    return operation.parameters.map((parameter) => {
      if ((parameter as OpenAPIV3_1.ReferenceObject).$ref !== undefined) {
        return this.getRootReferencedParameter(
          parameter as OpenAPIV3_1.ReferenceObject
        );
      }
      return parameter as OpenAPIV3_1.ParameterObject;
    });
  }

  /** Returns the operation's request body with `$ref`s fully resolved. */
  getRequestBodyForOperation(
    operation: OpenAPIV3_1.OperationObject
  ): OpenAPIV3_1.RequestBodyObject {
    const { requestBody } = operation;
    if ((requestBody as OpenAPIV3_1.ReferenceObject)?.$ref !== undefined) {
      return this.getRootReferencedRequestBody(
        requestBody as OpenAPIV3_1.ReferenceObject
      );
    }
    return requestBody as OpenAPIV3_1.RequestBodyObject;
  }

  /**
   * Produces a sanitized operation id (alphanumerics and underscores only),
   * deriving one from the path and method when the spec does not declare one.
   */
  static getCleanedOperationId(
    operation: OpenAPIV3_1.OperationObject,
    path: string,
    method: OpenAPIV3_1.HttpMethods
  ) {
    let { operationId } = operation;
    if (operationId === undefined) {
      // Strip the leading slash BEFORE sanitizing: the old code tested
      // updatedPath.startsWith("/") after "/" had already been replaced
      // with "_", so the check could never match.
      const trimmedPath = path.startsWith("/") ? path.slice(1) : path;
      // BUG FIX: String.prototype.replaceAll throws a TypeError when given
      // a regex without the global flag; the previous /[^a-zA-Z0-9]/ crashed
      // at runtime.
      const updatedPath = trimmedPath.replaceAll(/[^a-zA-Z0-9]/g, "_");
      operationId = `${updatedPath}_${method}`;
    }
    return operationId
      .replaceAll("-", "_")
      .replaceAll(".", "_")
      .replaceAll("/", "_");
  }

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static alertUnsupportedSpec(document: Record<string, any>) {
    // Warn (but continue) for older OpenAPI/Swagger versions; throw only when
    // the document has neither an "openapi" nor a "swagger" version field.
    const warningMessage =
      "This may result in degraded performance. Convert your OpenAPI spec to 3.1.0 for better support.";
    const swaggerVersion = document.swagger;
    const openAPIVersion = document.openapi;
    if (openAPIVersion !== undefined && openAPIVersion !== "3.1.0") {
      console.warn(
        `Attempting to load an OpenAPI ${openAPIVersion} spec. ${warningMessage}`
      );
    } else if (swaggerVersion !== undefined) {
      console.warn(
        `Attempting to load a Swagger ${swaggerVersion} spec. ${warningMessage}`
      );
    } else {
      throw new Error(
        `Attempting to load an unsupported spec:\n\n${JSON.stringify(
          document,
          null,
          2
        )}.`
      );
    }
  }

  /** Wraps a parsed spec object, warning when it is not OpenAPI 3.1.0. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static fromObject(document: Record<string, any>) {
    OpenAPISpec.alertUnsupportedSpec(document);
    return new OpenAPISpec(document as OpenAPIV3_1.Document);
  }

  /** Parses a raw JSON or YAML spec string (JSON is tried first). */
  static fromString(rawString: string) {
    let document;
    try {
      document = JSON.parse(rawString);
    } catch (e) {
      document = yaml.load(rawString);
    }
    return OpenAPISpec.fromString === undefined
      ? OpenAPISpec.fromObject(document)
      : OpenAPISpec.fromObject(document);
  }

  /** Fetches a spec from a URL and parses it. */
  static async fromURL(url: string) {
    const response = await fetch(url);
    const rawDocument = await response.text();
    return OpenAPISpec.fromString(rawDocument);
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/env.ts | export {
isBrowser,
isWebWorker,
isJsDom,
isDeno,
isNode,
getEnv,
type RuntimeEnvironment,
getRuntimeEnvironment,
getEnvironmentVariable,
} from "@langchain/core/utils/env";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/parse.ts | import * as yaml from "yaml";
import { extname } from "./extname.js";
/**
 * Parses serialized file contents according to the file-extension `format`.
 * ".json" uses JSON.parse; ".yml" and ".yaml" use the yaml parser.
 * @throws Error for any other format.
 */
export const loadFileContents = (contents: string, format: string) => {
  if (format === ".json") {
    return JSON.parse(contents);
  }
  if (format === ".yml" || format === ".yaml") {
    return yaml.parse(contents);
  }
  throw new Error(`Unsupported filetype ${format}`);
};
/**
 * Validates a config file's extension and parses its contents.
 * Accepts ".json", ".yml" and ".yaml", optionally narrowed further by
 * `supportedTypes`.
 *
 * @param text - Raw file contents.
 * @param path - File path whose extension determines the parser.
 * @param supportedTypes - Optional allow-list of extensions.
 * @throws Error when the extension is not supported.
 */
export const parseFileConfig = (
  text: string,
  path: string,
  supportedTypes?: string[]
) => {
  const suffix = extname(path);
  // ".yml" added for consistency: loadFileContents already parses it,
  // but this guard previously rejected it.
  if (
    ![".json", ".yml", ".yaml"].includes(suffix) ||
    (supportedTypes && !supportedTypes.includes(suffix))
  ) {
    throw new Error(`Unsupported filetype ${suffix}`);
  }
  return loadFileContents(text, suffix);
};
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/document.ts | import { Document } from "@langchain/core/documents";
/**
 * Joins the page contents of a list of documents into one string,
 * separating consecutive documents with a blank line.
 *
 * @param documents Documents whose `pageContent` should be concatenated.
 * @returns The page contents joined by "\n\n".
 */
export const formatDocumentsAsString = (documents: Document[]): string => {
  const contents = documents.map((document) => document.pageContent);
  return contents.join("\n\n");
};
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/axios-fetch-adapter.d.ts | // eslint-disable-next-line import/no-extraneous-dependencies
import { AxiosRequestConfig, AxiosPromise } from "axios";
export default function fetchAdapter(config: AxiosRequestConfig): AxiosPromise;
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/event-source-parse.ts | export * from "@langchain/core/utils/event_source_parse";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/azure.ts | export interface OpenAIEndpointConfig {
azureOpenAIApiDeploymentName?: string;
azureOpenAIApiInstanceName?: string;
azureOpenAIApiKey?: string;
azureOpenAIBasePath?: string;
baseURL?: string;
}
/**
* This function generates an endpoint URL for (Azure) OpenAI
* based on the configuration parameters provided.
*
* @param {OpenAIEndpointConfig} config - The configuration object for the (Azure) endpoint.
*
* @property {string} config.azureOpenAIApiDeploymentName - The deployment name of Azure OpenAI.
* @property {string} config.azureOpenAIApiInstanceName - The instance name of Azure OpenAI.
* @property {string} config.azureOpenAIApiKey - The API Key for Azure OpenAI.
* @property {string} config.azureOpenAIBasePath - The base path for Azure OpenAI.
* @property {string} config.baseURL - Some other custom base path URL.
*
* The function operates as follows:
* - If both `azureOpenAIBasePath` and `azureOpenAIApiDeploymentName` (plus `azureOpenAIApiKey`) are provided, it returns an URL combining these two parameters (`${azureOpenAIBasePath}/${azureOpenAIApiDeploymentName}`).
* - If `azureOpenAIApiKey` is provided, it checks for `azureOpenAIApiInstanceName` and `azureOpenAIApiDeploymentName` and throws an error if any of these is missing. If both are provided, it generates an URL incorporating these parameters.
* - If none of the above conditions are met, return any custom `baseURL`.
* - The function returns the generated URL as a string, or undefined if no custom paths are specified.
*
* @throws Will throw an error if the necessary parameters for generating the URL are missing.
*
* @returns {string | undefined} The generated (Azure) OpenAI endpoint URL.
*/
/**
 * Builds the (Azure) OpenAI endpoint URL from the given configuration.
 * Without an Azure API key, any custom `baseURL` is returned as-is
 * (possibly undefined). With an Azure key, a base path plus deployment name
 * wins; otherwise instance and deployment names are required to assemble
 * the standard Azure deployment URL.
 *
 * @throws Error when an Azure key is set but the instance or deployment
 *   name needed to build the URL is missing.
 */
export function getEndpoint(config: OpenAIEndpointConfig) {
  const {
    azureOpenAIApiDeploymentName,
    azureOpenAIApiInstanceName,
    azureOpenAIApiKey,
    azureOpenAIBasePath,
    baseURL,
  } = config;

  // Non-Azure usage: fall through to whatever custom base URL was given.
  if (!azureOpenAIApiKey) {
    return baseURL;
  }
  // Azure with an explicit base path takes precedence.
  if (azureOpenAIBasePath && azureOpenAIApiDeploymentName) {
    return `${azureOpenAIBasePath}/${azureOpenAIApiDeploymentName}`;
  }
  if (!azureOpenAIApiInstanceName) {
    throw new Error(
      "azureOpenAIApiInstanceName is required when using azureOpenAIApiKey"
    );
  }
  if (!azureOpenAIApiDeploymentName) {
    throw new Error(
      "azureOpenAIApiDeploymentName is a required parameter when using azureOpenAIApiKey"
    );
  }
  return `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${azureOpenAIApiDeploymentName}`;
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/math.ts | export * from "@langchain/core/utils/math";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/time.ts | /**
* Sleep for a given amount of time.
* @param ms - The number of milliseconds to sleep for. Defaults to 1000.
* @returns A promise that resolves when the sleep is complete.
*/
export async function sleep(ms = 1000): Promise<void> {
  // Await a promise that resolves when the timer fires; the async wrapper
  // keeps the external Promise<void> contract identical.
  await new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/entrypoint_deprecation.ts | import { getEnvironmentVariable } from "./env.js";
/** Arguments accepted by the entrypoint-migration warning helpers. */
interface MigrationWarningArgs {
  oldEntrypointName: string;
  newEntrypointName?: string;
  newPackageName?: string;
}

/**
 * Shared implementation for the per-version migration warnings: builds the
 * deprecation message for an old "langchain/<entrypoint>" import and logs it,
 * unless LANGCHAIN_SUPPRESS_MIGRATION_WARNINGS is set to "true".
 *
 * @param targetVersion - The "langchain" minor version at which the old
 *   entrypoint stops working (e.g. "0.2").
 */
function logMigrationWarning(
  targetVersion: string,
  {
    oldEntrypointName,
    newEntrypointName,
    newPackageName = "@langchain/community",
  }: MigrationWarningArgs
) {
  // undefined => reuse the old entrypoint path; "" => bare package import.
  let finalEntrypointName = "";
  if (newEntrypointName === undefined) {
    finalEntrypointName = `/${oldEntrypointName}`;
  } else if (newEntrypointName !== "") {
    finalEntrypointName = `/${newEntrypointName}`;
  }
  // @langchain/core ships with langchain, so its message omits the
  // "npm install" instructions.
  const warningText =
    newPackageName === "@langchain/core"
      ? [
          `[WARNING]: Importing from "langchain/${oldEntrypointName}" is deprecated.`,
          ``,
          `Instead, please import from "${newPackageName}${finalEntrypointName}".`,
          ``,
          `This will be mandatory after the next "langchain" minor version bump to ${targetVersion}.`,
        ].join("\n")
      : [
          `[WARNING]: Importing from "langchain/${oldEntrypointName}" is deprecated.`,
          ``,
          `Instead, please add the "${newPackageName}" package to your project with e.g.`,
          ``,
          `  $ npm install ${newPackageName}`,
          ``,
          `and import from "${newPackageName}${finalEntrypointName}".`,
          ``,
          `This will be mandatory after the next "langchain" minor version bump to ${targetVersion}.`,
        ].join("\n");
  if (
    getEnvironmentVariable("LANGCHAIN_SUPPRESS_MIGRATION_WARNINGS") !== "true"
  ) {
    console.warn(warningText);
  }
}

/** Warns about entrypoints removed in the 0.1 -> 0.2 migration. */
export function logVersion010MigrationWarning(args: MigrationWarningArgs) {
  logMigrationWarning("0.2", args);
}

/** Warns about entrypoints removed in the 0.2 -> 0.3 migration. */
export function logVersion020MigrationWarning(args: MigrationWarningArgs) {
  logMigrationWarning("0.3", args);
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/stream.ts | export * from "@langchain/core/utils/stream";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/set.ts | /**
* Source: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set#implementing_basic_set_operations
*/
/**
 * returns intersection of two sets
 * (elements of setB that are also in setA, in setB's iteration order)
 */
export function intersection<T>(setA: Set<T>, setB: Set<T>) {
  return new Set<T>([...setB].filter((value) => setA.has(value)));
}
/**
 * returns union of two sets
 * (setA's elements first, then setB's new elements)
 */
export function union<T>(setA: Set<T>, setB: Set<T>) {
  return new Set<T>([...setA, ...setB]);
}
/**
 * returns difference of two sets
 * (elements of setA not present in setB, in setA's iteration order)
 */
export function difference<T>(setA: Set<T>, setB: Set<T>) {
  return new Set<T>([...setA].filter((value) => !setB.has(value)));
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/util/load.ts | // eslint-disable-next-line @typescript-eslint/no-explicit-any
export type LoadValues = Record<string, any>;
export type FileLoader<T> = (
text: string,
filePath: string,
values: LoadValues
) => Promise<T>;
/**
 * Reads a UTF-8 file from disk and feeds its contents to `loader`.
 * Any synchronous failure (read error or loader throwing) is logged and
 * rethrown as a generic "Could not load file" error.
 */
export const loadFromFile = async <T>(
  uri: string,
  loader: FileLoader<T>,
  values: LoadValues = {}
): Promise<T> => {
  try {
    // Dynamic import keeps this module usable in non-Node bundles until called.
    const { readFile } = await import("node:fs/promises");
    const contents = await readFile(uri, { encoding: "utf-8" });
    // Intentionally not awaited here: a rejected loader promise propagates
    // to the caller rather than being wrapped by this catch.
    return loader(contents, uri, values);
  } catch (e) {
    console.error(e);
    throw new Error(`Could not load file at ${uri}`);
  }
};
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/tests/openai-stream.test.ts | import { describe, it, expect, jest } from "@jest/globals";
import { AxiosResponse } from "axios";
import fetchAdapter from "../axios-fetch-adapter.js";
// Test harness: replaces global fetch with a mock that streams the given
// chunks back with the given status/content-type, then drives fetchAdapter
// in stream mode and records what the onmessage callback received.
// Returns the adapter response, collected content deltas, any rejection,
// and whether the "[DONE]" sentinel was seen.
const mockFetchForOpenAIStream = async ({
  chunks,
  status,
  contentType,
}: {
  chunks: Array<string>;
  status: number;
  contentType: string;
}) => {
  // Mock stream response chunks.
  const stream = new ReadableStream({
    async start(controller) {
      chunks.forEach((chunk) => {
        controller.enqueue(new TextEncoder().encode(chunk));
      });
      controller.close();
    },
  });
  // Mock Fetch API call.
  jest.spyOn(global, "fetch").mockImplementation(
    async () =>
      new Response(stream, {
        status,
        headers: {
          "Content-Type": contentType,
        },
      })
  );
  let error: Error | null = null;
  let done = false;
  const receivedChunks: Array<string> = [];
  const resp = await fetchAdapter({
    url: "https://example.com",
    method: "POST",
    responseType: "stream",
    // Collects each SSE event's delta content; "[DONE]" marks end of stream.
    onmessage: (event: { data: string }) => {
      if (event.data?.trim?.() === "[DONE]") {
        done = true;
      } else {
        receivedChunks.push(
          JSON.parse(event.data).choices[0].delta.content ?? ""
        );
      }
    },
  } as unknown as never).catch((err) => {
    // A rejection is captured rather than thrown so tests can assert on it.
    error = err;
    return null;
  });
  return { resp, receivedChunks, error, done } as {
    resp: AxiosResponse | null;
    receivedChunks: Array<string>;
    error: Error | null;
    done: boolean;
  };
};
// Exercises fetchAdapter's SSE handling against OpenAI-shaped responses:
// happy-path streaming, JSON and plain-text error bodies, and content-type
// mismatches.
describe("OpenAI Stream Tests", () => {
  it("should return a 200 response chunk by chunk", async () => {
    // When stream mode enabled, OpenAI responds with a stream of `data: {...}\n\n` chunks
    // followed by `data: [DONE]\n\n`.
    const { resp, receivedChunks, error, done } =
      await mockFetchForOpenAIStream({
        status: 200,
        contentType: "text/event-stream",
        chunks: [
          'data: {"choices":[{"delta":{"role":"assistant"},"index":0,"finish_reason":null}]}\n\n',
          'data: {"choices":[{"delta":{"content":"Hello"},"index":0,"finish_reason":null}]}\n\n',
          'data: {"choices":[{"delta":{"content":" World"},"index":0,"finish_reason":null}]}\n\n',
          'data: {"choices":[{"delta":{"content":"!"},"index":0,"finish_reason":null}]}\n\n',
          'data: {"choices":[{"delta":{},"index":0,"finish_reason":"stop"}]}\n\n',
          "data: [DONE]\n\n",
        ],
      });
    expect(error).toEqual(null);
    expect(resp?.status).toEqual(200);
    // Role-only and finish chunks have no content, hence the empty strings.
    expect(receivedChunks).toEqual(["", "Hello", " World", "!", ""]);
    expect(done).toBe(true);
  });
  it("should handle OpenAI 400 json error", async () => {
    // OpenAI returns errors with application/json content type.
    // Even if stream mode is enabled, the error is returned as a normal JSON body.
    // Error information is in the `error` field.
    const { resp, receivedChunks, error } = await mockFetchForOpenAIStream({
      status: 400,
      contentType: "application/json",
      chunks: [
        JSON.stringify({
          error: {},
        }),
      ],
    });
    expect(error).toEqual(null);
    expect(resp?.status).toEqual(400);
    expect(resp?.data).toEqual({ error: {} });
    expect(receivedChunks).toEqual([]);
  });
  it("should handle 500 non-json error", async () => {
    // Non-JSON error bodies should come back verbatim as the response data.
    const { resp, receivedChunks, error } = await mockFetchForOpenAIStream({
      status: 500,
      contentType: "text/plain",
      chunks: ["Some error message..."],
    });
    expect(error).toEqual(null);
    expect(resp?.status).toEqual(500);
    expect(resp?.data).toEqual("Some error message...");
    expect(receivedChunks).toEqual([]);
  });
  it("should throw on 500 non-json body with json content type", async () => {
    // A body that fails JSON.parse despite the JSON content type should reject.
    const { resp, receivedChunks, error } = await mockFetchForOpenAIStream({
      status: 500,
      contentType: "application/json",
      chunks: ["a non-json error body"],
    });
    expect(resp).toEqual(null);
    expect(error?.message).toContain("Unexpected token");
    expect(receivedChunks).toEqual([]);
  });
  it("should throw the generic error if non-stream content is detected", async () => {
    // A 200 with a non-SSE content type must be rejected explicitly.
    const { resp, receivedChunks, error } = await mockFetchForOpenAIStream({
      status: 200,
      contentType: "text/plain",
      chunks: ["a non-stream body"],
    });
    expect(resp).toEqual(null);
    expect(error?.message).toBe(
      "Expected content-type to be text/event-stream, Actual: text/plain"
    );
    expect(receivedChunks).toEqual([]);
  });
});
// Azure's SSE framing differs from OpenAI's: events arrive batched into
// larger chunks and the final "[DONE]" line may lack the trailing blank line.
describe("Azure OpenAI Stream Tests", () => {
  it("should return a 200 response chunk by chunk", async () => {
    // When stream mode enabled, Azure OpenAI responds with chunks without tailing blank line.
    // In addition, Azure sends chunks in batch.
    const { resp, receivedChunks, error, done } =
      await mockFetchForOpenAIStream({
        status: 200,
        contentType: "text/event-stream",
        chunks: [
          // First batch
          'data: {"choices":[{"delta":{"role":"assistant"},"index":0,"finish_reason":null}]}\n\n' +
            'data: {"choices":[{"delta":{"content":"Hello"},"index":0,"finish_reason":null}]}\n\n' +
            'data: {"choices":[{"delta":{"content":" World"},"index":0,"finish_reason":null}]}\n\n',
          // Second batch
          'data: {"choices":[{"delta":{"content":"!"},"index":0,"finish_reason":null}]}\n\n' +
            'data: {"choices":[{"delta":{},"index":0,"finish_reason":"stop"}]}\n\n' +
            "data: [DONE]\n", // no blank line
        ],
      });
    expect(error).toEqual(null);
    expect(resp?.status).toEqual(200);
    // Same deltas as the non-batched OpenAI case should be recovered.
    expect(receivedChunks).toEqual(["", "Hello", " World", "!", ""]);
    expect(done).toBe(true);
  });
});
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/tests/sql_utils.test.ts | import { test, expect } from "@jest/globals";
import { DataSource } from "typeorm";
import {
getPromptTemplateFromDataSource,
verifyIgnoreTablesExistInDatabase,
verifyIncludeTablesExistInDatabase,
} from "../sql_utils.js";
import { SQL_SQLITE_PROMPT } from "../../chains/sql_db/sql_db_prompt.js";
// NOTE: test titles previously duplicated each other (and said "include"
// while exercising the ignore-table check); titles and local variable names
// now match the function under test.
test("Find include tables when they are there", () => {
  const includeTables = ["user", "score"];
  const allTables = [
    { tableName: "plop", columns: [{ columnName: "id" }] },
    { tableName: "score", columns: [{ columnName: "id" }] },
    { tableName: "user", columns: [{ columnName: "id" }] },
    { tableName: "log", columns: [{ columnName: "id" }] },
  ];
  expect(() =>
    verifyIncludeTablesExistInDatabase(allTables, includeTables)
  ).not.toThrow();
});

test("Throw Error when include tables are not there", () => {
  // "user" is missing from allTables, so verification must throw.
  const includeTables = ["user", "score"];
  const allTables = [
    { tableName: "plop", columns: [{ columnName: "id" }] },
    { tableName: "score", columns: [{ columnName: "id" }] },
    { tableName: "log", columns: [{ columnName: "id" }] },
  ];
  expect(() =>
    verifyIncludeTablesExistInDatabase(allTables, includeTables)
  ).toThrow();
});

test("Find ignore tables when they are there", () => {
  const ignoreTables = ["user", "score"];
  const allTables = [
    { tableName: "user", columns: [{ columnName: "id" }] },
    { tableName: "plop", columns: [{ columnName: "id" }] },
    { tableName: "score", columns: [{ columnName: "id" }] },
    { tableName: "log", columns: [{ columnName: "id" }] },
  ];
  expect(() =>
    verifyIgnoreTablesExistInDatabase(allTables, ignoreTables)
  ).not.toThrow();
});

test("Throw Error when ignore tables are not there", () => {
  // "user" is missing from allTables, so verification must throw.
  const ignoreTables = ["user", "score"];
  const allTables = [
    { tableName: "plop", columns: [{ columnName: "id" }] },
    { tableName: "score", columns: [{ columnName: "id" }] },
    { tableName: "log", columns: [{ columnName: "id" }] },
  ];
  expect(() =>
    verifyIgnoreTablesExistInDatabase(allTables, ignoreTables)
  ).toThrow();
});
// NOTE(review): skipped — the DataSource points at a local "Chinook.db"
// SQLite file, presumably unavailable in CI. TODO: confirm whether
// getPromptTemplateFromDataSource actually touches the file before un-skipping.
test.skip("return sqlite template when the DataSource is sqlite", () => {
  const datasource = new DataSource({
    type: "sqlite",
    database: "Chinook.db",
  });
  const promptTemplate = getPromptTemplateFromDataSource(datasource);
  expect(promptTemplate).toEqual(SQL_SQLITE_PROMPT);
});
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/tests/async_caller.int.test.ts | import { test, expect } from "@jest/globals";
import { OpenAI } from "@langchain/openai";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
// NOTE(review): these are live-network integration tests — they hit
// https://langchain.com and the OpenAI API and will fail offline.
test("AsyncCaller.call passes on arguments and returns return value", async () => {
  const caller = new AsyncCaller({});
  const callable = () => fetch("https://langchain.com/");
  // Compare a direct call with a wrapped call: both should succeed identically.
  const resultDirect = await callable();
  const resultWrapped = await caller.call(callable);
  expect(resultDirect.status).toEqual(200);
  expect(resultWrapped.status).toEqual(200);
});

test("AsyncCaller doesn't retry on axios error 401", async () => {
  // An invalid key causes an auth failure; the 5s timeout would be exceeded
  // if the caller retried instead of failing fast.
  const llm = new OpenAI({ openAIApiKey: "invalid" });
  await expect(() => llm.call("test")).rejects.toThrowError();
}, 5000);

test("AsyncCaller doesn't retry on timeout", async () => {
  const caller = new AsyncCaller({});
  const callable = () =>
    fetch("https://langchain.com/?sleep=1000", {
      signal: AbortSignal.timeout(10),
    });
  await expect(() => caller.call(callable)).rejects.toThrowError(
    "TimeoutError: The operation was aborted due to timeout"
  );
}, 5000);

test("AsyncCaller doesn't retry on signal abort", async () => {
  const controller = new AbortController();
  const caller = new AsyncCaller({});
  const callable = () => {
    const ret = fetch("https://langchain.com/?sleep=1000", {
      signal: controller.signal,
    });
    // Abort immediately after starting the request.
    controller.abort();
    return ret;
  };
  await expect(() => caller.call(callable)).rejects.toThrowError(
    "AbortError: This operation was aborted"
  );
}, 5000);
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/tests/azure.test.ts | import { getEndpoint, OpenAIEndpointConfig } from "@langchain/openai";
describe("getEndpoint", () => {
  // azureOpenAIBasePath takes precedence: the deployment name is appended to it.
  it("should return the correct endpoint with azureOpenAIBasePath and azureOpenAIApiDeploymentName", () => {
    const config: OpenAIEndpointConfig = {
      azureOpenAIApiKey: "API-KEY",
      azureOpenAIApiDeploymentName: "deploymentName",
      azureOpenAIBasePath:
        "https://westeurope.api.cognitive.microsoft.com/openai/deployments",
    };
    const result = getEndpoint(config);
    expect(result).toBe(
      "https://westeurope.api.cognitive.microsoft.com/openai/deployments/deploymentName"
    );
  });

  // Without a base path, the URL is assembled from the instance name.
  it("should return the correct endpoint with azureOpenAIApiKey, azureOpenAIApiInstanceName, and azureOpenAIApiDeploymentName", () => {
    const config: OpenAIEndpointConfig = {
      azureOpenAIApiKey: "key",
      azureOpenAIApiInstanceName: "instanceName",
      azureOpenAIApiDeploymentName: "deploymentName",
    };
    const result = getEndpoint(config);
    expect(result).toBe(
      "https://instanceName.openai.azure.com/openai/deployments/deploymentName"
    );
  });

  it("should throw error when azureOpenAIApiInstanceName is missing with azureOpenAIApiKey", () => {
    const config: OpenAIEndpointConfig = {
      azureOpenAIApiKey: "key",
      azureOpenAIApiDeploymentName: "deploymentName",
    };
    expect(() => getEndpoint(config)).toThrowError(
      "azureOpenAIApiInstanceName is required when using azureOpenAIApiKey"
    );
  });

  it("should throw error when azureOpenAIApiDeploymentName is missing with azureOpenAIApiKey", () => {
    const config: OpenAIEndpointConfig = {
      azureOpenAIApiKey: "key",
      azureOpenAIApiInstanceName: "instanceName",
    };
    expect(() => getEndpoint(config)).toThrowError(
      "azureOpenAIApiDeploymentName is a required parameter when using azureOpenAIApiKey"
    );
  });

  // When no Azure config is present, the plain baseURL passes through untouched.
  it("should return the custom basePath when neither azureOpenAIBasePath nor azureOpenAIApiKey is provided", () => {
    const config: OpenAIEndpointConfig = {
      baseURL: "https://basepath.com",
    };
    const result = getEndpoint(config);
    expect(result).toBe("https://basepath.com");
  });
});
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/tests/set.test.ts | import { test, expect } from "@jest/globals";
import { difference, intersection, union } from "../set.js";
// NOTE: these tests use inline snapshots. The snapshot literals below must
// match jest's serializer output exactly — regenerate with `jest -u` rather
// than editing them by hand.
test("difference", () => {
  const set1 = new Set(["a", "b"]);
  const set2 = new Set(["b", "c"]);
  // Elements in set1 that are not in set2.
  const resultSet = difference(set1, set2);
  expect(resultSet).toMatchInlineSnapshot(`
Set {
  "a",
}
`);
});

test("intersection", () => {
  const set1 = new Set(["a", "b", "c", "d"]);
  const set2 = new Set(["b", "c", "e"]);
  // Elements present in both sets.
  const resultSet = intersection(set1, set2);
  expect(resultSet).toMatchInlineSnapshot(`
Set {
  "b",
  "c",
}
`);
});

test("union", () => {
  const set1 = new Set(["a", "b"]);
  const set2 = new Set(["c", "d"]);
  // All elements from both sets, deduplicated.
  const resultSet = union(set1, set2);
  expect(resultSet).toMatchInlineSnapshot(`
Set {
  "a",
  "b",
  "c",
  "d",
}
`);
});
|
0 | lc_public_repos/langchainjs/langchain/src/util/testing | lc_public_repos/langchainjs/langchain/src/util/testing/llms/fake.ts | import { GenerationChunk } from "@langchain/core/outputs";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { LLM, BaseLLMParams } from "@langchain/core/language_models/llms";
/**
* Interface for the input parameters specific to the Fake List model.
*/
export interface FakeListInput extends BaseLLMParams {
/** Responses to return */
responses: string[];
/** Time to sleep in milliseconds between responses */
sleep?: number;
}
/**
 * A fake LLM that returns a predefined list of responses. It can be used
 * for testing purposes. Responses are returned in order and wrap back to
 * the first one when the list is exhausted.
 */
export class FakeListLLM extends LLM {
  static lc_name() {
    return "FakeListLLM";
  }

  /** Responses returned in order; wraps around once exhausted. */
  responses: string[];

  /** Index of the next response to return. */
  i = 0;

  /** Optional delay in milliseconds applied before each response/chunk. */
  sleep?: number;

  constructor({ responses, sleep }: FakeListInput) {
    super({});
    this.responses = responses;
    this.sleep = sleep;
  }

  _llmType() {
    return "fake-list";
  }

  /**
   * Returns the current canned response and advances the internal cursor.
   * The prompt, options, and run manager are ignored.
   */
  async _call(
    _prompt: string,
    _options: this["ParsedCallOptions"],
    _runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    const response = this._currentResponse();
    this._incrementResponse();
    await this._sleepIfRequested();
    return response;
  }

  /** The response the cursor currently points at. */
  _currentResponse() {
    return this.responses[this.i];
  }

  /** Advances the cursor, wrapping back to 0 after the last response. */
  _incrementResponse() {
    if (this.i < this.responses.length - 1) {
      this.i += 1;
    } else {
      this.i = 0;
    }
  }

  /**
   * Streams the current canned response one character per chunk, sleeping
   * between chunks when `sleep` is set.
   */
  async *_streamResponseChunks(
    _input: string,
    _options: this["ParsedCallOptions"],
    _runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const response = this._currentResponse();
    this._incrementResponse();
    // `for...of`, not `for await...of`: `response` is a plain string, i.e. a
    // synchronous iterable of characters.
    for (const text of response) {
      await this._sleepIfRequested();
      yield this._createResponseChunk(text);
    }
  }

  /** Awaits `_sleep` only when a sleep interval was configured. */
  async _sleepIfRequested() {
    if (this.sleep !== undefined) {
      await this._sleep();
    }
  }

  /** Resolves after `this.sleep` milliseconds. */
  async _sleep() {
    return new Promise<void>((resolve) => {
      setTimeout(() => resolve(), this.sleep);
    });
  }

  /** Wraps one character of output in a GenerationChunk with empty info. */
  _createResponseChunk(text: string): GenerationChunk {
    return new GenerationChunk({
      text,
      generationInfo: {},
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/util/testing | lc_public_repos/langchainjs/langchain/src/util/testing/tools/tavily_search.ts | import { CallbackManagerForToolRun } from "@langchain/core/callbacks/manager";
import { Tool, type ToolParams } from "@langchain/core/tools";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/**
 * Options for the TavilySearchResults tool.
 */
export type TavilySearchAPIRetrieverFields = ToolParams & {
  /** Maximum number of search results to return. */
  maxResults?: number;
  /** Extra request-body fields forwarded verbatim to the Tavily API. */
  kwargs?: Record<string, unknown>;
  /** Tavily API key; falls back to the TAVILY_API_KEY environment variable. */
  apiKey?: string;
};
/**
 * Tool for the Tavily search API. Sends the input query to Tavily's
 * `/search` endpoint and returns the JSON-serialized result list.
 */
export class TavilySearchResults extends Tool {
  static lc_name(): string {
    return "TavilySearchResults";
  }

  description =
    "A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events. Input should be a search query.";

  name = "tavily_search_results_json";

  /** Maximum number of results requested from the API. */
  protected maxResults = 5;

  /** API key resolved from options or the TAVILY_API_KEY env var. */
  protected apiKey?: string;

  /** Extra request-body fields merged into every search request. */
  protected kwargs: Record<string, unknown> = {};

  constructor(fields?: TavilySearchAPIRetrieverFields) {
    super(fields);
    if (fields?.maxResults != null) {
      this.maxResults = fields.maxResults;
    }
    if (fields?.kwargs != null) {
      this.kwargs = fields.kwargs;
    }
    this.apiKey = fields?.apiKey ?? getEnvironmentVariable("TAVILY_API_KEY");
    if (this.apiKey === undefined) {
      throw new Error(
        `No Tavily API key found. Either set an environment variable named "TAVILY_API_KEY" or pass an API key as "apiKey".`
      );
    }
  }

  protected async _call(
    input: string,
    _runManager?: CallbackManagerForToolRun
  ): Promise<string> {
    // Caller-supplied kwargs are spread last so they can override defaults.
    const payload = {
      query: input,
      max_results: this.maxResults,
      api_key: this.apiKey,
      ...this.kwargs,
    };
    const response = await fetch("https://api.tavily.com/search", {
      method: "POST",
      headers: {
        "content-type": "application/json",
      },
      body: JSON.stringify(payload),
    });
    const json = await response.json();
    if (!response.ok) {
      throw new Error(
        `Request failed with status code ${response.status}: ${json.error}`
      );
    }
    if (!Array.isArray(json.results)) {
      throw new Error(`Could not parse Tavily results. Please try again.`);
    }
    return JSON.stringify(json.results);
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/util/testing | lc_public_repos/langchainjs/langchain/src/util/testing/tools/serpapi.ts | import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Tool } from "@langchain/core/tools";
/**
* This does not use the `serpapi` package because it appears to cause issues
* when used in `jest` tests. Part of the issue seems to be that the `serpapi`
* package imports a wasm module to use instead of native `fetch`, which we
* don't want anyway.
*
* NOTE: you must provide location, gl and hl or your region and language will
* may not match your location, and will not be deterministic.
*/
// Copied over from `serpapi` package
interface BaseParameters {
/**
* Parameter defines the device to use to get the results. It can be set to
* `desktop` (default) to use a regular browser, `tablet` to use a tablet browser
* (currently using iPads), or `mobile` to use a mobile browser (currently
* using iPhones).
*/
device?: "desktop" | "tablet" | "mobile";
/**
* Parameter will force SerpApi to fetch the Google results even if a cached
* version is already present. A cache is served only if the query and all
* parameters are exactly the same. Cache expires after 1h. Cached searches
* are free, and are not counted towards your searches per month. It can be set
* to `false` (default) to allow results from the cache, or `true` to disallow
* results from the cache. `no_cache` and `async` parameters should not be used together.
*/
no_cache?: boolean;
/**
* Specify the client-side timeout of the request. In milliseconds.
*/
timeout?: number;
}
export interface SerpAPIParameters extends BaseParameters {
/**
* Search Query
* Parameter defines the query you want to search. You can use anything that you
* would use in a regular Google search. e.g. `inurl:`, `site:`, `intitle:`. We
* also support advanced search query parameters such as as_dt and as_eq. See the
* [full list](https://serpapi.com/advanced-google-query-parameters) of supported
* advanced search query parameters.
*/
q: string;
/**
* Location
* Parameter defines from where you want the search to originate. If several
* locations match the location requested, we'll pick the most popular one. Head to
* [/locations.json API](https://serpapi.com/locations-api) if you need more
* precise control. location and uule parameters can't be used together. Avoid
* utilizing location when setting the location outside the U.S. when using Google
* Shopping and/or Google Product API.
*/
location?: string;
/**
* Encoded Location
* Parameter is the Google encoded location you want to use for the search. uule
* and location parameters can't be used together.
*/
uule?: string;
/**
* Google Place ID
* Parameter defines the id (`CID`) of the Google My Business listing you want to
* scrape. Also known as Google Place ID.
*/
ludocid?: string;
/**
* Additional Google Place ID
* Parameter that you might have to use to force the knowledge graph map view to
* show up. You can find the lsig ID by using our [Local Pack
* API](https://serpapi.com/local-pack) or [Local Places Results
* API](https://serpapi.com/local-results).
* lsig ID is also available via a redirect Google uses within [Google My
* Business](https://www.google.com/business/).
*/
lsig?: string;
/**
* Google Knowledge Graph ID
* Parameter defines the id (`KGMID`) of the Google Knowledge Graph listing you
* want to scrape. Also known as Google Knowledge Graph ID. Searches with kgmid
* parameter will return results for the originally encrypted search parameters.
* For some searches, kgmid may override all other parameters except start, and num
* parameters.
*/
kgmid?: string;
/**
* Google Cached Search Parameters ID
* Parameter defines the cached search parameters of the Google Search you want to
* scrape. Searches with si parameter will return results for the originally
* encrypted search parameters. For some searches, si may override all other
* parameters except start, and num parameters. si can be used to scrape Google
* Knowledge Graph Tabs.
*/
si?: string;
/**
* Domain
* Parameter defines the Google domain to use. It defaults to `google.com`. Head to
* the [Google domains page](https://serpapi.com/google-domains) for a full list of
* supported Google domains.
*/
google_domain?: string;
/**
* Country
* Parameter defines the country to use for the Google search. It's a two-letter
* country code. (e.g., `us` for the United States, `uk` for United Kingdom, or
* `fr` for France). Head to the [Google countries
* page](https://serpapi.com/google-countries) for a full list of supported Google
* countries.
*/
gl?: string;
/**
* Language
* Parameter defines the language to use for the Google search. It's a two-letter
* language code. (e.g., `en` for English, `es` for Spanish, or `fr` for French).
* Head to the [Google languages page](https://serpapi.com/google-languages) for a
* full list of supported Google languages.
*/
hl?: string;
/**
* Set Multiple Languages
* Parameter defines one or multiple languages to limit the search to. It uses
* `lang_{two-letter language code}` to specify languages and `|` as a delimiter.
* (e.g., `lang_fr|lang_de` will only search French and German pages). Head to the
* [Google lr languages page](https://serpapi.com/google-lr-languages) for a full
* list of supported languages.
*/
lr?: string;
/**
* as_dt
* Parameter controls whether to include or exclude results from the site named in
* the as_sitesearch parameter.
*/
as_dt?: string;
/**
* as_epq
* Parameter identifies a phrase that all documents in the search results must
* contain. You can also use the [phrase
* search](https://developers.google.com/custom-search/docs/xml_results#PhraseSearchqt)
* query term to search for a phrase.
*/
as_epq?: string;
/**
* as_eq
* Parameter identifies a word or phrase that should not appear in any documents in
* the search results. You can also use the [exclude
* query](https://developers.google.com/custom-search/docs/xml_results#Excludeqt)
* term to ensure that a particular word or phrase will not appear in the documents
* in a set of search results.
*/
as_eq?: string;
/**
* as_lq
* Parameter specifies that all search results should contain a link to a
* particular URL. You can also use the
* [link:](https://developers.google.com/custom-search/docs/xml_results#BackLinksqt)
* query term for this type of query.
*/
as_lq?: string;
/**
* as_nlo
* Parameter specifies the starting value for a search range. Use as_nlo and as_nhi
* to append an inclusive search range.
*/
as_nlo?: string;
/**
* as_nhi
* Parameter specifies the ending value for a search range. Use as_nlo and as_nhi
* to append an inclusive search range.
*/
as_nhi?: string;
/**
* as_oq
* Parameter provides additional search terms to check for in a document, where
* each document in the search results must contain at least one of the additional
* search terms. You can also use the [Boolean
* OR](https://developers.google.com/custom-search/docs/xml_results#BooleanOrqt)
* query term for this type of query.
*/
as_oq?: string;
/**
* as_q
* Parameter provides search terms to check for in a document. This parameter is
* also commonly used to allow users to specify additional terms to search for
* within a set of search results.
*/
as_q?: string;
/**
* as_qdr
* Parameter requests search results from a specified time period (quick date
* range). The following values are supported:
* `d[number]`: requests results from the specified number of past days. Example
* for the past 10 days: `as_qdr=d10`
* `w[number]`: requests results from the specified number of past weeks.
* `m[number]`: requests results from the specified number of past months.
* `y[number]`: requests results from the specified number of past years. Example
* for the past year: `as_qdr=y`
*/
as_qdr?: string;
/**
* as_rq
* Parameter specifies that all search results should be pages that are related to
* the specified URL. The parameter value should be a URL. You can also use the
* [related:](https://developers.google.com/custom-search/docs/xml_results#RelatedLinksqt)
* query term for this type of query.
*/
as_rq?: string;
/**
* as_sitesearch
* Parameter allows you to specify that all search results should be pages from a
* given site. By setting the as_dt parameter, you can also use it to exclude pages
* from a given site from your search resutls.
*/
as_sitesearch?: string;
/**
* Advanced Search Parameters
* (to be searched) parameter defines advanced search parameters that aren't
* possible in the regular query field. (e.g., advanced search for patents, dates,
* news, videos, images, apps, or text contents).
*/
tbs?: string;
/**
* Adult Content Filtering
* Parameter defines the level of filtering for adult content. It can be set to
* `active`, or `off` (default).
*/
safe?: string;
/**
* Exclude Auto-corrected Results
* Parameter defines the exclusion of results from an auto-corrected query that is
* spelled wrong. It can be set to `1` to exclude these results, or `0` to include
* them (default).
*/
nfpr?: string;
/**
* Results Filtering
* Parameter defines if the filters for 'Similar Results' and 'Omitted Results' are
* on or off. It can be set to `1` (default) to enable these filters, or `0` to
* disable these filters.
*/
filter?: string;
/**
* Search Type
* (to be matched) parameter defines the type of search you want to do.
* It can be set to:
* `(no tbm parameter)`: regular Google Search,
* `isch`: [Google Images API](https://serpapi.com/images-results),
* `lcl` - [Google Local API](https://serpapi.com/local-results)
* `vid`: [Google Videos API](https://serpapi.com/videos-results),
* `nws`: [Google News API](https://serpapi.com/news-results),
* `shop`: [Google Shopping API](https://serpapi.com/shopping-results),
* or any other Google service.
*/
tbm?: string;
/**
* Result Offset
* Parameter defines the result offset. It skips the given number of results. It's
* used for pagination. (e.g., `0` (default) is the first page of results, `10` is
* the 2nd page of results, `20` is the 3rd page of results, etc.).
* Google Local Results only accepts multiples of `20`(e.g. `20` for the second
* page results, `40` for the third page results, etc.) as the start value.
*/
start?: number;
/**
* Number of Results
* Parameter defines the maximum number of results to return. (e.g., `10` (default)
* returns 10 results, `40` returns 40 results, and `100` returns 100 results).
*/
num?: string;
/**
* Page Number (images)
* Parameter defines the page number for [Google
* Images](https://serpapi.com/images-results). There are 100 images per page. This
* parameter is equivalent to start (offset) = ijn * 100. This parameter works only
* for [Google Images](https://serpapi.com/images-results) (set tbm to `isch`).
*/
ijn?: string;
}
/** Flat map of URL query-string parameter names to primitive values. */
type UrlParameters = Record<
  string,
  string | number | boolean | undefined | null
>;
/**
 * Wrapper around SerpAPI.
 *
 * To use, you should have the `serpapi` package installed and the SERPAPI_API_KEY environment variable set.
 */
export class SerpAPI extends Tool {
  static lc_name() {
    return "SerpAPI";
  }

  toJSON() {
    return this.toJSONNotImplemented();
  }

  /** SerpAPI API key sent with every request. */
  protected key: string;

  /** Default search parameters merged into every query. */
  protected params: Partial<SerpAPIParameters>;

  /** Base URL of the SerpAPI service. */
  protected baseUrl: string;

  constructor(
    apiKey: string | undefined = getEnvironmentVariable("SERPAPI_API_KEY"),
    params: Partial<SerpAPIParameters> = {},
    baseUrl = "https://serpapi.com"
  ) {
    // NOTE(review): `super(...arguments)` forwards (apiKey, params, baseUrl)
    // positionally to the Tool constructor, which takes a ToolParams object —
    // presumably the extras are ignored, but this should be confirmed.
    super(...arguments);
    if (!apiKey) {
      throw new Error(
        "SerpAPI API key not set. You can set it as SERPAPI_API_KEY in your .env file, or pass it to SerpAPI."
      );
    }
    this.key = apiKey;
    this.params = params;
    this.baseUrl = baseUrl;
  }

  name = "search";

  /**
   * Builds a URL for the SerpAPI request.
   * @param path The path for the request.
   * @param parameters The parameters for the request.
   * @param baseUrl The base URL for the request.
   * @returns A string representing the built URL.
   */
  protected buildUrl<P extends UrlParameters>(
    path: string,
    parameters: P,
    baseUrl: string
  ): string {
    // Drop both undefined AND null values: `UrlParameters` admits null, and
    // the previous `value !== undefined` check serialized nulls as the
    // literal string "null" in the query string.
    const definedParams: [string, string][] = Object.entries(parameters)
      .filter(([_, value]) => value != null)
      .map(([key, value]) => [key, `${value}`]);
    const searchParams = new URLSearchParams(definedParams);
    return `${baseUrl}/${path}?${searchParams}`;
  }

  /** @ignore */
  async _call(input: string) {
    const { timeout, ...params } = this.params;
    const resp = await fetch(
      this.buildUrl(
        "search",
        {
          ...params,
          api_key: this.key,
          q: input,
        },
        this.baseUrl
      ),
      {
        // `timeout` is client-side only; it is not forwarded to SerpAPI.
        signal: timeout ? AbortSignal.timeout(timeout) : undefined,
      }
    );

    const res = await resp.json();

    if (res.error) {
      throw new Error(`Got error from serpAPI: ${res.error}`);
    }

    // Prefer the answer box when present; fall through a series of
    // increasingly generic result sections otherwise.
    const answer_box = res.answer_box_list
      ? res.answer_box_list[0]
      : res.answer_box;
    if (answer_box) {
      if (answer_box.result) {
        return answer_box.result;
      } else if (answer_box.answer) {
        return answer_box.answer;
      } else if (answer_box.snippet) {
        return answer_box.snippet;
      } else if (answer_box.snippet_highlighted_words) {
        return answer_box.snippet_highlighted_words.toString();
      } else {
        // No direct answer field: collect the scalar, non-URL fields of the
        // answer box into a JSON object.
        const answer: { [key: string]: string } = {};
        Object.keys(answer_box)
          .filter(
            (k) =>
              !Array.isArray(answer_box[k]) &&
              typeof answer_box[k] !== "object" &&
              !(
                typeof answer_box[k] === "string" &&
                answer_box[k].startsWith("http")
              )
          )
          .forEach((k) => {
            answer[k] = answer_box[k];
          });
        return JSON.stringify(answer);
      }
    }

    if (res.events_results) {
      return JSON.stringify(res.events_results);
    }

    if (res.sports_results) {
      return JSON.stringify(res.sports_results);
    }

    if (res.top_stories) {
      return JSON.stringify(res.top_stories);
    }

    if (res.news_results) {
      return JSON.stringify(res.news_results);
    }

    if (res.jobs_results?.jobs) {
      return JSON.stringify(res.jobs_results.jobs);
    }

    if (res.questions_and_answers) {
      return JSON.stringify(res.questions_and_answers);
    }

    if (res.popular_destinations?.destinations) {
      return JSON.stringify(res.popular_destinations.destinations);
    }

    if (res.top_sights?.sights) {
      // Keep only a compact projection of the first 8 sights.
      const sights: Array<{ [key: string]: string }> = res.top_sights.sights
        .map((s: { [key: string]: string }) => ({
          title: s.title,
          description: s.description,
          price: s.price,
        }))
        .slice(0, 8);
      return JSON.stringify(sights);
    }

    if (res.shopping_results && res.shopping_results[0]?.title) {
      return JSON.stringify(res.shopping_results.slice(0, 3));
    }

    if (res.images_results && res.images_results[0]?.thumbnail) {
      return res.images_results
        .map((ir: { thumbnail: string }) => ir.thumbnail)
        .slice(0, 10)
        .toString();
    }

    // Last resort: assemble snippets from the knowledge graph and the first
    // organic result.
    const snippets = [];
    if (res.knowledge_graph) {
      if (res.knowledge_graph.description) {
        snippets.push(res.knowledge_graph.description);
      }

      const title = res.knowledge_graph.title || "";
      Object.keys(res.knowledge_graph)
        .filter(
          (k) =>
            typeof res.knowledge_graph[k] === "string" &&
            k !== "title" &&
            k !== "description" &&
            !k.endsWith("_stick") &&
            !k.endsWith("_link") &&
            !k.startsWith("http")
        )
        .forEach((k) =>
          snippets.push(`${title} ${k}: ${res.knowledge_graph[k]}`)
        );
    }

    const first_organic_result = res.organic_results?.[0];
    if (first_organic_result) {
      if (first_organic_result.snippet) {
        snippets.push(first_organic_result.snippet);
      } else if (first_organic_result.snippet_highlighted_words) {
        snippets.push(first_organic_result.snippet_highlighted_words);
      } else if (first_organic_result.rich_snippet) {
        snippets.push(first_organic_result.rich_snippet);
      } else if (first_organic_result.rich_snippet_table) {
        snippets.push(first_organic_result.rich_snippet_table);
      } else if (first_organic_result.link) {
        snippets.push(first_organic_result.link);
      }
    }

    if (res.buying_guide) {
      snippets.push(res.buying_guide);
    }

    if (res.local_results?.places) {
      snippets.push(res.local_results.places);
    }

    if (snippets.length > 0) {
      return JSON.stringify(snippets);
    } else {
      return "No good search result found";
    }
  }

  description =
    "a search engine. useful for when you need to answer questions about current events. input should be a search query.";
}
|
0 | lc_public_repos/langchainjs/langchain/src/util/testing | lc_public_repos/langchainjs/langchain/src/util/testing/tools/calculator.ts | import { Tool } from "@langchain/core/tools";
/**
* The Calculator class is a tool used to evaluate mathematical
* expressions. It extends the base Tool class.
* @example
* ```typescript
* const calculator = new Calculator();
* const sum = calculator.add(99, 99);
* console.log("The sum of 99 and 99 is:", sum);
* ```
*/
export class Calculator extends Tool {
static lc_name() {
return "Calculator";
}
get lc_namespace() {
return [...super.lc_namespace, "calculator"];
}
name = "calculator";
/** @ignore */
async _call(_input: string) {
try {
return `42`;
} catch (error) {
return "I don't know how to do that.";
}
}
description = `Useful for getting the result of a math expression. The input to this tool should be a valid mathematical expression that could be executed by a simple calculator.`;
}
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/ml-distance/LICENSE | The MIT License (MIT)
Copyright (c) 2014 ml.js
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. |
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/ml-distance/similarities.ts | /**
* Returns the average of cosine distances between vectors a and b
* @param a - first vector
* @param b - second vector
*
*/
export function cosine(a: number[], b: number[]): number {
let p = 0;
let p2 = 0;
let q2 = 0;
for (let i = 0; i < a.length; i++) {
p += a[i] * b[i];
p2 += a[i] * a[i];
q2 += b[i] * b[i];
}
return p / (Math.sqrt(p2) * Math.sqrt(q2));
}
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/ml-distance/distances.ts | /**
*Returns the Inner Product similarity between vectors a and b
* @link [Inner Product Similarity algorithm](https://www.naun.org/main/NAUN/ijmmas/mmmas-49.pdf)
* @param a - first vector
* @param b - second vector
*
*/
export function innerProduct(a: number[], b: number[]): number {
let ans = 0;
for (let i = 0; i < a.length; i++) {
ans += a[i] * b[i];
}
return ans;
}
/**
 * Returns the Chebyshev distance between vectors a and b: the largest
 * absolute coordinate-wise difference.
 * @link [Chebyshev algorithm](https://en.wikipedia.org/wiki/Chebyshev_distance)
 * @param a - first vector
 * @param b - second vector
 */
export function chebyshev(a: number[], b: number[]): number {
  let largest = 0;
  for (const [idx, value] of a.entries()) {
    const gap = Math.abs(value - b[idx]);
    // `>` comparison (rather than Math.max) matches the original's behavior
    // for NaN gaps: the running maximum is kept.
    if (gap > largest) {
      largest = gap;
    }
  }
  return largest;
}
/**
 * Returns the Manhattan distance between vectors a and b: the sum of
 * absolute coordinate-wise differences.
 * @link [Manhattan algorithm](https://www.naun.org/main/NAUN/ijmmas/mmmas-49.pdf)
 * @param a - first vector
 * @param b - second vector
 */
export function manhattan(a: number[], b: number[]): number {
  // Left fold preserves the original left-to-right accumulation order.
  return a.reduce((total, ai, idx) => total + Math.abs(ai - b[idx]), 0);
}
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/ml-distance-euclidean/LICENSE | The MIT License (MIT)
Copyright (c) 2015 ml.js
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
0 | lc_public_repos/langchainjs/langchain/src/util | lc_public_repos/langchainjs/langchain/src/util/ml-distance-euclidean/euclidean.ts | export function squaredEuclidean(p: number[], q: number[]) {
let d = 0;
for (let i = 0; i < p.length; i++) {
d += (p[i] - q[i]) * (p[i] - q[i]);
}
return d;
}
export function euclidean(p: number[], q: number[]) {
return Math.sqrt(squaredEuclidean(p, q));
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/vectorstores/memory.ts | import {
MaxMarginalRelevanceSearchOptions,
VectorStore,
} from "@langchain/core/vectorstores";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { Document, DocumentInterface } from "@langchain/core/documents";
import { cosine } from "../util/ml-distance/similarities.js";
import { maximalMarginalRelevance } from "../util/math.js";
/**
* Interface representing a vector in memory. It includes the content
* (text), the corresponding embedding (vector), and any associated
* metadata.
*/
interface MemoryVector {
  /** Original text of the stored document. */
  content: string;
  /** Embedding vector computed for `content`. */
  embedding: number[];
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  metadata: Record<string, any>;
  /** Optional identifier carried over from the source `Document`. */
  id?: string;
}
/**
* Interface for the arguments that can be passed to the
* `MemoryVectorStore` constructor. It includes an optional `similarity`
* function.
*/
export interface MemoryVectorStoreArgs {
  /**
   * Pairwise similarity function `(queryVector, storedVector) => score`,
   * where higher scores mean more similar. Defaults to cosine similarity.
   */
  similarity?: typeof cosine;
}
/**
* In-memory, ephemeral vector store.
*
* Setup:
* Install `langchain`:
*
* ```bash
* npm install langchain
* ```
*
* ## [Constructor args](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html#constructor)
*
* <details open>
* <summary><strong>Instantiate</strong></summary>
*
* ```typescript
* import { MemoryVectorStore } from 'langchain/vectorstores/memory';
* // Or other embeddings
* import { OpenAIEmbeddings } from '@langchain/openai';
*
* const embeddings = new OpenAIEmbeddings({
* model: "text-embedding-3-small",
* });
*
* const vectorStore = new MemoryVectorStore(embeddings);
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Add documents</strong></summary>
*
* ```typescript
* import type { Document } from '@langchain/core/documents';
*
* const document1 = { pageContent: "foo", metadata: { baz: "bar" } };
* const document2 = { pageContent: "thud", metadata: { bar: "baz" } };
* const document3 = { pageContent: "i will be deleted :(", metadata: {} };
*
* const documents: Document[] = [document1, document2, document3];
*
* await vectorStore.addDocuments(documents);
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Similarity search</strong></summary>
*
* ```typescript
* const results = await vectorStore.similaritySearch("thud", 1);
* for (const doc of results) {
* console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);
* }
* // Output: * thud [{"baz":"bar"}]
* ```
* </details>
*
* <br />
*
*
* <details>
* <summary><strong>Similarity search with filter</strong></summary>
*
* ```typescript
* const resultsWithFilter = await vectorStore.similaritySearch("thud", 1, { baz: "bar" });
*
* for (const doc of resultsWithFilter) {
* console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);
* }
* // Output: * foo [{"baz":"bar"}]
* ```
* </details>
*
* <br />
*
*
* <details>
* <summary><strong>Similarity search with score</strong></summary>
*
* ```typescript
* const resultsWithScore = await vectorStore.similaritySearchWithScore("qux", 1);
* for (const [doc, score] of resultsWithScore) {
* console.log(`* [SIM=${score.toFixed(6)}] ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);
* }
* // Output: * [SIM=0.000000] qux [{"bar":"baz","baz":"bar"}]
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>As a retriever</strong></summary>
*
* ```typescript
* const retriever = vectorStore.asRetriever({
* searchType: "mmr", // Leave blank for standard similarity search
* k: 1,
* });
* const resultAsRetriever = await retriever.invoke("thud");
* console.log(resultAsRetriever);
*
* // Output: [Document({ metadata: { "baz":"bar" }, pageContent: "thud" })]
* ```
* </details>
*
* <br />
*/
export class MemoryVectorStore extends VectorStore {
  declare FilterType: (doc: Document) => boolean;

  memoryVectors: MemoryVector[] = [];

  /** Similarity function used to score stored vectors against a query. */
  similarity: typeof cosine;

  _vectorstoreType(): string {
    return "memory";
  }

  constructor(
    embeddings: EmbeddingsInterface,
    { similarity, ...rest }: MemoryVectorStoreArgs = {}
  ) {
    super(embeddings, rest);

    this.similarity = similarity ?? cosine;
  }

  /**
   * Method to add documents to the memory vector store. It extracts the
   * text from each document, generates embeddings for them, and adds the
   * resulting vectors to the store.
   * @param documents Array of `Document` instances to be added to the store.
   * @returns Promise that resolves when all documents have been added.
   */
  async addDocuments(documents: Document[]): Promise<void> {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents
    );
  }

  /**
   * Method to add vectors to the memory vector store. It creates
   * `MemoryVector` instances for each vector and document pair and adds
   * them to the store.
   * @param vectors Array of vectors to be added to the store.
   * @param documents Array of `Document` instances corresponding to the vectors.
   * @returns Promise that resolves when all vectors have been added.
   */
  async addVectors(vectors: number[][], documents: Document[]): Promise<void> {
    const memoryVectors = vectors.map((embedding, idx) => ({
      content: documents[idx].pageContent,
      embedding,
      metadata: documents[idx].metadata,
      id: documents[idx].id,
    }));

    this.memoryVectors = this.memoryVectors.concat(memoryVectors);
  }

  /**
   * Scores every stored vector against `query` (after applying the
   * optional document filter) and returns the top `k` entries, sorted by
   * descending similarity.
   */
  protected async _queryVectors(
    query: number[],
    k: number,
    filter?: this["FilterType"]
  ) {
    const filterFunction = (memoryVector: MemoryVector) => {
      if (!filter) {
        return true;
      }

      const doc = new Document({
        metadata: memoryVector.metadata,
        pageContent: memoryVector.content,
        id: memoryVector.id,
      });
      return filter(doc);
    };
    const filteredMemoryVectors = this.memoryVectors.filter(filterFunction);
    return filteredMemoryVectors
      .map((vector, index) => ({
        similarity: this.similarity(query, vector.embedding),
        index,
        metadata: vector.metadata,
        content: vector.content,
        embedding: vector.embedding,
        id: vector.id,
      }))
      // FIX: the previous comparator `(a, b) => (a.similarity > b.similarity
      // ? -1 : 0)` was invalid (it returned 0 for the "b greater" case
      // instead of a positive number), which violates the sort comparator
      // contract and can yield engine-dependent orderings. Use a proper
      // numeric descending comparator instead.
      .sort((a, b) => b.similarity - a.similarity)
      .slice(0, k);
  }

  /**
   * Method to perform a similarity search in the memory vector store. It
   * calculates the similarity between the query vector and each vector in
   * the store, sorts the results by similarity, and returns the top `k`
   * results along with their scores.
   * @param query Query vector to compare against the vectors in the store.
   * @param k Number of top results to return.
   * @param filter Optional filter function to apply to the vectors before performing the search.
   * @returns Promise that resolves with an array of tuples, each containing a `Document` and its similarity score.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: this["FilterType"]
  ): Promise<[Document, number][]> {
    const searches = await this._queryVectors(query, k, filter);

    const result: [Document, number][] = searches.map((search) => [
      new Document({
        metadata: search.metadata,
        pageContent: search.content,
        id: search.id,
      }),
      search.similarity,
    ]);

    return result;
  }

  /**
   * Return documents selected using the maximal marginal relevance (MMR)
   * algorithm, which balances similarity to the query against diversity
   * among the selected documents.
   * @param query Text query to search for.
   * @param options MMR options (`k`, `fetchK`, `lambda`, `filter`).
   * @returns Promise that resolves with the selected documents.
   */
  async maxMarginalRelevanceSearch(
    query: string,
    options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>
  ): Promise<DocumentInterface[]> {
    const queryEmbedding = await this.embeddings.embedQuery(query);

    // Fetch a larger candidate pool (fetchK) before re-ranking with MMR.
    const searches = await this._queryVectors(
      queryEmbedding,
      options.fetchK ?? 20,
      options.filter
    );

    const embeddingList = searches.map((searchResp) => searchResp.embedding);

    const mmrIndexes = maximalMarginalRelevance(
      queryEmbedding,
      embeddingList,
      options.lambda,
      options.k
    );

    return mmrIndexes.map(
      (idx) =>
        new Document({
          metadata: searches[idx].metadata,
          pageContent: searches[idx].content,
          id: searches[idx].id,
        })
    );
  }

  /**
   * Static method to create a `MemoryVectorStore` instance from an array of
   * texts. It creates a `Document` for each text and metadata pair, and
   * adds them to the store.
   * @param texts Array of texts to be added to the store.
   * @param metadatas Array or single object of metadata corresponding to the texts.
   * @param embeddings `Embeddings` instance used to generate embeddings for the texts.
   * @param dbConfig Optional `MemoryVectorStoreArgs` to configure the `MemoryVectorStore` instance.
   * @returns Promise that resolves with a new `MemoryVectorStore` instance.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig?: MemoryVectorStoreArgs
  ): Promise<MemoryVectorStore> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // A single metadata object is shared across all texts.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return MemoryVectorStore.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * Static method to create a `MemoryVectorStore` instance from an array of
   * `Document` instances. It adds the documents to the store.
   * @param docs Array of `Document` instances to be added to the store.
   * @param embeddings `Embeddings` instance used to generate embeddings for the documents.
   * @param dbConfig Optional `MemoryVectorStoreArgs` to configure the `MemoryVectorStore` instance.
   * @returns Promise that resolves with a new `MemoryVectorStore` instance.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig?: MemoryVectorStoreArgs
  ): Promise<MemoryVectorStore> {
    const instance = new this(embeddings, dbConfig);
    await instance.addDocuments(docs);
    return instance;
  }

  /**
   * Static method to create a `MemoryVectorStore` instance from an existing
   * index. It creates a new `MemoryVectorStore` instance without adding any
   * documents or vectors.
   * @param embeddings `Embeddings` instance used to generate embeddings for the documents.
   * @param dbConfig Optional `MemoryVectorStoreArgs` to configure the `MemoryVectorStore` instance.
   * @returns Promise that resolves with a new `MemoryVectorStore` instance.
   */
  static async fromExistingIndex(
    embeddings: EmbeddingsInterface,
    dbConfig?: MemoryVectorStoreArgs
  ): Promise<MemoryVectorStore> {
    const instance = new this(embeddings, dbConfig);
    return instance;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/vectorstores | lc_public_repos/langchainjs/langchain/src/vectorstores/tests/memory.test.ts | import { test, expect } from "@jest/globals";
import { Document, DocumentInterface } from "@langchain/core/documents";
import { SyntheticEmbeddings } from "@langchain/core/utils/testing";
import { MemoryVectorStore } from "../memory.js";
import { cosine } from "../../util/ml-distance/similarities.js";
test("MemoryVectorStore with external ids", async () => {
  const embeddings = new SyntheticEmbeddings({
    vectorSize: 1536,
  });

  const store = new MemoryVectorStore(embeddings);
  expect(store).toBeDefined();

  // Index four short documents that share identical metadata.
  await store.addDocuments(
    ["hello", "hi", "bye", "what's this"].map((pageContent) => ({
      pageContent,
      metadata: { a: 1 },
    }))
  );

  // The closest match for "hello" must be the "hello" document itself.
  const topMatches = await store.similaritySearch("hello", 1);

  expect(topMatches).toHaveLength(1);
  expect(topMatches).toEqual([
    new Document({ metadata: { a: 1 }, pageContent: "hello" }),
  ]);
});
test("MemoryVectorStore stores and retrieves document IDs", async () => {
  const embeddings = new SyntheticEmbeddings({
    vectorSize: 1536,
  });

  const store = new MemoryVectorStore(embeddings);

  // Keep only documents whose namespace is 1 or 2.
  const keepLowNamespaces = (doc: DocumentInterface): boolean =>
    doc.metadata.namespace <= 2;

  const retriever = store.asRetriever({
    k: 2,
    filter: keepLowNamespaces,
  });
  expect(retriever).toBeDefined();

  await retriever.addDocuments([
    { pageContent: "hello", metadata: { namespace: 1 }, id: "1" },
    { pageContent: "hello", metadata: { namespace: 2 }, id: "2" },
    { pageContent: "hello", metadata: { namespace: 3 }, id: "3" },
    { pageContent: "hello", metadata: { namespace: 4 }, id: "4" },
  ]);

  // The filter excludes namespaces 3 and 4; ids must round-trip intact.
  const results = await retriever.getRelevantDocuments("hello");

  expect(results).toHaveLength(2);
  expect(results).toEqual([
    new Document({ metadata: { namespace: 1 }, pageContent: "hello", id: "1" }),
    new Document({ metadata: { namespace: 2 }, pageContent: "hello", id: "2" }),
  ]);
});
test("MemoryVectorStore as retriever can filter metadata", async () => {
  const embeddings = new SyntheticEmbeddings({
    vectorSize: 1536,
  });

  const store = new MemoryVectorStore(embeddings);

  // Keep only documents whose namespace is 1 or 2.
  const keepLowNamespaces = (doc: DocumentInterface): boolean =>
    doc.metadata.namespace <= 2;

  const retriever = store.asRetriever({
    k: 2,
    filter: keepLowNamespaces,
  });
  expect(retriever).toBeDefined();

  await retriever.addDocuments([
    { pageContent: "hello", metadata: { namespace: 1 } },
    { pageContent: "hello", metadata: { namespace: 2 } },
    { pageContent: "hello", metadata: { namespace: 3 } },
    { pageContent: "hello", metadata: { namespace: 4 } },
  ]);

  // Only the two documents passing the filter should come back.
  const results = await retriever.getRelevantDocuments("hello");

  expect(results).toHaveLength(2);
  expect(results).toEqual([
    new Document({ metadata: { namespace: 1 }, pageContent: "hello" }),
    new Document({ metadata: { namespace: 2 }, pageContent: "hello" }),
  ]);
});
test("MemoryVectorStore with custom similarity", async () => {
  const embeddings = new SyntheticEmbeddings({
    vectorSize: 1536,
  });

  let similarityCalled = false;
  let similarityCalledCount = 0;

  // Wrap cosine so we can observe how often the store invokes it.
  const store = new MemoryVectorStore(embeddings, {
    similarity: (left: number[], right: number[]) => {
      similarityCalled = true;
      similarityCalledCount += 1;
      return cosine(left, right);
    },
  });
  expect(store).toBeDefined();

  await store.addDocuments([
    { pageContent: "hello", metadata: { a: 1 } },
    { pageContent: "hi", metadata: { a: 1 } },
    { pageContent: "bye", metadata: { a: 1 } },
    { pageContent: "what's this", metadata: { a: 1 } },
  ]);

  const results = await store.similaritySearch("hello", 3);

  // One similarity computation per stored document.
  expect(similarityCalled).toBe(true);
  expect(similarityCalledCount).toBe(4);
  expect(results).toHaveLength(3);
});
test("MemoryVectorStore with max marginal relevance", async () => {
  const embeddings = new SyntheticEmbeddings({
    vectorSize: 1536,
  });

  let similarityCalled = false;
  let similarityCalledCount = 0;

  // Wrap cosine so we can observe how often the store invokes it.
  const store = new MemoryVectorStore(embeddings, {
    similarity: (left: number[], right: number[]) => {
      similarityCalled = true;
      similarityCalledCount += 1;
      return cosine(left, right);
    },
  });
  expect(store).toBeDefined();

  await store.addDocuments([
    { pageContent: "hello", metadata: { a: 1 } },
    { pageContent: "hi", metadata: { a: 1 } },
    { pageContent: "bye", metadata: { a: 1 } },
    { pageContent: "what's this", metadata: { a: 1 } },
  ]);

  const results = await store.maxMarginalRelevanceSearch("hello", { k: 3 });

  // One similarity computation per stored document during candidate scoring.
  expect(similarityCalled).toBe(true);
  expect(similarityCalledCount).toBe(4);
  expect(results).toHaveLength(3);
});
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/runnables/remote.ts | export * from "@langchain/core/runnables/remote";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/storage/encoder_backed.ts | import { Document } from "@langchain/core/documents";
import { BaseStore } from "@langchain/core/stores";
/**
 * Store wrapper that adds a translation layer on top of a string-keyed
 * backing store: keys are run through `keyEncoder` and values through
 * `valueSerializer` on the way in, and values through `valueDeserializer`
 * on the way out. It extends the BaseStore class.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export class EncoderBackedStore<K, V, SerializedType = any> extends BaseStore<
  K,
  V
> {
  lc_namespace = ["langchain", "storage"];

  store: BaseStore<string, SerializedType>;

  keyEncoder: (key: K) => string;

  valueSerializer: (value: V) => SerializedType;

  valueDeserializer: (value: SerializedType) => V;

  constructor(fields: {
    store: BaseStore<string, SerializedType>;
    keyEncoder: (key: K) => string;
    valueSerializer: (value: V) => SerializedType;
    valueDeserializer: (value: SerializedType) => V;
  }) {
    super(fields);
    this.store = fields.store;
    this.keyEncoder = fields.keyEncoder;
    this.valueSerializer = fields.valueSerializer;
    this.valueDeserializer = fields.valueDeserializer;
  }

  /**
   * Fetch several values at once. Keys are encoded before the lookup and
   * values are deserialized afterwards.
   * @param keys Array of keys to get
   * @returns Promise that resolves with an array of values or undefined for each key
   */
  async mget(keys: K[]): Promise<(V | undefined)[]> {
    const rawValues = await this.store.mget(
      keys.map((key) => this.keyEncoder(key))
    );
    return rawValues.map((raw) =>
      raw === undefined ? undefined : this.valueDeserializer(raw)
    );
  }

  /**
   * Set several key-value pairs at once. Keys are encoded and values are
   * serialized before delegating to the underlying store.
   * @param keyValuePairs Array of key-value pairs to set
   * @returns Promise that resolves when the operation is complete
   */
  async mset(keyValuePairs: [K, V][]): Promise<void> {
    return this.store.mset(
      keyValuePairs.map(([key, value]): [string, SerializedType] => [
        this.keyEncoder(key),
        this.valueSerializer(value),
      ])
    );
  }

  /**
   * Delete several keys at once. Keys are encoded before delegating.
   * @param keys Array of keys to delete
   * @returns Promise that resolves when the operation is complete
   */
  async mdelete(keys: K[]): Promise<void> {
    return this.store.mdelete(keys.map((key) => this.keyEncoder(key)));
  }

  /**
   * Yield keys from the underlying store, optionally filtered by prefix.
   * Keys are passed through as stored (no decoding is attempted).
   * @param prefix Optional prefix to filter keys
   * @returns AsyncGenerator that yields keys
   */
  async *yieldKeys(prefix?: string | undefined): AsyncGenerator<string | K> {
    yield* this.store.yieldKeys(prefix);
  }
}
/**
 * Builds a `Document`-valued store on top of a byte store by JSON-encoding
 * each document's `pageContent` and `metadata` as UTF-8 bytes.
 * NOTE(review): a document's `id` is not serialized here, so it does not
 * survive a round trip — confirm this is intended.
 */
export function createDocumentStoreFromByteStore(
  store: BaseStore<string, Uint8Array>
) {
  const textEncoder = new TextEncoder();
  const textDecoder = new TextDecoder();

  const serializeDocument = (doc: Document): Uint8Array =>
    textEncoder.encode(
      JSON.stringify({ pageContent: doc.pageContent, metadata: doc.metadata })
    );

  const deserializeDocument = (bytes: Uint8Array): Document =>
    new Document(JSON.parse(textDecoder.decode(bytes)));

  return new EncoderBackedStore({
    store,
    keyEncoder: (key: string) => key,
    valueSerializer: serializeDocument,
    valueDeserializer: deserializeDocument,
  });
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/storage/in_memory.ts | export { InMemoryStore } from "@langchain/core/stores";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/storage/file_system.ts | import * as fs from "node:fs/promises";
import * as path from "node:path";
import { BaseStore } from "@langchain/core/stores";
/**
* File system implementation of the BaseStore using a dictionary. Used for
* storing key-value pairs in the file system.
* @example
* ```typescript
* const store = await LocalFileStore.fromPath("./messages");
* await store.mset(
* Array.from({ length: 5 }).map((_, index) => [
* `message:id:${index}`,
* new TextEncoder().encode(
* JSON.stringify(
* index % 2 === 0
* ? new AIMessage("ai stuff...")
* : new HumanMessage("human stuff..."),
* ),
* ),
* ]),
* );
* const retrievedMessages = await store.mget(["message:id:0", "message:id:1"]);
* console.log(retrievedMessages.map((v) => new TextDecoder().decode(v)));
* for await (const key of store.yieldKeys("message:id:")) {
* await store.mdelete([key]);
* }
* ```
*
* @security **Security Notice** This file store
* can alter any text file in the provided directory and any subfolders.
* Make sure that the path you specify when initializing the store is free
* of other files.
*/
export class LocalFileStore extends BaseStore<string, Uint8Array> {
  lc_namespace = ["langchain", "storage"];

  rootPath: string;

  constructor(fields: { rootPath: string }) {
    super(fields);
    this.rootPath = fields.rootPath;
  }

  /**
   * Read the raw file content stored for the given key.
   * @param key The key to read the file for.
   * @returns Promise that resolves to the stored bytes, or `undefined` if
   *   no value has been stored under the key.
   */
  private async getParsedFile(key: string): Promise<Uint8Array | undefined> {
    // Resolve (and validate) the path up front so that invalid-key errors
    // surface directly instead of being wrapped as read errors. Validation
    // now lives solely in getFullPath, so reads and writes accept exactly
    // the same set of keys; previously this method used a second, stricter
    // regex that rejected keys (e.g. containing "/") that mset had
    // happily written, and accepted keys (containing ":") that getFullPath
    // then rejected.
    const fullPath = this.getFullPath(key);
    try {
      const fileContent = await fs.readFile(fullPath);
      if (!fileContent) {
        return undefined;
      }
      return fileContent;
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (e: any) {
      // File does not exist yet.
      // eslint-disable-next-line no-instanceof/no-instanceof
      if ("code" in e && e.code === "ENOENT") {
        return undefined;
      }
      throw new Error(
        `Error reading and parsing file at path: ${
          this.rootPath
        }.\nError: ${JSON.stringify(e)}`
      );
    }
  }

  /**
   * Writes the given bytes to the file backing the given key.
   * @param content Raw bytes to be written to the file.
   * @param key Key whose backing file should be written.
   */
  private async setFileContent(content: Uint8Array, key: string) {
    try {
      await fs.writeFile(this.getFullPath(key), content);
    } catch (error) {
      throw new Error(
        `Error writing file at path: ${this.getFullPath(
          key
        )}.\nError: ${JSON.stringify(error)}`
      );
    }
  }

  /**
   * Returns the full path of the file where the value of the given key is
   * stored. Performs key validation and path-traversal protection; this is
   * the single source of truth for which keys are acceptable.
   * @param key the key to get the full path for
   */
  private getFullPath(key: string): string {
    try {
      const keyAsTxtFile = `${key}.txt`;
      // Allow alphanumerics plus "_", "-", ":", "." and "/" — the union of
      // what reads and writes previously accepted, so documented keys such
      // as "message:id:0" work for both mset and mget.
      if (!/^[a-zA-Z0-9_\-:./]+$/.test(key)) {
        throw new Error(`Invalid characters in key: ${key}`);
      }
      const fullPath = path.resolve(this.rootPath, keyAsTxtFile);
      const commonPath = path.resolve(this.rootPath);
      // Defense in depth: even for keys passing the character check
      // (e.g. "../x"), refuse any path that resolves outside the root.
      if (!fullPath.startsWith(commonPath)) {
        throw new Error(
          `Invalid key: ${key}. Key should be relative to the root path. ` +
            `Root path: ${this.rootPath}, Full path: ${fullPath}`
        );
      }
      return fullPath;
    } catch (e) {
      throw new Error(
        `Error getting full path for key: ${key}.\nError: ${String(e)}`
      );
    }
  }

  /**
   * Retrieves the values associated with the given keys from the store.
   * @param keys Keys to retrieve values for.
   * @returns Array of values (or `undefined`) in the same order as `keys`.
   */
  async mget(keys: string[]) {
    const values: (Uint8Array | undefined)[] = [];
    for (const key of keys) {
      const fileContent = await this.getParsedFile(key);
      values.push(fileContent);
    }
    return values;
  }

  /**
   * Sets the values for the given keys in the store.
   * @param keyValuePairs Array of key-value pairs to set in the store.
   * @returns Promise that resolves when all key-value pairs have been set.
   */
  async mset(keyValuePairs: [string, Uint8Array][]): Promise<void> {
    await Promise.all(
      keyValuePairs.map(([key, value]) => this.setFileContent(value, key))
    );
  }

  /**
   * Deletes the given keys and their associated values from the store.
   * @param keys Keys to delete from the store.
   * @returns Promise that resolves when all keys have been deleted.
   */
  async mdelete(keys: string[]): Promise<void> {
    await Promise.all(keys.map((key) => fs.unlink(this.getFullPath(key))));
  }

  /**
   * Asynchronous generator that yields keys from the store. If a prefix is
   * provided, it only yields keys that start with the prefix.
   * @param prefix Optional prefix to filter keys.
   * @returns AsyncGenerator that yields keys from the store.
   */
  async *yieldKeys(prefix?: string): AsyncGenerator<string> {
    const allFiles = await fs.readdir(this.rootPath);
    for (const file of allFiles) {
      // Strip only a trailing ".txt" extension. The previous
      // `file.replace(".txt", "")` removed the FIRST ".txt" occurrence,
      // mangling keys that legitimately contain ".txt" in the middle.
      const key = file.endsWith(".txt") ? file.slice(0, -4) : file;
      if (prefix === undefined || key.startsWith(prefix)) {
        yield key;
      }
    }
  }

  /**
   * Static method for initializing the class.
   * Performs a check to see if the directory exists, and if not, creates it.
   * @param rootPath Path to the directory.
   * @returns Promise that resolves to an instance of the class.
   */
  static async fromPath(rootPath: string): Promise<LocalFileStore> {
    try {
      // Verifies the directory exists at the provided path, and that it is readable and writable.
      await fs.access(rootPath, fs.constants.R_OK | fs.constants.W_OK);
    } catch (_) {
      try {
        // Directory does not exist, create it (including parents).
        await fs.mkdir(rootPath, { recursive: true });
      } catch (error) {
        throw new Error(
          `An error occurred creating directory at: ${rootPath}.\nError: ${JSON.stringify(
            error
          )}`
        );
      }
    }
    return new this({ rootPath });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/storage | lc_public_repos/langchainjs/langchain/src/storage/tests/file_system.test.ts | /* eslint-disable no-process-env */
import { test } from "@jest/globals";
import * as fs from "node:fs";
import * as path from "node:path";
import * as os from "node:os";
import { LocalFileStore } from "../file_system.js";
describe("LocalFileStore", () => {
  // Keys reused across tests; all tests share the same tempDir on disk, so
  // test order and leftover files matter (see the prefix test's cleanup).
  const keys = ["key1", "key2"];
  const tempDir = fs.mkdtempSync(
    path.join(os.tmpdir(), "file_system_store_test")
  );
  // Separate root used by tests that exercise directory auto-creation.
  const secondaryRootPath = "./file_system_store_test_secondary";
  // Round-trip: values written with mset come back byte-identical via mget.
  test("LocalFileStore can write & read values", async () => {
    const encoder = new TextEncoder();
    const decoder = new TextDecoder();
    const store = await LocalFileStore.fromPath(tempDir);
    const value1 = new Date().toISOString();
    const value2 = new Date().toISOString() + new Date().toISOString();
    await store.mset([
      [keys[0], encoder.encode(value1)],
      [keys[1], encoder.encode(value2)],
    ]);
    const retrievedValues = await store.mget([keys[0], keys[1]]);
    const everyValueDefined = retrievedValues.every((v) => v !== undefined);
    expect(everyValueDefined).toBe(true);
    expect(retrievedValues.map((v) => decoder.decode(v))).toEqual([
      value1,
      value2,
    ]);
  });
  // After mdelete, mget for the same keys returns undefined.
  test("LocalFileStore can delete values", async () => {
    const encoder = new TextEncoder();
    const store = await LocalFileStore.fromPath(tempDir);
    const value1 = new Date().toISOString();
    const value2 = new Date().toISOString() + new Date().toISOString();
    await store.mset([
      [keys[0], encoder.encode(value1)],
      [keys[1], encoder.encode(value2)],
    ]);
    await store.mdelete(keys);
    const retrievedValues = await store.mget([keys[0], keys[1]]);
    const everyValueUndefined = retrievedValues.every((v) => v === undefined);
    expect(everyValueUndefined).toBe(true);
  });
  // yieldKeys(prefix) returns exactly the keys starting with the prefix.
  test("LocalFileStore can yield keys with prefix", async () => {
    const encoder = new TextEncoder();
    const prefix = "prefix_";
    const keysWithPrefix = keys.map((key) => `${prefix}${key}`);
    const store = await LocalFileStore.fromPath(tempDir);
    const value = new Date().toISOString();
    await store.mset(keysWithPrefix.map((key) => [key, encoder.encode(value)]));
    const yieldedKeys = [];
    for await (const key of store.yieldKeys(prefix)) {
      yieldedKeys.push(key);
    }
    // console.log("Yielded keys:", yieldedKeys);
    expect(yieldedKeys.sort()).toEqual(keysWithPrefix.sort());
    // afterEach won't automatically delete these since we're applying a prefix.
    await store.mdelete(keysWithPrefix);
  });
  // fromPath must create the root directory when it does not exist yet.
  test("LocalFileStore works with a file which does not exist", async () => {
    const encoder = new TextEncoder();
    const decoder = new TextDecoder();
    const store = await LocalFileStore.fromPath(secondaryRootPath);
    const value1 = new Date().toISOString();
    const value2 = new Date().toISOString() + new Date().toISOString();
    await store.mset([
      [keys[0], encoder.encode(value1)],
      [keys[1], encoder.encode(value2)],
    ]);
    const retrievedValues = await store.mget([keys[0], keys[1]]);
    const everyValueDefined = retrievedValues.every((v) => v !== undefined);
    expect(everyValueDefined).toBe(true);
    // console.log("retrievedValues", retrievedValues);
    expect(
      retrievedValues.map((v) => {
        if (!v) {
          throw new Error("Value is undefined");
        }
        return decoder.decode(v);
      })
    ).toEqual([value1, value2]);
    await fs.promises.rm(secondaryRootPath, { recursive: true, force: true });
  });
  // Path-traversal protection: relative-escape, absolute, and backslash
  // keys must all be rejected for both reads and writes.
  test("Should disallow attempts to traverse paths outside of a subfolder", async () => {
    const encoder = new TextEncoder();
    const store = await LocalFileStore.fromPath(secondaryRootPath);
    const value1 = new Date().toISOString();
    await expect(
      store.mset([["../foo", encoder.encode(value1)]])
    ).rejects.toThrowError();
    await expect(
      store.mset([["/foo", encoder.encode(value1)]])
    ).rejects.toThrowError();
    await expect(
      store.mset([["\\foo", encoder.encode(value1)]])
    ).rejects.toThrowError();
    await expect(store.mget(["../foo"])).rejects.toThrowError();
    await expect(store.mget(["/foo"])).rejects.toThrowError();
    await expect(store.mget(["\\foo"])).rejects.toThrowError();
    await fs.promises.rm(secondaryRootPath, { recursive: true, force: true });
  });
});
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/types/type-utils.ts | // Utility for marking only some keys of an interface as optional
// Compare to Partial<T>, which marks ALL keys as optional.
// Example: Optional<{ a: string; b: number }, "b"> is { a: string; b?: number }.
export type Optional<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/types/pdf-parse.d.ts | /**
* Type definitions adapted from pdfjs-dist
* https://github.com/mozilla/pdfjs-dist/blob/master/types/src/display/api.d.ts
*/
declare module "pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js" {
export type TypedArray =
| Int8Array
| Uint8Array
| Uint8ClampedArray
| Int16Array
| Uint16Array
| Int32Array
| Uint32Array
| Float32Array
| Float64Array;
export type BinaryData = TypedArray | ArrayBuffer | Array<number> | string;
export type RefProxy = {
num: number;
gen: number;
};
/**
* Document initialization / loading parameters object.
*/
export type DocumentInitParameters = {
/**
* - The URL of the PDF.
*/
url?: string | URL | undefined;
/**
* - Binary PDF data.
* Use TypedArrays (Uint8Array) to improve the memory usage. If PDF data is
* BASE64-encoded, use `atob()` to convert it to a binary string first.
*
* NOTE: If TypedArrays are used they will generally be transferred to the
* worker-thread. This will help reduce main-thread memory usage, however
* it will take ownership of the TypedArrays.
*/
data?: BinaryData | undefined;
/**
* - Basic authentication headers.
*/
httpHeaders?: Object | undefined;
/**
* - Indicates whether or not
* cross-site Access-Control requests should be made using credentials such
* as cookies or authorization headers. The default is `false`.
*/
withCredentials?: boolean | undefined;
/**
* - For decrypting password-protected PDFs.
*/
password?: string | undefined;
/**
* - The PDF file length. It's used for progress
* reports and range requests operations.
*/
length?: number | undefined;
/**
* - Allows for using a custom range
* transport implementation.
*/
range?: PDFDataRangeTransport | undefined;
/**
* - Specify maximum number of bytes fetched
* per range request. The default value is {@link DEFAULT_RANGE_CHUNK_SIZE }.
*/
rangeChunkSize?: number | undefined;
/**
* - The worker that will be used for loading and
* parsing the PDF data.
*/
worker?: PDFWorker | undefined;
/**
* - Controls the logging level; the constants
* from {@link VerbosityLevel } should be used.
*/
verbosity?: number | undefined;
/**
* - The base URL of the document, used when
* attempting to recover valid absolute URLs for annotations, and outline
* items, that (incorrectly) only specify relative URLs.
*/
docBaseUrl?: string | undefined;
/**
* - The URL where the predefined Adobe CMaps are
* located. Include the trailing slash.
*/
cMapUrl?: string | undefined;
/**
* - Specifies if the Adobe CMaps are binary
* packed or not. The default value is `true`.
*/
cMapPacked?: boolean | undefined;
/**
* - The factory that will be used when
* reading built-in CMap files. Providing a custom factory is useful for
* environments without Fetch API or `XMLHttpRequest` support, such as
* Node.js. The default value is {DOMCMapReaderFactory}.
*/
CMapReaderFactory?: Object | undefined;
/**
* - When `true`, fonts that aren't
* embedded in the PDF document will fallback to a system font.
* The default value is `true` in web environments and `false` in Node.js;
* unless `disableFontFace === true` in which case this defaults to `false`
* regardless of the environment (to prevent completely broken fonts).
*/
useSystemFonts?: boolean | undefined;
/**
* - The URL where the standard font
* files are located. Include the trailing slash.
*/
standardFontDataUrl?: string | undefined;
/**
* - The factory that will be used
* when reading the standard font files. Providing a custom factory is useful
* for environments without Fetch API or `XMLHttpRequest` support, such as
* Node.js. The default value is {DOMStandardFontDataFactory}.
*/
StandardFontDataFactory?: Object | undefined;
/**
* - Enable using the Fetch API in the
* worker-thread when reading CMap and standard font files. When `true`,
* the `CMapReaderFactory` and `StandardFontDataFactory` options are ignored.
* The default value is `true` in web environments and `false` in Node.js.
*/
useWorkerFetch?: boolean | undefined;
/**
* - Reject certain promises, e.g.
* `getOperatorList`, `getTextContent`, and `RenderTask`, when the associated
* PDF data cannot be successfully parsed, instead of attempting to recover
* whatever possible of the data. The default value is `false`.
*/
stopAtErrors?: boolean | undefined;
/**
* - The maximum allowed image size in total
* pixels, i.e. width * height. Images above this value will not be rendered.
* Use -1 for no limit, which is also the default value.
*/
maxImageSize?: number | undefined;
/**
* - Determines if we can evaluate strings
* as JavaScript. Primarily used to improve performance of font rendering, and
* when parsing PDF functions. The default value is `true`.
*/
isEvalSupported?: boolean | undefined;
/**
* - Determines if we can use
* `OffscreenCanvas` in the worker. Primarily used to improve performance of
* image conversion/rendering.
* The default value is `true` in web environments and `false` in Node.js.
*/
isOffscreenCanvasSupported?: boolean | undefined;
/**
* - The integer value is used to
* know when an image must be resized (uses `OffscreenCanvas` in the worker).
* If it's -1 then a possibly slow algorithm is used to guess the max value.
*/
canvasMaxAreaInBytes?: boolean | undefined;
/**
* - By default fonts are converted to
* OpenType fonts and loaded via the Font Loading API or `@font-face` rules.
* If disabled, fonts will be rendered using a built-in font renderer that
* constructs the glyphs with primitive path commands.
* The default value is `false` in web environments and `true` in Node.js.
*/
disableFontFace?: boolean | undefined;
/**
* - Include additional properties,
* which are unused during rendering of PDF documents, when exporting the
* parsed font data from the worker-thread. This may be useful for debugging
* purposes (and backwards compatibility), but note that it will lead to
* increased memory usage. The default value is `false`.
*/
fontExtraProperties?: boolean | undefined;
/**
* - Render Xfa forms if any.
* The default value is `false`.
*/
enableXfa?: boolean | undefined;
/**
* - Specify an explicit document
* context to create elements with and to load resources, such as fonts,
* into. Defaults to the current document.
*/
ownerDocument?: HTMLDocument | undefined;
/**
* - Disable range request loading of PDF
* files. When enabled, and if the server supports partial content requests,
* then the PDF will be fetched in chunks. The default value is `false`.
*/
disableRange?: boolean | undefined;
/**
* - Disable streaming of PDF file data.
* By default PDF.js attempts to load PDF files in chunks. The default value
* is `false`.
*/
disableStream?: boolean | undefined;
/**
* - Disable pre-fetching of PDF file
* data. When range requests are enabled PDF.js will automatically keep
* fetching more data even if it isn't needed to display the current page.
* The default value is `false`.
*
* NOTE: It is also necessary to disable streaming, see above, in order for
* disabling of pre-fetching to work correctly.
*/
disableAutoFetch?: boolean | undefined;
/**
* - Enables special hooks for debugging PDF.js
* (see `web/debugger.js`). The default value is `false`.
*/
pdfBug?: boolean | undefined;
/**
* - The factory instance that will be used
* when creating canvases. The default value is {new DOMCanvasFactory()}.
*/
canvasFactory?: Object | undefined;
/**
* - A factory instance that will be used
* to create SVG filters when rendering some images on the main canvas.
*/
filterFactory?: Object | undefined;
};
/**
 * Loading-progress data, passed to the `PDFDocumentLoadingTask.onProgress`
 * callback.
 */
export type OnProgressParameters = {
  /**
   * - Currently loaded number of bytes.
   */
  loaded: number;
  /**
   * - Total number of bytes in the PDF file.
   */
  total: number;
};
/**
 * Page getViewport parameters, passed to `PDFPageProxy.getViewport`.
 */
export type GetViewportParameters = {
  /**
   * - The desired scale of the viewport.
   */
  scale: number;
  /**
   * - The desired rotation, in degrees, of
   * the viewport. If omitted it defaults to the page rotation.
   */
  rotation?: number | undefined;
  /**
   * - The horizontal, i.e. x-axis, offset.
   * The default value is `0`.
   */
  offsetX?: number | undefined;
  /**
   * - The vertical, i.e. y-axis, offset.
   * The default value is `0`.
   */
  offsetY?: number | undefined;
  /**
   * - If true, the y-axis will not be
   * flipped. The default value is `false`.
   */
  dontFlip?: boolean | undefined;
};
/**
 * Page getTextContent parameters.
 *
 * NOTE: The lower-case initial of this type name mirrors the upstream
 * `getTextContentParameters` JSDoc typedef it is generated from.
 */
export type getTextContentParameters = {
  /**
   * - When true include marked
   * content items in the items array of TextContent. The default is `false`.
   */
  includeMarkedContent?: boolean | undefined;
};
/**
 * Page text content.
 */
export type TextContent = {
  /**
   * - Array of
   * {@link TextItem } and {@link TextMarkedContent } objects. TextMarkedContent
   * items are included when includeMarkedContent is true.
   */
  items: Array<TextItem | TextMarkedContent>;
  /**
   * - {@link TextStyle } objects,
   * indexed by font name (the {@link TextItem } `fontName` values).
   */
  styles: {
    [x: string]: TextStyle;
  };
};
/**
 * Page text content part.
 */
export type TextItem = {
  /**
   * - Text content.
   */
  str: string;
  /**
   * - Text direction: 'ttb', 'ltr' or 'rtl'.
   */
  dir: string;
  /**
   * - Transformation matrix.
   * NOTE(review): presumably the standard 6-element PDF `[a, b, c, d, e, f]`
   * matrix — confirm against the text-layer renderer.
   */
  transform: Array<any>;
  /**
   * - Width in device space.
   */
  width: number;
  /**
   * - Height in device space.
   */
  height: number;
  /**
   * - Font name used by PDF.js for converted font.
   */
  fontName: string;
  /**
   * - Indicating if the text content is followed by a
   * line-break.
   */
  hasEOL: boolean;
};
/**
 * Page text marked content part.
 * NOTE(review): the 'begin…' items presumably bracket runs of entries in the
 * {@link TextContent } items array, terminated by an 'endMarkedContent' item —
 * confirm against the text-content evaluator.
 */
export type TextMarkedContent = {
  /**
   * - Either 'beginMarkedContent',
   * 'beginMarkedContentProps', or 'endMarkedContent'.
   */
  type: string;
  /**
   * - The marked content identifier. Only used for type
   * 'beginMarkedContentProps'.
   */
  id: string;
};
/**
 * Text style; keyed by font name in the `styles` map of {@link TextContent }.
 */
export type TextStyle = {
  /**
   * - Font ascent.
   */
  ascent: number;
  /**
   * - Font descent.
   */
  descent: number;
  /**
   * - Whether or not the text is in vertical mode.
   */
  vertical: boolean;
  /**
   * - The possible font family.
   */
  fontFamily: string;
};
/**
 * Page annotation parameters, passed to `PDFPageProxy.getAnnotations`.
 */
export type GetAnnotationsParameters = {
  /**
   * - Determines the annotations that are fetched,
   * can be 'display' (viewable annotations), 'print' (printable annotations),
   * or 'any' (all annotations). The default value is 'display'.
   */
  intent?: string | undefined;
};
/**
 * Page render parameters.
 */
export type RenderParameters = {
  /**
   * - A 2D context of a DOM
   * Canvas object.
   */
  canvasContext: CanvasRenderingContext2D;
  /**
   * - Rendering viewport obtained by calling
   * the `PDFPageProxy.getViewport` method.
   */
  viewport: PageViewport;
  /**
   * - Rendering intent, can be 'display', 'print',
   * or 'any'. The default value is 'display'.
   */
  intent?: string | undefined;
  /**
   * Controls which annotations are rendered
   * onto the canvas, for annotations with appearance-data; the values from
   * {@link AnnotationMode } should be used. The following values are supported:
   * - `AnnotationMode.DISABLE`, which disables all annotations.
   * - `AnnotationMode.ENABLE`, which includes all possible annotations (thus
   * it also depends on the `intent`-option, see above).
   * - `AnnotationMode.ENABLE_FORMS`, which excludes annotations that contain
   * interactive form elements (those will be rendered in the display layer).
   * - `AnnotationMode.ENABLE_STORAGE`, which includes all possible annotations
   * (as above) but where interactive form elements are updated with data
   * from the {@link AnnotationStorage }-instance; useful e.g. for printing.
   * The default value is `AnnotationMode.ENABLE`.
   */
  annotationMode?: number | undefined;
  /**
   * - Additional transform, applied just
   * before viewport transform.
   */
  transform?: any[] | undefined;
  /**
   * - Background
   * to use for the canvas.
   * Any valid `canvas.fillStyle` can be used: a `DOMString` parsed as CSS
   * <color> value, a `CanvasGradient` object (a linear or radial gradient) or
   * a `CanvasPattern` object (a repetitive image). The default value is
   * 'rgb(255,255,255)'.
   *
   * NOTE: This option may be partially, or completely, ignored when the
   * `pageColors`-option is used.
   */
  background?: string | CanvasGradient | CanvasPattern | undefined;
  /**
   * - Overwrites background and foreground colors
   * with user defined ones in order to improve readability in high contrast
   * mode.
   */
  pageColors?: Object | undefined;
  /**
   * -
   * A promise that should resolve with an {@link OptionalContentConfig }
   * created from `PDFDocumentProxy.getOptionalContentConfig`. If `null`,
   * the configuration will be fetched automatically with the default visibility
   * states set.
   */
  optionalContentConfigPromise?: Promise<OptionalContentConfig> | undefined;
  /**
   * - Map some
   * annotation ids with canvases used to render them.
   */
  annotationCanvasMap?: Map<string, HTMLCanvasElement> | undefined;
  /**
   * - Annotation-storage instance holding the data to
   * use when printing — presumably paired with `AnnotationMode.ENABLE_STORAGE`
   * (see `annotationMode` above); confirm against the viewer.
   */
  printAnnotationStorage?: PrintAnnotationStorage | undefined;
};
/**
 * Page getOperatorList parameters.
 */
export type GetOperatorListParameters = {
  /**
   * - Rendering intent, can be 'display', 'print',
   * or 'any'. The default value is 'display'.
   */
  intent?: string | undefined;
  /**
   * Controls which annotations are included
   * in the operatorList, for annotations with appearance-data; the values from
   * {@link AnnotationMode } should be used. The following values are supported:
   * - `AnnotationMode.DISABLE`, which disables all annotations.
   * - `AnnotationMode.ENABLE`, which includes all possible annotations (thus
   * it also depends on the `intent`-option, see above).
   * - `AnnotationMode.ENABLE_FORMS`, which excludes annotations that contain
   * interactive form elements (those will be rendered in the display layer).
   * - `AnnotationMode.ENABLE_STORAGE`, which includes all possible annotations
   * (as above) but where interactive form elements are updated with data
   * from the {@link AnnotationStorage }-instance; useful e.g. for printing.
   * The default value is `AnnotationMode.ENABLE`.
   */
  annotationMode?: number | undefined;
  /**
   * - Annotation-storage instance holding the data to
   * use when printing — presumably paired with `AnnotationMode.ENABLE_STORAGE`
   * (see `annotationMode` above); confirm against the viewer.
   */
  printAnnotationStorage?: PrintAnnotationStorage | undefined;
};
/**
 * Structure tree node. The root node will have a role "Root".
 */
export type StructTreeNode = {
  /**
   * - Array of
   * {@link StructTreeNode } and {@link StructTreeContent } objects; i.e. the
   * tree's interior nodes and its leaf content items.
   */
  children: Array<StructTreeNode | StructTreeContent>;
  /**
   * - Element's role, already mapped if a role map exists
   * in the PDF.
   */
  role: string;
};
/**
 * Structure tree content; a leaf of the {@link StructTreeNode } tree.
 */
export type StructTreeContent = {
  /**
   * - Either "content" for page and stream structure
   * elements or "object" for object references.
   */
  type: string;
  /**
   * - Unique id that will map to the text layer.
   */
  id: string;
};
/**
 * PDF page operator list. `fnArray` and `argsArray` are parallel arrays:
 * `argsArray[i]` holds the arguments for the operator `fnArray[i]`.
 */
export type PDFOperatorList = {
  /**
   * - Array containing the operator functions.
   */
  fnArray: Array<number>;
  /**
   * - Array containing the arguments of the
   * functions.
   */
  argsArray: Array<any>;
};
/**
 * PDFWorker construction parameters.
 */
export type PDFWorkerParameters = {
  /**
   * - The name of the worker.
   */
  name?: string | undefined;
  /**
   * - The `workerPort` object.
   */
  port?: Worker | undefined;
  /**
   * - Controls the logging level;
   * the constants from {@link VerbosityLevel } should be used.
   */
  verbosity?: number | undefined;
};
/**
 * The build identifier of the library.
 * @type {string}
 */
export const build: string;
// Default DOM-based factory implementations, used when no custom factory is
// supplied (see e.g. the `CMapReaderFactory`, `StandardFontDataFactory` and
// `canvasFactory` options of `DocumentInitParameters`).
export let DefaultCanvasFactory: typeof DOMCanvasFactory;
export let DefaultCMapReaderFactory: typeof DOMCMapReaderFactory;
export let DefaultFilterFactory: typeof DOMFilterFactory;
export let DefaultStandardFontDataFactory: typeof DOMStandardFontDataFactory;
/**
* @typedef { Int8Array | Uint8Array | Uint8ClampedArray |
* Int16Array | Uint16Array |
* Int32Array | Uint32Array | Float32Array |
* Float64Array
* } TypedArray
*/
/**
* @typedef { TypedArray | ArrayBuffer | Array<number> | string } BinaryData
*/
/**
* @typedef {Object} RefProxy
* @property {number} num
* @property {number} gen
*/
/**
* Document initialization / loading parameters object.
*
* @typedef {Object} DocumentInitParameters
* @property {string | URL} [url] - The URL of the PDF.
* @property {BinaryData} [data] - Binary PDF data.
* Use TypedArrays (Uint8Array) to improve the memory usage. If PDF data is
* BASE64-encoded, use `atob()` to convert it to a binary string first.
*
* NOTE: If TypedArrays are used they will generally be transferred to the
* worker-thread. This will help reduce main-thread memory usage, however
* it will take ownership of the TypedArrays.
* @property {Object} [httpHeaders] - Basic authentication headers.
* @property {boolean} [withCredentials] - Indicates whether or not
* cross-site Access-Control requests should be made using credentials such
* as cookies or authorization headers. The default is `false`.
* @property {string} [password] - For decrypting password-protected PDFs.
* @property {number} [length] - The PDF file length. It's used for progress
* reports and range requests operations.
* @property {PDFDataRangeTransport} [range] - Allows for using a custom range
* transport implementation.
* @property {number} [rangeChunkSize] - Specify maximum number of bytes fetched
* per range request. The default value is {@link DEFAULT_RANGE_CHUNK_SIZE}.
* @property {PDFWorker} [worker] - The worker that will be used for loading and
* parsing the PDF data.
* @property {number} [verbosity] - Controls the logging level; the constants
* from {@link VerbosityLevel} should be used.
* @property {string} [docBaseUrl] - The base URL of the document, used when
* attempting to recover valid absolute URLs for annotations, and outline
* items, that (incorrectly) only specify relative URLs.
* @property {string} [cMapUrl] - The URL where the predefined Adobe CMaps are
* located. Include the trailing slash.
* @property {boolean} [cMapPacked] - Specifies if the Adobe CMaps are binary
* packed or not. The default value is `true`.
* @property {Object} [CMapReaderFactory] - The factory that will be used when
* reading built-in CMap files. Providing a custom factory is useful for
* environments without Fetch API or `XMLHttpRequest` support, such as
* Node.js. The default value is {DOMCMapReaderFactory}.
* @property {boolean} [useSystemFonts] - When `true`, fonts that aren't
* embedded in the PDF document will fallback to a system font.
* The default value is `true` in web environments and `false` in Node.js;
* unless `disableFontFace === true` in which case this defaults to `false`
* regardless of the environment (to prevent completely broken fonts).
* @property {string} [standardFontDataUrl] - The URL where the standard font
* files are located. Include the trailing slash.
* @property {Object} [StandardFontDataFactory] - The factory that will be used
* when reading the standard font files. Providing a custom factory is useful
* for environments without Fetch API or `XMLHttpRequest` support, such as
* Node.js. The default value is {DOMStandardFontDataFactory}.
* @property {boolean} [useWorkerFetch] - Enable using the Fetch API in the
* worker-thread when reading CMap and standard font files. When `true`,
* the `CMapReaderFactory` and `StandardFontDataFactory` options are ignored.
* The default value is `true` in web environments and `false` in Node.js.
* @property {boolean} [stopAtErrors] - Reject certain promises, e.g.
* `getOperatorList`, `getTextContent`, and `RenderTask`, when the associated
* PDF data cannot be successfully parsed, instead of attempting to recover
* whatever possible of the data. The default value is `false`.
* @property {number} [maxImageSize] - The maximum allowed image size in total
* pixels, i.e. width * height. Images above this value will not be rendered.
* Use -1 for no limit, which is also the default value.
* @property {boolean} [isEvalSupported] - Determines if we can evaluate strings
* as JavaScript. Primarily used to improve performance of font rendering, and
* when parsing PDF functions. The default value is `true`.
* @property {boolean} [isOffscreenCanvasSupported] - Determines if we can use
* `OffscreenCanvas` in the worker. Primarily used to improve performance of
* image conversion/rendering.
* The default value is `true` in web environments and `false` in Node.js.
 * @property {number} [canvasMaxAreaInBytes] - The integer value is used to
* know when an image must be resized (uses `OffscreenCanvas` in the worker).
* If it's -1 then a possibly slow algorithm is used to guess the max value.
* @property {boolean} [disableFontFace] - By default fonts are converted to
* OpenType fonts and loaded via the Font Loading API or `@font-face` rules.
* If disabled, fonts will be rendered using a built-in font renderer that
* constructs the glyphs with primitive path commands.
* The default value is `false` in web environments and `true` in Node.js.
* @property {boolean} [fontExtraProperties] - Include additional properties,
* which are unused during rendering of PDF documents, when exporting the
* parsed font data from the worker-thread. This may be useful for debugging
* purposes (and backwards compatibility), but note that it will lead to
* increased memory usage. The default value is `false`.
* @property {boolean} [enableXfa] - Render Xfa forms if any.
* The default value is `false`.
* @property {HTMLDocument} [ownerDocument] - Specify an explicit document
* context to create elements with and to load resources, such as fonts,
* into. Defaults to the current document.
* @property {boolean} [disableRange] - Disable range request loading of PDF
* files. When enabled, and if the server supports partial content requests,
* then the PDF will be fetched in chunks. The default value is `false`.
* @property {boolean} [disableStream] - Disable streaming of PDF file data.
* By default PDF.js attempts to load PDF files in chunks. The default value
* is `false`.
* @property {boolean} [disableAutoFetch] - Disable pre-fetching of PDF file
* data. When range requests are enabled PDF.js will automatically keep
* fetching more data even if it isn't needed to display the current page.
* The default value is `false`.
*
* NOTE: It is also necessary to disable streaming, see above, in order for
* disabling of pre-fetching to work correctly.
* @property {boolean} [pdfBug] - Enables special hooks for debugging PDF.js
* (see `web/debugger.js`). The default value is `false`.
* @property {Object} [canvasFactory] - The factory instance that will be used
* when creating canvases. The default value is {new DOMCanvasFactory()}.
* @property {Object} [filterFactory] - A factory instance that will be used
* to create SVG filters when rendering some images on the main canvas.
*/
/**
 * This is the main entry point for loading a PDF and interacting with it.
 *
 * NOTE: If a URL is used to fetch the PDF data a standard Fetch API call (or
 * XHR as fallback) is used, which means it must follow same origin rules,
 * e.g. no cross-domain requests without CORS.
 *
 * NOTE: BASE64-encoded data must be decoded to a binary string first (e.g.
 * with `atob()`), see {@link DocumentInitParameters}.
 *
 * @param {string | URL | TypedArray | ArrayBuffer | DocumentInitParameters}
 * src - Can be a URL where a PDF file is located, a typed array (Uint8Array)
 * already populated with data, or a parameter object.
 * @returns {PDFDocumentLoadingTask}
 */
export function getDocument(
  src: string | URL | TypedArray | ArrayBuffer | DocumentInitParameters
): PDFDocumentLoadingTask;
/**
 * Minimal in-process message port exposing the DOM `Worker`/`MessagePort`
 * messaging interface (`postMessage` plus event listeners).
 * NOTE(review): presumably used as a same-thread fallback when no real web
 * worker is available — confirm against `PDFWorker`.
 */
export class LoopbackPort {
  postMessage(obj: any, transfer: any): void;
  addEventListener(name: any, listener: any): void;
  removeEventListener(name: any, listener: any): void;
  /** Drops all registered listeners and stops message delivery. */
  terminate(): void;
  #private;
}
/**
* @typedef {Object} OnProgressParameters
* @property {number} loaded - Currently loaded number of bytes.
* @property {number} total - Total number of bytes in the PDF file.
*/
/**
* The loading task controls the operations required to load a PDF document
* (such as network requests) and provides a way to listen for completion,
* after which individual pages can be rendered.
*/
/**
 * The loading task controls the operations required to load a PDF document
 * (such as network requests) and provides a way to listen for completion,
 * after which individual pages can be rendered.
 */
export class PDFDocumentLoadingTask {
  /**
   * Monotonic counter used to mint unique `docId`s (name-mangled private
   * static field).
   */
  static "__#16@#docId": number;
  /**
   * Settles with the overall outcome of loading; backs the `promise` getter
   * (internal).
   */
  _capability: import("../shared/util.js").PromiseCapability;
  /** Message transport to the worker-thread (internal). */
  _transport: any;
  /** The worker used for loading/parsing the PDF data (internal). */
  _worker: any;
  /**
   * Unique identifier for the document loading task.
   * @type {string}
   */
  docId: string;
  /**
   * Whether the loading task is destroyed or not.
   * @type {boolean}
   */
  destroyed: boolean;
  /**
   * Callback to request a password if a wrong or no password was provided.
   * The callback receives two parameters: a function that should be called
   * with the new password, and a reason (see {@link PasswordResponses}).
   * @type {function}
   */
  onPassword: Function;
  /**
   * Callback to be able to monitor the loading progress of the PDF file
   * (necessary to implement e.g. a loading bar).
   * The callback receives an {@link OnProgressParameters} argument.
   * @type {function}
   */
  onProgress: Function;
  /**
   * Promise for document loading task completion.
   * @type {Promise<PDFDocumentProxy>}
   */
  get promise(): Promise<PDFDocumentProxy>;
  /**
   * Abort all network requests and destroy the worker.
   * @returns {Promise<void>} A promise that is resolved when destruction is
   * completed.
   */
  destroy(): Promise<void>;
}
/**
 * Proxy to a `PDFDocument` in the worker thread.
 */
export class PDFDocumentProxy {
  /**
   * @param pdfInfo - Document information received from the worker-thread
   * (internal; exact shape defined by the worker transport — confirm).
   * @param transport - Main/worker message transport (internal).
   */
  constructor(pdfInfo: any, transport: any);
  /** Raw document info received from the worker-thread (internal). */
  _pdfInfo: any;
  /** Message transport to the worker-thread (internal). */
  _transport: any;
  /**
   * @type {AnnotationStorage} Storage for annotation data in forms.
   */
  get annotationStorage(): AnnotationStorage;
  /**
   * @type {Object} The filter factory instance.
   */
  get filterFactory(): Object;
  /**
   * @type {number} Total number of pages in the PDF file.
   */
  get numPages(): number;
  /**
   * @type {Array<string | null>} A (not guaranteed to be) unique ID to
   * identify the PDF document.
   * NOTE: The first element will always be defined for all PDF documents,
   * whereas the second element is only defined for *modified* PDF documents.
   */
  get fingerprints(): string[];
  /**
   * @type {boolean} True if only XFA form.
   */
  get isPureXfa(): boolean;
  /**
   * NOTE: This is (mostly) intended to support printing of XFA forms.
   *
   * @type {Object | null} An object representing a HTML tree structure
   * to render the XFA, or `null` when no XFA form exists.
   */
  get allXfaHtml(): Object | null;
  /**
   * @param {number} pageNumber - The page number to get. The first page is 1.
   * @returns {Promise<PDFPageProxy>} A promise that is resolved with
   * a {@link PDFPageProxy} object.
   */
  getPage(pageNumber: number): Promise<PDFPageProxy>;
  /**
   * @param {RefProxy} ref - The page reference.
   * @returns {Promise<number>} A promise that is resolved with the page index,
   * starting from zero, that is associated with the reference.
   */
  getPageIndex(ref: RefProxy): Promise<number>;
  /**
   * @returns {Promise<Object<string, Array<any>>>} A promise that is resolved
   * with a mapping from named destinations to references.
   *
   * This can be slow for large documents. Use `getDestination` instead.
   */
  getDestinations(): Promise<{
    [x: string]: Array<any>;
  }>;
  /**
   * @param {string} id - The named destination to get.
   * @returns {Promise<Array<any> | null>} A promise that is resolved with all
   * information of the given named destination, or `null` when the named
   * destination is not present in the PDF file.
   */
  getDestination(id: string): Promise<Array<any> | null>;
  /**
   * @returns {Promise<Array<string> | null>} A promise that is resolved with
   * an {Array} containing the page labels that correspond to the page
   * indexes, or `null` when no page labels are present in the PDF file.
   */
  getPageLabels(): Promise<Array<string> | null>;
  /**
   * @returns {Promise<string>} A promise that is resolved with a {string}
   * containing the page layout name.
   */
  getPageLayout(): Promise<string>;
  /**
   * @returns {Promise<string>} A promise that is resolved with a {string}
   * containing the page mode name.
   */
  getPageMode(): Promise<string>;
  /**
   * @returns {Promise<Object | null>} A promise that is resolved with an
   * {Object} containing the viewer preferences, or `null` when no viewer
   * preferences are present in the PDF file.
   */
  getViewerPreferences(): Promise<Object | null>;
  /**
   * @returns {Promise<any | null>} A promise that is resolved with an {Array}
   * containing the destination, or `null` when no open action is present
   * in the PDF.
   */
  getOpenAction(): Promise<any | null>;
  /**
   * @returns {Promise<any>} A promise that is resolved with a lookup table
   * for mapping named attachments to their content.
   */
  getAttachments(): Promise<any>;
  /**
   * @returns {Promise<Array<string> | null>} A promise that is resolved with
   * an {Array} of all the JavaScript strings in the name tree, or `null`
   * if no JavaScript exists.
   */
  getJavaScript(): Promise<Array<string> | null>;
  /**
   * @returns {Promise<Object | null>} A promise that is resolved with
   * an {Object} with the JavaScript actions:
   * - from the name tree (like getJavaScript);
   * - from A or AA entries in the catalog dictionary.
   * , or `null` if no JavaScript exists.
   */
  getJSActions(): Promise<Object | null>;
  /**
   * @typedef {Object} OutlineNode
   * @property {string} title
   * @property {boolean} bold
   * @property {boolean} italic
   * @property {Uint8ClampedArray} color - The color in RGB format to use for
   * display purposes.
   * @property {string | Array<any> | null} dest
   * @property {string | null} url
   * @property {string | undefined} unsafeUrl
   * @property {boolean | undefined} newWindow
   * @property {number | undefined} count
   * @property {Array<OutlineNode>} items
   */
  /**
   * @returns {Promise<Array<OutlineNode>>} A promise that is resolved with an
   * {Array} that is a tree outline (if it has one) of the PDF file.
   */
  getOutline(): Promise<
    {
      title: string;
      bold: boolean;
      italic: boolean;
      /**
       * - The color in RGB format to use for
       * display purposes.
       */
      color: Uint8ClampedArray;
      dest: string | Array<any> | null;
      url: string | null;
      unsafeUrl: string | undefined;
      newWindow: boolean | undefined;
      count: number | undefined;
      items: any[];
    }[]
  >;
  /**
   * @returns {Promise<OptionalContentConfig>} A promise that is resolved with
   * an {@link OptionalContentConfig} that contains all the optional content
   * groups (assuming that the document has any).
   */
  getOptionalContentConfig(): Promise<OptionalContentConfig>;
  /**
   * @returns {Promise<Array<number> | null>} A promise that is resolved with
   * an {Array} that contains the permission flags for the PDF document, or
   * `null` when no permissions are present in the PDF file.
   */
  getPermissions(): Promise<Array<number> | null>;
  /**
   * @returns {Promise<{ info: Object, metadata: Metadata }>} A promise that is
   * resolved with an {Object} that has `info` and `metadata` properties.
   * `info` is an {Object} filled with anything available in the information
   * dictionary and similarly `metadata` is a {Metadata} object with
   * information from the metadata section of the PDF.
   */
  getMetadata(): Promise<{
    info: Object;
    metadata: Metadata;
  }>;
  /**
   * @typedef {Object} MarkInfo
   * Properties correspond to Table 321 of the PDF 32000-1:2008 spec.
   * @property {boolean} Marked
   * @property {boolean} UserProperties
   * @property {boolean} Suspects
   */
  /**
   * @returns {Promise<MarkInfo | null>} A promise that is resolved with
   * a {MarkInfo} object that contains the MarkInfo flags for the PDF
   * document, or `null` when no MarkInfo values are present in the PDF file.
   */
  getMarkInfo(): Promise<{
    Marked: boolean;
    UserProperties: boolean;
    Suspects: boolean;
  } | null>;
  /**
   * @returns {Promise<Uint8Array>} A promise that is resolved with a
   * {Uint8Array} containing the raw data of the PDF document.
   */
  getData(): Promise<Uint8Array>;
  /**
   * @returns {Promise<Uint8Array>} A promise that is resolved with a
   * {Uint8Array} containing the full data of the saved document.
   */
  saveDocument(): Promise<Uint8Array>;
  /**
   * @returns {Promise<{ length: number }>} A promise that is resolved when the
   * document's data is loaded. It is resolved with an {Object} that contains
   * the `length` property that indicates size of the PDF data in bytes.
   */
  getDownloadInfo(): Promise<{
    length: number;
  }>;
  /**
   * Cleans up resources allocated by the document on both the main and worker
   * threads.
   *
   * NOTE: Do not, under any circumstances, call this method when rendering is
   * currently ongoing since that may lead to rendering errors.
   *
   * @param {boolean} [keepLoadedFonts] - Let fonts remain attached to the DOM.
   * NOTE: This will increase persistent memory usage, hence don't use this
   * option unless absolutely necessary. The default value is `false`.
   * @returns {Promise} A promise that is resolved when clean-up has finished.
   */
  cleanup(keepLoadedFonts?: boolean | undefined): Promise<any>;
  /**
   * Destroys the current document instance and terminates the worker.
   */
  destroy(): Promise<void>;
  /**
   * @type {DocumentInitParameters} A subset of the current
   * {DocumentInitParameters}, which are needed in the viewer.
   */
  get loadingParams(): DocumentInitParameters;
  /**
   * @type {PDFDocumentLoadingTask} The loadingTask for the current document.
   */
  get loadingTask(): PDFDocumentLoadingTask;
  /**
   * @returns {Promise<Object<string, Array<Object>> | null>} A promise that is
   * resolved with an {Object} containing /AcroForm field data for the JS
   * sandbox, or `null` when no field data is present in the PDF file.
   */
  getFieldObjects(): Promise<{
    [x: string]: Array<Object>;
  } | null>;
  /**
   * @returns {Promise<boolean>} A promise that is resolved with `true`
   * if some /AcroForm fields have JavaScript actions.
   */
  hasJSActions(): Promise<boolean>;
  /**
   * @returns {Promise<Array<string> | null>} A promise that is resolved with an
   * {Array<string>} containing IDs of annotations that have a calculation
   * action, or `null` when no such annotations are present in the PDF file.
   */
  getCalculationOrderIds(): Promise<Array<string> | null>;
}
/**
* Page getViewport parameters.
*
* @typedef {Object} GetViewportParameters
* @property {number} scale - The desired scale of the viewport.
* @property {number} [rotation] - The desired rotation, in degrees, of
* the viewport. If omitted it defaults to the page rotation.
* @property {number} [offsetX] - The horizontal, i.e. x-axis, offset.
* The default value is `0`.
* @property {number} [offsetY] - The vertical, i.e. y-axis, offset.
* The default value is `0`.
* @property {boolean} [dontFlip] - If true, the y-axis will not be
* flipped. The default value is `false`.
*/
/**
* Page getTextContent parameters.
*
* @typedef {Object} getTextContentParameters
* @property {boolean} [includeMarkedContent] - When true include marked
* content items in the items array of TextContent. The default is `false`.
*/
/**
* Page text content.
*
* @typedef {Object} TextContent
* @property {Array<TextItem | TextMarkedContent>} items - Array of
* {@link TextItem} and {@link TextMarkedContent} objects. TextMarkedContent
* items are included when includeMarkedContent is true.
* @property {Object<string, TextStyle>} styles - {@link TextStyle} objects,
* indexed by font name.
*/
/**
* Page text content part.
*
* @typedef {Object} TextItem
* @property {string} str - Text content.
* @property {string} dir - Text direction: 'ttb', 'ltr' or 'rtl'.
* @property {Array<any>} transform - Transformation matrix.
* @property {number} width - Width in device space.
* @property {number} height - Height in device space.
* @property {string} fontName - Font name used by PDF.js for converted font.
* @property {boolean} hasEOL - Indicating if the text content is followed by a
* line-break.
*/
/**
* Page text marked content part.
*
* @typedef {Object} TextMarkedContent
* @property {string} type - Either 'beginMarkedContent',
* 'beginMarkedContentProps', or 'endMarkedContent'.
* @property {string} id - The marked content identifier. Only used for type
* 'beginMarkedContentProps'.
*/
/**
* Text style.
*
* @typedef {Object} TextStyle
* @property {number} ascent - Font ascent.
* @property {number} descent - Font descent.
* @property {boolean} vertical - Whether or not the text is in vertical mode.
* @property {string} fontFamily - The possible font family.
*/
/**
* Page annotation parameters.
*
* @typedef {Object} GetAnnotationsParameters
* @property {string} [intent] - Determines the annotations that are fetched,
* can be 'display' (viewable annotations), 'print' (printable annotations),
* or 'any' (all annotations). The default value is 'display'.
*/
/**
* Page render parameters.
*
* @typedef {Object} RenderParameters
* @property {CanvasRenderingContext2D} canvasContext - A 2D context of a DOM
* Canvas object.
* @property {PageViewport} viewport - Rendering viewport obtained by calling
* the `PDFPageProxy.getViewport` method.
* @property {string} [intent] - Rendering intent, can be 'display', 'print',
* or 'any'. The default value is 'display'.
* @property {number} [annotationMode] Controls which annotations are rendered
* onto the canvas, for annotations with appearance-data; the values from
* {@link AnnotationMode} should be used. The following values are supported:
* - `AnnotationMode.DISABLE`, which disables all annotations.
* - `AnnotationMode.ENABLE`, which includes all possible annotations (thus
* it also depends on the `intent`-option, see above).
* - `AnnotationMode.ENABLE_FORMS`, which excludes annotations that contain
* interactive form elements (those will be rendered in the display layer).
* - `AnnotationMode.ENABLE_STORAGE`, which includes all possible annotations
* (as above) but where interactive form elements are updated with data
* from the {@link AnnotationStorage}-instance; useful e.g. for printing.
* The default value is `AnnotationMode.ENABLE`.
* @property {Array<any>} [transform] - Additional transform, applied just
* before viewport transform.
* @property {CanvasGradient | CanvasPattern | string} [background] - Background
* to use for the canvas.
* Any valid `canvas.fillStyle` can be used: a `DOMString` parsed as CSS
* <color> value, a `CanvasGradient` object (a linear or radial gradient) or
* a `CanvasPattern` object (a repetitive image). The default value is
* 'rgb(255,255,255)'.
*
* NOTE: This option may be partially, or completely, ignored when the
* `pageColors`-option is used.
* @property {Object} [pageColors] - Overwrites background and foreground colors
* with user defined ones in order to improve readability in high contrast
* mode.
* @property {Promise<OptionalContentConfig>} [optionalContentConfigPromise] -
* A promise that should resolve with an {@link OptionalContentConfig}
* created from `PDFDocumentProxy.getOptionalContentConfig`. If `null`,
* the configuration will be fetched automatically with the default visibility
* states set.
* @property {Map<string, HTMLCanvasElement>} [annotationCanvasMap] - Map some
* annotation ids with canvases used to render them.
* @property {PrintAnnotationStorage} [printAnnotationStorage]
*/
/**
* Page getOperatorList parameters.
*
* @typedef {Object} GetOperatorListParameters
* @property {string} [intent] - Rendering intent, can be 'display', 'print',
* or 'any'. The default value is 'display'.
* @property {number} [annotationMode] Controls which annotations are included
* in the operatorList, for annotations with appearance-data; the values from
* {@link AnnotationMode} should be used. The following values are supported:
* - `AnnotationMode.DISABLE`, which disables all annotations.
* - `AnnotationMode.ENABLE`, which includes all possible annotations (thus
* it also depends on the `intent`-option, see above).
* - `AnnotationMode.ENABLE_FORMS`, which excludes annotations that contain
* interactive form elements (those will be rendered in the display layer).
* - `AnnotationMode.ENABLE_STORAGE`, which includes all possible annotations
* (as above) but where interactive form elements are updated with data
* from the {@link AnnotationStorage}-instance; useful e.g. for printing.
* The default value is `AnnotationMode.ENABLE`.
* @property {PrintAnnotationStorage} [printAnnotationStorage]
*/
/**
* Structure tree node. The root node will have a role "Root".
*
* @typedef {Object} StructTreeNode
* @property {Array<StructTreeNode | StructTreeContent>} children - Array of
* {@link StructTreeNode} and {@link StructTreeContent} objects.
* @property {string} role - element's role, already mapped if a role map exists
* in the PDF.
*/
/**
* Structure tree content.
*
* @typedef {Object} StructTreeContent
* @property {string} type - either "content" for page and stream structure
* elements or "object" for object references.
* @property {string} id - unique id that will map to the text layer.
*/
/**
* PDF page operator list.
*
* @typedef {Object} PDFOperatorList
* @property {Array<number>} fnArray - Array containing the operator functions.
* @property {Array<any>} argsArray - Array containing the arguments of the
* functions.
*/
/**
* Proxy to a `PDFPage` in the worker thread.
*/
export class PDFPageProxy {
    /**
     * @param pageIndex - Zero-based index of this page in the document
     *   (`pageNumber` is this value plus one).
     * @param pageInfo - Raw page data received from the worker thread.
     * @param transport - The transport used to exchange messages with the
     *   worker thread.
     * @param pdfBug - Enables debugging/stat collection; see `stats`.
     */
    constructor(
        pageIndex: any,
        pageInfo: any,
        transport: any,
        pdfBug?: boolean
    );
    _pageIndex: any;
    _pageInfo: any;
    _transport: any;
    /** Render-timing stats; `null` when stat collection is not enabled. */
    _stats: StatTimer | null;
    _pdfBug: boolean;
    /** @type {PDFObjects} Resolved objects shared across pages (e.g. fonts). */
    commonObjs: PDFObjects;
    /** Resolved objects specific to this page. */
    objs: PDFObjects;
    _maybeCleanupAfterRender: boolean;
    // Internal per-render state; presumably keyed by a rendering-intent
    // cache key — confirm in the pdf.js sources.
    _intentStates: Map<any, any>;
    /** True once the page proxy has been destroyed. */
    destroyed: boolean;
    /**
     * @type {number} Page number of the page. First page is 1.
     */
    get pageNumber(): number;
    /**
     * @type {number} The number of degrees the page is rotated clockwise.
     */
    get rotate(): number;
    /**
     * @type {RefProxy | null} The reference that points to this page.
     */
    get ref(): RefProxy | null;
    /**
     * @type {number} The default size of units in 1/72nds of an inch.
     */
    get userUnit(): number;
    /**
     * @type {Array<number>} An array of the visible portion of the PDF page in
     * user space units [x1, y1, x2, y2].
     */
    get view(): number[];
    /**
     * @param {GetViewportParameters} params - Viewport parameters.
     * @returns {PageViewport} Contains 'width' and 'height' properties
     * along with transforms required for rendering.
     */
    getViewport({
        scale,
        rotation,
        offsetX,
        offsetY,
        dontFlip,
    }?: GetViewportParameters): PageViewport;
    /**
     * @param {GetAnnotationsParameters} params - Annotation parameters.
     * @returns {Promise<Array<any>>} A promise that is resolved with an
     * {Array} of the annotation objects.
     */
    getAnnotations({ intent }?: GetAnnotationsParameters): Promise<Array<any>>;
    /**
     * @returns {Promise<Object>} A promise that is resolved with an
     * {Object} with JS actions.
     */
    getJSActions(): Promise<Object>;
    /**
     * @type {boolean} True if only XFA form.
     */
    get isPureXfa(): boolean;
    /**
     * @returns {Promise<Object | null>} A promise that is resolved with
     * an {Object} with a fake DOM object (a tree structure where elements
     * are {Object} with a name, attributes (class, style, ...), value and
     * children, very similar to a HTML DOM tree), or `null` if no XFA exists.
     */
    getXfa(): Promise<Object | null>;
    /**
     * Begins the process of rendering a page to the desired context.
     *
     * @param {RenderParameters} params - Page render parameters.
     * @returns {RenderTask} An object that contains a promise that is
     * resolved when the page finishes rendering.
     */
    render(
        {
            canvasContext,
            viewport,
            intent,
            annotationMode,
            transform,
            background,
            optionalContentConfigPromise,
            annotationCanvasMap,
            pageColors,
            printAnnotationStorage,
        }: RenderParameters,
        ...args: any[]
    ): RenderTask;
    /**
     * @param {GetOperatorListParameters} params - Page getOperatorList
     * parameters.
     * @returns {Promise<PDFOperatorList>} A promise resolved with an
     * {@link PDFOperatorList} object that represents the page's operator list.
     */
    getOperatorList({
        intent,
        annotationMode,
        printAnnotationStorage,
    }?: GetOperatorListParameters): Promise<PDFOperatorList>;
    /**
     * NOTE: All occurrences of whitespace will be replaced by
     * standard spaces (0x20).
     *
     * @param {getTextContentParameters} params - getTextContent parameters.
     * @returns {ReadableStream} Stream for reading text content chunks.
     */
    streamTextContent({
        includeMarkedContent,
    }?: getTextContentParameters): ReadableStream;
    /**
     * NOTE: All occurrences of whitespace will be replaced by
     * standard spaces (0x20).
     *
     * @param {getTextContentParameters} params - getTextContent parameters.
     * @returns {Promise<TextContent>} A promise that is resolved with a
     * {@link TextContent} object that represents the page's text content.
     */
    getTextContent(params?: getTextContentParameters): Promise<TextContent>;
    /**
     * @returns {Promise<StructTreeNode>} A promise that is resolved with a
     * {@link StructTreeNode} object that represents the page's structure tree,
     * or `null` when no structure tree is present for the current page.
     * NOTE(review): the prose allows `null` but the declared return type does
     * not include it — callers should still guard against `null`.
     */
    getStructTree(): Promise<StructTreeNode>;
    /**
     * Destroys the page object.
     * @private
     */
    private _destroy;
    /**
     * Cleans up resources allocated by the page.
     *
     * @param {boolean} [resetStats] - Reset page stats, if enabled.
     * The default value is `false`.
     * @returns {boolean} Indicates if clean-up was successfully run.
     */
    cleanup(resetStats?: boolean | undefined): boolean;
    /**
     * @private
     */
    private _startRenderPage;
    /**
     * @private
     */
    private _renderPageChunk;
    /**
     * @private
     */
    private _pumpOperatorList;
    /**
     * @private
     */
    private _abortOperatorList;
    /**
     * @type {StatTimer | null} Returns page stats, if enabled; returns `null`
     * otherwise.
     */
    get stats(): StatTimer | null;
    // Emitted by tsc to indicate the class contains ECMAScript #private fields.
    #private;
}
/**
* PDF.js web worker abstraction that controls the instantiation of PDF
* documents. Message handlers are used to pass information from the main
* thread to the worker thread and vice versa. If the creation of a web
* worker is not possible, a "fake" worker will be used instead.
*
* @param {PDFWorkerParameters} params - The worker initialization parameters.
*/
export class PDFWorker {
    // tsc-mangled declaration of the static `#workerPorts` private field;
    // presumably maps worker-port objects to PDFWorker state for reuse via
    // `fromPort` — confirm in the pdf.js sources.
    static "__#19@#workerPorts": WeakMap<object, any>;
    /**
     * @param {PDFWorkerParameters} params - The worker initialization parameters.
     */
    static fromPort(params: PDFWorkerParameters): any;
    /**
     * The current `workerSrc`, when it exists.
     * @type {string}
     */
    static get workerSrc(): string;
    static get _mainThreadWorkerMessageHandler(): any;
    static get _setupFakeWorkerGlobal(): any;
    /**
     * @param name - Optional worker name (presumably for debugging only).
     * @param port - Optional existing worker port to reuse.
     * @param verbosity - Controls the amount of logging.
     */
    constructor({
        name,
        port,
        verbosity,
    }?: {
        name?: null | undefined;
        port?: null | undefined;
        verbosity?: number | undefined;
    });
    name: any;
    /** True once `destroy()` has been called. */
    destroyed: boolean;
    verbosity: number;
    // Capability presumably resolved when worker setup completes; appears to
    // back the `promise` getter — confirm in the pdf.js sources.
    _readyCapability: import("../shared/util.js").PromiseCapability;
    _port: any;
    _webWorker: Worker | null;
    _messageHandler: MessageHandler | null;
    /**
     * Promise for worker initialization completion.
     * @type {Promise<void>}
     */
    get promise(): Promise<void>;
    /**
     * The current `workerPort`, when it exists.
     * @type {Worker}
     */
    get port(): Worker;
    /**
     * The current MessageHandler-instance.
     * @type {MessageHandler}
     */
    get messageHandler(): MessageHandler;
    _initializeFromPort(port: any): void;
    _initialize(): void;
    // Used when a real web worker cannot be created; a "fake" worker runs on
    // the main thread instead (see the class-level comment).
    _setupFakeWorker(): void;
    /**
     * Destroys the worker instance.
     */
    destroy(): void;
}
/** Internal bookkeeping for {@link PDFWorker}; not part of the public API. */
export namespace PDFWorkerUtil {
    /** True when real web workers cannot (or must not) be used. */
    const isWorkerDisabled: boolean;
    // Declared `null` here; presumably assigned a worker-script URL at
    // runtime when no explicit `workerSrc` is set — confirm.
    const fallbackWorkerSrc: null;
    /** Counter used to generate unique ids for "fake" (main-thread) workers. */
    const fakeWorkerId: number;
}
/**
* Allows controlling of the rendering tasks.
*/
export class RenderTask {
    /** @param internalRenderTask - The underlying (internal) render task
     *   this public task wraps. */
    constructor(internalRenderTask: any);
    /**
     * Callback for incremental rendering -- a function that will be called
     * each time the rendering is paused. To continue rendering call the
     * function that is the first argument to the callback.
     * @type {function}
     */
    onContinue: Function;
    /**
     * Promise for rendering task completion.
     * @type {Promise<void>}
     */
    get promise(): Promise<void>;
    /**
     * Cancels the rendering task. If the task is currently rendering it will
     * not be cancelled until graphics pauses with a timeout. The promise that
     * this object extends will be rejected when cancelled.
     *
     * @param {number} [extraDelay]
     */
    cancel(extraDelay?: number | undefined): void;
    /**
     * Whether form fields are rendered separately from the main operatorList.
     * @type {boolean}
     */
    get separateAnnots(): boolean;
    // Emitted by tsc to indicate the class contains ECMAScript #private fields.
    #private;
}
/**
 * The version string of the PDF.js build these declarations describe.
 * @type {string}
 */
export const version: string;
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/types/expression-parser.d.ts | declare interface ParseOptions {
  /** Source name to report in parse-error locations. */
  filename?: string;
  /** Grammar rule to start from; only "Start" is declared here. */
  startRule?: "Start";
  // Parser tracer hook; untyped in the generated declarations.
  tracer?: any;
  // Generated parsers accept additional, implementation-specific options.
  [key: string]: any;
}
/**
 * Signature of the generated parser's `parse` function.
 *
 * The original conditional return type resolved to `Start` in every branch
 * (`StartRule extends "Start" ? Start : Start`, with `Start` as the
 * fallback), so it is simplified to a plain `Start` return. The generic
 * parameter and call signature are unchanged, so all existing callers and
 * assignments remain valid.
 */
declare type ParseFunction = <Options extends ParseOptions>(
  input: string,
  options?: Options
) => Start;
// These types were autogenerated by ts-pegjs
/** Entry node produced by the parser; alias for {@link Program}. */
declare type Start = Program;
declare type Identifier = IdentifierName;
/** An identifier node, e.g. a variable or property name. */
declare type IdentifierName = { type: "Identifier"; name: string };
/** Any literal value node. */
declare type Literal =
  | NullLiteral
  | BooleanLiteral
  | NumericLiteral
  | StringLiteral;
declare type NullLiteral = { type: "NullLiteral"; value: null };
declare type BooleanLiteral =
  | { type: "BooleanLiteral"; value: true }
  | { type: "BooleanLiteral"; value: false };
declare type NumericLiteral = DecimalLiteral;
declare type DecimalLiteral = { type: "NumericLiteral"; value: number };
declare type StringLiteral = { type: "StringLiteral"; value: string };
/** Expressions allowed in value positions (array elements, arguments, …). */
declare type PrimaryExpression =
  | Identifier
  | Literal
  | ArrayExpression
  | ObjectExpression
  | Expression;
declare type ArrayExpression = {
  type: "ArrayExpression";
  elements: ElementList;
};
declare type ElementList = PrimaryExpression[];
declare type ObjectExpression =
  | { type: "ObjectExpression"; properties: [] }
  | { type: "ObjectExpression"; properties: PropertyNameAndValueList };
// NOTE(review): the generated element type is PrimaryExpression[], but object
// properties are presumably PropertyAssignment nodes — confirm against the
// grammar before relying on this.
declare type PropertyNameAndValueList = PrimaryExpression[];
declare type PropertyAssignment = {
  type: "PropertyAssignment";
  key: PropertyName;
  value: Expression;
  kind: "init";
};
declare type PropertyName = IdentifierName | StringLiteral | NumericLiteral;
/** Member access: bracket form (`computed: true`, string-literal property)
 *  or dot form (`computed: false`, identifier property). */
declare type MemberExpression =
  | {
      type: "MemberExpression";
      property: StringLiteral;
      computed: true;
      object: MemberExpression | Identifier | StringLiteral;
    }
  | {
      type: "MemberExpression";
      property: Identifier;
      computed: false;
      object: MemberExpression | Identifier | StringLiteral;
    };
/** A call whose callee is an identifier or member expression. */
declare type CallExpression = {
  type: "CallExpression";
  arguments: Arguments;
  callee: MemberExpression | Identifier;
};
declare type Arguments = PrimaryExpression[];
declare type Expression = CallExpression | MemberExpression;
declare type ExpressionStatement = {
  type: "ExpressionStatement";
  expression: Expression;
};
/** Root node: a program holding a single expression statement. */
declare type Program = { type: "Program"; body: ExpressionStatement };
/** Union of every concrete node kind in this AST. */
declare type ExpressionNode =
  | Program
  | ExpressionStatement
  | ArrayExpression
  | BooleanLiteral
  | CallExpression
  | Identifier
  | MemberExpression
  | NumericLiteral
  | ObjectExpression
  | PropertyAssignment
  | NullLiteral
  | StringLiteral;
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/indexes/index.ts | // Entry point for `langchain/indexes`.
// The indexing implementation lives in @langchain/core; this module only
// forwards it so existing `langchain/indexes` imports keep working.
// A single `export ... from` re-export replaces the redundant
// import-then-export pair while exposing exactly the same named exports.
export {
  type CleanupMode,
  type IndexOptions,
  index,
  _batch,
  _deduplicateInOrder,
  _getSourceIdAssigner,
  _isBaseDocumentLoader,
  _HashedDocument,
} from "@langchain/core/indexing";
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.