index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/langchain/src/experimental/openai_files | lc_public_repos/langchainjs/langchain/src/experimental/openai_files/tests/test.jsonl | {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/openai_files | lc_public_repos/langchainjs/langchain/src/experimental/openai_files/tests/openai_file.int.test.ts | import * as fs from "fs";
import * as path from "path";
import { fileURLToPath } from "url";
import { dirname } from "path";
import { OpenAIFiles } from "../index.js";
/**
* Otherwise we got the error __dirname doesn't exist
*/
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Integration test: exercises the full OpenAIFiles lifecycle against the
// live OpenAI API — create, retrieve content, retrieve metadata, list, and
// delete. NOTE(review): presumably requires OPENAI_API_KEY in the
// environment — confirm how OpenAIFiles resolves credentials.
test("Use file with Open AI", async () => {
  const openAIFiles = new OpenAIFiles();
  // Upload the local JSONL fixture as a fine-tuning file.
  const file = await openAIFiles.createFile({
    file: fs.createReadStream(path.resolve(__dirname, `./test.jsonl`)),
    purpose: "fine-tune",
  });
  expect(file.id).toBeDefined();
  expect(file.object).toBe("file");
  /**
   * Output
   {
     "id": "file-BK7bzQj3FfZFXr7DbL6xJwfo",
     "object": "file",
     "bytes": 120000,
     "created_at": 1677610602,
     "filename": "salesOverview.pdf",
     "purpose": "assistants",
   }
   */
  // Download the raw contents of the file we just uploaded.
  const fileContent = await openAIFiles.retrieveFileContent({
    fileId: file.id,
  });
  // console.log(fileContent);
  expect(fileContent).toBeDefined();
  /**
   * Output
   {
     "id": "file-BK7bzQj3FfZFXr7DbL6xJwfo",
     "object": "file",
     "bytes": 120000,
     "created_at": 1677610602,
     "filename": "salesOverview.pdf",
     "purpose": "assistants",
   }
   */
  // Fetch the file's metadata record by id.
  const retrievedFile = await openAIFiles.retrieveFile({
    fileId: file.id,
  });
  expect(retrievedFile.id).toBeDefined();
  expect(retrievedFile.object).toBe("file");
  /**
   * Output
   {
     "id": "file-BK7bzQj3FfZFXr7DbL6xJwfo",
     "object": "file",
     "bytes": 120000,
     "created_at": 1677610602,
     "filename": "salesOverview.pdf",
     "purpose": "assistants",
   }
   */
  // The uploaded file must appear in the account-wide file listing.
  const list = await openAIFiles.listFiles();
  expect(list).toBeDefined();
  expect(!!list.data.find((f) => f.id === file.id)).toBeTruthy();
  /**
   * Output
   {
     "id": "file-BK7bzQj3FfZFXr7DbL6xJwfo",
     "object": "file",
     "bytes": 120000,
     "created_at": 1677610602,
     "filename": "salesOverview.pdf",
     "purpose": "assistants",
   }
   */
  // Clean up: delete the file so repeated test runs don't accumulate files.
  const result = await openAIFiles.deleteFile({ fileId: file.id });
  expect(result.id).toBe(file.id);
  expect(result.deleted).toBeTruthy();
  /**
   * Output:
   {
     "id": "file-abc123",
     "object": "file",
     "deleted": true
   }
   */
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/openai_assistant/schema.ts | import type { AgentFinish, AgentAction } from "@langchain/core/agents";
/**
 * AgentFinish extended with the OpenAI Assistants run/thread ids so callers
 * can correlate the final answer with the run that produced it.
 */
export type OpenAIAssistantFinish = AgentFinish & {
  runId: string;
  threadId: string;
};

/**
 * AgentAction extended with the tool call id (required when submitting tool
 * outputs back to the run) plus the originating run/thread ids.
 */
export type OpenAIAssistantAction = AgentAction & {
  toolCallId: string;
  runId: string;
  threadId: string;
};
// Assistant tool definitions as accepted by the OpenAI SDK; typed loosely
// because the SDK's beta tool types are not re-exported here.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type OpenAIToolType = Array<any>;
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/openai_assistant/index.ts | import { type ClientOptions, OpenAIClient } from "@langchain/openai";
import { StructuredTool } from "@langchain/core/tools";
import { Runnable, RunnableConfig } from "@langchain/core/runnables";
import { formatToOpenAIAssistantTool } from "@langchain/openai";
import { sleep } from "../../util/time.js";
import type {
OpenAIAssistantFinish,
OpenAIAssistantAction,
OpenAIToolType,
} from "./schema.js";
// Placeholders for the OpenAI SDK's beta thread-message / tool-call shapes,
// which are not re-exported here.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type ThreadMessage = any;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type RequiredActionFunctionToolCall = any;

/**
 * Output type selected by the `asAgent` flag: agent finish/actions when
 * driving an AgentExecutor, raw thread messages or tool calls otherwise.
 */
type ExtractRunOutput<AsAgent extends boolean | undefined> =
  AsAgent extends true
    ? OpenAIAssistantFinish | OpenAIAssistantAction[]
    : ThreadMessage[] | RequiredActionFunctionToolCall[];

/**
 * Constructor fields for {@link OpenAIAssistantRunnable}.
 */
export type OpenAIAssistantRunnableInput<
  AsAgent extends boolean | undefined = undefined
> = {
  // Pre-configured client; when omitted a new one is built from `clientOptions`.
  client?: OpenAIClient;
  clientOptions?: ClientOptions;
  // Id of an existing OpenAI assistant to run.
  assistantId: string;
  // Delay between run-status polls (defaults to 1000 ms).
  pollIntervalMs?: number;
  // When true, outputs are converted to agent actions/finish values.
  asAgent?: AsAgent;
};
/**
 * Runnable wrapper around the OpenAI Assistants (beta) API. It starts runs
 * on threads, polls until the run leaves the "queued"/"in_progress" states,
 * and returns either raw thread messages / required tool calls, or — when
 * `asAgent` is true — LangChain agent actions/finish values so it can drive
 * an AgentExecutor loop.
 */
export class OpenAIAssistantRunnable<
  AsAgent extends boolean | undefined,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunInput extends Record<string, any> = Record<string, any>
> extends Runnable<RunInput, ExtractRunOutput<AsAgent>> {
  lc_namespace = ["langchain", "experimental", "openai_assistant"];

  private client: OpenAIClient;

  assistantId: string;

  // Interval between run-status polls in _waitForRun.
  pollIntervalMs = 1000;

  asAgent?: AsAgent;

  constructor(fields: OpenAIAssistantRunnableInput<AsAgent>) {
    super(fields);
    this.client = fields.client ?? new OpenAIClient(fields?.clientOptions);
    this.assistantId = fields.assistantId;
    this.asAgent = fields.asAgent ?? this.asAgent;
  }

  /**
   * Create a new assistant on the OpenAI side and wrap it in a runnable.
   * LangChain StructuredTools are converted to the OpenAI assistant tool
   * format; already-formatted tool definitions are passed through unchanged.
   */
  static async createAssistant<AsAgent extends boolean>({
    model,
    name,
    instructions,
    tools,
    client,
    clientOptions,
    asAgent,
    pollIntervalMs,
    fileIds,
  }: Omit<OpenAIAssistantRunnableInput<AsAgent>, "assistantId"> & {
    model: string;
    name?: string;
    instructions?: string;
    tools?: OpenAIToolType | Array<StructuredTool>;
    fileIds?: string[];
  }) {
    const formattedTools =
      tools?.map((tool) => {
        // eslint-disable-next-line no-instanceof/no-instanceof
        if (tool instanceof StructuredTool) {
          return formatToOpenAIAssistantTool(tool);
        }
        return tool;
      }) ?? [];
    const oaiClient = client ?? new OpenAIClient(clientOptions);
    const assistant = await oaiClient.beta.assistants.create({
      name,
      instructions,
      tools: formattedTools,
      model,
      file_ids: fileIds,
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } as any);
    return new this({
      client: oaiClient,
      assistantId: assistant.id,
      asAgent,
      pollIntervalMs,
    });
  }

  /**
   * Run the assistant. Four input shapes are supported:
   *  1. `asAgent` with non-empty `input.steps`: submit the steps' tool
   *     outputs to the pending run.
   *  2. No `threadId`: create a thread and a run in a single call.
   *  3. `threadId` but no `runId`: append a user message, then start a run.
   *  4. `threadId` and `runId`: submit tool outputs outside AgentExecutor.
   */
  async invoke(
    input: RunInput,
    _options?: RunnableConfig
  ): Promise<ExtractRunOutput<AsAgent>> {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let run: any;
    if (this.asAgent && input.steps && input.steps.length > 0) {
      const parsedStepsInput = await this._parseStepsInput(input);
      run = await this.client.beta.threads.runs.submitToolOutputs(
        parsedStepsInput.threadId,
        parsedStepsInput.runId,
        {
          tool_outputs: parsedStepsInput.toolOutputs,
        }
      );
    } else if (!("threadId" in input)) {
      const thread = {
        messages: [
          {
            role: "user",
            content: input.content,
            file_ids: input.fileIds,
            metadata: input.messagesMetadata,
          },
        ],
        metadata: input.threadMetadata,
      };
      run = await this._createThreadAndRun({
        ...input,
        thread,
      });
    } else if (!("runId" in input)) {
      await this.client.beta.threads.messages.create(input.threadId, {
        content: input.content,
        role: "user",
        // Fixed: read the camelCase `fileIds` key, matching the
        // thread-creation branch above. Previously this read
        // `input.file_ids`, a key no other code path sets, so attachments
        // were silently dropped when adding a message to an existing thread.
        file_ids: input.fileIds,
        metadata: input.messagesMetadata,
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
      } as any);
      run = await this._createRun(input);
    } else {
      // Submitting tool outputs to an existing run, outside the AgentExecutor
      // framework.
      run = await this.client.beta.threads.runs.submitToolOutputs(
        input.threadId,
        input.runId,
        {
          tool_outputs: input.toolOutputs,
        }
      );
    }
    return this._getResponse(run.id, run.thread_id);
  }

  /**
   * Delete an assistant.
   *
   * @link {https://platform.openai.com/docs/api-reference/assistants/deleteAssistant}
   * @returns {Promise<AssistantDeleted>}
   */
  public async deleteAssistant() {
    return await this.client.beta.assistants.del(this.assistantId);
  }

  /**
   * Retrieves an assistant.
   *
   * @link {https://platform.openai.com/docs/api-reference/assistants/getAssistant}
   * @returns {Promise<OpenAIClient.Beta.Assistants.Assistant>}
   */
  public async getAssistant() {
    return await this.client.beta.assistants.retrieve(this.assistantId);
  }

  /**
   * Modifies an assistant.
   *
   * @link {https://platform.openai.com/docs/api-reference/assistants/modifyAssistant}
   * @returns {Promise<OpenAIClient.Beta.Assistants.Assistant>}
   */
  public async modifyAssistant<AsAgent extends boolean>({
    model,
    name,
    instructions,
    fileIds,
  }: Omit<OpenAIAssistantRunnableInput<AsAgent>, "assistantId" | "tools"> & {
    model?: string;
    name?: string;
    instructions?: string;
    fileIds?: string[];
  }) {
    return await this.client.beta.assistants.update(this.assistantId, {
      name,
      instructions,
      model,
      file_ids: fileIds,
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } as any);
  }

  /**
   * Map agent steps back onto OpenAI tool outputs: look up the pending run's
   * required tool calls and pair each with the observation from the agent
   * step that has a matching tool call id. Returns the input unchanged when
   * the run requires no tool outputs.
   */
  private async _parseStepsInput(input: RunInput): Promise<RunInput> {
    const {
      action: { runId, threadId },
    } = input.steps[input.steps.length - 1];
    const run = await this._waitForRun(runId, threadId);
    const toolCalls = run.required_action?.submit_tool_outputs.tool_calls;
    if (!toolCalls) {
      return input;
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const toolOutputs = toolCalls.flatMap((toolCall: any) => {
      const matchedAction = (
        input.steps as {
          action: OpenAIAssistantAction;
          observation: string;
        }[]
      ).find((step) => step.action.toolCallId === toolCall.id);
      return matchedAction
        ? [
            {
              output: matchedAction.observation,
              tool_call_id: matchedAction.action.toolCallId,
            },
          ]
        : [];
    });
    return { toolOutputs, runId, threadId } as unknown as RunInput;
  }

  /** Start a run on an existing thread for this assistant. */
  private async _createRun({
    instructions,
    model,
    tools,
    metadata,
    threadId,
  }: RunInput) {
    const run = this.client.beta.threads.runs.create(threadId, {
      assistant_id: this.assistantId,
      instructions,
      model,
      tools,
      metadata,
    });
    return run;
  }

  /** Create a thread and start a run on it in a single API call. */
  private async _createThreadAndRun(input: RunInput) {
    // Forward only the run-level options the API accepts.
    const params: Record<string, unknown> = [
      "instructions",
      "model",
      "tools",
      "run_metadata",
    ]
      .filter((key) => key in input)
      .reduce((obj, key) => {
        const newObj = obj;
        newObj[key] = input[key];
        return newObj;
      }, {} as Record<string, unknown>);
    const run = this.client.beta.threads.createAndRun({
      ...params,
      thread: input.thread,
      assistant_id: this.assistantId,
    });
    return run;
  }

  /** Poll the run until it is no longer queued or in progress. */
  private async _waitForRun(runId: string, threadId: string) {
    let inProgress = true;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let run = {} as any;
    while (inProgress) {
      run = await this.client.beta.threads.runs.retrieve(threadId, runId);
      inProgress = ["in_progress", "queued"].includes(run.status);
      if (inProgress) {
        await sleep(this.pollIntervalMs);
      }
    }
    return run;
  }

  private async _getResponse(
    runId: string,
    threadId: string
  ): Promise<ExtractRunOutput<AsAgent>>;

  /**
   * Wait for the run to settle and convert its outcome. "completed" yields
   * the run's new messages (or an OpenAIAssistantFinish in agent mode);
   * "requires_action" yields the pending tool calls (or agent actions).
   * Any other terminal status — and a completed run whose answer contains
   * non-text content in agent mode — throws with the full run payload.
   */
  private async _getResponse(
    runId: string,
    threadId: string
  ): Promise<
    | OpenAIAssistantFinish
    | OpenAIAssistantAction[]
    | ThreadMessage[]
    | RequiredActionFunctionToolCall[]
  > {
    const run = await this._waitForRun(runId, threadId);
    if (run.status === "completed") {
      const messages = await this.client.beta.threads.messages.list(threadId, {
        order: "desc",
      });
      const newMessages = messages.data.filter((msg) => msg.run_id === runId);
      if (!this.asAgent) {
        return newMessages;
      }
      const answer = newMessages.flatMap((msg) => msg.content);
      if (answer.every((item) => item.type === "text")) {
        const answerString = answer
          .map((item) => item.type === "text" && item.text.value)
          .join("\n");
        return {
          returnValues: {
            output: answerString,
            runId,
            threadId,
          },
          log: "",
          runId,
          threadId,
        };
      }
    } else if (run.status === "requires_action") {
      if (!this.asAgent) {
        return run.required_action?.submit_tool_outputs.tool_calls ?? [];
      }
      const actions: OpenAIAssistantAction[] = [];
      run.required_action?.submit_tool_outputs.tool_calls.forEach(
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (item: any) => {
          const functionCall = item.function;
          const args = JSON.parse(functionCall.arguments);
          actions.push({
            tool: functionCall.name,
            toolInput: args,
            toolCallId: item.id,
            log: "",
            runId,
            threadId,
          });
        }
      );
      return actions;
    }
    const runInfo = JSON.stringify(run, null, 2);
    throw new Error(
      `Unexpected run status ${run.status}.\nFull run info:\n\n${runInfo}`
    );
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/openai_assistant | lc_public_repos/langchainjs/langchain/src/experimental/openai_assistant/tests/openai_assistant.int.test.ts | /* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { z } from "zod";
import { StructuredTool } from "@langchain/core/tools";
import { AgentExecutor } from "../../../agents/executor.js";
import { OpenAIAssistantRunnable } from "../index.js";
/**
 * Mock weather lookup used by the tools below. Returns canned readings for
 * Tokyo and San Francisco and a generic default for everywhere else, encoded
 * as a JSON string. The unit argument is intentionally ignored by this stub.
 */
function getCurrentWeather(location: string, _unit = "fahrenheit") {
  const normalized = location.toLowerCase();
  let temperature = "22";
  let unit = "celsius";
  if (normalized.includes("tokyo")) {
    temperature = "10";
  } else if (normalized.includes("san francisco")) {
    temperature = "72";
    unit = "fahrenheit";
  }
  return JSON.stringify({ location, temperature, unit });
}
/**
 * Mock readability check: any temperature string longer than one character
 * counts as "warm", otherwise "cold". Returns a JSON-encoded summary.
 */
function convertWeatherToHumanReadable(location: string, temperature: string) {
  const readable = temperature.length > 1 ? "warm" : "cold";
  return JSON.stringify({ location, temperature, readable });
}
/** Structured tool exposing the mock getCurrentWeather lookup. */
class WeatherTool extends StructuredTool {
  schema = z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
    unit: z.enum(["celsius", "fahrenheit"]).optional(),
  });

  name = "get_current_weather";

  description = "Get the current weather in a given location";

  // The previous explicit `constructor() { super(...arguments); }` declared
  // zero parameters yet forwarded runtime `arguments` — the implicit derived
  // constructor does the same forwarding with correct typing, so it was
  // removed.

  /** Delegate to the mock lookup and return its JSON string result. */
  async _call(input: { location: string; unit: string }) {
    const { location, unit } = input;
    const result = getCurrentWeather(location, unit);
    return result;
  }
}
/** Structured tool exposing the mock warm/cold readability check. */
class HumanReadableChecker extends StructuredTool {
  schema = z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
    temperature: z.string().describe("The temperature in degrees"),
  });

  name = "get_human_readable_weather";

  description =
    "Check whether or not the weather in a given location is warm or cold";

  // The previous explicit `constructor() { super(...arguments); }` declared
  // zero parameters yet forwarded runtime `arguments` — the implicit derived
  // constructor does the same forwarding with correct typing, so it was
  // removed.

  /** Delegate to the mock converter and return its JSON string result. */
  async _call(input: { location: string; temperature: string }) {
    const { location, temperature } = input;
    const result = convertWeatherToHumanReadable(location, temperature);
    return result;
  }
}
// Live integration tests against the OpenAI Assistants API.
// NOTE(review): presumably they require OPENAI_API_KEY (and one of them
// TEST_OPENAI_ASSISTANT_ID) in the environment — confirm credential setup.

// Skipped by default; creates a brand-new assistant via the API on each run.
test.skip("New OpenAIAssistantRunnable can be passed as an agent", async () => {
  const tools = [new WeatherTool(), new HumanReadableChecker()];
  const agent = await OpenAIAssistantRunnable.createAssistant({
    model: "gpt-3.5-turbo-1106",
    instructions:
      "You are a weather bot. Use the provided functions to answer questions.",
    name: "Weather Assistant",
    tools,
    asAgent: true,
  });
  const agentExecutor = AgentExecutor.fromAgentAndTools({
    agent,
    tools,
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const assistantResponse = await agentExecutor.invoke({
    content:
      "What's the weather in San Francisco and Tokyo? And will it be warm or cold in those places?",
  });
  // console.log(assistantResponse);
  /**
   {
     output: "The weather in San Francisco, CA is currently 72°F and it's warm. In Tokyo, Japan, the temperature is 10°C and it's also warm."
   }
   */
});

// Creates an assistant and immediately deletes it, asserting the API's
// deletion acknowledgement shape.
test("OpenAIAssistantRunnable create and delete assistant", async () => {
  const assistant = await OpenAIAssistantRunnable.createAssistant({
    name: "Personal Assistant",
    model: "gpt-4-1106-preview",
  });
  const deleteStatus = await assistant.deleteAssistant();
  expect(deleteStatus).toEqual({
    id: assistant.assistantId,
    object: "assistant.deleted",
    deleted: true,
  });
  // console.log(deleteStatus);
  /**
   {
     id: 'asst_jwkJPzFkIL2ei9Kn1SZzmR6Y',
     object: 'assistant.deleted',
     deleted: true
   }
   */
});

// Round-trips an assistant through getAssistant/modifyAssistant, checking
// that a rename sticks while the model is preserved.
// NOTE(review): this test does not delete the assistant it creates.
test("OpenAIAssistantRunnable create and modify assistant", async () => {
  const assistant = await OpenAIAssistantRunnable.createAssistant({
    name: "Personal Assistant",
    model: "gpt-4-1106-preview",
  });
  const assistantResponse = await assistant.getAssistant();
  expect(assistantResponse.name).toEqual("Personal Assistant");
  const assistantResponseModified = await assistant.modifyAssistant({
    name: "Personal Assistant 2",
  });
  expect(assistantResponseModified.name).toEqual("Personal Assistant 2");
  expect(assistantResponseModified.model).toEqual("gpt-4-1106-preview");
});

// Uses a pre-existing assistant (TEST_OPENAI_ASSISTANT_ID) as the agent
// driving an AgentExecutor with the two weather tools.
test("OpenAIAssistantRunnable can be passed as an agent", async () => {
  const tools = [new WeatherTool(), new HumanReadableChecker()];
  const agent = new OpenAIAssistantRunnable({
    assistantId: process.env.TEST_OPENAI_ASSISTANT_ID!,
    asAgent: true,
  });
  const agentExecutor = AgentExecutor.fromAgentAndTools({
    agent,
    tools,
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const assistantResponse = await agentExecutor.invoke({
    content:
      "What's the weather in San Francisco and Tokyo? And will it be warm or cold in those places?",
  });
  // console.log(assistantResponse);
  /**
   {
     output: "The weather in San Francisco, CA is currently 72°F and it's warm. In Tokyo, Japan, the temperature is 10°C and it's also warm."
   }
   */
});

// Skipped by default; demonstrates invoking a freshly created
// code-interpreter assistant directly (not via an AgentExecutor).
test.skip("Created OpenAIAssistantRunnable is invokeable", async () => {
  const assistant = await OpenAIAssistantRunnable.createAssistant({
    model: "gpt-4",
    instructions:
      "You are a helpful assistant that provides answers to math problems.",
    name: "Math Assistant",
    tools: [{ type: "code_interpreter" }],
  });
  const assistantResponse = await assistant.invoke({
    content: "What's 10 - 4 raised to the 2.7",
  });
  // console.log(assistantResponse);
  /**
   [
     {
       id: 'msg_egqSo3AZTWJ0DAelzR6DdKbs',
       object: 'thread.message',
       created_at: 1699409656,
       thread_id: 'thread_lAktOZkUetJ7Gl3hzMFdi42E',
       role: 'assistant',
       content: [ [Object] ],
       file_ids: [],
       assistant_id: 'asst_fPjLqVmN21EFGLNQb8iZckEy',
       run_id: 'run_orPmWI9ri1HnqBXmX7LCWWax',
       metadata: {}
     }
   ]
   */
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const content = // eslint-disable-next-line @typescript-eslint/no-explicit-any
    (assistantResponse as any[]).flatMap((res) => res.content);
  // console.log(content);
  /**
   [
     {
       type: 'text',
       text: {
         value: '10 - 4 raised to the 2.7 is approximately -32.22.',
         annotations: []
       }
     }
   ]
   */
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/babyagi/task_creation.ts | import { PromptTemplate } from "@langchain/core/prompts";
import { LLMChain, LLMChainInput } from "../../chains/llm_chain.js";
/** Chain to generate tasks. */
export class TaskCreationChain extends LLMChain {
  static lc_name() {
    return "TaskCreationChain";
  }

  /**
   * Creates a new TaskCreationChain instance. It takes an object of type
   * LLMChainInput as input, omitting the 'prompt' field. It uses the
   * PromptTemplate class to create a new prompt based on the task creation
   * template and the input variables. The new TaskCreationChain instance is
   * then created with this prompt and the remaining fields from the input
   * object.
   * @param fields An object of type LLMChainInput, omitting the 'prompt' field.
   * @returns A new instance of TaskCreationChain.
   */
  static fromLLM(fields: Omit<LLMChainInput, "prompt">): LLMChain {
    // Fixed grammar in the prompt text ("an task" -> "a task").
    const taskCreationTemplate =
      `You are a task creation AI that uses the result of an execution agent` +
      ` to create new tasks with the following objective: {objective},` +
      ` The last completed task has the result: {result}.` +
      ` This result was based on this task description: {task_description}.` +
      ` These are incomplete tasks: {incomplete_tasks}.` +
      ` Based on the result, create new tasks to be completed` +
      ` by the AI system that do not overlap with incomplete tasks.` +
      ` Return the tasks as an array.`;
    const prompt = new PromptTemplate({
      template: taskCreationTemplate,
      inputVariables: [
        "result",
        "task_description",
        "incomplete_tasks",
        "objective",
      ],
    });
    return new TaskCreationChain({ prompt, ...fields });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/babyagi/agent.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { VectorStoreInterface } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";
import { ChainValues } from "@langchain/core/utils/types";
import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
import { BaseChain, ChainInputs } from "../../chains/base.js";
import { SerializedBaseChain } from "../../chains/serde.js";
import { Optional } from "../../types/type-utils.js";
import { TaskCreationChain } from "./task_creation.js";
import { TaskExecutionChain } from "./task_execution.js";
import { TaskPrioritizationChain } from "./task_prioritization.js";
/**
 * Interface defining the structure of a task. A task has a `taskID` and a
 * `taskName`.
 */
export interface Task {
  // Stringified sequential id (assigned from BabyAGI's taskIDCounter).
  taskID: string;
  // Human-readable description of the work to perform.
  taskName: string;
}
/**
 * Interface defining the structure of the inputs for the `BabyAGI` class.
 * It extends the `ChainInputs` interface, omitting the 'memory' and
 * 'callbackManager' properties, and adds properties specific to
 * `BabyAGI`.
 */
export interface BabyAGIInputs
  extends Omit<ChainInputs, "memory" | "callbackManager"> {
  // Chain that proposes new tasks from the last task's result.
  creationChain: BaseChain;
  // Chain that reorders the remaining task list.
  prioritizationChain: BaseChain;
  // Chain that actually performs a task.
  executionChain: BaseChain;
  // Store used to persist task results and look up similar prior tasks.
  vectorstore: VectorStoreInterface;
  // Maximum number of task-execution loop iterations (default 100).
  maxIterations?: number;
}
/**
* Class responsible for managing tasks, including their creation,
* prioritization, and execution. It uses three chains for these
* operations: `creationChain`, `prioritizationChain`, and
* `executionChain`.
* @example
* ```typescript
* const babyAGI = BabyAGI.fromLLM({
* llm: new OpenAI({ temperature: 0 }),
* vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),
* maxIterations: 3,
* });
*
* const result = await babyAGI.call({
* objective: "Write a weather report for SF today",
* });
* ```
*/
export class BabyAGI extends BaseChain implements BabyAGIInputs {
  static lc_name() {
    return "BabyAGI";
  }

  // Pending tasks, consumed from the front each loop iteration.
  taskList: Task[];

  creationChain: BaseChain;

  prioritizationChain: BaseChain;

  executionChain: BaseChain;

  // Monotonic counter used to assign ids to newly created tasks.
  taskIDCounter: number;

  vectorstore: VectorStoreInterface;

  maxIterations: number;

  constructor({
    creationChain,
    prioritizationChain,
    executionChain,
    vectorstore,
    maxIterations = 100,
    verbose,
    callbacks,
  }: BabyAGIInputs) {
    super(undefined, verbose, callbacks);
    this.taskList = [];
    this.creationChain = creationChain;
    this.prioritizationChain = prioritizationChain;
    this.executionChain = executionChain;
    this.taskIDCounter = 1;
    this.vectorstore = vectorstore;
    this.maxIterations = maxIterations;
  }

  _chainType() {
    return "BabyAGI" as const;
  }

  get inputKeys() {
    return ["objective", "firstTask"];
  }

  get outputKeys() {
    return [];
  }

  /**
   * Adds a task to the task list.
   * @param task The task to be added.
   * @returns Promise resolving to void.
   */
  async addTask(task: Task) {
    this.taskList.push(task);
  }

  /**
   * Prints the current task list to the console.
   * @returns void
   */
  printTaskList() {
    console.log("\x1b[95m\x1b[1m\n*****TASK LIST*****\n\x1b[0m\x1b[0m");
    for (const t of this.taskList) {
      console.log(`${t.taskID}: ${t.taskName}`);
    }
  }

  /**
   * Prints the next task to the console.
   * @param task The next task to be printed.
   * @returns void
   */
  printNextTask(task: Task) {
    console.log("\x1b[92m\x1b[1m\n*****NEXT TASK*****\n\x1b[0m\x1b[0m");
    console.log(`${task.taskID}: ${task.taskName}`);
  }

  /**
   * Prints the result of a task to the console.
   * @param result The result of the task.
   * @returns void
   */
  printTaskResult(result: string) {
    console.log("\x1b[93m\x1b[1m\n*****TASK RESULT*****\n\x1b[0m\x1b[0m");
    console.log(result.trim());
  }

  /**
   * Generates the next tasks based on the result of the previous task, the
   * task description, and the objective.
   * @param result The result of the previous task.
   * @param task_description The description of the task.
   * @param objective The objective of the task.
   * @param runManager Optional CallbackManagerForChainRun instance.
   * @returns Promise resolving to an array of tasks without taskID.
   */
  async getNextTasks(
    result: string,
    task_description: string,
    objective: string,
    runManager?: CallbackManagerForChainRun
  ): Promise<Optional<Task, "taskID">[]> {
    const taskNames = this.taskList.map((t) => t.taskName);
    const incomplete_tasks = taskNames.join(", ");
    const { [this.creationChain.outputKeys[0]]: text } =
      await this.creationChain.call(
        {
          result,
          task_description,
          incomplete_tasks,
          objective,
        },
        runManager?.getChild()
      );
    // One task per non-blank output line.
    const newTasks = (text as string).split("\n");
    return newTasks
      .filter((taskName) => taskName.trim())
      .map((taskName) => ({ taskName }));
  }

  /**
   * Prioritizes the tasks based on the current task ID and the objective.
   * @param thisTaskID The ID of the current task.
   * @param objective The objective of the task.
   * @param runManager Optional CallbackManagerForChainRun instance.
   * @returns Promise resolving to an array of prioritized tasks.
   */
  async prioritizeTasks(
    thisTaskID: number,
    objective: string,
    runManager?: CallbackManagerForChainRun
  ) {
    const taskNames = this.taskList.map((t) => t.taskName);
    const nextTaskID = thisTaskID + 1;
    const { [this.prioritizationChain.outputKeys[0]]: text } =
      await this.prioritizationChain.call(
        {
          task_names: taskNames.join(", "),
          next_task_id: String(nextTaskID),
          objective,
        },
        runManager?.getChild()
      );
    const newTasks = (text as string).trim().split("\n");
    const prioritizedTaskList = [];
    for (const taskString of newTasks) {
      // Split each "N. task name" line on the FIRST period only. The
      // previous implementation used `split(".", 2)`, but JavaScript's
      // `limit` argument truncates the result instead of limiting the
      // number of splits (unlike Python's maxsplit), so any task name
      // containing a period lost everything after its first period.
      const trimmed = taskString.trim();
      const dotIndex = trimmed.indexOf(".");
      if (dotIndex !== -1) {
        const taskID = trimmed.slice(0, dotIndex).trim();
        const taskName = trimmed.slice(dotIndex + 1).trim();
        prioritizedTaskList.push({ taskID, taskName });
      }
    }
    return prioritizedTaskList;
  }

  /**
   * Retrieves the top tasks that are most similar to the given query.
   * @param query The query to search for.
   * @param k The number of top tasks to retrieve.
   * @returns Promise resolving to an array of top tasks.
   */
  async getTopTasks(query: string, k = 5) {
    const results = await this.vectorstore.similaritySearch(query, k);
    if (!results) {
      return [];
    }
    return results.map((item) => String(item.metadata.task));
  }

  /**
   * Executes a task based on the objective and the task description.
   * @param objective The objective of the task.
   * @param task The task to be executed.
   * @param runManager Optional CallbackManagerForChainRun instance.
   * @returns Promise resolving to the result of the task execution as a string.
   */
  async executeTask(
    objective: string,
    task: string,
    runManager?: CallbackManagerForChainRun
  ) {
    // Provide previously completed similar tasks as context for execution.
    const context = await this.getTopTasks(objective);
    const { [this.executionChain.outputKeys[0]]: text } =
      await this.executionChain.call(
        {
          objective,
          context: context.join("\n"),
          task,
        },
        runManager?.getChild()
      );
    return text as string;
  }

  /**
   * Main loop: execute the next task, store its result in the vector store,
   * generate follow-up tasks, and reprioritize — until the task list empties
   * or `maxIterations` is reached.
   */
  async _call(
    { objective, firstTask = "Make a todo list" }: ChainValues,
    runManager?: CallbackManagerForChainRun
  ) {
    this.taskList = [];
    this.taskIDCounter = 1;
    await this.addTask({ taskID: "1", taskName: firstTask });
    let numIters = 0;
    while (numIters < this.maxIterations && this.taskList.length > 0) {
      this.printTaskList();
      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
      const task = this.taskList.shift()!;
      this.printNextTask(task);
      const result = await this.executeTask(
        objective,
        task.taskName,
        runManager
      );
      const thisTaskID = parseInt(task.taskID, 10);
      this.printTaskResult(result);
      // Persist the result so later executions can retrieve it as context.
      await this.vectorstore.addDocuments([
        new Document({
          pageContent: result,
          metadata: { task: task.taskName },
        }),
      ]);
      const newTasks = await this.getNextTasks(
        result,
        task.taskName,
        objective,
        runManager
      );
      for (const newTask of newTasks) {
        this.taskIDCounter += 1;
        newTask.taskID = this.taskIDCounter.toFixed();
        await this.addTask(newTask as Task);
      }
      this.taskList = await this.prioritizeTasks(
        thisTaskID,
        objective,
        runManager
      );
      numIters += 1;
    }
    return {};
  }

  serialize(): SerializedBaseChain {
    throw new Error("Method not implemented.");
  }

  /**
   * Static method to create a new BabyAGI instance from a
   * BaseLanguageModel.
   * @param llm BaseLanguageModel instance used to generate a new BabyAGI instance.
   * @param vectorstore VectorStore instance used to store and retrieve vectors.
   * @param executionChain Optional BaseChain instance used to execute tasks.
   * @param verbose Optional boolean indicating whether to log verbose output.
   * @param callbacks Optional callbacks to be used during the execution of tasks.
   * @param rest Optional additional parameters.
   * @returns A new instance of BabyAGI.
   */
  static fromLLM({
    llm,
    vectorstore,
    executionChain,
    verbose,
    callbacks,
    ...rest
  }: Optional<
    BabyAGIInputs,
    "executionChain" | "creationChain" | "prioritizationChain"
  > & { llm: BaseLanguageModelInterface }) {
    const creationChain = TaskCreationChain.fromLLM({
      llm,
      verbose,
      callbacks,
    });
    const prioritizationChain = TaskPrioritizationChain.fromLLM({
      llm,
      verbose,
      callbacks,
    });
    return new BabyAGI({
      creationChain,
      prioritizationChain,
      executionChain:
        executionChain ||
        TaskExecutionChain.fromLLM({ llm, verbose, callbacks }),
      vectorstore,
      verbose,
      callbacks,
      ...rest,
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/babyagi/task_execution.ts | import { PromptTemplate } from "@langchain/core/prompts";
import { LLMChain, LLMChainInput } from "../../chains/llm_chain.js";
/** Chain to execute tasks. */
export class TaskExecutionChain extends LLMChain {
  static lc_name() {
    return "TaskExecutionChain";
  }

  /**
   * A static factory method that creates an instance of TaskExecutionChain.
   * It constructs a prompt template for task execution, which is then used
   * to create a new instance of TaskExecutionChain. The prompt template
   * instructs an AI to perform a task based on a given objective, taking
   * into account previously completed tasks.
   * @param fields An object of type LLMChainInput, excluding the "prompt" field.
   * @returns An instance of LLMChain.
   */
  static fromLLM(fields: Omit<LLMChainInput, "prompt">): LLMChain {
    // Fixed: the "Take into account" segment previously had no leading
    // space, so the rendered prompt read "...{objective}.Take into
    // account..." with the sentences run together.
    const executionTemplate =
      `You are an AI who performs one task based on the following objective: ` +
      `{objective}.` +
      ` Take into account these previously completed tasks: {context}.` +
      ` Your task: {task}. Response:`;
    const prompt = new PromptTemplate({
      template: executionTemplate,
      inputVariables: ["objective", "context", "task"],
    });
    return new TaskExecutionChain({ prompt, ...fields });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/babyagi/task_prioritization.ts | import { PromptTemplate } from "@langchain/core/prompts";
import { LLMChain, LLMChainInput } from "../../chains/llm_chain.js";
/** Chain to prioritize tasks. */
export class TaskPrioritizationChain extends LLMChain {
  static lc_name() {
    return "TaskPrioritizationChain";
  }

  /**
   * Builds a TaskPrioritizationChain from chain inputs (minus the prompt).
   * The prompt asks the model to clean up and reprioritize the current task
   * list as a numbered list starting at {next_task_id}.
   * @param fields Fields used to initialize the chain, excluding the prompt.
   * @returns A new instance of TaskPrioritizationChain.
   */
  static fromLLM(fields: Omit<LLMChainInput, "prompt">): LLMChain {
    // Assembled via join("") so the segments read naturally while producing
    // the exact same single-string template as before.
    const taskPrioritizationTemplate = [
      "You are a task prioritization AI tasked with cleaning the formatting of ",
      "and reprioritizing the following tasks: {task_names}.",
      " Consider the ultimate objective of your team: {objective}.",
      " Do not remove any tasks. Return the result as a numbered list, like:",
      " #. First task",
      " #. Second task",
      " Start the task list with number {next_task_id}.",
    ].join("");
    return new TaskPrioritizationChain({
      prompt: new PromptTemplate({
        template: taskPrioritizationTemplate,
        inputVariables: ["task_names", "next_task_id", "objective"],
      }),
      ...fields,
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/babyagi/index.ts | export { TaskCreationChain } from "./task_creation.js";
export { TaskExecutionChain } from "./task_execution.js";
export { TaskPrioritizationChain } from "./task_prioritization.js";
export { BabyAGI, type Task, type BabyAGIInputs } from "./agent.js";
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/plan_and_execute/outputParser.ts | import { BaseOutputParser } from "@langchain/core/output_parsers";
import { Plan } from "./base.js";
import { PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE } from "./prompt.js";
/**
 * Output parser that converts the planner model's raw text into a `Plan`.
 */
export class PlanOutputParser extends BaseOutputParser<Plan> {
  lc_namespace = ["langchain", "experimental", "plan_and_execute"];

  /**
   * Splits the text on newline-then-"<number>." boundaries to recover the
   * individual steps, discarding the preamble before step 1 and stripping
   * the first `<END_OF_PLAN>` marker from each step.
   * @param text The raw planner output to be parsed.
   * @returns A `Plan` whose steps hold the parsed step texts.
   */
  async parse(text: string): Promise<Plan> {
    const chunks = text.split(/\n\d+\.\s?/);
    const steps: { text: string }[] = [];
    for (const chunk of chunks.slice(1)) {
      steps.push({ text: chunk.replace(`<END_OF_PLAN>`, "") });
    }
    return { steps };
  }

  /**
   * Returns the format instructions for the plan, defined by the
   * `PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE` constant.
   * @returns A string representing the format instructions for the plan.
   */
  getFormatInstructions(): string {
    return PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/plan_and_execute/agent_executor.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { ChainValues } from "@langchain/core/utils/types";
import { Tool, DynamicStructuredTool } from "@langchain/core/tools";
import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
import { BaseChain, ChainInputs } from "../../chains/base.js";
import {
BasePlanner,
BaseStepContainer,
BaseStepExecutor,
ListStepContainer,
LLMPlanner,
ChainStepExecutor,
} from "./base.js";
import { AgentExecutor } from "../../agents/executor.js";
import {
DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE,
getPlannerChatPrompt,
} from "./prompt.js";
import { LLMChain } from "../../chains/llm_chain.js";
import { PlanOutputParser } from "./outputParser.js";
import { ChatAgent } from "../../agents/chat/index.js";
import { StructuredChatAgent } from "../../agents/index.js";
import { SerializedLLMChain } from "../../chains/serde.js";
/**
 * Type guard that distinguishes a DynamicStructuredTool from plain tools.
 * Relies on the static `lc_name` method that serializable LangChain
 * classes expose on their constructors.
 * @param tool the tool to test
 * @returns true when the tool is a DynamicStructuredTool
 */
export function isDynamicStructuredTool(
  tool: Tool | DynamicStructuredTool
): tool is DynamicStructuredTool {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const ctor = tool.constructor as any;
  return (
    typeof ctor.lc_name === "function" &&
    ctor.lc_name() === "DynamicStructuredTool"
  );
}
/**
* Interface for the input to the PlanAndExecuteAgentExecutor class. It
* extends ChainInputs and includes additional properties for the planner,
* step executor, step container, and input and output keys.
*/
export interface PlanAndExecuteAgentExecutorInput extends ChainInputs {
planner: BasePlanner;
stepExecutor: BaseStepExecutor;
stepContainer?: BaseStepContainer;
inputKey?: string;
outputKey?: string;
}
/**
 * Class representing a plan-and-execute agent executor. This agent
 * decides on the full sequence of actions upfront, then executes them all
 * without updating the plan. This is suitable for complex or long-running
 * tasks that require maintaining long-term objectives and focus.
 */
export class PlanAndExecuteAgentExecutor extends BaseChain {
  static lc_name() {
    return "PlanAndExecuteAgentExecutor";
  }

  private planner: BasePlanner;

  private stepExecutor: BaseStepExecutor;

  private stepContainer: BaseStepContainer = new ListStepContainer();

  private inputKey = "input";

  private outputKey = "output";

  constructor(input: PlanAndExecuteAgentExecutorInput) {
    super(input);
    this.planner = input.planner;
    this.stepExecutor = input.stepExecutor;
    this.stepContainer = input.stepContainer ?? this.stepContainer;
    this.inputKey = input.inputKey ?? this.inputKey;
    this.outputKey = input.outputKey ?? this.outputKey;
  }

  get inputKeys() {
    return [this.inputKey];
  }

  get outputKeys() {
    return [this.outputKey];
  }

  /**
   * Static method that returns a default planner for the agent. It creates
   * a new LLMChain with a given LLM and a fixed prompt, and uses it to
   * create a new LLMPlanner with a PlanOutputParser.
   * @param llm The Large Language Model (LLM) used to generate responses.
   * @param tools The set of tools whose descriptions are injected into the planner prompt.
   * @returns A new LLMPlanner instance.
   */
  static async getDefaultPlanner({
    llm,
    tools,
  }: {
    llm: BaseLanguageModelInterface;
    tools: Tool[] | DynamicStructuredTool[];
  }) {
    const plannerLlmChain = new LLMChain({
      llm,
      prompt: await getPlannerChatPrompt(tools),
    });
    return new LLMPlanner(plannerLlmChain, new PlanOutputParser());
  }

  /**
   * Static method that returns a default step executor for the agent. It
   * creates a chat agent (structured when the tools are structured) from a
   * given LLM and a set of tools, and wraps it in a ChainStepExecutor.
   * @param llm The Large Language Model (LLM) used to generate responses.
   * @param tools The set of tools used by the agent.
   * @param humanMessageTemplate The template for human messages. If not provided, a default template is used.
   * @returns A new ChainStepExecutor instance.
   */
  static getDefaultStepExecutor({
    llm,
    tools,
    humanMessageTemplate = DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE,
  }: {
    llm: BaseLanguageModelInterface;
    tools: Tool[] | DynamicStructuredTool[];
    humanMessageTemplate?: string;
  }) {
    // Structured tools need the structured chat agent; otherwise fall back
    // to the plain chat agent. Both branches share the same executor
    // wrapping below (previously duplicated in each branch).
    let agent;
    if (tools.length > 0 && isDynamicStructuredTool(tools[0])) {
      agent = StructuredChatAgent.fromLLMAndTools(llm, tools, {
        humanMessageTemplate,
        inputVariables: ["previous_steps", "current_step", "agent_scratchpad"],
      });
    } else {
      agent = ChatAgent.fromLLMAndTools(llm, tools as Tool[], {
        humanMessageTemplate,
      });
    }
    return new ChainStepExecutor(
      AgentExecutor.fromAgentAndTools({
        agent,
        tools,
      })
    );
  }

  /**
   * Static method that creates a new PlanAndExecuteAgentExecutor from a
   * given LLM, a set of tools, and optionally a human message template. It
   * uses the getDefaultPlanner and getDefaultStepExecutor methods to create
   * the planner and step executor for the new agent executor. Any other
   * executor options (callbacks, stepContainer, inputKey, outputKey, ...)
   * are forwarded to the constructor (previously they were accepted by the
   * signature but silently dropped).
   * @param llm The Large Language Model (LLM) used to generate responses.
   * @param tools The set of tools used by the agent.
   * @param humanMessageTemplate The template for human messages. If not provided, a default template is used.
   * @returns A new PlanAndExecuteAgentExecutor instance.
   */
  static async fromLLMAndTools({
    llm,
    tools,
    humanMessageTemplate,
    ...rest
  }: {
    llm: BaseLanguageModelInterface;
    tools: Tool[] | DynamicStructuredTool[];
    humanMessageTemplate?: string;
  } & Omit<PlanAndExecuteAgentExecutorInput, "planner" | "stepExecutor">) {
    const executor = new PlanAndExecuteAgentExecutor({
      ...rest,
      planner: await PlanAndExecuteAgentExecutor.getDefaultPlanner({
        llm,
        tools,
      }),
      stepExecutor: PlanAndExecuteAgentExecutor.getDefaultStepExecutor({
        llm,
        tools,
        humanMessageTemplate,
      }),
    });
    return executor;
  }

  /** @ignore */
  async _call(
    inputs: ChainValues,
    runManager?: CallbackManagerForChainRun
  ): Promise<ChainValues> {
    const plan = await this.planner.plan(inputs.input, runManager?.getChild());
    if (!plan.steps?.length) {
      throw new Error(
        "Could not create and parse a plan to answer your question - please try again."
      );
    }
    // Remind the executor of the original question on the final step so the
    // last response actually answers it rather than just the sub-task.
    plan.steps[
      plan.steps.length - 1
    ].text += ` The original question was: ${inputs.input}.`;
    for (const step of plan.steps) {
      const newInputs = {
        ...inputs,
        previous_steps: JSON.stringify(this.stepContainer.getSteps()),
        current_step: step.text,
      };
      const response = await this.stepExecutor.step(
        newInputs,
        runManager?.getChild()
      );
      this.stepContainer.addStep(step, response);
    }
    return { [this.outputKey]: this.stepContainer.getFinalResponse() };
  }

  _chainType() {
    return "agent_executor" as const;
  }

  serialize(): SerializedLLMChain {
    throw new Error("Cannot serialize an AgentExecutor");
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/plan_and_execute/index.ts | export { PlanAndExecuteAgentExecutor } from "./agent_executor.js";
export {
BasePlanner,
BaseStepContainer,
BaseStepExecutor,
type StepAction,
type StepResult,
type Step,
type Plan,
ListStepContainer,
LLMPlanner,
ChainStepExecutor,
} from "./base.js";
export { PlanOutputParser } from "./outputParser.js";
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/plan_and_execute/base.ts | import { BaseOutputParser } from "@langchain/core/output_parsers";
import { ChainValues } from "@langchain/core/utils/types";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { BaseChain } from "../../chains/base.js";
import { LLMChain } from "../../chains/llm_chain.js";
/**
* Represents an action to be performed in a step.
*/
export type StepAction = {
text: string;
};
/**
* Represents the result of a step.
*/
export type StepResult = {
response: string;
};
/**
* Represents a step, which includes an action and its result.
*/
export type Step = {
action: StepAction;
result: StepResult;
};
/**
* Represents a plan, which is a sequence of step actions.
*/
export type Plan = {
steps: StepAction[];
};
/**
 * Abstract class that defines the structure for a planner. Planners are
 * responsible for generating a plan based on inputs.
 */
export abstract class BasePlanner {
  /** Generate an ordered `Plan` of step actions for the given chain inputs. */
  abstract plan(
    inputs: ChainValues,
    runManager?: CallbackManager
  ): Promise<Plan>;
}
/**
 * Abstract class that defines the structure for a step executor. Step
 * executors are responsible for executing a step based on inputs.
 */
export abstract class BaseStepExecutor {
  /** Execute a single plan step for the given inputs and return its result. */
  abstract step(
    inputs: ChainValues,
    runManager?: CallbackManager
  ): Promise<StepResult>;
}
/**
 * Abstract class that defines the structure for a step container. Step
 * containers are responsible for managing steps.
 */
export abstract class BaseStepContainer {
  /** Record a completed step (an action plus its result). */
  abstract addStep(action: StepAction, result: StepResult): void;
  /** Return every recorded step, in the order it was added. */
  abstract getSteps(): Step[];
  /** Return the response to surface as the overall final answer. */
  abstract getFinalResponse(): string;
}
/**
 * Default step container backed by a plain array. Steps are stored in
 * insertion order and the final response is taken from the most recently
 * added step.
 */
export class ListStepContainer extends BaseStepContainer {
  private steps: Step[] = [];

  /** Append the action/result pair to the recorded steps. */
  addStep(action: StepAction, result: StepResult) {
    this.steps.push({ action, result });
  }

  /** All recorded steps, oldest first. */
  getSteps() {
    return this.steps;
  }

  /** The response of the last recorded step (undefined when empty). */
  getFinalResponse(): string {
    const lastStep = this.steps[this.steps.length - 1];
    return lastStep?.result?.response;
  }
}
/**
 * Planner backed by an LLMChain: it runs the chain on the inputs and
 * feeds the resulting text through an output parser to obtain a `Plan`.
 */
export class LLMPlanner extends BasePlanner {
  private llmChain: LLMChain;

  private outputParser: BaseOutputParser<Plan>;

  constructor(llmChain: LLMChain, outputParser: BaseOutputParser<Plan>) {
    super();
    this.llmChain = llmChain;
    this.outputParser = outputParser;
  }

  /** Run the underlying chain, then parse its raw text output into a Plan. */
  async plan(inputs: ChainValues, runManager?: CallbackManager): Promise<Plan> {
    const rawOutput = await this.llmChain.run(inputs, runManager);
    return this.outputParser.parse(rawOutput);
  }
}
/**
 * Step executor that delegates each step to a wrapped chain and reports
 * the chain's `output` value as the step's response.
 */
export class ChainStepExecutor extends BaseStepExecutor {
  private chain: BaseChain;

  constructor(chain: BaseChain) {
    super();
    this.chain = chain;
  }

  /** Call the wrapped chain with the step inputs and wrap its output. */
  async step(
    inputs: ChainValues,
    runManager?: CallbackManager
  ): Promise<StepResult> {
    const { output } = await this.chain.call(inputs, runManager);
    return { response: output };
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/plan_and_execute/prompt.ts | import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
} from "@langchain/core/prompts";
import { Tool, DynamicStructuredTool } from "@langchain/core/tools";
/**
 * System prompt for the planner model. Instructs it to emit a minimal
 * numbered plan headed "Plan:", to take the available tools (interpolated
 * via {toolStrings}) into account, and to terminate with "<END_OF_PLAN>",
 * which the PlanOutputParser later strips from the final step.
 */
export const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
  `Let's first understand the problem and devise a plan to solve the problem.`,
  `Please output the plan starting with the header "Plan:"`,
  `followed by a numbered list of steps.`,
  `Please make the plan the minimum number of steps required`,
  `to answer the query or complete the task accurately and precisely.`,
  `You have a set of tools at your disposal to help you with this task:`,
  "",
  "{toolStrings}",
  "",
  `You must consider these tools when coming up with your plan.`,
  `If the task is a question, the final step in the plan must be the following: "Given the above steps taken,`,
  `please respond to the original query."`,
  `At the end of your plan, say "<END_OF_PLAN>"`,
].join(" ");
/**
 * Human message template used by the default step executor. Surfaces the
 * serialized previous steps, the current objective, and the agent
 * scratchpad to the executing agent.
 */
export const DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = `Previous steps: {previous_steps}
Current objective: {current_step}
{agent_scratchpad}
You may extract and combine relevant data from your previous steps when responding to me.`;
/**
 * Build the planner chat prompt, injecting one "name: description" line
 * per available tool so the resulting plan can make efficient use of them.
 * @param tools the tools available to the `planner`
 * @returns a chat prompt with the {toolStrings} variable pre-filled
 */
export const getPlannerChatPrompt = async (
  tools: Tool[] | DynamicStructuredTool[]
) => {
  const toolLines: string[] = [];
  for (const tool of tools) {
    toolLines.push(`${tool.name}: ${tool.description}`);
  }
  return /* #__PURE__ */ ChatPromptTemplate.fromMessages([
    SystemMessagePromptTemplate.fromTemplate(
      PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE
    ),
    HumanMessagePromptTemplate.fromTemplate(`{input}`),
  ]).partial({ toolStrings: toolLines.join("\n") });
};
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/plan_and_execute | lc_public_repos/langchainjs/langchain/src/experimental/plan_and_execute/tests/plan_and_execute.int.test.ts | /* eslint-disable no-process-env */
import { ChatOpenAI } from "@langchain/openai";
import { SerpAPI } from "../../../util/testing/tools/serpapi.js";
import { Calculator } from "../../../util/testing/tools/calculator.js";
import { PlanAndExecuteAgentExecutor } from "../agent_executor.js";
test.skip("Run agent on a simple input", async () => {
  const tools = [new Calculator(), new SerpAPI()];
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-3.5-turbo",
    verbose: true,
  });
  const executor = await PlanAndExecuteAgentExecutor.fromLLMAndTools({
    llm: model,
    tools,
  });
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await executor.call({
    input: `What is 80 raised to the second power?`,
  });
  // console.log({ result });
});
test.skip("Run agent", async () => {
  const tools = [new Calculator(), new SerpAPI()];
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-3.5-turbo",
    verbose: true,
  });
  const executor = await PlanAndExecuteAgentExecutor.fromLLMAndTools({
    llm: model,
    tools,
  });
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await executor.call({
    input: `Who is the current president of the United States? What is their current age raised to the second power?`,
  });
  // console.log({ result });
});
// TODO: Improve prompt to store compressed context to support this input
test.skip("Run agent with a sequential math problem", async () => {
  const tools = [new Calculator()];
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-3.5-turbo",
    verbose: true,
  });
  const executor = await PlanAndExecuteAgentExecutor.fromLLMAndTools({
    llm: model,
    tools,
  });
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await executor.call({
    input: `In a dance class of 20 students, 20% enrolled in contemporary dance, 25% of the remaining enrolled in jazz dance, and the rest enrolled in hip-hop dance. What percentage of the entire students enrolled in hip-hop dance?`,
  });
  // console.log(result);
});
test.skip("Should run agent with no tools", async () => {
  const model = new ChatOpenAI({
    temperature: 0,
    modelName: "gpt-3.5-turbo",
    verbose: true,
  });
  const executor = await PlanAndExecuteAgentExecutor.fromLLMAndTools({
    llm: model,
    tools: [],
  });
  await executor.call({
    input: `Who is the current president of the United States? What is their current age raised to the second power?`,
  });
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/generative_agents/generative_agent_memory.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { PromptTemplate } from "@langchain/core/prompts";
import { Document } from "@langchain/core/documents";
import { ChainValues } from "@langchain/core/utils/types";
import { BaseMemory, InputValues, OutputValues } from "@langchain/core/memory";
import {
CallbackManagerForChainRun,
Callbacks,
} from "@langchain/core/callbacks/manager";
import { TimeWeightedVectorStoreRetriever } from "../../retrievers/time_weighted.js";
import { BaseChain } from "../../chains/base.js";
import { LLMChain } from "../../chains/llm_chain.js";
export type GenerativeAgentMemoryConfig = {
reflectionThreshold?: number;
importanceWeight?: number;
verbose?: boolean;
maxTokensLimit?: number;
};
/**
 * Class that manages the memory of a generative agent in LangChain. It
 * extends the `BaseChain` class and has methods for adding observations
 * or memories to the agent's memory, scoring the importance of a memory,
 * reflecting on recent events to add synthesized memories, and generating
 * insights on a topic of reflection based on pertinent memories.
 */
class GenerativeAgentMemoryChain extends BaseChain {
  static lc_name() {
    return "GenerativeAgentMemoryChain";
  }
  // Re-entrancy guard: prevents reflection from re-triggering itself while
  // the insights produced during reflection are being stored via _call.
  reflecting = false;
  // When aggregateImportance exceeds this value, the agent pauses to reflect.
  reflectionThreshold?: number;
  // Scales the raw 1-10 importance score into the aggregate measure.
  importanceWeight = 0.15;
  memoryRetriever: TimeWeightedVectorStoreRetriever;
  llm: BaseLanguageModelInterface;
  verbose = false;
  // Running sum of importance scores accumulated since the last reflection.
  private aggregateImportance = 0.0;
  constructor(
    llm: BaseLanguageModelInterface,
    memoryRetriever: TimeWeightedVectorStoreRetriever,
    config: Omit<GenerativeAgentMemoryConfig, "maxTokensLimit">
  ) {
    super();
    this.llm = llm;
    this.memoryRetriever = memoryRetriever;
    this.reflectionThreshold = config.reflectionThreshold;
    this.importanceWeight = config.importanceWeight ?? this.importanceWeight;
    this.verbose = config.verbose ?? this.verbose;
  }
  _chainType(): string {
    return "generative_agent_memory";
  }
  get inputKeys(): string[] {
    return ["memory_content", "now", "memory_metadata"];
  }
  get outputKeys(): string[] {
    return ["output"];
  }
  /**
   * Method that creates a new LLMChain with the given prompt.
   * @param prompt The PromptTemplate to use for the new LLMChain.
   * @returns A new LLMChain instance.
   */
  chain(prompt: PromptTemplate): LLMChain {
    const chain = new LLMChain({
      llm: this.llm,
      prompt,
      verbose: this.verbose,
      outputKey: "output",
    });
    return chain;
  }
  /**
   * Stores one observation/memory: scores its importance, persists it via
   * the retriever, and (once enough importance has accumulated) triggers a
   * reflection pass. Returns the importance score under the "output" key.
   */
  async _call(values: ChainValues, runManager?: CallbackManagerForChainRun) {
    const { memory_content: memoryContent, now } = values;
    // add an observation or memory to the agent's memory
    const importanceScore = await this.scoreMemoryImportance(
      memoryContent,
      runManager
    );
    this.aggregateImportance += importanceScore;
    const document = new Document({
      pageContent: memoryContent,
      metadata: {
        importance: importanceScore,
        ...values.memory_metadata,
      },
    });
    await this.memoryRetriever.addDocuments([document]);
    // after an agent has processed a certain amount of memories (as measured by aggregate importance),
    // it is time to pause and reflect on recent events to add more synthesized memories to the agent's
    // memory stream.
    if (
      this.reflectionThreshold !== undefined &&
      this.aggregateImportance > this.reflectionThreshold &&
      !this.reflecting
    ) {
      console.log("Reflecting on current memories...");
      this.reflecting = true;
      await this.pauseToReflect(now, runManager);
      this.aggregateImportance = 0.0;
      this.reflecting = false;
    }
    return { output: importanceScore };
  }
  /**
   * Method that pauses the agent to reflect on recent events and generate
   * new insights.
   * @param now The current date.
   * @param runManager The CallbackManagerForChainRun to use for the reflection.
   * @returns An array of new insights as strings.
   */
  async pauseToReflect(
    now?: Date,
    runManager?: CallbackManagerForChainRun
  ): Promise<string[]> {
    if (this.verbose) {
      console.log("Pausing to reflect...");
    }
    const newInsights: string[] = [];
    const topics = await this.getTopicsOfReflection(50, runManager);
    for (const topic of topics) {
      const insights = await this.getInsightsOnTopic(topic, now, runManager);
      for (const insight of insights) {
        // add memory — re-enters _call; the `reflecting` flag stops a
        // nested reflection from firing here.
        await this.call(
          {
            memory_content: insight,
            now,
            memory_metadata: {
              source: "reflection_insight",
            },
          },
          runManager?.getChild("reflection_insight_memory")
        );
      }
      newInsights.push(...insights);
    }
    return newInsights;
  }
  /**
   * Method that scores the importance of a given memory.
   * @param memoryContent The content of the memory to score.
   * @param runManager The CallbackManagerForChainRun to use for scoring.
   * @returns The importance score of the memory as a number in [0, importanceWeight].
   */
  async scoreMemoryImportance(
    memoryContent: string,
    runManager?: CallbackManagerForChainRun
  ): Promise<number> {
    // score the absolute importance of a given memory
    const prompt = PromptTemplate.fromTemplate(
      "On the scale of 1 to 10, where 1 is purely mundane" +
        " (e.g., brushing teeth, making bed) and 10 is" +
        " extremely poignant (e.g., a break up, college" +
        " acceptance), rate the likely poignancy of the" +
        " following piece of memory. Respond with a single integer." +
        "\nMemory: {memory_content}" +
        "\nRating: "
    );
    const score = await this.chain(prompt).run(
      memoryContent,
      runManager?.getChild("determine_importance")
    );
    const strippedScore = score.trim();
    if (this.verbose) {
      console.log("Importance score:", strippedScore);
    }
    // Take the first integer in the reply; anything unparsable scores 0.
    const match = strippedScore.match(/^\D*(\d+)/);
    if (match) {
      const capturedNumber = parseFloat(match[1]);
      const result = (capturedNumber / 10) * this.importanceWeight;
      return result;
    } else {
      return 0.0;
    }
  }
  /**
   * Method that retrieves the topics of reflection based on the last K
   * memories.
   * @param lastK The number of most recent memories to consider for generating topics.
   * @param runManager The CallbackManagerForChainRun to use for retrieving topics.
   * @returns An array of topics of reflection as strings.
   */
  async getTopicsOfReflection(
    lastK: number,
    runManager?: CallbackManagerForChainRun
  ): Promise<string[]> {
    const prompt = PromptTemplate.fromTemplate(
      "{observations}\n\n" +
        "Given only the information above, what are the 3 most salient" +
        " high-level questions we can answer about the subjects in" +
        " the statements? Provide each question on a new line.\n\n"
    );
    const observations = this.memoryRetriever.getMemoryStream().slice(-lastK);
    const observationStr = observations
      .map((o: { pageContent: string }) => o.pageContent)
      .join("\n");
    const result = await this.chain(prompt).run(
      observationStr,
      runManager?.getChild("reflection_topics")
    );
    return GenerativeAgentMemoryChain.parseList(result);
  }
  /**
   * Method that generates insights on a given topic of reflection based on
   * pertinent memories.
   * @param topic The topic of reflection.
   * @param now The current date.
   * @param runManager The CallbackManagerForChainRun to use for generating insights.
   * @returns An array of insights as strings.
   */
  async getInsightsOnTopic(
    topic: string,
    now?: Date,
    runManager?: CallbackManagerForChainRun
  ): Promise<string[]> {
    // generate insights on a topic of reflection, based on pertinent memories
    const prompt = PromptTemplate.fromTemplate(
      "Statements about {topic}\n" +
        "{related_statements}\n\n" +
        "What 5 high-level insights can you infer from the above statements?" +
        " (example format: insight (because of 1, 5, 3))"
    );
    const relatedMemories = await this.fetchMemories(topic, now, runManager);
    const relatedStatements: string = relatedMemories
      .map((memory, index) => `${index + 1}. ${memory.pageContent}`)
      .join("\n");
    const result = await this.chain(prompt).call(
      {
        topic,
        related_statements: relatedStatements,
      },
      runManager?.getChild("reflection_insights")
    );
    return GenerativeAgentMemoryChain.parseList(result.output); // added output
  }
  /**
   * Method that parses a newline-separated string into a list of strings.
   * NOTE(review): blank lines yield empty-string entries here — confirm
   * downstream consumers tolerate them before filtering.
   * @param text The newline-separated string to parse.
   * @returns An array of trimmed strings.
   */
  static parseList(text: string): string[] {
    // parse a newline-separated string into a list of strings
    return text.split("\n").map((s) => s.trim());
  }
  // TODO: Mock "now" to simulate different times
  /**
   * Method that fetches memories related to a given observation.
   * @param observation The observation to fetch memories for.
   * @param _now The current date (currently unused — see TODO above).
   * @param runManager The CallbackManagerForChainRun to use for fetching memories.
   * @returns An array of Document instances representing the fetched memories.
   */
  async fetchMemories(
    observation: string,
    _now?: Date,
    runManager?: CallbackManagerForChainRun
  ): Promise<Document[]> {
    return this.memoryRetriever.getRelevantDocuments(
      observation,
      runManager?.getChild("memory_retriever")
    );
  }
}
/**
 * Class that manages the memory of a generative agent in LangChain. It
 * extends the `BaseMemory` class and has methods for adding a memory,
 * formatting memories, getting memories until a token limit is reached,
 * loading memory variables, saving the context of a model run to memory,
 * and clearing memory contents.
 * @example
 * ```typescript
 * const createNewMemoryRetriever = async () => {
 *   const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
 *   const retriever = new TimeWeightedVectorStoreRetriever({
 *     vectorStore,
 *     otherScoreKeys: ["importance"],
 *     k: 15,
 *   });
 *   return retriever;
 * };
 * const tommiesMemory = new GenerativeAgentMemory(
 *   llm,
 *   await createNewMemoryRetriever(),
 *   { reflectionThreshold: 8 },
 * );
 * const summary = await tommiesMemory.getSummary();
 * ```
 */
export class GenerativeAgentMemory extends BaseMemory {
  llm: BaseLanguageModelInterface;

  memoryRetriever: TimeWeightedVectorStoreRetriever;

  // Initialized so the flag is a real boolean even when the config omits it
  // (previously the field had no initializer, so `config?.verbose ?? this.verbose`
  // left it `undefined`).
  verbose = false;

  reflectionThreshold?: number;

  private maxTokensLimit = 1200;

  // Keys used to exchange data through loadMemoryVariables/saveContext.
  queriesKey = "queries";

  mostRecentMemoriesTokenKey = "recent_memories_token";

  addMemoryKey = "addMemory";

  relevantMemoriesKey = "relevant_memories";

  relevantMemoriesSimpleKey = "relevant_memories_simple";

  mostRecentMemoriesKey = "most_recent_memories";

  nowKey = "now";

  memoryChain: GenerativeAgentMemoryChain;

  constructor(
    llm: BaseLanguageModelInterface,
    memoryRetriever: TimeWeightedVectorStoreRetriever,
    config?: GenerativeAgentMemoryConfig
  ) {
    super();
    this.llm = llm;
    this.memoryRetriever = memoryRetriever;
    this.verbose = config?.verbose ?? this.verbose;
    this.reflectionThreshold =
      config?.reflectionThreshold ?? this.reflectionThreshold;
    this.maxTokensLimit = config?.maxTokensLimit ?? this.maxTokensLimit;
    this.memoryChain = new GenerativeAgentMemoryChain(llm, memoryRetriever, {
      reflectionThreshold: config?.reflectionThreshold,
      importanceWeight: config?.importanceWeight,
    });
  }

  /**
   * Method that returns the key for relevant memories.
   * @returns The key for relevant memories as a string.
   */
  getRelevantMemoriesKey(): string {
    return this.relevantMemoriesKey;
  }

  /**
   * Method that returns the key for the most recent memories token.
   * @returns The key for the most recent memories token as a string.
   */
  getMostRecentMemoriesTokenKey(): string {
    return this.mostRecentMemoriesTokenKey;
  }

  /**
   * Method that returns the key for adding a memory.
   * @returns The key for adding a memory as a string.
   */
  getAddMemoryKey(): string {
    return this.addMemoryKey;
  }

  /**
   * Method that returns the key for the current time.
   * @returns The key for the current time as a string.
   */
  getCurrentTimeKey(): string {
    return this.nowKey;
  }

  get memoryKeys(): string[] {
    // Return an array of memory keys
    return [this.relevantMemoriesKey, this.mostRecentMemoriesKey];
  }

  /**
   * Method that adds a memory to the agent's memory.
   * @param memoryContent The content of the memory to add.
   * @param now The current date.
   * @param metadata The metadata for the memory.
   * @param callbacks The Callbacks to use for adding the memory.
   * @returns The result of the memory addition.
   */
  async addMemory(
    memoryContent: string,
    now?: Date,
    metadata?: Record<string, unknown>,
    callbacks?: Callbacks
  ) {
    return this.memoryChain.call(
      { memory_content: memoryContent, now, memory_metadata: metadata },
      callbacks
    );
  }

  /**
   * Method that formats the given relevant memories in detail, one
   * "<created time>: <content>" line per unique memory.
   * @param relevantMemories The relevant memories to format.
   * @returns The formatted memories as a string.
   */
  formatMemoriesDetail(relevantMemories: Document[]): string {
    if (!relevantMemories.length) {
      return "No relevant information.";
    }
    // Deduplicate by page content while preserving order. Uses Set.has():
    // the previous `pageContent in contentStrings` checked object property
    // keys on the Set instance, never membership, so duplicates were kept.
    const seenContent = new Set<string>();
    const content: string[] = [];
    for (const memory of relevantMemories) {
      if (seenContent.has(memory.pageContent)) {
        continue;
      }
      seenContent.add(memory.pageContent);
      const createdTime = memory.metadata.created_at.toLocaleString("en-US", {
        month: "long",
        day: "numeric",
        year: "numeric",
        hour: "numeric",
        minute: "numeric",
        hour12: true,
      });
      content.push(`${createdTime}: ${memory.pageContent.trim()}`);
    }
    return content.join("\n");
  }

  /**
   * Method that formats the given relevant memories in a simple,
   * semicolon-separated manner.
   * @param relevantMemories The relevant memories to format.
   * @returns The formatted memories as a string.
   */
  formatMemoriesSimple(relevantMemories: Document[]): string {
    return relevantMemories.map((mem) => `${mem.pageContent}`).join("; ");
  }

  /**
   * Method that retrieves the most recent memories (newest first) until the
   * configured token limit is reached.
   * @param consumedTokens The number of tokens consumed so far.
   * @returns The memories as a string.
   */
  async getMemoriesUntilLimit(consumedTokens: number): Promise<string> {
    // reduce the number of tokens in the documents
    const result = [];
    for (const doc of this.memoryRetriever
      .getMemoryStream()
      .slice()
      .reverse()) {
      if (consumedTokens >= this.maxTokensLimit) {
        if (this.verbose) {
          console.log("Exceeding max tokens for LLM, filtering memories");
        }
        break;
      }
      // eslint-disable-next-line no-param-reassign
      consumedTokens += await this.llm.getNumTokens(doc.pageContent);
      if (consumedTokens < this.maxTokensLimit) {
        result.push(doc);
      }
    }
    return this.formatMemoriesSimple(result);
  }

  get memoryVariables(): string[] {
    // input keys this memory class will load dynamically
    return [];
  }

  /**
   * Method that loads memory variables based on the given inputs. When
   * queries are supplied, returns relevant memories; when a token budget is
   * supplied, returns the most recent memories within it; otherwise empty.
   * @param inputs The inputs to use for loading memory variables.
   * @returns An object containing the loaded memory variables.
   */
  async loadMemoryVariables(
    inputs: InputValues
  ): Promise<Record<string, string>> {
    const queries = inputs[this.queriesKey];
    const now = inputs[this.nowKey];
    if (queries !== undefined) {
      const relevantMemories = (
        await Promise.all(
          queries.map((query: string) =>
            this.memoryChain.fetchMemories(query, now)
          )
        )
      ).flat();
      return {
        [this.relevantMemoriesKey]: this.formatMemoriesDetail(relevantMemories),
        [this.relevantMemoriesSimpleKey]:
          this.formatMemoriesSimple(relevantMemories),
      };
    }
    const mostRecentMemoriesToken = inputs[this.mostRecentMemoriesTokenKey];
    if (mostRecentMemoriesToken !== undefined) {
      return {
        [this.mostRecentMemoriesKey]: await this.getMemoriesUntilLimit(
          mostRecentMemoriesToken
        ),
      };
    }
    return {};
  }

  /**
   * Method that saves the context of a model run to memory.
   * @param _inputs The inputs of the model run.
   * @param outputs The outputs of the model run.
   * @returns Nothing.
   */
  async saveContext(
    _inputs: InputValues,
    outputs: OutputValues
  ): Promise<void> {
    // save the context of this model run to memory
    const mem = outputs[this.addMemoryKey];
    const now = outputs[this.nowKey];
    if (mem) {
      await this.addMemory(mem, now, {});
    }
  }

  /**
   * Method that clears the memory contents.
   * @returns Nothing.
   */
  clear(): void {
    // TODO: clear memory contents
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/generative_agents/index.ts | export { GenerativeAgentMemory } from "./generative_agent_memory.js";
export { GenerativeAgent } from "./generative_agent.js";
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/generative_agents/generative_agent.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { PromptTemplate } from "@langchain/core/prompts";
import { ChainValues } from "@langchain/core/utils/types";
import {
CallbackManagerForChainRun,
Callbacks,
} from "@langchain/core/callbacks/manager";
import { LLMChain } from "../../chains/llm_chain.js";
import { GenerativeAgentMemory } from "./generative_agent_memory.js";
import { BaseChain } from "../../chains/base.js";
/**
 * Configuration for the GenerativeAgent class. Defines the character's
 * name, optional age, permanent traits, status, verbosity, and summary
 * refresh seconds.
 */
export type GenerativeAgentConfig = {
// The character's name.
name: string;
// Optional age of the character.
age?: number;
// Permanent traits to ascribe to the character.
traits: string;
// The current status of the character (traits you wish not to change).
status: string;
// Whether component chains should run verbosely.
verbose?: boolean;
// How often (in seconds) the self-summary is regenerated.
summaryRefreshSeconds?: number;
// dailySummaries?: string[];
};
/**
* Implementation of a generative agent that can learn and form new memories over
* time. It extends the BaseChain class, which is a generic
* sequence of calls to components, including other chains.
* @example
* ```typescript
* const tommie: GenerativeAgent = new GenerativeAgent(
* new OpenAI({ temperature: 0.9, maxTokens: 1500 }),
* new GenerativeAgentMemory(
* new ChatOpenAI(),
* new TimeWeightedVectorStoreRetriever({
* vectorStore: new MemoryVectorStore(new OpenAIEmbeddings()),
* otherScoreKeys: ["importance"],
* k: 15,
* }),
* { reflectionThreshold: 8 },
* ),
* {
* name: "Tommie",
* age: 25,
* traits: "anxious, likes design, talkative",
* status: "looking for a job",
* },
* );
*
* await tommie.addMemory(
* "Tommie remembers his dog, Bruno, from when he was a kid",
* new Date(),
* );
* const summary = await tommie.getSummary({ forceRefresh: true });
* const response = await tommie.generateDialogueResponse(
* "USER says Hello Tommie, how are you today?",
* );
* ```
*/
export class GenerativeAgent extends BaseChain {
  static lc_name() {
    return "GenerativeAgent";
  }

  // a character with memory and innate characteristics
  name: string; // the character's name

  age?: number; // the optional age of the character

  traits: string; // permanent traits to ascribe to the character

  status: string; // the traits of the character you wish not to change

  longTermMemory: GenerativeAgentMemory;

  llm: BaseLanguageModelInterface; // the underlying language model

  // Initialized to false so `config.verbose ?? this.verbose` in the
  // constructor defaults to false instead of undefined when the config
  // omits `verbose`.
  verbose = false;

  private summary: string; // stateful self-summary generated via reflection on the character's memory.

  private summaryRefreshSeconds = 3600;

  private lastRefreshed: Date; // the last time the character's summary was regenerated

  // TODO: Add support for daily summaries
  // private dailySummaries: string[] = []; // summary of the events in the plan that the agent took.

  _chainType(): string {
    return "generative_agent_executor";
  }

  get inputKeys(): string[] {
    return ["observation", "suffix", "now"];
  }

  get outputKeys(): string[] {
    return ["output", "continue_dialogue"];
  }

  constructor(
    llm: BaseLanguageModelInterface,
    longTermMemory: GenerativeAgentMemory,
    config: GenerativeAgentConfig
  ) {
    super();
    this.llm = llm;
    this.longTermMemory = longTermMemory;
    this.name = config.name;
    this.age = config.age;
    this.traits = config.traits;
    this.status = config.status;
    this.verbose = config.verbose ?? this.verbose;
    this.summary = "";
    this.summaryRefreshSeconds =
      config.summaryRefreshSeconds ?? this.summaryRefreshSeconds;
    this.lastRefreshed = new Date();
    // this.dailySummaries = config.dailySummaries ?? this.dailySummaries;
  }

  // LLM methods

  /**
   * Parses a newline-separated string into a list of strings, stripping
   * any leading "1." style numbering from each line.
   * @param text The string to parse.
   * @returns An array of strings parsed from the input text.
   */
  parseList(text: string): string[] {
    // parse a newline-separated string into a list of strings
    const lines: string[] = text.trim().split("\n");
    const result: string[] = lines.map((line: string) =>
      line.replace(/^\s*\d+\.\s*/, "").trim()
    );
    return result;
  }

  /**
   * Creates a new LLMChain with the given prompt and the agent's language
   * model, verbosity, output key, and memory.
   * @param prompt The prompt to use for the LLMChain.
   * @returns A new LLMChain instance.
   */
  chain(prompt: PromptTemplate): LLMChain {
    const chain = new LLMChain({
      llm: this.llm,
      prompt,
      verbose: this.verbose,
      outputKey: "output", // new
      memory: this.longTermMemory,
    });
    return chain;
  }

  /**
   * Extracts the observed entity from the given observation.
   * @param observation The observation to extract the entity from.
   * @param runManager Optional CallbackManagerForChainRun instance.
   * @returns The extracted entity as a string.
   */
  async getEntityFromObservations(
    observation: string,
    runManager?: CallbackManagerForChainRun
  ): Promise<string> {
    const prompt = PromptTemplate.fromTemplate(
      "What is the observed entity in the following observation? {observation}" +
        "\nEntity="
    );
    const result = await this.chain(prompt).call(
      {
        observation,
      },
      runManager?.getChild("entity_extractor")
    );
    return result.output;
  }

  /**
   * Extracts the action of the given entity from the given observation.
   * @param observation The observation to extract the action from.
   * @param entityName The name of the entity to extract the action for.
   * @param runManager Optional CallbackManagerForChainRun instance.
   * @returns The extracted action as a string.
   */
  async getEntityAction(
    observation: string,
    entityName: string,
    runManager?: CallbackManagerForChainRun
  ): Promise<string> {
    const prompt = PromptTemplate.fromTemplate(
      "What is the {entity} doing in the following observation? {observation}" +
        "\nThe {entity} is"
    );
    const result = await this.chain(prompt).call(
      {
        entity: entityName,
        observation,
      },
      runManager?.getChild("entity_action_extractor")
    );
    const trimmedResult = result.output.trim();
    return trimmedResult;
  }

  /**
   * Summarizes memories that are most relevant to an observation.
   * @param observation The observation to summarize related memories for.
   * @param runManager Optional CallbackManagerForChainRun instance.
   * @returns The summarized memories as a string.
   */
  async summarizeRelatedMemories(
    observation: string,
    runManager?: CallbackManagerForChainRun
  ): Promise<string> {
    // summarize memories that are most relevant to an observation
    const prompt = PromptTemplate.fromTemplate(
      `
{q1}?
Context from memory:
{relevant_memories}
Relevant context:`
    );
    const entityName = await this.getEntityFromObservations(
      observation,
      runManager
    );
    const entityAction = await this.getEntityAction(
      observation,
      entityName,
      runManager
    );
    const q1 = `What is the relationship between ${this.name} and ${entityName}`;
    const q2 = `${entityName} is ${entityAction}`;
    // `queries` is consumed by the agent's memory to retrieve the
    // {relevant_memories} substituted into the prompt above.
    const response = await this.chain(prompt).call(
      {
        q1,
        queries: [q1, q2],
      },
      runManager?.getChild("entity_relationships")
    );
    return response.output.trim(); // added output
  }

  /**
   * Reacts to a given observation or dialogue act: builds a prompt from the
   * agent's summary, status, and relevant/recent memories, then parses the
   * LLM output for REACT:/SAY:/GOODBYE: markers, storing the resulting
   * memory and deciding whether the dialogue continues.
   */
  async _call(
    values: ChainValues,
    runManager?: CallbackManagerForChainRun
  ): Promise<ChainValues> {
    const { observation, suffix, now } = values;
    // react to a given observation or dialogue act
    const prompt = PromptTemplate.fromTemplate(
      `{agent_summary_description}` +
        `\nIt is {current_time}.` +
        `\n{agent_name}'s status: {agent_status}` +
        `\nSummary of relevant context from {agent_name}'s memory:` +
        "\n{relevant_memories}" +
        `\nMost recent observations: {most_recent_memories}` +
        `\nObservation: {observation}` +
        `\n\n${suffix}`
    );
    const agentSummaryDescription = await this.getSummary({}, runManager); // now = now in param
    const relevantMemoriesStr = await this.summarizeRelatedMemories(
      observation,
      runManager
    );
    const currentTime = (now || new Date()).toLocaleString("en-US", {
      month: "long",
      day: "numeric",
      year: "numeric",
      hour: "numeric",
      minute: "numeric",
      hour12: true,
    });
    const chainInputs: ChainValues = {
      agent_summary_description: agentSummaryDescription,
      current_time: currentTime,
      agent_name: this.name,
      observation,
      agent_status: this.status,
      most_recent_memories: "",
    };
    chainInputs[this.longTermMemory.getRelevantMemoriesKey()] =
      relevantMemoriesStr;
    // Measure how many tokens the prompt consumes so the memory can fill
    // the remaining budget with the most recent memories.
    const consumedTokens = await this.llm.getNumTokens(
      await prompt.format({ ...chainInputs })
    );
    chainInputs[this.longTermMemory.getMostRecentMemoriesTokenKey()] =
      consumedTokens;
    const response = await this.chain(prompt).call(
      chainInputs,
      runManager?.getChild("reaction_from_summary")
    );
    const rawOutput = response.output;
    let output = rawOutput;
    let continue_dialogue = false;
    if (rawOutput.includes("REACT:")) {
      const reaction = this._cleanResponse(rawOutput.split("REACT:").pop());
      await this.addMemory(
        `${this.name} observed ${observation} and reacted by ${reaction}`,
        now,
        {},
        runManager?.getChild("memory")
      );
      output = `${reaction}`;
      continue_dialogue = false;
    } else if (rawOutput.includes("SAY:")) {
      const saidValue = this._cleanResponse(rawOutput.split("SAY:").pop());
      await this.addMemory(
        `${this.name} observed ${observation} and said ${saidValue}`,
        now,
        {},
        runManager?.getChild("memory")
      );
      output = `${this.name} said ${saidValue}`;
      continue_dialogue = true;
    } else if (rawOutput.includes("GOODBYE:")) {
      const farewell = this._cleanResponse(
        rawOutput.split("GOODBYE:").pop() ?? ""
      );
      await this.addMemory(
        `${this.name} observed ${observation} and said ${farewell}`,
        now,
        {},
        runManager?.getChild("memory")
      );
      output = `${this.name} said ${farewell}`;
      continue_dialogue = false;
    }
    return { output, continue_dialogue };
  }

  /**
   * Strips a leading "<agent name> " prefix from an LLM response and trims
   * whitespace; returns "" for undefined input.
   */
  private _cleanResponse(text: string | undefined): string {
    if (text === undefined) {
      return "";
    }
    const regex = new RegExp(`^${this.name} `);
    return text.replace(regex, "").trim();
  }

  /**
   * Generates a reaction to the given observation.
   * @param observation The observation to generate a reaction for.
   * @param now Optional current date.
   * @returns A boolean indicating whether to continue the dialogue and the output string.
   */
  async generateReaction(
    observation: string,
    now?: Date
  ): Promise<[boolean, string]> {
    const callToActionTemplate: string =
      `Should {agent_name} react to the observation, and if so,` +
      ` what would be an appropriate reaction? Respond in one line.` +
      ` If the action is to engage in dialogue, write:\nSAY: "what to say"` +
      ` \notherwise, write:\nREACT: {agent_name}'s reaction (if anything).` +
      ` \nEither do nothing, react, or say something but not both.\n\n`;
    const { output, continue_dialogue } = await this.call({
      observation,
      suffix: callToActionTemplate,
      now,
    });
    return [continue_dialogue, output];
  }

  /**
   * Generates a dialogue response to the given observation.
   * @param observation The observation to generate a dialogue response for.
   * @param now Optional current date.
   * @returns A boolean indicating whether to continue the dialogue and the output string.
   */
  async generateDialogueResponse(
    observation: string,
    now?: Date
  ): Promise<[boolean, string]> {
    const callToActionTemplate = `What would ${this.name} say? To end the conversation, write: GOODBYE: "what to say". Otherwise to continue the conversation, write: SAY: "what to say next"\n\n`;
    const { output, continue_dialogue } = await this.call({
      observation,
      suffix: callToActionTemplate,
      now,
    });
    return [continue_dialogue, output];
  }

  // Agent stateful' summary methods
  // Each dialog or response prompt includes a header
  // summarizing the agent's self-description. This is
  // updated periodically through probing it's memories

  /**
   * Gets the agent's summary, which includes the agent's name, age, traits,
   * and a summary of the agent's core characteristics. The summary is
   * updated periodically through probing the agent's memories.
   * @param config Optional configuration object with current date and a boolean to force refresh.
   * @param runManager Optional CallbackManagerForChainRun instance.
   * @returns The agent's summary as a string.
   */
  async getSummary(
    config?: {
      now?: Date;
      forceRefresh?: boolean;
    },
    runManager?: CallbackManagerForChainRun
  ): Promise<string> {
    const { now = new Date(), forceRefresh = false } = config ?? {};
    const sinceRefresh = Math.floor(
      (now.getTime() - this.lastRefreshed.getTime()) / 1000
    );
    // Recompute when there is no cached summary, the refresh interval has
    // elapsed, or the caller explicitly asks for a refresh.
    if (
      !this.summary ||
      sinceRefresh >= this.summaryRefreshSeconds ||
      forceRefresh
    ) {
      this.summary = await this.computeAgentSummary(runManager);
      this.lastRefreshed = now;
    }
    let age;
    if (this.age) {
      age = this.age;
    } else {
      age = "N/A";
    }
    return `Name: ${this.name} (age: ${age})
Innate traits: ${this.traits}
${this.summary}`;
  }

  /**
   * Computes the agent's summary by summarizing the agent's core
   * characteristics given the agent's relevant memories.
   * @param runManager Optional CallbackManagerForChainRun instance.
   * @returns The computed summary as a string.
   */
  async computeAgentSummary(
    runManager?: CallbackManagerForChainRun
  ): Promise<string> {
    const prompt = PromptTemplate.fromTemplate(
      "How would you summarize {name}'s core characteristics given the following statements:\n" +
        "----------" +
        "{relevant_memories}" +
        "----------" +
        "Do not embellish." +
        "\n\nSummary: "
    );
    // the agent seeks to think about their core characteristics
    const result = await this.chain(prompt).call(
      {
        name: this.name,
        queries: [`${this.name}'s core characteristics`],
      },
      runManager?.getChild("compute_agent_summary")
    );
    return result.output.trim();
  }

  /**
   * Returns a full header of the agent's status, summary, and current time.
   * Now async: `getSummary` returns a Promise, and the previous sync
   * version interpolated the unresolved Promise into the header string
   * (producing "[object Promise]") instead of the summary text.
   * @param config Optional configuration object with current date and a boolean to force refresh.
   * @returns The full header as a string.
   */
  async getFullHeader(
    config: {
      now?: Date;
      forceRefresh?: boolean;
    } = {}
  ): Promise<string> {
    const { now = new Date(), forceRefresh = false } = config;
    // return a full header of the agent's status, summary, and current time.
    const summary = await this.getSummary({ now, forceRefresh });
    const currentTimeString = now.toLocaleString("en-US", {
      month: "long",
      day: "numeric",
      year: "numeric",
      hour: "numeric",
      minute: "numeric",
      hour12: true,
    });
    return `${summary}\nIt is ${currentTimeString}.\n${this.name}'s status: ${this.status}`;
  }

  /**
   * Adds a memory to the agent's long-term memory.
   * @param memoryContent The content of the memory to add.
   * @param now Optional current date.
   * @param metadata Optional metadata for the memory.
   * @param callbacks Optional Callbacks instance.
   * @returns The result of adding the memory to the agent's long-term memory.
   */
  async addMemory(
    memoryContent: string,
    now?: Date,
    metadata?: Record<string, unknown>,
    callbacks?: Callbacks
  ) {
    return this.longTermMemory.addMemory(
      memoryContent,
      now,
      metadata,
      callbacks
    );
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/generative_agents | lc_public_repos/langchainjs/langchain/src/experimental/generative_agents/tests/generative_agent.int.test.ts | import { test } from "@jest/globals";
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "../../../vectorstores/memory.js";
import { TimeWeightedVectorStoreRetriever } from "../../../retrievers/time_weighted.js";
import { GenerativeAgentMemory, GenerativeAgent } from "../index.js";
// This takes a very long time, mostly for illustrative purposes
test.skip(
"Test generative agent end-to-end",
async () => {
const Simulation = async () => {
const userName = "USER";
const llm = new OpenAI({
temperature: 0.9,
maxTokens: 1500,
});
const createNewMemoryRetriever = async () => {
// Create a new, demo in-memory vector store retriever unique to the agent.
// Better results can be achieved with a more sophisticatd vector store.
const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
const retriever = new TimeWeightedVectorStoreRetriever({
vectorStore,
otherScoreKeys: ["importance"],
k: 15,
});
return retriever;
};
// Tommie
const tommiesMemory: GenerativeAgentMemory = new GenerativeAgentMemory(
llm,
await createNewMemoryRetriever(),
{ reflectionThreshold: 8 }
);
const tommie: GenerativeAgent = new GenerativeAgent(llm, tommiesMemory, {
name: "Tommie",
age: 25,
traits: "anxious, likes design, talkative",
status: "looking for a job",
});
// console.log("Tommie's first summary:\n", await tommie.getSummary());
/*
Tommie's first summary:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is an individual with no specific core characteristics described.
*/
const tommieObservations = [
"Tommie remembers his dog, Bruno, from when he was a kid",
"Tommie feels tired from driving so far",
"Tommie sees the new home",
"The new neighbors have a cat",
"The road is noisy at night",
"Tommie is hungry",
"Tommie tries to get some rest.",
];
for (const observation of tommieObservations) {
await tommie.addMemory(observation, new Date());
}
// console.log(
// "Tommie's second summary:\n",
// await tommie.getSummary({ forceRefresh: true })
// );
/*
Tommie's second summary:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie remembers his dog, is tired from driving, sees a new home with neighbors who have a cat, is aware of the noisy road at night, is hungry, and tries to get some rest.
*/
const interviewAgent = async (
agent: GenerativeAgent,
message: string
): Promise<string> => {
// Simple wrapper helping the user interact with the agent
const newMessage = `${userName} says ${message}`;
const response = await agent.generateDialogueResponse(newMessage);
return response[1];
};
// Let's have Tommie start going through a day in his life.
const observations = [
"Tommie wakes up to the sound of a noisy construction site outside his window.",
"Tommie gets out of bed and heads to the kitchen to make himself some coffee.",
"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.",
"Tommie finally finds the filters and makes himself a cup of coffee.",
"The coffee tastes bitter, and Tommie regrets not buying a better brand.",
"Tommie checks his email and sees that he has no job offers yet.",
"Tommie spends some time updating his resume and cover letter.",
"Tommie heads out to explore the city and look for job openings.",
"Tommie sees a sign for a job fair and decides to attend.",
"The line to get in is long, and Tommie has to wait for an hour.",
"Tommie meets several potential employers at the job fair but doesn't receive any offers.",
"Tommie leaves the job fair feeling disappointed.",
"Tommie stops by a local diner to grab some lunch.",
"The service is slow, and Tommie has to wait for 30 minutes to get his food.",
"Tommie overhears a conversation at the next table about a job opening.",
"Tommie asks the diners about the job opening and gets some information about the company.",
"Tommie decides to apply for the job and sends his resume and cover letter.",
"Tommie continues his search for job openings and drops off his resume at several local businesses.",
"Tommie takes a break from his job search to go for a walk in a nearby park.",
"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.",
"Tommie sees a group of people playing frisbee and decides to join in.",
"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.",
"Tommie goes back to his apartment to rest for a bit.",
"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.",
"Tommie starts to feel frustrated with his job search.",
"Tommie calls his best friend to vent about his struggles.",
"Tommie's friend offers some words of encouragement and tells him to keep trying.",
"Tommie feels slightly better after talking to his friend.",
];
// Let's send Tommie on his way. We'll check in on his summary every few observations to watch him evolve
for (let i = 0; i < observations.length; i += 1) {
const observation = observations[i];
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const [, reaction] = await tommie.generateReaction(observation);
// console.log("\x1b[32m", observation, "\x1b[0m", reaction);
if ((i + 1) % 20 === 0) {
// console.log("*".repeat(40));
// console.log(
// "\x1b[34m",
// `After ${
// i + 1
// } observations, Tommie's summary is:\n${await tommie.getSummary({
// forceRefresh: true,
// })}`,
// "\x1b[0m"
// );
// console.log("*".repeat(40));
}
}
/*
Tommie wakes up to the sound of a noisy construction site outside his window. Tommie REACT: Tommie groans in frustration and covers his ears with his pillow.
Tommie gets out of bed and heads to the kitchen to make himself some coffee. Tommie REACT: Tommie rubs his tired eyes before heading to the kitchen to make himself some coffee.
Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some. Tommie REACT: Tommie groans and looks through his moving boxes in search of coffee filters.
Tommie finally finds the filters and makes himself a cup of coffee. Tommie REACT: Tommie sighs in relief and prepares himself a much-needed cup of coffee.
The coffee tastes bitter, and Tommie regrets not buying a better brand. Tommie REACT: Tommie frowns in disappointment as he takes a sip of the bitter coffee.
Tommie checks his email and sees that he has no job offers yet. Tommie REACT: Tommie sighs in disappointment before pushing himself away from the computer with a discouraged look on his face.
Tommie spends some time updating his resume and cover letter. Tommie REACT: Tommie takes a deep breath and stares at the computer screen as he updates his resume and cover letter.
Tommie heads out to explore the city and look for job openings. Tommie REACT: Tommie takes a deep breath and steps out into the city, ready to find the perfect job opportunity.
Tommie sees a sign for a job fair and decides to attend. Tommie REACT: Tommie takes a deep breath and marches towards the job fair, determination in his eyes.
The line to get in is long, and Tommie has to wait for an hour. Tommie REACT: Tommie groans in frustration as he notices the long line.
Tommie meets several potential employers at the job fair but doesn't receive any offers. Tommie REACT: Tommie's face falls as he listens to each potential employer's explanation as to why they can't hire him.
Tommie leaves the job fair feeling disappointed. Tommie REACT: Tommie's face falls as he walks away from the job fair, disappointment evident in his expression.
Tommie stops by a local diner to grab some lunch. Tommie REACT: Tommie smiles as he remembers Bruno as he walks into the diner, feeling both a sense of nostalgia and excitement.
The service is slow, and Tommie has to wait for 30 minutes to get his food. Tommie REACT: Tommie sighs in frustration and taps his fingers on the table, growing increasingly impatient.
Tommie overhears a conversation at the next table about a job opening. Tommie REACT: Tommie leans in closer, eager to hear the conversation.
Tommie asks the diners about the job opening and gets some information about the company. Tommie REACT: Tommie eagerly listens to the diner's description of the company, feeling hopeful about the job opportunity.
Tommie decides to apply for the job and sends his resume and cover letter. Tommie REACT: Tommie confidently sends in his resume and cover letter, determined to get the job.
Tommie continues his search for job openings and drops off his resume at several local businesses. Tommie REACT: Tommie confidently drops his resume off at the various businesses, determined to find a job.
Tommie takes a break from his job search to go for a walk in a nearby park. Tommie REACT: Tommie takes a deep breath of the fresh air and smiles in appreciation as he strolls through the park.
A dog approaches and licks Tommie's feet, and he pets it for a few minutes. Tommie REACT: Tommie smiles in surprise as he pets the dog, feeling a sense of comfort and nostalgia.
****************************************
After 20 observations, Tommie's summary is:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is a determined and resilient individual who remembers his dog from when he was a kid. Despite feeling tired from driving, he has the courage to explore the city, looking for job openings. He persists in updating his resume and cover letter in the pursuit of finding the perfect job opportunity, even attending job fairs when necessary, and is disappointed when he's not offered a job.
****************************************
Tommie sees a group of people playing frisbee and decides to join in. Tommie REACT: Tommie smiles and approaches the group, eager to take part in the game.
Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose. Tommie REACT: Tommie grimaces in pain and raises his hand to his nose, checking to see if it's bleeding.
Tommie goes back to his apartment to rest for a bit. Tommie REACT: Tommie yawns and trudges back to his apartment, feeling exhausted from his busy day.
A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor. Tommie REACT: Tommie shakes his head in annoyance as he surveys the mess.
Tommie starts to feel frustrated with his job search. Tommie REACT: Tommie sighs in frustration and shakes his head, feeling discouraged from his lack of progress.
Tommie calls his best friend to vent about his struggles. Tommie REACT: Tommie runs his hands through his hair and sighs heavily, overwhelmed by his job search.
Tommie's friend offers some words of encouragement and tells him to keep trying. Tommie REACT: Tommie gives his friend a grateful smile, feeling comforted by the words of encouragement.
Tommie feels slightly better after talking to his friend. Tommie REACT: Tommie gives a small smile of appreciation to his friend, feeling grateful for the words of encouragement.
*/
// Interview after the day
// console.log(
// await interviewAgent(
// tommie,
// "Tell me about how your day has been going"
// )
// );
/*
Tommie said "My day has been pretty hectic. I've been driving around looking for job openings, attending job fairs, and updating my resume and cover letter. It's been really exhausting, but I'm determined to find the perfect job for me."
*/
// console.log(
// await interviewAgent(tommie, "How do you feel about coffee?")
// );
/*
Tommie said "I actually love coffee - it's one of my favorite things. I try to drink it every day, especially when I'm stressed from job searching."
*/
// console.log(
// await interviewAgent(tommie, "Tell me about your childhood dog!")
// );
/*
Tommie said "My childhood dog was named Bruno. He was an adorable black Labrador Retriever who was always full of energy. Every time I came home he'd be so excited to see me, it was like he never stopped smiling. He was always ready for adventure and he was always my shadow. I miss him every day."
*/
// console.log(
// "Tommie's second summary:\n",
// await tommie.getSummary({ forceRefresh: true })
// );
/*
Tommie's second summary:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is a hardworking individual who is looking for new opportunities. Despite feeling tired, he is determined to find the perfect job. He remembers his dog from when he was a kid, is hungry, and is frustrated at times. He shows resilience when searching for his coffee filters, disappointment when checking his email and finding no job offers, and determination when attending the job fair.
*/
// Let’s add a second character to have a conversation with Tommie. Feel free to configure different traits.
const evesMemory: GenerativeAgentMemory = new GenerativeAgentMemory(
llm,
await createNewMemoryRetriever(),
{
verbose: false,
reflectionThreshold: 5,
}
);
const eve: GenerativeAgent = new GenerativeAgent(llm, evesMemory, {
name: "Eve",
age: 34,
traits: "curious, helpful",
status:
"just started her new job as a career counselor last week and received her first assignment, a client named Tommie.",
// dailySummaries: [
// "Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie."
// ]
});
const eveObservations = [
"Eve overhears her colleague say something about a new client being hard to work with",
"Eve wakes up and hears the alarm",
"Eve eats a boal of porridge",
"Eve helps a coworker on a task",
"Eve plays tennis with her friend Xu before going to work",
"Eve overhears her colleague say something about Tommie being hard to work with",
];
for (const observation of eveObservations) {
await eve.addMemory(observation, new Date());
}
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const eveInitialSummary: string = await eve.getSummary({
forceRefresh: true,
});
// console.log("Eve's initial summary\n", eveInitialSummary);
/*
Eve's initial summary
Name: Eve (age: 34)
Innate traits: curious, helpful
Eve is an attentive listener, helpful colleague, and sociable friend who enjoys playing tennis.
*/
// Let’s “Interview” Eve before she speaks with Tommie.
// console.log(
// await interviewAgent(eve, "How are you feeling about today?")
// );
/*
Eve said "I'm feeling a bit anxious about meeting my new client, but I'm sure it will be fine! How about you?".
*/
// console.log(await interviewAgent(eve, "What do you know about Tommie?"));
/*
Eve said "I know that Tommie is a recent college graduate who's been struggling to find a job. I'm looking forward to figuring out how I can help him move forward."
*/
// console.log(
// await interviewAgent(
// eve,
// "Tommie is looking to find a job. What are are some things you'd like to ask him?"
// )
// );
/*
Eve said: "I'd really like to get to know more about Tommie's professional background and experience, and why he is looking for a job. And I'd also like to know more about his strengths and passions and what kind of work he would be best suited for. That way I can help him find the right job to fit his needs."
*/
// Generative agents are much more complex when they interact with a virtual environment or with each other.
// Below, we run a simple conversation between Tommie and Eve.
const runConversation = async (
agents: GenerativeAgent[],
initialObservation: string
): Promise<void> => {
// Starts the conversation bewteen two agents
const [, observation] = await agents[1].generateReaction(
initialObservation
);
// console.log("Initial reply:", observation);
// eslint-disable-next-line no-constant-condition
while (true) {
let breakDialogue = false;
for (const agent of agents) {
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const [stayInDialogue, agentObservation] =
await agent.generateDialogueResponse(observation);
// console.log("Next reply:", agentObservation);
if (!stayInDialogue) {
breakDialogue = true;
}
}
if (breakDialogue) {
break;
}
}
};
const agents: GenerativeAgent[] = [tommie, eve];
await runConversation(
agents,
"Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?"
);
/*
Initial reply: Eve said "Of course, Tommie. I'd be happy to share about my experience. What specific questions do you have?"
Next reply: Tommie said "Thank you, Eve. I'm curious about what strategies you used in your own job search. Did you have any specific tactics that helped you stand out to employers?"
Next reply: Eve said "Sure, Tommie. I found that networking and reaching out to professionals in my field was really helpful. I also made sure to tailor my resume and cover letter to each job I applied to. Do you have any specific questions about those strategies?"
Next reply: Tommie said "Thank you, Eve. That's really helpful advice. Did you have any specific ways of networking that worked well for you?"
Next reply: Eve said "Sure, Tommie. I found that attending industry events and connecting with professionals on LinkedIn were both great ways to network. Do you have any specific questions about those tactics?"
Next reply: Tommie said "That's really helpful, thank you for sharing. Did you find that you were able to make meaningful connections through LinkedIn?"
Next reply: Eve said "Yes, definitely. I was able to connect with several professionals in my field and even landed a job through a LinkedIn connection. Have you had any luck with networking on LinkedIn?"
Next reply: Tommie said "That's really impressive! I haven't had much luck yet, but I'll definitely keep trying. Thank you for the advice, Eve."
Next reply: Eve said "Glad I could help, Tommie. Is there anything else you want to know?"
Next reply: Tommie said "Thanks again, Eve. I really appreciate your advice and I'll definitely put it into practice. Have a great day!"
Next reply: Eve said "You're welcome, Tommie! Don't hesitate to reach out if you have any more questions. Have a great day too!"
*/
// Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memories.
const tommieSummary: string = await tommie.getSummary({
forceRefresh: true,
});
// console.log("Tommie's third and final summary\n", tommieSummary);
/*
Tommie's third and final summary
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is a determined individual, who demonstrates resilience in the face of disappointment. He is also a nostalgic person, remembering fondly his childhood pet, Bruno. He is resourceful, searching through his moving boxes to find what he needs, and takes initiative to attend job fairs to look for job openings.
*/
const eveSummary: string = await eve.getSummary({ forceRefresh: true });
// console.log("Eve's final summary\n", eveSummary);
/*
Eve's final summary
Name: Eve (age: 34)
Innate traits: curious, helpful
Eve is a helpful and encouraging colleague who actively listens to her colleagues and offers advice on how to move forward. She is willing to take time to understand her clients and their goals, and is committed to helping them succeed.
*/
const interviewOne: string = await interviewAgent(
tommie,
"How was your conversation with Eve?"
);
// console.log("USER: How was your conversation with Eve?\n");
// console.log(interviewOne);
/*
Tommie said "It was great. She was really helpful and knowledgeable. I'm thankful that she took the time to answer all my questions."
*/
const interviewTwo: string = await interviewAgent(
eve,
"How was your conversation with Tommie?"
);
// console.log("USER: How was your conversation with Tommie?\n");
// console.log(interviewTwo);
/*
Eve said "The conversation went very well. We discussed his goals and career aspirations, what kind of job he is looking for, and his experience and qualifications. I'm confident I can help him find the right job."
*/
const interviewThree: string = await interviewAgent(
eve,
"What do you wish you would have said to Tommie?"
);
// console.log("USER: What do you wish you would have said to Tommie?\n");
// console.log(interviewThree);
/*
Eve said "It's ok if you don't have all the answers yet. Let's take some time to learn more about your experience and qualifications, so I can help you find a job that fits your goals."
*/
return {
tommieFinalSummary: tommieSummary,
eveFinalSummary: eveSummary,
interviewOne,
interviewTwo,
interviewThree,
};
};
await Simulation();
},
60000 * 30
);
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/agent.ts | import type { VectorStoreRetrieverInterface } from "@langchain/core/vectorstores";
import { Tool } from "@langchain/core/tools";
import {
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
} from "@langchain/core/messages";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import {
getEmbeddingContextSize,
getModelContextSize,
} from "@langchain/core/language_models/base";
import { LLMChain } from "../../chains/llm_chain.js";
import { AutoGPTOutputParser } from "./output_parser.js";
import { AutoGPTPrompt } from "./prompt.js";
// import { HumanInputRun } from "./tools/human/tool"; // TODO
import { ObjectTool, FINISH_NAME } from "./schema.js";
import { TokenTextSplitter } from "../../text_splitter.js";
/**
* Interface for the input parameters of the AutoGPT class.
*/
export interface AutoGPTInput {
  /** Name the agent uses to refer to itself in prompts (e.g. "Tom"). */
  aiName: string;
  /** Short description of the agent's role, interpolated into the system prompt. */
  aiRole: string;
  /** Vector-store-backed retriever serving as the agent's long-term memory. */
  memory: VectorStoreRetrieverInterface;
  /** Intended to enable a human feedback tool; currently unused (see commented-out wiring in AutoGPT.fromLLMAndTools). */
  humanInTheLoop?: boolean;
  /** Parser applied to the raw LLM reply to extract the next command. Defaults to a new AutoGPTOutputParser. */
  outputParser?: AutoGPTOutputParser;
  /** Upper bound on think/act loop iterations before `run` gives up. */
  maxIterations?: number;
}
/**
* Class representing the AutoGPT concept with LangChain primitives. It is
* designed to be used with a set of tools such as a search tool,
* write-file tool, and a read-file tool.
* @example
* ```typescript
* const autogpt = AutoGPT.fromLLMAndTools(
* new ChatOpenAI({ temperature: 0 }),
* [
* new ReadFileTool({ store: new InMemoryFileStore() }),
* new WriteFileTool({ store: new InMemoryFileStore() }),
* new SerpAPI("YOUR_SERPAPI_API_KEY", {
* location: "San Francisco,California,United States",
* hl: "en",
* gl: "us",
* }),
* ],
* {
* memory: new MemoryVectorStore(new OpenAIEmbeddings()).asRetriever(),
* aiName: "Tom",
* aiRole: "Assistant",
* },
* );
* const result = await autogpt.run(["write a weather report for SF today"]);
* ```
*/
export class AutoGPT {
  /** Name the agent uses to refer to itself in prompts. */
  aiName: string;

  /** Vector-store-backed retriever serving as long-term memory. */
  memory: VectorStoreRetrieverInterface;

  /** Complete transcript of messages exchanged so far during `run`. */
  fullMessageHistory: BaseMessage[];

  nextActionCount: number;

  /** Chain combining the LLM with the AutoGPT prompt. */
  chain: LLMChain;

  /** Parser that turns a raw LLM reply into an AutoGPTAction. */
  outputParser: AutoGPTOutputParser;

  tools: ObjectTool[];

  /** Optional tool used to collect human feedback after each step. */
  feedbackTool?: Tool;

  /** Upper bound on think/act loop iterations. */
  maxIterations: number;

  // Currently not generic enough to support any text splitter.
  textSplitter: TokenTextSplitter;

  constructor({
    aiName,
    memory,
    chain,
    outputParser,
    tools,
    feedbackTool,
    maxIterations,
  }: Omit<Required<AutoGPTInput>, "aiRole" | "humanInTheLoop"> & {
    chain: LLMChain;
    tools: ObjectTool[];
    feedbackTool?: Tool;
  }) {
    this.aiName = aiName;
    this.memory = memory;
    this.fullMessageHistory = [];
    this.nextActionCount = 0;
    this.chain = chain;
    this.outputParser = outputParser;
    this.tools = tools;
    this.feedbackTool = feedbackTool;
    this.maxIterations = maxIterations;
    // Size memory chunks to the embedding model's context window, with ~10% overlap.
    const chunkSize = getEmbeddingContextSize(
      "modelName" in memory.vectorStore.embeddings
        ? (memory.vectorStore.embeddings.modelName as string)
        : undefined
    );
    this.textSplitter = new TokenTextSplitter({
      chunkSize,
      chunkOverlap: Math.round(chunkSize / 10),
    });
  }

  /**
   * Creates a new AutoGPT instance from a given LLM and a set of tools.
   * @param llm A BaseChatModel object.
   * @param tools An array of ObjectTool objects.
   * @param options.aiName The name of the AI.
   * @param options.aiRole The role of the AI.
   * @param options.memory A VectorStoreRetriever object that represents the memory of the AI.
   * @param options.maxIterations The maximum number of iterations the AI can perform.
   * @param options.outputParser An AutoGPTOutputParser object that parses the output of the AI.
   * @returns A new instance of the AutoGPT class.
   */
  static fromLLMAndTools(
    llm: BaseChatModel,
    tools: ObjectTool[],
    {
      aiName,
      aiRole,
      memory,
      maxIterations = 100,
      // humanInTheLoop = false,
      outputParser = new AutoGPTOutputParser(),
    }: AutoGPTInput
  ): AutoGPT {
    const prompt = new AutoGPTPrompt({
      aiName,
      aiRole,
      tools,
      tokenCounter: llm.getNumTokens.bind(llm),
      sendTokenLimit: getModelContextSize(
        "modelName" in llm ? (llm.modelName as string) : "gpt2"
      ),
    });
    // const feedbackTool = humanInTheLoop ? new HumanInputRun() : null;
    const chain = new LLMChain({ llm, prompt });
    return new AutoGPT({
      aiName,
      memory,
      chain,
      outputParser,
      tools,
      // feedbackTool,
      maxIterations,
    });
  }

  /**
   * Runs the AI with a given set of goals.
   * @param goals An array of strings representing the goals.
   * @returns A string representing the result of the run or undefined if the maximum number of iterations is reached without a result.
   */
  async run(goals: string[]): Promise<string | undefined> {
    const user_input =
      "Determine which next command to use, and respond using the format specified above:";
    // Build the name -> tool lookup once; it is loop-invariant, so there is
    // no reason to rebuild it on every iteration.
    const tools = this.tools.reduce(
      (acc, tool) => ({ ...acc, [tool.name]: tool }),
      {} as { [key: string]: ObjectTool }
    );
    let loopCount = 0;
    while (loopCount < this.maxIterations) {
      loopCount += 1;
      const { text: assistantReply } = await this.chain.call({
        goals,
        user_input,
        memory: this.memory,
        messages: this.fullMessageHistory,
      });
      // Print the assistant reply
      console.log(assistantReply);
      this.fullMessageHistory.push(new HumanMessage(user_input));
      this.fullMessageHistory.push(new AIMessage(assistantReply));
      const action = await this.outputParser.parse(assistantReply);
      if (action.name === FINISH_NAME) {
        return action.args.response;
      }
      let result: string;
      if (action.name in tools) {
        const tool = tools[action.name];
        let observation;
        try {
          observation = await tool.call(action.args);
        } catch (e) {
          observation = `Error in args: ${e}`;
        }
        result = `Command ${tool.name} returned: ${observation}`;
      } else if (action.name === "ERROR") {
        result = `Error: ${action.args}. `;
      } else {
        result = `Unknown command '${action.name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format.`;
      }
      let memoryToAdd = `Assistant Reply: ${assistantReply}\nResult: ${result} `;
      if (this.feedbackTool) {
        // Compare the raw tool output. Previously the reply was compared only
        // AFTER a leading "\n" had been prepended, so the "q"/"stop" exit
        // commands could never match.
        const feedback = await this.feedbackTool.call("Input: ");
        if (feedback === "q" || feedback === "stop") {
          console.log("EXITING");
          return "EXITING";
        }
        memoryToAdd += `\n${feedback}`;
      }
      // Persist this step into long-term memory so later iterations can recall it.
      const documents = await this.textSplitter.createDocuments([memoryToAdd]);
      await this.memory.addDocuments(documents);
      this.fullMessageHistory.push(new SystemMessage(result));
    }
    // Iteration budget exhausted without the agent issuing the finish command.
    return undefined;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/schema.ts | import { StructuredTool } from "@langchain/core/tools";
/**
 * Type alias for StructuredTool. It is part of the tools module in
 * LangChain, which includes a variety of tools used for different
 * purposes.
 */
export type ObjectTool = StructuredTool;

// Sentinel command name the agent emits to signal it has met all its goals;
// AutoGPT.run returns the command's `response` arg when it sees this name.
export const FINISH_NAME = "finish";

/**
 * Interface that describes an action that can be performed by the AutoGPT
 * model in LangChain. It has a `name` property, which is a string that
 * represents the name of the action, and an `args` property, which is an
 * object that contains the arguments for the action.
 */
export interface AutoGPTAction {
  name: string;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  args: Record<string, any>;
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/index.ts | export { AutoGPTPrompt, type AutoGPTPromptInput } from "./prompt.js";
export { AutoGPTOutputParser, preprocessJsonInput } from "./output_parser.js";
export { AutoGPT, type AutoGPTInput } from "./agent.js";
export type { AutoGPTAction } from "./schema.js";
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/prompt_generator.ts | import { zodToJsonSchema, JsonSchema7ObjectType } from "zod-to-json-schema";
import { ObjectTool, FINISH_NAME } from "./schema.js";
/**
* Class that generates prompts for generative agents. It maintains a list
* of constraints, commands, resources, and performance evaluations.
*/
export class PromptGenerator {
  // Rules the agent must obey (rendered as a numbered list).
  constraints: string[];

  // Tools available to the agent; rendered with their JSON-schema args.
  commands: ObjectTool[];

  // Resources the agent may assume it has access to.
  resources: string[];

  // Self-evaluation criteria appended to the prompt.
  performance_evaluation: string[];

  // Example object describing the exact JSON shape the model must reply with.
  response_format: object;

  constructor() {
    this.constraints = [];
    this.commands = [];
    this.resources = [];
    this.performance_evaluation = [];
    this.response_format = {
      thoughts: {
        text: "thought",
        reasoning: "reasoning",
        plan: "- short bulleted\n- list that conveys\n- long-term plan",
        criticism: "constructive self-criticism",
        speak: "thoughts summary to say to user",
      },
      command: { name: "command name", args: { "arg name": "value" } },
    };
  }

  /**
   * Adds a constraint to the list of constraints.
   * @param constraint The constraint to add.
   * @returns void
   */
  add_constraint(constraint: string): void {
    this.constraints.push(constraint);
  }

  /**
   * Adds a tool to the list of commands.
   * @param tool The tool to add.
   * @returns void
   */
  add_tool(tool: ObjectTool): void {
    this.commands.push(tool);
  }

  /**
   * Renders one tool as `"name": description, args json schema: {...}`,
   * using only the `properties` of the tool's zod-derived JSON schema.
   * NOTE: the exact string format is pinned by an inline snapshot test.
   */
  _generate_command_string(tool: ObjectTool): string {
    let output = `"${tool.name}": ${tool.description}`;
    output += `, args json schema: ${JSON.stringify(
      (zodToJsonSchema(tool.schema) as JsonSchema7ObjectType).properties
    )}`;
    return output;
  }

  /**
   * Adds a resource to the list of resources.
   * @param resource The resource to add.
   * @returns void
   */
  add_resource(resource: string): void {
    this.resources.push(resource);
  }

  /**
   * Adds a performance evaluation to the list of performance evaluations.
   * @param evaluation The performance evaluation to add.
   * @returns void
   */
  add_performance_evaluation(evaluation: string): void {
    this.performance_evaluation.push(evaluation);
  }

  /**
   * Renders items as a 1-based numbered list. When `item_type` is
   * "command", each item is formatted via `_generate_command_string` and a
   * synthetic final entry for the `finish` command is appended.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _generate_numbered_list(items: any[], item_type = "list"): string {
    if (item_type === "command") {
      const command_strings = items.map(
        (item, i) => `${i + 1}. ${this._generate_command_string(item)}`
      );
      const finish_description =
        "use this to signal that you have finished all your objectives";
      const finish_args =
        '"response": "final response to let people know you have finished your objectives"';
      const finish_string = `${
        items.length + 1
      }. ${FINISH_NAME}: ${finish_description}, args: ${finish_args}`;
      return command_strings.concat([finish_string]).join("\n");
    }
    return items.map((item, i) => `${i + 1}. ${item}`).join("\n");
  }

  /**
   * Generates a prompt string that includes the constraints, commands,
   * resources, performance evaluations, and response format.
   * @returns A string representing the prompt.
   */
  generate_prompt_string(): string {
    // 4-space-indented JSON so the model sees a readable example shape.
    const formatted_response_format = JSON.stringify(
      this.response_format,
      null,
      4
    );
    const prompt_string =
      `Constraints:\n${this._generate_numbered_list(this.constraints)}\n\n` +
      `Commands:\n${this._generate_numbered_list(
        this.commands,
        "command"
      )}\n\n` +
      `Resources:\n${this._generate_numbered_list(this.resources)}\n\n` +
      `Performance Evaluation:\n${this._generate_numbered_list(
        this.performance_evaluation
      )}\n\n` +
      `You should only respond in JSON format as described below ` +
      `\nResponse Format: \n${formatted_response_format} ` +
      `\nEnsure the response can be parsed by Python json.loads`;
    return prompt_string;
  }
}
/**
* Function that generates a prompt string for a given list of tools.
*/
export function getPrompt(tools: ObjectTool[]): string {
const prompt_generator = new PromptGenerator();
prompt_generator.add_constraint(
"~4000 word limit for short term memory. " +
"Your short term memory is short, " +
"so immediately save important information to files."
);
prompt_generator.add_constraint(
"If you are unsure how you previously did something " +
"or want to recall past events, " +
"thinking about similar events will help you remember."
);
prompt_generator.add_constraint("No user assistance");
prompt_generator.add_constraint(
'Exclusively use the commands listed in double quotes e.g. "command name"'
);
for (const tool of tools) {
prompt_generator.add_tool(tool);
}
prompt_generator.add_resource(
"Internet access for searches and information gathering."
);
prompt_generator.add_resource("Long Term memory management.");
prompt_generator.add_resource(
"GPT-3.5 powered Agents for delegation of simple tasks."
);
prompt_generator.add_resource("File output.");
prompt_generator.add_performance_evaluation(
"Continuously review and analyze your actions " +
"to ensure you are performing to the best of your abilities."
);
prompt_generator.add_performance_evaluation(
"Constructively self-criticize your big-picture behavior constantly."
);
prompt_generator.add_performance_evaluation(
"Reflect on past decisions and strategies to refine your approach."
);
prompt_generator.add_performance_evaluation(
"Every command has a cost, so be smart and efficient. " +
"Aim to complete tasks in the least number of steps."
);
const prompt_string = prompt_generator.generate_prompt_string();
return prompt_string;
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/output_parser.ts | import { BaseOutputParser } from "@langchain/core/output_parsers";
import { AutoGPTAction } from "./schema.js";
/**
* Utility function used to preprocess a string to be parsed as JSON.
* It replaces single backslashes with double backslashes, while leaving
* already escaped ones intact.
* It also extracts the json code if it is inside a code block
*/
/**
 * Prepares a model reply for `JSON.parse`.
 *
 * Two repairs are applied:
 * 1. Lone backslashes (not already part of a valid JSON escape such as
 *    `\n`, `\"`, or `\uXXXX`) are doubled so they survive parsing.
 * 2. If the payload is wrapped in a fenced code block (``` ... ```),
 *    only the code inside the fences is returned, trimmed.
 */
export function preprocessJsonInput(inputStr: string): string {
  const escaped = inputStr.replace(
    /(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})/g,
    "\\\\"
  );
  const fence = /```(.*)(\r\n|\r|\n)(?<code>[\w\W\n]+)(\r\n|\r|\n)```/.exec(
    escaped
  );
  return fence?.groups?.code ? fence.groups.code.trim() : escaped;
}
/**
* Class responsible for parsing the output of AutoGPT. It extends the
* BaseOutputParser class.
*/
export class AutoGPTOutputParser extends BaseOutputParser<AutoGPTAction> {
  lc_namespace = ["langchain", "experimental", "autogpt"];

  /**
   * Method not implemented in the class and will throw an error if called.
   * It is likely meant to be overridden in subclasses to provide specific
   * format instructions.
   * @returns Throws an error.
   */
  getFormatInstructions(): string {
    throw new Error("Method not implemented.");
  }

  /**
   * Parses a raw model reply into an AutoGPTAction. Tries `JSON.parse`
   * directly; on failure, retries after `preprocessJsonInput` (which fixes
   * stray backslashes and unwraps fenced code blocks). Any remaining
   * failure is reported as an action named "ERROR" rather than thrown, so
   * the agent loop can feed the problem back to the model.
   * @param text The string to be parsed.
   * @returns A Promise that resolves to an AutoGPTAction object.
   */
  async parse(text: string): Promise<AutoGPTAction> {
    let parsed: {
      command: {
        name: string;
        args: Record<string, unknown>;
      };
    };
    try {
      parsed = JSON.parse(text);
    } catch (parseError) {
      const preprocessedText = preprocessJsonInput(text);
      try {
        parsed = JSON.parse(preprocessedText);
      } catch (retryError) {
        return {
          name: "ERROR",
          args: { error: `Could not parse invalid json: ${text}` },
        };
      }
    }
    try {
      // Property access throws here when `parsed.command` is missing.
      return {
        name: parsed.command.name,
        args: parsed.command.args,
      };
    } catch (shapeError) {
      return {
        name: "ERROR",
        // JSON.stringify: plain template interpolation of an object would
        // render the useless "[object Object]" in the error message.
        args: { error: `Incomplete command args: ${JSON.stringify(parsed)}` },
      };
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/prompt.ts | import type { VectorStoreRetrieverInterface } from "@langchain/core/vectorstores";
import {
BaseChatPromptTemplate,
SerializedBasePromptTemplate,
} from "@langchain/core/prompts";
import {
BaseMessage,
HumanMessage,
SystemMessage,
} from "@langchain/core/messages";
import { PartialValues } from "@langchain/core/utils/types";
import { getPrompt } from "./prompt_generator.js";
import { ObjectTool } from "./schema.js";
/**
* Interface for the input parameters of the AutoGPTPrompt class.
*/
export interface AutoGPTPromptInput {
  /** Name the agent uses to refer to itself in the system prompt. */
  aiName: string;
  /** Role description interpolated into the system prompt. */
  aiRole: string;
  /** Tools rendered into the command list of the prompt. */
  tools: ObjectTool[];
  /** Async token counter matching the underlying model's tokenizer. */
  tokenCounter: (text: string) => Promise<number>;
  /** Maximum tokens allowed in one request; defaults to 4196 in AutoGPTPrompt. */
  sendTokenLimit?: number;
}
/**
* Class used to generate prompts for the AutoGPT model. It takes into
* account the AI's name, role, tools, token counter, and send token
* limit. The class also handles the formatting of messages and the
* construction of the full prompt.
*/
export class AutoGPTPrompt
  extends BaseChatPromptTemplate
  implements AutoGPTPromptInput
{
  aiName: string;

  aiRole: string;

  tools: ObjectTool[];

  /** Async token counter matching the underlying model's tokenizer. */
  tokenCounter: (text: string) => Promise<number>;

  /** Maximum number of tokens allowed in a single request to the model. */
  sendTokenLimit: number;

  constructor(fields: AutoGPTPromptInput) {
    super({ inputVariables: ["goals", "memory", "messages", "user_input"] });
    this.aiName = fields.aiName;
    this.aiRole = fields.aiRole;
    this.tools = fields.tools;
    this.tokenCounter = fields.tokenCounter;
    // `??` rather than `||` so an explicitly supplied falsy limit is not
    // silently replaced by the default.
    this.sendTokenLimit = fields.sendTokenLimit ?? 4196;
  }

  _getPromptType() {
    return "autogpt" as const;
  }

  /**
   * Constructs the full prompt based on the provided goals.
   * @param goals An array of goals.
   * @returns The full prompt as a string.
   */
  constructFullPrompt(goals: string[]): string {
    const promptStart = `Your decisions must always be made independently
without seeking user assistance. Play to your strengths
as an LLM and pursue simple strategies with no legal complications.
If you have completed all your tasks,
make sure to use the "finish" command.`;
    let fullPrompt = `You are ${this.aiName}, ${this.aiRole}\n${promptStart}\n\nGOALS:\n\n`;
    goals.forEach((goal, index) => {
      fullPrompt += `${index + 1}. ${goal}\n`;
    });
    fullPrompt += `\n\n${getPrompt(this.tools)}`;
    return fullPrompt;
  }

  /**
   * Formats the messages based on the provided parameters, trimming
   * retrieved memory and message history to fit within the token budget.
   * @param goals An array of goals.
   * @param memory A VectorStoreRetriever instance.
   * @param messages An array of previous messages.
   * @param user_input The user's input.
   * @returns An array of formatted messages.
   */
  async formatMessages({
    goals,
    memory,
    messages: previousMessages,
    user_input,
  }: {
    goals: string[];
    memory: VectorStoreRetrieverInterface;
    messages: BaseMessage[];
    user_input: string;
  }) {
    const basePrompt = new SystemMessage(this.constructFullPrompt(goals));
    const timePrompt = new SystemMessage(
      `The current time and date is ${new Date().toLocaleString()}`
    );
    if (
      typeof basePrompt.content !== "string" ||
      typeof timePrompt.content !== "string"
    ) {
      throw new Error("Non-string message content is not supported.");
    }
    const usedTokens =
      (await this.tokenCounter(basePrompt.content)) +
      (await this.tokenCounter(timePrompt.content));
    // Query long-term memory with the 10 most recent messages as context.
    const relevantDocs = await memory.getRelevantDocuments(
      JSON.stringify(previousMessages.slice(-10))
    );
    const relevantMemory = relevantDocs.map(
      (d: { pageContent: string }) => d.pageContent
    );
    // Count each document once, in parallel. The previous implementation
    // re-counted every remaining document on each trim pass (O(n^2) counter
    // calls) and awaited them sequentially.
    const relevantMemoryTokenCounts = await Promise.all(
      relevantMemory.map((doc: string) => this.tokenCounter(doc))
    );
    let relevantMemoryTokens = relevantMemoryTokenCounts.reduce(
      (sum: number, count: number) => sum + count,
      0
    );
    // Trim memory until it fits the ~2500-token budget. The explicit length
    // check prevents an infinite loop when the base prompts alone exceed the
    // budget: popping an empty array never changes the total.
    while (
      usedTokens + relevantMemoryTokens > 2500 &&
      relevantMemory.length > 0
    ) {
      relevantMemory.pop();
      relevantMemoryTokens -= relevantMemoryTokenCounts.pop() ?? 0;
    }
    const contentFormat = `This reminds you of these events from your past:\n${relevantMemory.join(
      "\n"
    )}\n\n`;
    const memoryMessage = new SystemMessage(contentFormat);
    if (typeof memoryMessage.content !== "string") {
      throw new Error("Non-string message content is not supported.");
    }
    const usedTokensWithMemory =
      usedTokens + (await this.tokenCounter(memoryMessage.content));
    // Fill remaining budget with the most recent history, newest first,
    // keeping a 1000-token reserve for the model's reply.
    const historicalMessages: BaseMessage[] = [];
    for (const message of previousMessages.slice(-10).reverse()) {
      if (typeof message.content !== "string") {
        throw new Error("Non-string message content is not supported.");
      }
      const messageTokens = await this.tokenCounter(message.content);
      if (usedTokensWithMemory + messageTokens > this.sendTokenLimit - 1000) {
        break;
      }
      historicalMessages.unshift(message);
    }
    const inputMessage = new HumanMessage(user_input);
    const messages: BaseMessage[] = [
      basePrompt,
      timePrompt,
      memoryMessage,
      ...historicalMessages,
      inputMessage,
    ];
    return messages;
  }

  /**
   * This method is not implemented in the AutoGPTPrompt class and will
   * throw an error if called.
   * @param _values Partial values.
   * @returns Throws an error.
   */
  async partial(_values: PartialValues): Promise<BaseChatPromptTemplate> {
    throw new Error("Method not implemented.");
  }

  serialize(): SerializedBasePromptTemplate {
    throw new Error("Method not implemented.");
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/autogpt | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/tests/output_parser.test.ts | import { test, expect } from "@jest/globals";
import { preprocessJsonInput } from "../output_parser.js";
test("should parse outputs correctly", () => {
  // Lone backslash (not a valid JSON escape) gets doubled.
  expect(preprocessJsonInput("{'escaped':'\\a'}")).toBe("{'escaped':'\\\\a'}");
  // Fenced code blocks are unwrapped, with or without a language tag...
  expect(preprocessJsonInput("```\n{}\n```")).toBe("{}");
  expect(preprocessJsonInput("```json\n{}\n```")).toBe("{}");
  // ...and any prose before the fence is discarded.
  expect(
    preprocessJsonInput("I will do the following:\n\n```json\n{}\n```")
  ).toBe("{}");
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/autogpt | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/tests/prompt.test.ts | import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import { AutoGPTPrompt } from "../prompt.js";
// Mock token counter function
// Mock token counter function: treats every character as one token, which is
// good enough for exercising the budget-trimming logic in the prompt.
const mockTokenCounter = async (text: string): Promise<number> => text.length;
// Mock vector store retriever interface
// Todo: replace any with actual interface
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const mockMemory: any = {
  // Always returns a single fixed document regardless of the query.
  getRelevantDocuments: async () => [
    { pageContent: "relevant content", metadata: {} },
  ],
};
describe("AutoGPTPrompt", () => {
  it("should construct full prompt correctly", () => {
    const prompt = new AutoGPTPrompt({
      aiName: "TestAI",
      aiRole: "Assistant",
      tools: [],
      tokenCounter: mockTokenCounter,
      sendTokenLimit: 2500,
    });
    const goals = ["Goal1", "Goal2"];
    const fullPrompt = prompt.constructFullPrompt(goals);
    // The name, role, and each goal must all appear in the rendered prompt.
    expect(fullPrompt).toContain("TestAI");
    expect(fullPrompt).toContain("Assistant");
    expect(fullPrompt).toContain("Goal1");
    expect(fullPrompt).toContain("Goal2");
  });

  it("should format messages correctly", async () => {
    const prompt = new AutoGPTPrompt({
      aiName: "TestAI",
      aiRole: "Assistant",
      tools: [],
      tokenCounter: mockTokenCounter,
      sendTokenLimit: 2500,
    });
    const formattedMessages = await prompt.formatMessages({
      goals: ["Goal1"],
      memory: mockMemory,
      messages: [
        new HumanMessage("Hello"),
        new SystemMessage("System message"),
      ],
      user_input: "User input",
    });
    // NOTE(review): with the char-per-token counter the base prompt alone
    // exceeds the history budget, so the two previous messages are trimmed
    // and only 4 messages remain: base, time, memory, and the user input.
    expect(formattedMessages).toHaveLength(4); // Base prompt, time prompt, memory message, and 2 previous messages
    // Check the content of the first message (base prompt)
    expect(formattedMessages[0].content).toContain("TestAI");
    expect(formattedMessages[0].content).toContain("Assistant");
    expect(formattedMessages[0].content).toContain("Goal1");
    // Check the content of the second message (time prompt) — locale date format.
    expect(formattedMessages[1].content).toMatch(
      /\d{1,2}\/\d{1,2}\/\d{4}, \d{1,2}:\d{1,2}:\d{1,2} (AM|PM)/
    );
    // Check the content of the third message (memory message)
    expect(formattedMessages[2].content).toContain("relevant content");
    // Check the content of the previous messages
    const humanMessage = formattedMessages.find(
      // eslint-disable-next-line no-instanceof/no-instanceof
      (msg) => msg instanceof HumanMessage
    );
    const systemMessage = formattedMessages.find(
      // eslint-disable-next-line no-instanceof/no-instanceof
      (msg) => msg instanceof SystemMessage
    );
    // Validate HumanMessage
    expect(humanMessage).toBeDefined();
    // Validate SystemMessage
    expect(systemMessage).toBeDefined();
    // Validate user_input
    expect(formattedMessages[3].content).toContain("User input");
  });
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/autogpt | lc_public_repos/langchainjs/langchain/src/experimental/autogpt/tests/prompt_generator.test.ts | import { test, expect } from "@jest/globals";
import { z } from "zod";
import { StructuredTool } from "@langchain/core/tools";
import { getPrompt } from "../prompt_generator.js";
import { Calculator } from "../../../util/testing/tools/calculator.js";
import { ReadFileTool, WriteFileTool } from "../../../tools/fs.js";
import { InMemoryFileStore } from "../../../stores/file/in_memory.js";
/**
 * Minimal StructuredTool stub used only to exercise prompt generation; its
 * schema (required `url`, optional `query`) is what the snapshot checks.
 */
class FakeBrowserTool extends StructuredTool {
  schema = z.object({
    url: z.string(),
    query: z.string().optional(),
  });

  name = "fake_browser_tool";

  description =
    "useful for when you need to find something on the web or summarize a webpage.";

  // Never does any browsing — returns a fixed marker string.
  async _call({
    url: _url,
    query: _query,
  }: z.infer<this["schema"]>): Promise<string> {
    return "fake_browser_tool";
  }
}
test("prompt with several tools", () => {
const store = new InMemoryFileStore();
const tools = [
new FakeBrowserTool(),
new Calculator(),
new ReadFileTool({ store }),
new WriteFileTool({ store }),
];
const prompt = getPrompt(tools);
expect(prompt).toMatchInlineSnapshot(`
"Constraints:
1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
3. No user assistance
4. Exclusively use the commands listed in double quotes e.g. "command name"
Commands:
1. "fake_browser_tool": useful for when you need to find something on the web or summarize a webpage., args json schema: {"url":{"type":"string"},"query":{"type":"string"}}
2. "calculator": Useful for getting the result of a math expression. The input to this tool should be a valid mathematical expression that could be executed by a simple calculator., args json schema: {"input":{"type":"string"}}
3. "read_file": Read file from disk, args json schema: {"file_path":{"type":"string","description":"name of file"}}
4. "write_file": Write file from disk, args json schema: {"file_path":{"type":"string","description":"name of file"},"text":{"type":"string","description":"text to write to file"}}
5. finish: use this to signal that you have finished all your objectives, args: "response": "final response to let people know you have finished your objectives"
Resources:
1. Internet access for searches and information gathering.
2. Long Term memory management.
3. GPT-3.5 powered Agents for delegation of simple tasks.
4. File output.
Performance Evaluation:
1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
2. Constructively self-criticize your big-picture behavior constantly.
3. Reflect on past decisions and strategies to refine your approach.
4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
You should only respond in JSON format as described below
Response Format:
{
"thoughts": {
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\\n- list that conveys\\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user"
},
"command": {
"name": "command name",
"args": {
"arg name": "value"
}
}
}
Ensure the response can be parsed by Python json.loads"
`);
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/prompts/handlebars.ts | import Handlebars from "handlebars";
import { type ParsedFStringNode } from "@langchain/core/prompts";
import type { InputValues } from "@langchain/core/utils/types";
import {
CustomFormatPromptTemplate,
CustomFormatPromptTemplateInput,
} from "./custom_format.js";
/**
 * Walks the Handlebars AST of `template` and flattens it into LangChain's
 * ParsedFStringNode list: `ContentStatement`s become literal nodes, and
 * mustache/path expressions become variable nodes. Block statements
 * (#if/#each/...) are expanded by pushing their params and inner body onto
 * the work stack. Variables rooted at `this.` or `@` (Handlebars data
 * references) are deliberately skipped — they are not template inputs.
 *
 * NOTE: the stack is consumed with `pop()`, so nodes are visited in reverse
 * source order; consumers must not rely on document order of the result.
 */
export const parseHandlebars = (template: string): ParsedFStringNode[] => {
  const parsed: ParsedFStringNode[] = [];
  const nodes: { type: string }[] = [...Handlebars.parse(template).body];
  while (nodes.length) {
    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    const node = nodes.pop()!;
    if (node.type === "ContentStatement") {
      // @ts-expect-error - handlebars' hbs.AST.ContentStatement isn't exported
      const text = node.value;
      parsed.push({ type: "literal", text });
    } else if (node.type === "MustacheStatement") {
      // @ts-expect-error - handlebars' hbs.AST.MustacheStatement isn't exported
      const name: string = node.path.parts[0];
      // @ts-expect-error - handlebars' hbs.AST.MustacheStatement isn't exported
      const { original } = node.path as { original: string };
      if (
        !!name &&
        !original.startsWith("this.") &&
        !original.startsWith("@")
      ) {
        parsed.push({ type: "variable", name });
      }
    } else if (node.type === "PathExpression") {
      // @ts-expect-error - handlebars' hbs.AST.PathExpression isn't exported
      const name: string = node.parts[0];
      // @ts-expect-error - handlebars' hbs.AST.PathExpression isn't exported
      const { original } = node;
      if (
        !!name &&
        !original.startsWith("this.") &&
        !original.startsWith("@")
      ) {
        parsed.push({ type: "variable", name });
      }
    } else if (node.type === "BlockStatement") {
      // Queue both the block's arguments and its body for traversal.
      // @ts-expect-error - handlebars' hbs.AST.BlockStatement isn't exported
      nodes.push(...node.params, ...node.program.body);
    }
  }
  return parsed;
};
/**
 * Renders a Handlebars template against `values`. HTML escaping is disabled
 * (`noEscape: true`) because prompt text is not HTML output.
 */
export const interpolateHandlebars = (
  template: string,
  values: InputValues
) => Handlebars.compile(template, { noEscape: true })(values);
export type HandlebarsPromptTemplateInput<RunInput extends InputValues> =
CustomFormatPromptTemplateInput<RunInput>;
export class HandlebarsPromptTemplate<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunInput extends InputValues = any
> extends CustomFormatPromptTemplate<RunInput> {
  static lc_name() {
    return "HandlebarsPromptTemplate";
  }

  /**
   * Load prompt template from a template.
   *
   * Thin wrapper around CustomFormatPromptTemplate.fromTemplate that wires
   * in the Handlebars parser/renderer pair. Template validation is disabled
   * because the Handlebars parser — not the f-string validator — determines
   * which variables the template uses.
   */
  static fromTemplate<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunInput extends InputValues = Record<string, any>
  >(
    template: string,
    params?: Omit<
      HandlebarsPromptTemplateInput<RunInput>,
      | "template"
      | "inputVariables"
      | "customParser"
      | "templateValidator"
      | "renderer"
    >
  ) {
    return super.fromTemplate<RunInput>(template, {
      ...params,
      validateTemplate: false,
      customParser: parseHandlebars,
      renderer: interpolateHandlebars,
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/prompts/custom_format.ts | import type { InputValues } from "@langchain/core/utils/types";
import {
type ParsedFStringNode,
PromptTemplate,
type PromptTemplateInput,
TypedPromptInputValues,
} from "@langchain/core/prompts";
/**
 * Constructor input for CustomFormatPromptTemplate: the standard
 * PromptTemplate input (minus `templateFormat`) plus pluggable parsing,
 * validation, and rendering callbacks.
 */
export type CustomFormatPromptTemplateInput<RunInput extends InputValues> =
  Omit<PromptTemplateInput<RunInput, string>, "templateFormat"> & {
    /** Parses a template string into literal/variable nodes. */
    customParser: (template: string) => ParsedFStringNode[];
    /** Optional check of a template against its declared input variables. */
    templateValidator?: (template: string, inputVariables: string[]) => boolean;
    /** Renders the template with the given values into the final string. */
    renderer: (template: string, values: InputValues) => string;
  };
/**
 * Prompt template whose parsing and rendering are delegated to
 * caller-supplied functions (`customParser` / `renderer`), allowing
 * template languages other than the built-in formats.
 */
export class CustomFormatPromptTemplate<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunInput extends InputValues = any,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  PartialVariableName extends string = any
> extends PromptTemplate<RunInput, PartialVariableName> {
  static lc_name() {
    return "CustomPromptTemplate";
  }

  // Opt out of LangChain serialization: instances hold plain function fields.
  lc_serializable = false;

  // Optional template/variable consistency check, run in the constructor
  // when `validateTemplate` is enabled.
  templateValidator?: (template: string, inputVariables: string[]) => boolean;

  // Renders the template string with the merged input + partial values.
  renderer: (template: string, values: InputValues) => string;

  constructor(input: CustomFormatPromptTemplateInput<RunInput>) {
    super(input);
    // Copies customParser/templateValidator/renderer (and the rest of the
    // input) onto the instance after the base class has initialized.
    Object.assign(this, input);
    if (this.validateTemplate && this.templateValidator !== undefined) {
      // Validate against the declared input variables plus any partials.
      let totalInputVariables: string[] = this.inputVariables;
      if (this.partialVariables) {
        totalInputVariables = totalInputVariables.concat(
          Object.keys(this.partialVariables)
        );
      }
      if (typeof this.template === "string") {
        this.templateValidator(this.template, totalInputVariables);
      } else {
        throw new Error(
          `Must pass in string as template. Received: ${this.template}`
        );
      }
    }
  }

  /**
   * Load prompt template from a template string.
   * Input variables are inferred by running `customParser` over the template
   * and collecting the names of all "variable" nodes (deduplicated).
   */
  static fromTemplate<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunInput extends InputValues = Record<string, any>
  >(
    template: string,
    {
      customParser,
      ...rest
    }: Omit<
      CustomFormatPromptTemplateInput<RunInput>,
      "template" | "inputVariables"
    >
  ) {
    const names = new Set<string>();
    const nodes = customParser(template);
    for (const node of nodes) {
      if (node.type === "variable") {
        names.add(node.name);
      }
    }
    // eslint-disable-next-line @typescript-eslint/ban-types
    return new this<RunInput extends Symbol ? never : RunInput>({
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      inputVariables: [...names] as any[],
      template,
      customParser,
      ...rest,
    });
  }

  /**
   * Formats the prompt template with the provided values.
   * @param values The values to be used to format the prompt template.
   * @returns A promise that resolves to a string which is the formatted prompt.
   */
  async format(values: TypedPromptInputValues<RunInput>): Promise<string> {
    const allValues = await this.mergePartialAndUserVariables(values);
    if (typeof this.template === "string") {
      return this.renderer(this.template, allValues);
    } else {
      throw new Error(
        `Must pass in string as template. Received: ${this.template}`
      );
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/prompts | lc_public_repos/langchainjs/langchain/src/experimental/prompts/tests/handlebars.test.ts | import { HandlebarsPromptTemplate } from "../handlebars.js";
// Table-driven check that each Handlebars template, rendered with the given
// variables, produces the expected string.
describe.each([
  ["{{foo}}", { foo: "bar" }, "bar"],
  ["pre{{foo}}post", { foo: "bar" }, "prebarpost"],
  ["{{{foo}}}", { foo: "bar" }, "bar"], // triple-stash (unescaped) form
  ["text", {}, "text"],
  ["}}", {}, "}}"], // a lone closing delimiter is treated as literal text
  ["{{first}}_{{second}}", { first: "foo", second: "bar" }, "foo_bar"],
])("Valid handlebars", (template, variables, result) => {
  test(`Interpolation works: ${template}`, async () => {
    const prompt = HandlebarsPromptTemplate.fromTemplate(template);
    const invokeResult = await prompt.invoke(variables);
    expect(invokeResult.value).toBe(result);
  });
});
// Templates with unbalanced delimiters should fail at parse time, i.e.
// fromTemplate itself throws (synchronously) rather than invoke().
describe.each([
  ["}}{{", {}],
  ["{{", {}],
  ["{{foo", {}],
])("Invalid handlebars", (template) => {
  test(`Interpolation throws: ${template}`, async () => {
    expect(() =>
      HandlebarsPromptTemplate.fromTemplate(template)
    ).toThrowError();
  });
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/masking/parser.ts | import { MaskingTransformer } from "./transformer.js";
import type { MaskingParserConfig } from "./types.js";
/**
 * MaskingParser orchestrates a pipeline of MaskingTransformers:
 * `mask()` applies each transformer in insertion order, accumulating a
 * masked-value -> original-value map in `state`; `rehydrate()` applies the
 * transformers in reverse order to restore the original text.
 */
export class MaskingParser {
  // Transformers applied in insertion order during mask(), reverse order
  // during rehydrate().
  private transformers: MaskingTransformer[];

  // Accumulated mapping of masked value -> original value across mask() calls.
  private state: Map<string, string>;

  private config: MaskingParserConfig;

  constructor(config: MaskingParserConfig = {}) {
    this.transformers = config.transformers ?? [];
    this.state = new Map<string, string>();
    this.config = config;
  }

  /**
   * Adds a transformer to the parser.
   * @param transformer - An instance of a class extending MaskingTransformer.
   */
  addTransformer(transformer: MaskingTransformer) {
    this.transformers.push(transformer);
  }

  /**
   * Getter method for retrieving the current state.
   * @returns The current state map.
   */
  public getState(): Map<string, string> {
    return this.state;
  }

  /**
   * Masks the provided message using the added transformers.
   * This method sequentially applies each transformer's masking logic to the message.
   * It utilizes a state map to track original values corresponding to their masked versions.
   *
   * @param message - The message to be masked.
   * @returns A masked version of the message.
   * @throws {TypeError} If the message is not a string.
   * @throws {Error} If no transformers are added.
   */
  async mask(message: string): Promise<string> {
    // Validate the call *before* running user-supplied hooks, so that
    // onMaskingStart is never invoked for a call that is about to fail.
    if (this.transformers.length === 0) {
      throw new Error(
        "MaskingParser.mask Error: No transformers have been added. Please add at least one transformer before parsing."
      );
    }
    if (typeof message !== "string") {
      throw new TypeError(
        "MaskingParser.mask Error: The 'message' argument must be a string."
      );
    }
    // If onMaskingStart is configured, await it (supports sync or async hooks).
    if (this.config.onMaskingStart) {
      await this.config.onMaskingStart(message);
    }
    // Holds the progressively masked message; starts as the original and is
    // transformed by each transformer in turn.
    let processedMessage = message;
    for (const transformer of this.transformers) {
      // Pass a copy of the state so transformers cannot mutate the shared
      // state directly.
      const [transformedMessage, transformerState] =
        await transformer.transform(processedMessage, new Map(this.state));
      processedMessage = transformedMessage;
      // Merge the transformer's state changes into the parser's state so all
      // transformations' effects accumulate.
      transformerState.forEach((value, key) => this.state.set(key, value));
    }
    // Handle onMaskingEnd callback with the fully masked result.
    if (this.config.onMaskingEnd) {
      await this.config.onMaskingEnd(processedMessage);
    }
    return processedMessage;
  }

  /**
   * Rehydrates a masked message back to its original form.
   * This method sequentially applies the rehydration logic of each added transformer
   * in reverse order, relying on the state map to restore original values.
   *
   * @param message - The masked message to be rehydrated.
   * @param state - Optional explicit state map; defaults to the parser's own state.
   * @returns The original (rehydrated) version of the message.
   * @throws {TypeError} If the message is not a string or state is not a Map.
   * @throws {Error} If no transformers are added.
   */
  async rehydrate(
    message: string,
    state?: Map<string, string>
  ): Promise<string> {
    // Validate before invoking user hooks (same rationale as mask()).
    if (typeof message !== "string") {
      throw new TypeError(
        "MaskingParser.rehydrate Error: The 'message' argument must be a string."
      );
    }
    if (this.transformers.length === 0) {
      throw new Error(
        "MaskingParser.rehydrate Error: No transformers have been added. Please add at least one transformer before rehydrating."
      );
    }
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (state && !(state instanceof Map)) {
      throw new TypeError(
        "MaskingParser.rehydrate Error: The 'state' argument, if provided, must be an instance of Map."
      );
    }
    // Handle onRehydratingStart callback.
    if (this.config.onRehydratingStart) {
      await this.config.onRehydratingStart(message);
    }
    // Use provided state or fall back to the parser's internal state.
    const rehydrationState = state || this.state;
    let rehydratedMessage = message;
    // Apply transformers in reverse order of masking; slice() avoids mutating
    // the transformer list.
    const reversedTransformers = this.transformers.slice().reverse();
    for (const transformer of reversedTransformers) {
      rehydratedMessage = await transformer.rehydrate(
        rehydratedMessage,
        rehydrationState
      );
    }
    // Handle onRehydratingEnd callback with the restored result.
    if (this.config.onRehydratingEnd) {
      await this.config.onRehydratingEnd(rehydratedMessage);
    }
    return rehydratedMessage;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/masking/transformer.ts | /**
* Abstract class representing a transformer used for masking and rehydrating messages.
*/
export abstract class MaskingTransformer {
  /**
   * Replaces sensitive content in `message` with masked values.
   * @param message - The text to mask.
   * @param state - Optional existing masked-value -> original-value map.
   * @returns A tuple of the masked message and the updated state map.
   */
  abstract transform(
    message: string,
    state?: Map<string, string>
  ): Promise<[string, Map<string, string>]>;

  /**
   * Restores masked values in `message` using the masked -> original `state` map.
   * @returns The rehydrated (original) message.
   */
  abstract rehydrate(
    message: string,
    state: Map<string, string>
  ): Promise<string>;
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/masking/types.ts | import { MaskingTransformer } from "./transformer.js";
/**
 * Configuration type for MaskingParser.
 */
export type MaskingParserConfig = {
  /** Transformers to pre-register (same effect as calling addTransformer for each). */
  transformers?: MaskingTransformer[];
  // NOTE(review): defaultHashFunction is declared here but not read by
  // MaskingParser in this file — confirm whether it is still used.
  defaultHashFunction?: HashFunction;
  /** Called with the raw message before masking begins. */
  onMaskingStart?: HookFunction;
  /** Called with the fully masked message after all transformers run. */
  onMaskingEnd?: HookFunction;
  /** Called with the masked message before rehydration begins. */
  onRehydratingStart?: HookFunction;
  /** Called with the restored message after rehydration completes. */
  onRehydratingEnd?: HookFunction;
};

/**
 * Regex Masking Pattern used for masking in PIIMaskingTransformer.
 */
export type MaskingPattern = {
  /** Pattern whose matches are masked. */
  regex: RegExp;
  /** Static replacement used when no `mask` function is given. */
  replacement?: string;
  /** Computes a replacement from the matched text; takes precedence over `replacement`. */
  mask?: (match: string) => string;
};

/**
 * A masking/rehydration lifecycle hook; may be synchronous or asynchronous.
 */
export type HookFunction =
  | ((message: string) => Promise<void>)
  | ((message: string) => void);

/**
 * Represents a function that can hash a string input.
 */
export type HashFunction = (input: string) => string;
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/masking/index.ts | export { MaskingParser } from "./parser.js";
// Public re-exports for the experimental masking module.
export { RegexMaskingTransformer } from "./regex_masking_transformer.js";
export { MaskingTransformer } from "./transformer.js";
export {
  type MaskingParserConfig,
  type HashFunction,
  type HookFunction,
} from "./types.js";
|
0 | lc_public_repos/langchainjs/langchain/src/experimental | lc_public_repos/langchainjs/langchain/src/experimental/masking/regex_masking_transformer.ts | import { MaskingTransformer } from "./transformer.js";
import type { HashFunction, MaskingPattern } from "./types.js";
/**
 * RegexMaskingTransformer class for masking and rehydrating messages with Regex.
 */
export class RegexMaskingTransformer extends MaskingTransformer {
  private patterns: { [key: string]: MaskingPattern };

  private hashFunction: HashFunction;

  /**
   * Constructs a RegexMaskingTransformer with given patterns and an optional hash function.
   * Validates the provided patterns to ensure they conform to the expected structure.
   *
   * @param patterns - An object containing masking patterns. Each pattern should include
   *                   a regular expression (`regex`) and optionally a `replacement` string
   *                   or a `mask` function.
   * @param hashFunction - An optional custom hash function to be used for masking.
   */
  constructor(
    patterns: { [key: string]: MaskingPattern },
    hashFunction?: HashFunction
  ) {
    super();
    // Fail fast on malformed pattern configuration.
    this.validatePatterns(patterns);
    this.patterns = patterns;
    // Fall back to the built-in 32-bit string hash when no custom hash is given.
    this.hashFunction = hashFunction || this.defaultHashFunction;
  }

  /**
   * Validates the given masking patterns to ensure each pattern has a valid regular expression.
   * Throws an error if any pattern is found to be invalid.
   *
   * @param patterns - The patterns object to validate.
   */
  private validatePatterns(patterns: { [key: string]: MaskingPattern }) {
    for (const key of Object.keys(patterns)) {
      const pattern = patterns[key];
      // Each pattern must be an object whose regex property is a RegExp instance.
      if (
        !pattern ||
        typeof pattern !== "object" ||
        // eslint-disable-next-line no-instanceof/no-instanceof
        !(pattern.regex instanceof RegExp)
      ) {
        throw new Error("Invalid pattern configuration.");
      }
    }
  }

  /**
   * Masks content in a message based on the defined patterns.
   * @param message - The message to be masked.
   * @param state - The current state containing original values.
   * @returns A tuple of the masked message and the updated state.
   */
  async transform(
    message: string,
    state: Map<string, string>
  ): Promise<[string, Map<string, string>]> {
    if (typeof message !== "string") {
      throw new TypeError(
        "RegexMaskingTransformer.transform Error: The 'message' argument must be a string."
      );
    }
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (!(state instanceof Map)) {
      throw new TypeError(
        "RegexMaskingTransformer.transform Error: The 'state' argument must be an instance of Map."
      );
    }
    // Holds the progressively masked message.
    let processedMessage = message;
    // Accumulate masked -> original mappings into the provided state.
    const originalValues = state || new Map<string, string>();
    for (const key of Object.keys(this.patterns)) {
      const pattern = this.patterns[key];
      processedMessage = processedMessage.replace(pattern.regex, (match) => {
        // Masked value precedence: mask function, then static replacement,
        // then the hash function.
        const maskedValue = pattern.mask
          ? pattern.mask(match)
          : pattern.replacement ?? this.hashFunction(match);
        // NOTE(review): Map.set overwrites — if a static `replacement` maps
        // several distinct originals to the same masked value, only the last
        // original survives for rehydration.
        originalValues.set(maskedValue, match);
        return maskedValue;
      });
    }
    // Return the fully masked message and the state map with all original values.
    return [processedMessage, originalValues];
  }

  /**
   * Rehydrates a masked message back to its original form using the provided state.
   * @param message - The masked message to be rehydrated.
   * @param state - The state map containing mappings of masked values to their original values.
   * @returns The rehydrated (original) message.
   */
  async rehydrate(
    message: string,
    state: Map<string, string>
  ): Promise<string> {
    if (typeof message !== "string") {
      throw new TypeError(
        "RegexMaskingTransformer.rehydrate Error: The 'message' argument must be a string."
      );
    }
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (!(state instanceof Map)) {
      throw new TypeError(
        "RegexMaskingTransformer.rehydrate Error: The 'state' argument must be an instance of Map."
      );
    }
    // Sequentially replace each masked value with its original.
    const rehydratedMessage = Array.from(state).reduce(
      (msg, [masked, original]) => {
        // Escape regex metacharacters in the masked string so it can be used
        // safely as a search pattern.
        const escapedMasked = masked.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
        // Use a replacer *function* so that `$`-sequences ($&, $', $`, $n)
        // occurring in the original text are inserted literally instead of
        // being expanded as special replacement patterns by String.replace.
        return msg.replace(new RegExp(escapedMasked, "g"), () => original);
      },
      message
    );
    return rehydratedMessage;
  }

  /**
   * Default hash function for creating unique hash values.
   * @param input - The input string to hash.
   * @returns The resulting hash as a string.
   */
  private defaultHashFunction(input: string): string {
    let hash = 0;
    for (let i = 0; i < input.length; i += 1) {
      // Get the code unit of the character.
      const char = input.charCodeAt(i);
      // Combine the current hash with the new character (hash * 31 + char).
      hash = (hash << 5) - hash + char;
      // Bitwise OR with 0 truncates to a 32-bit integer, wrapping the value
      // when it becomes too large.
      hash |= 0;
    }
    return hash.toString();
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/masking | lc_public_repos/langchainjs/langchain/src/experimental/masking/tests/masking.test.ts | /* eslint-disable no-promise-executor-return */
/* eslint-disable @typescript-eslint/no-explicit-any */
// yarn test:single src/experimental/masking/tests/masking.test.ts
import { jest } from "@jest/globals";
import {
MaskingParser,
RegexMaskingTransformer,
MaskingTransformer,
} from "../index.js";
describe("MaskingParser and PIIMaskingTransformer", () => {
// Verifies masking/rehydration when patterns use fixed replacement strings
// ("[email]" / "[phone]") rather than dynamic mask functions.
describe("Masking with Static Identifiers", () => {
  let maskingParser: MaskingParser;
  let piiMaskingTransformer: RegexMaskingTransformer;
  const emailPattern = { regex: /\S+@\S+\.\S+/, replacement: "[email]" };
  const phonePattern = { regex: /\d{3}-\d{3}-\d{4}/, replacement: "[phone]" };
  beforeEach(() => {
    piiMaskingTransformer = new RegexMaskingTransformer({
      email: emailPattern,
      phone: phonePattern,
    });
    maskingParser = new MaskingParser();
    maskingParser.addTransformer(piiMaskingTransformer);
  });
  it("masks single occurrences of PII with static identifiers", async () => {
    const message = "Contact me at jane.doe@email.com or 555-123-4567.";
    const expectedMaskedMessage = "Contact me at [email] or [phone].";
    const maskedMessage = await maskingParser.mask(message);
    expect(maskedMessage).toBe(expectedMaskedMessage);
  });
  it("rehydrates static masked data to its original form", async () => {
    const maskedMessage = "Contact me at [email] or [phone].";
    const expectedOriginalMessage =
      "Contact me at jane.doe@email.com or 555-123-4567.";
    // Masking the original message first populates the parser's state map,
    // which rehydrate() then uses to restore the original values.
    await maskingParser.mask(expectedOriginalMessage); // Masking original message
    const rehydratedMessage = await maskingParser.rehydrate(maskedMessage);
    expect(rehydratedMessage).toBe(expectedOriginalMessage);
  });
  // Builds a large message (many unique emails/phone numbers) for the
  // performance test below.
  function generateLargeMessage() {
    let largeMessage = "";
    for (let i = 0; i < 10000; i += 1) {
      // Adjust the number for desired message size
      largeMessage += `User${i}: jane.doe${i}@email.com, 555-123-${i
        .toString()
        .padStart(4, "0")}. `;
    }
    return largeMessage;
  }
  describe("Performance Testing", () => {
    it("efficiently processes large data sets", async () => {
      const largeMessage = generateLargeMessage();
      const startTime = performance.now();
      const maskedMessage = await maskingParser.mask(largeMessage);
      const endTime = performance.now();
      const someAcceptableDuration = 5000; // Set this to a duration you consider acceptable, e.g., 5000 milliseconds (5 seconds)
      expect(maskedMessage).toBeDefined();
      expect(endTime - startTime).toBeLessThan(someAcceptableDuration);
    });
  });
});
// Verifies masking/rehydration when patterns use mask() callbacks that
// generate randomized identifiers per match.
describe("Masking with Dynamic Identifiers", () => {
  let maskingParser: MaskingParser;
  let piiMaskingTransformer: RegexMaskingTransformer;
  // Each call produces a random hex suffix, so identifiers differ per match.
  const emailMask = () => `[email-${Math.random().toString(16).slice(2)}]`;
  const phoneMask = () => `[phone-${Math.random().toString(16).slice(2)}]`;
  beforeEach(() => {
    piiMaskingTransformer = new RegexMaskingTransformer({
      email: { regex: /\S+@\S+\.\S+/g, mask: emailMask },
      phone: { regex: /\d{3}-\d{3}-\d{4}/g, mask: phoneMask },
    });
    maskingParser = new MaskingParser();
    maskingParser.addTransformer(piiMaskingTransformer);
  });
  it("masks multiple occurrences of different PII with unique identifiers", async () => {
    const message =
      "Contact me at jane.doe@email.com or 555-123-4567. Also reach me at john.smith@email.com";
    const maskedMessage = await maskingParser.mask(message);
    expect(maskedMessage).toMatch(/\[email-[a-f0-9]+\]/g);
    expect(maskedMessage).toMatch(/\[phone-[a-f0-9]+\]/g);
    expect((maskedMessage.match(/\[email-[a-f0-9]+\]/g) || []).length).toBe(
      2
    );
    expect((maskedMessage.match(/\[phone-[a-f0-9]+\]/g) || []).length).toBe(
      1
    );
  });
  it("rehydrates dynamic masked data to its original form", async () => {
    const originalMessage =
      "Contact me at jane.doe@email.com or 555-123-4567. Also reach me at john.smith@email.com";
    const maskedMessage = await maskingParser.mask(originalMessage);
    const rehydratedMessage = await maskingParser.rehydrate(maskedMessage);
    expect(rehydratedMessage).toBe(originalMessage);
  });
  it("masks identical PII with consistent dynamic identifiers", async () => {
    // NOTE(review): the mask callbacks are random per invocation; "consistent"
    // here only asserts match counts, not identifier equality — confirm intent.
    const message =
      "Contact me at jane.doe@email.com or 555-123-4567. Also reach me at john.smith@email.com and 555-123-4567";
    const maskedMessage = await maskingParser.mask(message);
    expect(maskedMessage).toMatch(/\[email-[a-f0-9]+\]/g);
    expect(maskedMessage).toMatch(/\[phone-[a-f0-9]+\]/g);
    expect((maskedMessage.match(/\[email-[a-f0-9]+\]/g) || []).length).toBe(
      2
    );
    expect((maskedMessage.match(/\[phone-[a-f0-9]+\]/g) || []).length).toBe(
      2
    );
  });
});
// Verifies that patterns with static replacement strings work without a
// custom hash function (the transformer falls back to its default hash).
describe("PIIMaskingTransformer with Default Hash Function", () => {
  let maskingParser: MaskingParser;
  let piiMaskingTransformer: RegexMaskingTransformer;
  const emailPattern = { regex: /\S+@\S+\.\S+/, replacement: "[email]" };
  const phonePattern = { regex: /\d{3}-\d{3}-\d{4}/, replacement: "[phone]" };
  beforeEach(() => {
    piiMaskingTransformer = new RegexMaskingTransformer({
      email: emailPattern,
      phone: phonePattern,
    });
    maskingParser = new MaskingParser();
    maskingParser.addTransformer(piiMaskingTransformer);
  });
  it("should mask email and phone using default hash function", async () => {
    // NOTE(review): these locals shadow the beforeEach instances above, so
    // the outer setup is unused in this test.
    const piiMaskingTransformer = new RegexMaskingTransformer({
      email: emailPattern,
      phone: phonePattern,
    });
    const maskingParser = new MaskingParser();
    maskingParser.addTransformer(piiMaskingTransformer);
    const message =
      "My email is jane.doe@email.com and phone is 555-123-4567.";
    const maskedMessage = await maskingParser.mask(message);
    expect(maskedMessage).toContain("[email]");
    expect(maskedMessage).toContain("[phone]");
  });
});
// Verifies masking/rehydration when mask() callbacks delegate to a
// caller-supplied hash function.
describe("PIIMaskingTransformer with Custom Hash Function", () => {
  const emailPattern = { regex: /\S+@\S+\.\S+/, replacement: "[email]" };
  const phonePattern = { regex: /\d{3}-\d{3}-\d{4}/, replacement: "[phone]" };
  let maskingParser: MaskingParser;
  let piiMaskingTransformer: RegexMaskingTransformer;
  beforeEach(() => {
    piiMaskingTransformer = new RegexMaskingTransformer({
      email: emailPattern,
      phone: phonePattern,
    });
    maskingParser = new MaskingParser();
    maskingParser.addTransformer(piiMaskingTransformer);
  });
  // A simple hash function that creates a mock hash representation of the input.
  // This is just for demonstration purposes and not a secure hashing method.
  const customHashFunction = (input: string) =>
    input
      .split("")
      .map(() => "*")
      .join("");
  it("should mask email and phone using custom hash function", async () => {
    // Local parser/transformer shadow the beforeEach ones, so only the
    // custom-mask patterns apply in this test.
    const piiMaskingTransformer = new RegexMaskingTransformer(
      {
        email: {
          regex: /\S+@\S+\.\S+/,
          mask: (match) => `custom-email-${customHashFunction(match)}`,
        },
        phone: {
          regex: /\d{3}-\d{3}-\d{4}/,
          mask: (match) => `custom-phone-${customHashFunction(match)}`,
        },
      },
      customHashFunction
    );
    const maskingParser = new MaskingParser();
    maskingParser.addTransformer(piiMaskingTransformer);
    const message = "Contact me at jane.doe@email.com or 555-123-4567.";
    const maskedMessage = await maskingParser.mask(message);
    // The lengths of the masked parts should be equal to the lengths of the original email and phone number.
    const expectedEmailMask = `custom-email-${"*".repeat(
      "jane.doe@email.com".length
    )}`;
    const expectedPhoneMask = `custom-phone-${"*".repeat(
      "555-123-4567".length
    )}`;
    expect(maskedMessage).toContain(expectedEmailMask);
    expect(maskedMessage).toContain(expectedPhoneMask);
  });
  it("should rehydrate masked data correctly using custom hash function", async () => {
    const piiMaskingTransformer = new RegexMaskingTransformer(
      {
        email: {
          regex: /\S+@\S+\.\S+/,
          mask: (match) => `custom-email-${customHashFunction(match)}`,
        },
        phone: {
          regex: /\d{3}-\d{3}-\d{4}/,
          mask: (match) => `custom-phone-${customHashFunction(match)}`,
        },
      },
      customHashFunction
    );
    // NOTE(review): this appends a second transformer to the beforeEach
    // parser, so the static [email]/[phone] patterns run first — confirm
    // that is intended.
    maskingParser.addTransformer(piiMaskingTransformer);
    const originalMessage =
      "Contact me at jane.doe@email.com or 555-123-4567.";
    const maskedMessage = await maskingParser.mask(originalMessage);
    const rehydratedMessage = await maskingParser.rehydrate(maskedMessage);
    expect(rehydratedMessage).toBe(originalMessage);
  });
});
// Argument/configuration validation errors surfaced by MaskingParser.
describe("Error Handling in MaskingParser", () => {
  let maskingParser: MaskingParser;
  let piiMaskingTransformer: RegexMaskingTransformer;
  beforeEach(() => {
    piiMaskingTransformer = new RegexMaskingTransformer({});
    maskingParser = new MaskingParser();
  });
  it("throws an error when no transformers are added and parse is called", async () => {
    const message = "Some message";
    await expect(maskingParser.mask(message)).rejects.toThrow(
      "MaskingParser.mask Error: No transformers have been added. Please add at least one transformer before parsing."
    );
  });
  it("throws an error when no transformers are added and rehydrate is called", async () => {
    const message = "Some masked message";
    await expect(maskingParser.rehydrate(message)).rejects.toThrow(
      "MaskingParser.rehydrate Error: No transformers have been added. Please add at least one transformer before rehydrating."
    );
  });
  it("throws an error for invalid message type in parse", async () => {
    const invalidMessage: any = 123; // intentionally incorrect type
    maskingParser.addTransformer(piiMaskingTransformer); // Add a transformer
    await expect(maskingParser.mask(invalidMessage)).rejects.toThrow(
      "The 'message' argument must be a string."
    );
  });
  it("throws an error for invalid message type in rehydrate", async () => {
    const invalidMessage: any = 123; // intentionally incorrect type
    await expect(maskingParser.rehydrate(invalidMessage)).rejects.toThrow(
      "The 'message' argument must be a string."
    );
  });
});
// Argument validation errors surfaced by RegexMaskingTransformer itself.
describe("Error Handling in PIIMaskingTransformer", () => {
  it("throws an error for invalid message type in transform", async () => {
    const transformer = new RegexMaskingTransformer({});
    const invalidMessage: any = 123; // intentionally incorrect type
    const state = new Map<string, string>();
    await expect(
      transformer.transform(invalidMessage, state)
    ).rejects.toThrow("The 'message' argument must be a string.");
  });
  it("throws an error for invalid state type in transform", async () => {
    const transformer = new RegexMaskingTransformer({});
    const message = "Some message";
    const invalidState: any = {}; // intentionally incorrect type
    await expect(
      transformer.transform(message, invalidState)
    ).rejects.toThrow("The 'state' argument must be an instance of Map.");
  });
  it("throws an error when initialized with invalid regex pattern", () => {
    // The constructor validates patterns eagerly, so the error is synchronous.
    expect(() => {
      // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
      // @ts-expect-error unused var
      const transformer = new RegexMaskingTransformer({
        // @ts-expect-error Should throw with invalid regex
        invalid: { regex: null },
      });
      // console.log(transformer);
    }).toThrow("Invalid pattern configuration.");
  });
});
describe("MaskingParser Hooks", () => {
let maskingParser: MaskingParser;
let piiMaskingTransformer: RegexMaskingTransformer;
const emailPattern = { regex: /\S+@\S+\.\S+/, replacement: "[email]" };
beforeEach(() => {
piiMaskingTransformer = new RegexMaskingTransformer({
email: emailPattern,
});
});
// Masking hooks
it("handles synchronous onMaskingStart and onMaskingEnd hooks during parse", async () => {
const onMaskingStart = jest.fn(); // Synchronous mock
const onMaskingEnd = jest.fn(); // Synchronous mock
maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onMaskingStart,
onMaskingEnd,
});
const message = "Contact me at jane.doe@email.com";
await maskingParser.mask(message);
expect(onMaskingStart).toHaveBeenCalledWith(message);
expect(onMaskingEnd).toHaveBeenCalled();
});
it("handles asynchronous onMaskingStart and onMaskingEnd hooks during parse", async () => {
const onMaskingStart = jest.fn(() => Promise.resolve()); // Correctly mocked as an async function
const onMaskingEnd = jest.fn(() => Promise.resolve()); // Correctly mocked as an async function
maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onMaskingStart,
onMaskingEnd,
});
const message = "Contact me at jane.doe@email.com";
await maskingParser.mask(message);
expect(onMaskingStart).toHaveBeenCalledWith(message);
expect(onMaskingEnd).toHaveBeenCalled();
});
it("handles errors in synchronous onMaskingStart and onMaskingEnd hooks during parse", async () => {
const error = new Error("Test Error");
const onMaskingStart = jest.fn(() => {
throw error;
}); // Synchronous mock that throws an error
const onMaskingEnd = jest.fn(() => {
throw error;
}); // Synchronous mock that throws an error
maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onMaskingStart,
onMaskingEnd,
});
const message = "Contact me at jane.doe@email.com";
await expect(maskingParser.mask(message)).rejects.toThrow(error);
expect(onMaskingStart).toHaveBeenCalledWith(message);
// onMaskingEnd should not be called because an error is thrown in onMaskingStart
expect(onMaskingEnd).not.toHaveBeenCalled();
});
it("handles errors in asynchronous onMaskingStart and onMaskingEnd hooks during parse", async () => {
const error = new Error("Test Error");
const onMaskingStart = jest.fn(() => Promise.reject(error)); // Asynchronous mock that rejects with an error
const onMaskingEnd = jest.fn(() => Promise.reject(error)); // Asynchronous mock that rejects with an error
maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onMaskingStart,
onMaskingEnd,
});
const message = "Contact me at jane.doe@email.com";
await expect(maskingParser.mask(message)).rejects.toThrow(error);
expect(onMaskingStart).toHaveBeenCalledWith(message);
// onMaskingEnd should not be called because an error is thrown in onMaskingStart
expect(onMaskingEnd).not.toHaveBeenCalled();
});
// Rehydration hooks
it("handles synchronous onRehydratingStart and onRehydratingEnd hooks during rehydrate", async () => {
const onRehydratingStart = jest.fn(); // Synchronous mock
const onRehydratingEnd = jest.fn(); // Synchronous mock
maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onRehydratingStart,
onRehydratingEnd,
});
const maskedMessage = await maskingParser.mask(
"Contact me at jane.doe@email.com"
);
await maskingParser.rehydrate(maskedMessage);
expect(onRehydratingStart).toHaveBeenCalledWith(maskedMessage);
expect(onRehydratingEnd).toHaveBeenCalled();
});
it("handles asynchronous onRehydratingStart and onRehydratingEnd hooks during rehydrate", async () => {
const onRehydratingStart = jest.fn(() => Promise.resolve()); // Asynchronous mock
const onRehydratingEnd = jest.fn(() => Promise.resolve()); // Asynchronous mock
maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onRehydratingStart,
onRehydratingEnd,
});
const maskedMessage = await maskingParser.mask(
"Contact me at jane.doe@email.com"
);
await maskingParser.rehydrate(maskedMessage);
expect(onRehydratingStart).toHaveBeenCalledWith(maskedMessage);
expect(onRehydratingEnd).toHaveBeenCalled();
});
it("handles errors in synchronous onRehydratingStart and onRehydratingEnd hooks during rehydrate", async () => {
const error = new Error("Test Error");
const onRehydratingStart = jest.fn(() => {
throw error;
}); // Synchronous mock that throws an error
const onRehydratingEnd = jest.fn(() => {
throw error;
}); // Synchronous mock that throws an error
maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onRehydratingStart,
onRehydratingEnd,
});
const maskedMessage = await maskingParser.mask(
"Contact me at jane.doe@email.com"
);
await expect(maskingParser.rehydrate(maskedMessage)).rejects.toThrow(
error
);
expect(onRehydratingStart).toHaveBeenCalledWith(maskedMessage);
// onRehydratingEnd should not be called because an error is thrown in onRehydratingStart
expect(onRehydratingEnd).not.toHaveBeenCalled();
});
it("handles errors in asynchronous onRehydratingStart and onRehydratingEnd hooks during rehydrate", async () => {
const error = new Error("Test Error");
const onRehydratingStart = jest.fn(() => Promise.reject(error)); // Asynchronous mock that rejects with an error
const onRehydratingEnd = jest.fn(() => Promise.reject(error)); // Asynchronous mock that rejects with an error
maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onRehydratingStart,
onRehydratingEnd,
});
const maskedMessage = await maskingParser.mask(
"Contact me at jane.doe@email.com"
);
await expect(maskingParser.rehydrate(maskedMessage)).rejects.toThrow(
error
);
expect(onRehydratingStart).toHaveBeenCalledWith(maskedMessage);
// onRehydratingEnd should not be called because an error is thrown in onRehydratingStart
expect(onRehydratingEnd).not.toHaveBeenCalled();
});
});
describe("MaskingParser with Asynchronous Transformers", () => {
let maskingParser: MaskingParser;
let asyncTransformer: MaskingTransformer;
beforeEach(() => {
// Mock an asynchronous transformer
asyncTransformer = {
async transform(message, state) {
// Simulate an asynchronous operation
await new Promise((resolve) => setTimeout(resolve, 100));
// Return transformed message and updated state
const transformedMessage = message.replace(
/sensitiveData/g,
"[REDACTED]"
);
const newState = new Map(state).set(
"redacted",
"sensitive string :("
);
return [transformedMessage, newState];
},
// Mock or placeholder rehydrate method
async rehydrate(message, _state) {
return message;
},
};
maskingParser = new MaskingParser({
transformers: [asyncTransformer],
// Add other configurations if necessary
});
});
it("properly handles asynchronous transformations and state updates", async () => {
const originalMessage =
"This message contains sensitiveData that should be redacted.";
const transformedMessage = await maskingParser.mask(originalMessage);
// Check if the message is transformed correctly
expect(transformedMessage).toBe(
"This message contains [REDACTED] that should be redacted."
);
// Check if the state is updated correctly
expect(maskingParser.getState().get("redacted")).toBe(
"sensitive string :("
);
});
});
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/masking | lc_public_repos/langchainjs/langchain/src/experimental/masking/tests/masking-extended.test.ts | // yarn test:single src/experimental/masking/tests/masking-extended.test.ts
import { MaskingParser, RegexMaskingTransformer } from "../index.js";
// Mock database for simulating state storage and retrieval
// In-memory stand-in for a persistence layer, used to simulate saving and
// loading serialized parser state across "requests" in the integration test.
const mockDB = (() => {
  const storage = new Map<string, string>();
  return {
    // Persist a serialized parser state under the given key.
    async saveState(key: string, serializedState: string) {
      storage.set(key, serializedState);
    },
    // Look up a previously saved state; empty string when absent.
    async getState(key: string): Promise<string> {
      return storage.get(key) ?? "";
    },
  };
})();
// Turn the parser's state map into a JSON string of [key, value] pairs.
function serializeState(state: Map<string, string>): string {
  const entryPairs = [...state.entries()];
  return JSON.stringify(entryPairs);
}
// Rebuild a state map from the JSON produced by serializeState.
function deserializeState(serializedState: string): Map<string, string> {
  const entryPairs: [string, string][] = JSON.parse(serializedState);
  return new Map(entryPairs);
}
describe("MaskingParser Integration Test", () => {
let parser: MaskingParser;
let transformer: RegexMaskingTransformer;
const emailPattern = { regex: /\S+@\S+\.\S+/, replacement: "[email]" };
const phonePattern = { regex: /\d{3}-\d{3}-\d{4}/, replacement: "[phone]" };
beforeEach(() => {
transformer = new RegexMaskingTransformer({
email: emailPattern,
phone: phonePattern,
});
parser = new MaskingParser();
parser.addTransformer(transformer);
});
it("should mask, store state, and rehydrate with altered order", async () => {
const originalMessage = "Contact me at jane.doe@email.com or 555-123-4567.";
const maskedMessage = await parser.mask(originalMessage);
// Serialize and store the state
const serializedState = serializeState(parser.getState());
await mockDB.saveState("uniqueMessageId", serializedState);
// Simulate retrieving and altering the masked message
// Here, we assume the AI processing reverses the order of masked content
// Simulate retrieving and altering the masked message
const alteredMaskedMessage = maskedMessage.split(" ").reverse().join(" ");
// Retrieve and deserialize the state
const retrievedSerializedState = await mockDB.getState("uniqueMessageId");
const retrievedState = deserializeState(retrievedSerializedState);
// Rehydrate the altered message
const rehydratedMessage = await parser.rehydrate(
alteredMaskedMessage,
retrievedState
);
// The expectation depends on how the alteration affects the masked message.
// Here, we assume that the rehydrated message should match the original message
// even after the alteration since the masked content still aligns with the stored state.
const expectedRehydratedMessage = originalMessage
.split(" ")
.reverse()
.join(" ");
expect(rehydratedMessage).toEqual(expectedRehydratedMessage);
});
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/chains | lc_public_repos/langchainjs/langchain/src/experimental/chains/tests/violation_of_expectations_chain.int.test.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
import { MemoryVectorStore } from "../../../vectorstores/memory.js";
import { ViolationOfExpectationsChain } from "../violation_of_expectations/violation_of_expectations_chain.js";
// Fixed three-turn human/AI conversation used as the chat history input for
// the chain under test.
const dummyMessages = [
  new HumanMessage(
    "I've been thinking about the importance of time with myself to discover my voice. I feel like 1-2 hours is never enough."
  ),
  new AIMessage(
    "The concept of 'adequate time' varies. Have you tried different formats of introspection, such as morning pages or long-form writing, to see if they make the process more efficient?"
  ),
  new HumanMessage(
    "I have tried journaling but never consistently. Sometimes it feels like writing doesn't capture everything."
  ),
];
test.skip("should respond with the proper schema", async () => {
const vectorStore = await MemoryVectorStore.fromTexts(
[" "],
[{ id: 1 }],
new OpenAIEmbeddings()
);
const retriever = vectorStore.asRetriever();
const llm = new ChatOpenAI({
modelName: "gpt-4",
});
const chain = new ViolationOfExpectationsChain({
llm,
retriever,
});
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({
chat_history: dummyMessages,
});
// console.log({
// res,
// });
});
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/chains | lc_public_repos/langchainjs/langchain/src/experimental/chains/violation_of_expectations/types.ts | import { BaseMessage, HumanMessage } from "@langchain/core/messages";
/**
 * Contains the chunk of messages, along with the
 * users response, which is the next message after the chunk.
 */
export type MessageChunkResult = {
  chunkedMessages: BaseMessage[];
  /**
   * User response can be undefined if the last message in
   * the chat history was from the AI.
   */
  userResponse?: HumanMessage;
};

/**
 * Shape of the LLM's structured output when predicting the user's next
 * message (enforced via PREDICT_NEXT_USER_MESSAGE_FUNCTION).
 */
export type PredictNextUserMessageResponse = {
  userState: string;
  predictedUserMessage: string;
  insights: Array<string>;
};

/**
 * Result of comparing a prediction against the actual user response:
 * the revised prediction plus explanations of where it was wrong.
 */
export type GetPredictionViolationsResponse = {
  userResponse?: HumanMessage;
  revisedPrediction: string;
  explainedPredictionErrors: Array<string>;
};
/**
 * OpenAI function-calling schema used to force the model to return a
 * structured next-message prediction: { userState, predictedUserMessage,
 * insights }. Mirrors the PredictNextUserMessageResponse type.
 */
export const PREDICT_NEXT_USER_MESSAGE_FUNCTION = {
  name: "predictNextUserMessage",
  description: "Predicts the next user message, along with insights.",
  parameters: {
    type: "object",
    properties: {
      userState: {
        type: "string",
        description: "Concise reasoning about the users internal mental state.",
      },
      predictedUserMessage: {
        type: "string",
        description:
          "Your prediction on how they will respond to the AI's most recent message.",
      },
      insights: {
        type: "array",
        items: {
          type: "string",
        },
        description:
          "A concise list of any additional insights that would be useful to improve prediction.",
      },
    },
    required: ["userState", "predictedUserMessage", "insights"],
  },
};
/**
 * OpenAI function-calling schema used to force the model to describe how a
 * predicted user message differed from the actual one. Fixes the typo
 * "predication" in the violationExplanation description, which is sent to
 * the model verbatim as part of the function definition.
 */
export const PREDICTION_VIOLATIONS_FUNCTION = {
  name: "predictionViolations",
  description:
    "Generates violations, errors and differences between the predicted user response, and the actual response.",
  parameters: {
    type: "object",
    properties: {
      violationExplanation: {
        type: "string",
        description: "How was the prediction violated?",
      },
      explainedPredictionErrors: {
        type: "array",
        items: {
          type: "string",
        },
        description: "Explanations of how the prediction was violated and why",
      },
    },
    required: ["violationExplanation", "explainedPredictionErrors"],
  },
};
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/chains | lc_public_repos/langchainjs/langchain/src/experimental/chains/violation_of_expectations/violation_of_expectations_chain.ts | import type { BaseRetrieverInterface } from "@langchain/core/retrievers";
import { ChatOpenAI } from "@langchain/openai";
import {
BaseMessage,
HumanMessage,
isBaseMessage,
} from "@langchain/core/messages";
import { ChainValues } from "@langchain/core/utils/types";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
import { JsonOutputFunctionsParser } from "../../../output_parsers/openai_functions.js";
import { BaseChain, ChainInputs } from "../../../chains/base.js";
import {
GetPredictionViolationsResponse,
MessageChunkResult,
PREDICTION_VIOLATIONS_FUNCTION,
PREDICT_NEXT_USER_MESSAGE_FUNCTION,
PredictNextUserMessageResponse,
} from "./types.js";
import {
GENERATE_FACTS_PROMPT,
GENERATE_REVISED_PREDICTION_PROMPT,
PREDICTION_VIOLATIONS_PROMPT,
PREDICT_NEXT_USER_MESSAGE_PROMPT,
} from "./violation_of_expectations_prompt.js";
/**
* Interface for the input parameters of the ViolationOfExpectationsChain class.
*/
export interface ViolationOfExpectationsChainInput extends ChainInputs {
/**
* The retriever to use for retrieving stored
* thoughts and insights.
*/
retriever: BaseRetrieverInterface;
/**
* The LLM to use
*/
llm: ChatOpenAI;
}
/**
* Chain that generates key insights/facts of a user based on a
* a chat conversation with an AI.
*/
export class ViolationOfExpectationsChain
  extends BaseChain
  implements ViolationOfExpectationsChainInput
{
  static lc_name() {
    return "ViolationOfExpectationsChain";
  }

  _chainType(): string {
    return "violation_of_expectation_chain";
  }

  chatHistoryKey = "chat_history";

  thoughtsKey = "thoughts";

  get inputKeys() {
    return [this.chatHistoryKey];
  }

  get outputKeys() {
    return [this.thoughtsKey];
  }

  retriever: BaseRetrieverInterface;

  llm: ChatOpenAI;

  jsonOutputParser: JsonOutputFunctionsParser;

  stringOutputParser: StringOutputParser;

  constructor(fields: ViolationOfExpectationsChainInput) {
    super(fields);
    this.retriever = fields.retriever;
    this.llm = fields.llm;
    this.jsonOutputParser = new JsonOutputFunctionsParser();
    this.stringOutputParser = new StringOutputParser();
  }

  /**
   * Renders a chat history as a plain-text transcript, prefixing each
   * message with "Human: " or "AI: "; other message types get no prefix.
   */
  getChatHistoryString(chatHistory: BaseMessage[]): string {
    return chatHistory
      .map((chatMessage) => {
        if (chatMessage._getType() === "human") {
          return `Human: ${chatMessage.content}`;
        } else if (chatMessage._getType() === "ai") {
          return `AI: ${chatMessage.content}`;
        } else {
          return `${chatMessage.content}`;
        }
      })
      .join("\n");
  }

  /** Removes duplicate strings, preserving first-seen order. */
  removeDuplicateStrings(strings: Array<string>): Array<string> {
    return [...new Set(strings)];
  }

  /**
   * This method breaks down the chat history into chunks of messages.
   * Each chunk consists of a sequence of messages ending with an AI message and the subsequent user response, if any.
   *
   * @param {BaseMessage[]} chatHistory - The chat history to be chunked.
   *
   * @returns {MessageChunkResult[]} An array of message chunks. Each chunk includes a sequence of messages and the subsequent user response.
   *
   * @description
   * The method iterates over the chat history and pushes each message into a temporary array.
   * When it encounters an AI message, it checks for a subsequent user message.
   * If a user message is found, it is considered as the user response to the AI message.
   * If no user message is found after the AI message, the user response is undefined.
   * The method then pushes the chunk (sequence of messages and user response) into the result array.
   * This process continues until all messages in the chat history have been processed.
   * Note: the shared temp array is never reset, so each chunk contains the
   * full history up to and including its AI message (cumulative chunks).
   */
  chunkMessagesByAIResponse(chatHistory: BaseMessage[]): MessageChunkResult[] {
    const newArray: MessageChunkResult[] = [];
    const tempArray: BaseMessage[] = [];
    chatHistory.forEach((item, index) => {
      tempArray.push(item);
      if (item._getType() === "ai") {
        // Only the message immediately after an AI message, and only if it
        // is human, counts as the user's response to that AI message.
        let userResponse: BaseMessage | undefined = chatHistory[index + 1];
        if (!userResponse || userResponse._getType() !== "human") {
          userResponse = undefined;
        }
        newArray.push({
          chunkedMessages: tempArray,
          userResponse: userResponse
            ? new HumanMessage(userResponse)
            : undefined,
        });
      }
    });
    return newArray;
  }

  /**
   * This method processes a chat history to generate insights about the user.
   *
   * @param {ChainValues} values - The input values for the chain. It should contain a key for chat history.
   * @param {CallbackManagerForChainRun} [runManager] - Optional callback manager for the chain run.
   *
   * @returns {Promise<ChainValues>} A promise that resolves to a list of insights about the user.
   *
   * @throws {Error} If the chat history key is not found in the input values or if the chat history is not an array of BaseMessages.
   *
   * @description
   * The method performs the following steps:
   * 1. Checks if the chat history key is present in the input values and if the chat history is an array of BaseMessages.
   * 2. Breaks the chat history into chunks of messages.
   * 3. For each chunk, it generates an initial prediction for the user's next message.
   * 4. For each prediction, it generates insights and prediction violations, and regenerates the prediction based on the violations.
   * 5. For each set of messages, it generates a fact/insight about the user.
   * The method returns a list of these insights.
   */
  async _call(
    values: ChainValues,
    runManager?: CallbackManagerForChainRun
  ): Promise<ChainValues> {
    if (!(this.chatHistoryKey in values)) {
      throw new Error(`Chat history key ${this.chatHistoryKey} not found`);
    }
    const chatHistory: unknown[] = values[this.chatHistoryKey];
    const isEveryMessageBaseMessage = chatHistory.every((message) =>
      isBaseMessage(message)
    );
    if (!isEveryMessageBaseMessage) {
      throw new Error("Chat history must be an array of BaseMessages");
    }
    const messageChunks = this.chunkMessagesByAIResponse(
      chatHistory as BaseMessage[]
    );
    // Generate the initial prediction for every user message.
    const userPredictions = await Promise.all(
      messageChunks.map(async (chatHistoryChunk) => ({
        userPredictions: await this.predictNextUserMessage(
          chatHistoryChunk.chunkedMessages
        ),
        userResponse: chatHistoryChunk.userResponse,
        runManager,
      }))
    );
    // Generate insights, and prediction violations for every user message.
    // This call also regenerates the prediction based on the violations.
    const predictionViolations = await Promise.all(
      userPredictions.map((prediction) =>
        this.getPredictionViolations({
          userPredictions: prediction.userPredictions,
          userResponse: prediction.userResponse,
          runManager,
        })
      )
    );
    // Generate a fact/insight about the user for every set of messages.
    const insights = await Promise.all(
      predictionViolations.map((violation) =>
        this.generateFacts({
          userResponse: violation.userResponse,
          predictions: {
            revisedPrediction: violation.revisedPrediction,
            explainedPredictionErrors: violation.explainedPredictionErrors,
          },
        })
      )
    );
    return {
      insights,
    };
  }

  /**
   * This method predicts the next user message based on the chat history.
   *
   * @param {BaseMessage[]} chatHistory - The chat history based on which the next user message is predicted.
   * @param {CallbackManagerForChainRun} [runManager] - Optional callback manager for the chain run.
   *
   * @returns {Promise<PredictNextUserMessageResponse>} A promise that resolves to the predicted next user message, the user state, and any insights.
   *
   * @throws {Error} If the response from the language model does not contain the expected keys: 'userState', 'predictedUserMessage', and 'insights'.
   */
  private async predictNextUserMessage(
    chatHistory: BaseMessage[],
    runManager?: CallbackManagerForChainRun
  ): Promise<PredictNextUserMessageResponse> {
    const messageString = this.getChatHistoryString(chatHistory);
    const llmWithFunctions = this.llm.bind({
      functions: [PREDICT_NEXT_USER_MESSAGE_FUNCTION],
      function_call: { name: PREDICT_NEXT_USER_MESSAGE_FUNCTION.name },
    });
    const chain = PREDICT_NEXT_USER_MESSAGE_PROMPT.pipe(llmWithFunctions).pipe(
      this.jsonOutputParser
    );
    const res = await chain.invoke(
      {
        chat_history: messageString,
      },
      runManager?.getChild("prediction")
    );
    if (
      !(
        "userState" in res &&
        "predictedUserMessage" in res &&
        "insights" in res
      )
    ) {
      throw new Error(`Invalid response from LLM: ${JSON.stringify(res)}`);
    }
    const predictionResponse = res as PredictNextUserMessageResponse;
    // Query the retriever for relevant insights. Use the generated insights as a query.
    const retrievedDocs = await this.retrieveRelevantInsights(
      predictionResponse.insights
    );
    const relevantDocs = this.removeDuplicateStrings([
      ...predictionResponse.insights,
      ...retrievedDocs,
    ]);
    return {
      ...predictionResponse,
      insights: relevantDocs,
    };
  }

  /**
   * Retrieves relevant insights based on the provided insights.
   *
   * @param {Array<string>} insights - An array of insights to be used for retrieving relevant documents.
   *
   * @returns {Promise<Array<string>>} A promise that resolves to an array of relevant insights content.
   */
  private async retrieveRelevantInsights(
    insights: Array<string>
  ): Promise<Array<string>> {
    // Only extract the first relevant doc from the retriever. We don't need more than one.
    const relevantInsightsDocuments = await Promise.all(
      insights.map(async (insight) => {
        const relevantInsight = await this.retriever.getRelevantDocuments(
          insight
        );
        return relevantInsight[0];
      })
    );
    // The retriever may return no documents for a given insight, in which
    // case the first element is undefined. Drop those entries instead of
    // crashing on `.pageContent` below.
    const relevantInsightsContent = relevantInsightsDocuments.flatMap(
      (document) => (document ? [document.pageContent] : [])
    );
    return relevantInsightsContent;
  }

  /**
   * This method generates prediction violations based on the predicted and actual user responses.
   * It also generates a revised prediction based on the identified violations.
   *
   * @param {Object} params - The parameters for the method.
   * @param {PredictNextUserMessageResponse} params.userPredictions - The predicted user message, user state, and insights.
   * @param {BaseMessage} [params.userResponse] - The actual user response.
   * @param {CallbackManagerForChainRun} [params.runManager] - Optional callback manager for the chain run.
   *
   * @returns {Promise<GetPredictionViolationsResponse>} A promise that resolves to an object containing the actual user response, the revised prediction, and the explained prediction errors.
   *
   * @throws {Error} If the actual user response content is not a plain string.
   */
  private async getPredictionViolations({
    userPredictions,
    userResponse,
    runManager,
  }: {
    userPredictions: PredictNextUserMessageResponse;
    userResponse?: BaseMessage;
    runManager?: CallbackManagerForChainRun;
  }): Promise<GetPredictionViolationsResponse> {
    const llmWithFunctions = this.llm.bind({
      functions: [PREDICTION_VIOLATIONS_FUNCTION],
      function_call: { name: PREDICTION_VIOLATIONS_FUNCTION.name },
    });
    const chain = PREDICTION_VIOLATIONS_PROMPT.pipe(llmWithFunctions).pipe(
      this.jsonOutputParser
    );
    if (typeof userResponse?.content !== "string") {
      throw new Error("This chain does not support non-string model output.");
    }
    const res = (await chain.invoke(
      {
        predicted_output: userPredictions.predictedUserMessage,
        actual_output: userResponse?.content ?? "",
        user_insights: userPredictions.insights.join("\n"),
      },
      runManager?.getChild("prediction_violations")
    )) as Awaited<{
      violationExplanation: string;
      explainedPredictionErrors: Array<string>;
      accuratePrediction: boolean;
    }>;
    // Generate a revised prediction based on violations.
    const revisedPrediction = await this.generateRevisedPrediction({
      originalPrediction: userPredictions.predictedUserMessage,
      explainedPredictionErrors: res.explainedPredictionErrors,
      userInsights: userPredictions.insights,
      runManager,
    });
    return {
      userResponse,
      revisedPrediction,
      explainedPredictionErrors: res.explainedPredictionErrors,
    };
  }

  /**
   * This method generates a revised prediction based on the original prediction, explained prediction errors, and user insights.
   *
   * @param {Object} params - The parameters for the method.
   * @param {string} params.originalPrediction - The original prediction made by the model.
   * @param {Array<string>} params.explainedPredictionErrors - An array of explained prediction errors.
   * @param {Array<string>} params.userInsights - An array of insights about the user.
   * @param {CallbackManagerForChainRun} [params.runManager] - Optional callback manager for the chain run.
   *
   * @returns {Promise<string>} A promise that resolves to a revised prediction.
   */
  private async generateRevisedPrediction({
    originalPrediction,
    explainedPredictionErrors,
    userInsights,
    runManager,
  }: {
    originalPrediction: string;
    explainedPredictionErrors: Array<string>;
    userInsights: Array<string>;
    runManager?: CallbackManagerForChainRun;
  }): Promise<string> {
    const revisedPredictionChain = GENERATE_REVISED_PREDICTION_PROMPT.pipe(
      this.llm
    ).pipe(this.stringOutputParser);
    const revisedPredictionRes = await revisedPredictionChain.invoke(
      {
        prediction: originalPrediction,
        explained_prediction_errors: explainedPredictionErrors.join("\n"),
        user_insights: userInsights.join("\n"),
      },
      runManager?.getChild("prediction_revision")
    );
    return revisedPredictionRes;
  }

  /**
   * This method generates facts or insights about the user based on the revised prediction, explained prediction errors, and the user's response.
   *
   * @param {Object} params - The parameters for the method.
   * @param {BaseMessage} [params.userResponse] - The actual user response.
   * @param {Object} params.predictions - The revised prediction and explained prediction errors.
   * @param {string} params.predictions.revisedPrediction - The revised prediction made by the model.
   * @param {Array<string>} params.predictions.explainedPredictionErrors - An array of explained prediction errors.
   * @param {CallbackManagerForChainRun} [params.runManager] - Optional callback manager for the chain run.
   *
   * @returns {Promise<string>} A promise that resolves to a string containing the generated facts or insights about the user.
   */
  private async generateFacts({
    userResponse,
    predictions,
    runManager,
  }: {
    userResponse?: BaseMessage;
    /**
     * Optional if the prediction was accurate.
     */
    predictions: {
      revisedPrediction: string;
      explainedPredictionErrors: Array<string>;
    };
    runManager?: CallbackManagerForChainRun;
  }): Promise<string> {
    const chain = GENERATE_FACTS_PROMPT.pipe(this.llm).pipe(
      this.stringOutputParser
    );
    if (typeof userResponse?.content !== "string") {
      throw new Error("This chain does not support non-string model output.");
    }
    const res = await chain.invoke(
      {
        prediction_violations: predictions.explainedPredictionErrors.join("\n"),
        prediction: predictions.revisedPrediction,
        user_message: userResponse?.content ?? "",
      },
      runManager?.getChild("generate_facts")
    );
    return res;
  }

  /**
   * Static method that creates a ViolationOfExpectationsChain instance from a
   * ChatOpenAI and retriever. It also accepts optional options
   * to customize the chain.
   *
   * @param llm The ChatOpenAI instance.
   * @param retriever The retriever used for similarity search.
   * @param options Optional options to customize the chain.
   *
   * @returns A new instance of ViolationOfExpectationsChain.
   */
  static fromLLM(
    llm: ChatOpenAI,
    retriever: BaseRetrieverInterface,
    options?: Partial<
      Omit<ViolationOfExpectationsChainInput, "llm" | "retriever">
    >
  ): ViolationOfExpectationsChain {
    return new this({
      retriever,
      llm,
      ...options,
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/chains | lc_public_repos/langchainjs/langchain/src/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.ts | import { PromptTemplate } from "@langchain/core/prompts";
/**
 * Prompt that asks the model to describe the user's mental state and
 * predict their next message from the chat history.
 */
export const PREDICT_NEXT_USER_MESSAGE_PROMPT =
  /* #__PURE__ */ PromptTemplate.fromTemplate(`
You have been tasked with coming up with insights and data-points based on a chat history between a human and an AI.
Given the user's chat history provide the following:
- Concise reasoning about the users internal mental state.
- Your prediction on how they will respond to the AI's most recent message.
- A concise list of any additional insights that would be useful to improve prediction.
--------
Chat History: {chat_history}`);

/**
 * Prompt that compares a predicted user message with the actual one and
 * asks for the concrete ways the prediction was violated.
 */
export const PREDICTION_VIOLATIONS_PROMPT =
  /* #__PURE__ */ PromptTemplate.fromTemplate(`You have been given a prediction and an actual message from a human and AI conversation.
Using the prediction, actual message, and additional user insights, generate the following:
- How exactly was the original prediction violated? Which parts were wrong? State the exact differences.
- If there were errors with the prediction, what were they and why?
--------
Predicted Output: {predicted_output}
--------
Actual Output: {actual_output}
--------
User Insights: {user_insights}
--------
`);

/**
 * Prompt that revises an earlier prediction given its identified errors
 * and known user insights.
 */
export const GENERATE_REVISED_PREDICTION_PROMPT =
  /* #__PURE__ */ PromptTemplate.fromTemplate(`
You have been tasked with revising a prediction on what a user might say in a chat conversation.
--------
Your previous prediction: {prediction}
--------
Ways in which your prediction was off: {explained_prediction_errors}
--------
Key insights to the user: {user_insights}
--------
Given the above, revise your prediction to be more accurate.
Revised Prediction:`);

/**
 * Prompt that distills a single fact about the user from the revised
 * prediction, its violations, and the user's actual message.
 */
export const GENERATE_FACTS_PROMPT =
  /* #__PURE__ */ PromptTemplate.fromTemplate(`
Given a user message, an LLM generated prediction of what that message might be, and a list of violations which the prediction made compared to the actual message, generate a fact about the user, relevant to the users message.
--------
Prediction violations: {prediction_violations}
--------
Revised prediction: {prediction}
--------
Actual user message: {user_message}
--------
Relevant fact:`);
|
0 | lc_public_repos/langchainjs/langchain/src/experimental/chains | lc_public_repos/langchainjs/langchain/src/experimental/chains/violation_of_expectations/index.ts | export {
type ViolationOfExpectationsChainInput,
ViolationOfExpectationsChain,
} from "./violation_of_expectations_chain.js";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/multi_vector.ts | import {
BaseRetriever,
type BaseRetrieverInput,
} from "@langchain/core/retrievers";
import type { VectorStoreInterface } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";
import { BaseStore, type BaseStoreInterface } from "@langchain/core/stores";
import { createDocumentStoreFromByteStore } from "../storage/encoder_backed.js";
/**
* Arguments for the MultiVectorRetriever class.
*/
export interface MultiVectorRetrieverInput extends BaseRetrieverInput {
  /** Vector store queried for the small (child) chunks. */
  vectorstore: VectorStoreInterface;
  /** @deprecated Prefer `byteStore`. */
  docstore?: BaseStoreInterface<string, Document>;
  /** Byte store holding the full (parent) documents, keyed by id. */
  byteStore?: BaseStore<string, Uint8Array>;
  /** Metadata key linking child chunks to their parent document. Defaults to "doc_id". */
  idKey?: string;
  /** Number of child chunks to fetch from the vector store. */
  childK?: number;
  /** Maximum number of parent documents to return. */
  parentK?: number;
}
/**
* A retriever that retrieves documents from a vector store and a document
* store. It uses the vector store to find relevant documents based on a
* query, and then retrieves the full documents from the document store.
* @example
* ```typescript
* const retriever = new MultiVectorRetriever({
* vectorstore: new FaissStore(),
* byteStore: new InMemoryStore<Unit8Array>(),
* idKey: "doc_id",
* childK: 20,
* parentK: 5,
* });
*
* const retrieverResult = await retriever.getRelevantDocuments("justice breyer");
* console.log(retrieverResult[0].pageContent.length);
* ```
*/
export class MultiVectorRetriever extends BaseRetriever {
  static lc_name() {
    return "MultiVectorRetriever";
  }

  lc_namespace = ["langchain", "retrievers", "multi_vector"];

  public vectorstore: VectorStoreInterface;

  public docstore: BaseStoreInterface<string, Document>;

  protected idKey: string;

  protected childK?: number;

  protected parentK?: number;

  constructor(args: MultiVectorRetrieverInput) {
    super(args);
    this.vectorstore = args.vectorstore;
    if (args.byteStore) {
      this.docstore = createDocumentStoreFromByteStore(args.byteStore);
    } else if (args.docstore) {
      this.docstore = args.docstore;
    } else {
      throw new Error(
        "byteStore and docstore are undefined. Please provide at least one."
      );
    }
    this.idKey = args.idKey ?? "doc_id";
    this.childK = args.childK;
    this.parentK = args.parentK;
  }

  /**
   * Finds child chunks similar to the query, collects their unique parent
   * ids (in first-seen order), and returns the corresponding parent
   * documents from the docstore, capped at `parentK`.
   */
  async _getRelevantDocuments(query: string): Promise<Document[]> {
    const subDocs = await this.vectorstore.similaritySearch(query, this.childK);
    // Deduplicate parent ids with a Set for O(1) membership checks instead
    // of the O(n) Array#includes scan per chunk; order is preserved.
    const ids: string[] = [];
    const seen = new Set<string>();
    for (const doc of subDocs) {
      const id = doc.metadata[this.idKey];
      if (id && !seen.has(id)) {
        seen.add(id);
        ids.push(id);
      }
    }
    const docs = await this.docstore.mget(ids);
    // mget yields undefined for missing keys; drop those before slicing.
    return docs
      .filter((doc) => doc !== undefined)
      .slice(0, this.parentK) as Document[];
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/score_threshold.ts | import { Document } from "@langchain/core/documents";
import {
VectorStore,
VectorStoreRetriever,
VectorStoreRetrieverInput,
} from "@langchain/core/vectorstores";
/**
 * Options for ScoreThresholdRetriever. `k` is managed internally (it grows
 * by `kIncrement` up to `maxK`), so it is omitted from the base input type.
 */
export type ScoreThresholdRetrieverInput<V extends VectorStore> = Omit<
  VectorStoreRetrieverInput<V>,
  "k"
> & {
  /** Upper bound on how many documents are fetched and returned. Default 100. */
  maxK?: number;
  /** How much the search size grows on each widening pass. Default 10. */
  kIncrement?: number;
  /** Minimum similarity score a document must reach to be kept. */
  minSimilarityScore: number;
};

/**
 * Retriever that keeps widening its similarity search until results start
 * falling below the score threshold or the `maxK` cap is reached, then
 * returns only documents at or above the threshold.
 */
export class ScoreThresholdRetriever<
  V extends VectorStore
> extends VectorStoreRetriever<V> {
  minSimilarityScore: number;

  kIncrement = 10;

  maxK = 100;

  constructor(input: ScoreThresholdRetrieverInput<V>) {
    super(input);
    this.maxK = input.maxK ?? this.maxK;
    // NOTE(review): the `??` fallback reads the still-uninitialized field,
    // so a nullish minSimilarityScore would stay undefined. The input type
    // marks it required, which keeps this from happening in practice.
    this.minSimilarityScore =
      input.minSimilarityScore ?? this.minSimilarityScore;
    this.kIncrement = input.kIncrement ?? this.kIncrement;
  }

  async getRelevantDocuments(query: string): Promise<Document[]> {
    let currentK = 0;
    let filteredResults: [Document, number][] = [];
    // Widen the search until scores drop below the threshold (i.e. fewer
    // than currentK results survive the filter) or currentK reaches maxK.
    do {
      currentK += this.kIncrement;
      const results = await this.vectorStore.similaritySearchWithScore(
        query,
        currentK,
        this.filter
      );
      filteredResults = results.filter(
        ([, score]) => score >= this.minSimilarityScore
      );
    } while (filteredResults.length >= currentK && currentK < this.maxK);
    // Strip the scores and cap the result count at maxK.
    return filteredResults.map((documents) => documents[0]).slice(0, this.maxK);
  }

  static fromVectorStore<V extends VectorStore>(
    vectorStore: V,
    options: Omit<ScoreThresholdRetrieverInput<V>, "vectorStore">
  ) {
    return new this<V>({ ...options, vectorStore });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/multi_query.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import {
BaseRetriever,
type BaseRetrieverInput,
type BaseRetrieverInterface,
} from "@langchain/core/retrievers";
import { Document } from "@langchain/core/documents";
import { BaseOutputParser } from "@langchain/core/output_parsers";
import { PromptTemplate, BasePromptTemplate } from "@langchain/core/prompts";
import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
import { LLMChain } from "../chains/llm_chain.js";
import type { BaseDocumentCompressor } from "./document_compressors/index.js";
// Shape of the parsed LLM output: one generated query per line.
interface LineList {
  lines: string[];
}

// Documents with arbitrary metadata, as returned by a retriever.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type MultiDocs = Document<Record<string, any>>[];
/**
 * Output parser that extracts newline-separated questions from between
 * `<questions>` and `</questions>` XML tags in the model output. A missing
 * open tag falls back to the start of the text; a missing close tag falls
 * back to the end. Blank lines are dropped.
 */
class LineListOutputParser extends BaseOutputParser<LineList> {
  static lc_name() {
    return "LineListOutputParser";
  }

  lc_namespace = ["langchain", "retrievers", "multiquery"];

  async parse(text: string): Promise<LineList> {
    const openTag = "<questions>";
    const closeTag = "</questions>";
    const openIdx = text.indexOf(openTag);
    const closeIdx = text.indexOf(closeTag);
    // Fall back to the whole string when either tag is absent.
    const from = openIdx === -1 ? 0 : openIdx + openTag.length;
    const to = closeIdx === -1 ? text.length : closeIdx;
    const lines = text
      .slice(from, to)
      .trim()
      .split("\n")
      .filter((line) => line.trim() !== "");
    return { lines };
  }

  getFormatInstructions(): string {
    throw new Error("Not implemented.");
  }
}
// Default prompt used by `MultiQueryRetriever.fromLLM` when no custom prompt
// is supplied. Asks the model for {queryCount} rephrasings wrapped in
// <questions> tags so LineListOutputParser can extract them.
const DEFAULT_QUERY_PROMPT = /* #__PURE__ */ new PromptTemplate({
  inputVariables: ["question", "queryCount"],
  template: `You are an AI language model assistant. Your task is
to generate {queryCount} different versions of the given user
question to retrieve relevant documents from a vector database.
By generating multiple perspectives on the user question,
your goal is to help the user overcome some of the limitations
of distance-based similarity search.
Provide these alternative questions separated by newlines between XML tags. For example:
<questions>
Question 1
Question 2
Question 3
</questions>
Original question: {question}`,
});
/** Constructor options for `MultiQueryRetriever`. */
export interface MultiQueryRetrieverInput extends BaseRetrieverInput {
  // Underlying retriever, queried once per generated question.
  retriever: BaseRetrieverInterface;
  /** @deprecated Pass a custom prompt into `.fromLLM` instead. */
  llmChain: LLMChain<LineList>;
  // Number of alternative questions to generate. Defaults to 3.
  queryCount?: number;
  // Key in the chain output holding the parsed lines. Defaults to "lines".
  parserKey?: string;
  // Optional compressor applied to the deduplicated result set.
  documentCompressor?: BaseDocumentCompressor | undefined;
  // Optional filter applied after compression.
  documentCompressorFilteringFn?: (docs: MultiDocs) => MultiDocs;
}
/**
 * Retriever that asks an LLM for several rephrasings of the user's question,
 * runs the underlying retriever once per rephrasing, and returns the
 * deduplicated union of all results (optionally compressed and filtered).
 * @example
 * ```typescript
 * const retriever = MultiQueryRetriever.fromLLM({
 *   llm: new ChatAnthropic({}),
 *   retriever: new MemoryVectorStore().asRetriever(),
 *   verbose: true,
 * });
 * const retrievedDocs = await retriever.getRelevantDocuments(
 *   "What are mitochondria made of?",
 * );
 * ```
 */
export class MultiQueryRetriever extends BaseRetriever {
  static lc_name() {
    return "MultiQueryRetriever";
  }

  lc_namespace = ["langchain", "retrievers", "multiquery"];

  private retriever: BaseRetrieverInterface;

  private llmChain: LLMChain<LineList>;

  // Number of alternative questions to generate per query.
  private queryCount = 3;

  // Key in the chain's parsed output that holds the generated lines.
  private parserKey = "lines";

  documentCompressor: BaseDocumentCompressor | undefined;

  documentCompressorFilteringFn?: MultiQueryRetrieverInput["documentCompressorFilteringFn"];

  constructor(fields: MultiQueryRetrieverInput) {
    super(fields);
    this.retriever = fields.retriever;
    this.llmChain = fields.llmChain;
    this.queryCount = fields.queryCount ?? this.queryCount;
    this.parserKey = fields.parserKey ?? this.parserKey;
    this.documentCompressor = fields.documentCompressor;
    this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;
  }

  /**
   * Builds a MultiQueryRetriever from an LLM and an optional custom prompt
   * (defaults to DEFAULT_QUERY_PROMPT), wiring in a LineListOutputParser.
   */
  static fromLLM(
    fields: Omit<MultiQueryRetrieverInput, "llmChain"> & {
      llm: BaseLanguageModelInterface;
      prompt?: BasePromptTemplate;
    }
  ): MultiQueryRetriever {
    const {
      retriever,
      llm,
      prompt = DEFAULT_QUERY_PROMPT,
      queryCount,
      parserKey,
      ...rest
    } = fields;
    const outputParser = new LineListOutputParser();
    const llmChain = new LLMChain({ llm, prompt, outputParser });
    return new this({ retriever, llmChain, queryCount, parserKey, ...rest });
  }

  // Generate the different queries for each retrieval, using our llmChain
  private async _generateQueries(
    question: string,
    runManager?: CallbackManagerForRetrieverRun
  ): Promise<string[]> {
    const response = await this.llmChain.call(
      { question, queryCount: this.queryCount },
      runManager?.getChild()
    );
    // The parsed LineList lives under `parserKey` in the chain output.
    const lines = response.text[this.parserKey] || [];
    if (this.verbose) {
      console.log(`Generated queries: ${lines}`);
    }
    return lines;
  }

  // Retrieve documents using the original retriever, one call per query,
  // in parallel. Result order depends on completion order of the calls.
  private async _retrieveDocuments(
    queries: string[],
    runManager?: CallbackManagerForRetrieverRun
  ): Promise<Document[]> {
    const documents: Document[] = [];
    await Promise.all(
      queries.map(async (query) => {
        const docs = await this.retriever.getRelevantDocuments(
          query,
          runManager?.getChild()
        );
        documents.push(...docs);
      })
    );
    return documents;
  }

  // Deduplicate the documents that were returned in multiple retrievals.
  // Two documents are equal when both pageContent and (sorted) metadata match.
  private _uniqueUnion(documents: Document[]): Document[] {
    const uniqueDocumentsDict: { [key: string]: Document } = {};
    for (const doc of documents) {
      const key = `${doc.pageContent}:${JSON.stringify(
        Object.entries(doc.metadata).sort()
      )}`;
      uniqueDocumentsDict[key] = doc;
    }
    const uniqueDocuments = Object.values(uniqueDocumentsDict);
    return uniqueDocuments;
  }

  async _getRelevantDocuments(
    question: string,
    runManager?: CallbackManagerForRetrieverRun
  ): Promise<Document[]> {
    const queries = await this._generateQueries(question, runManager);
    const documents = await this._retrieveDocuments(queries, runManager);
    const uniqueDocuments = this._uniqueUnion(documents);
    let outputDocs = uniqueDocuments;
    if (this.documentCompressor && uniqueDocuments.length) {
      outputDocs = await this.documentCompressor.compressDocuments(
        uniqueDocuments,
        question,
        runManager?.getChild()
      );
      if (this.documentCompressorFilteringFn) {
        outputDocs = this.documentCompressorFilteringFn(outputDocs);
      }
    }
    return outputDocs;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/hyde.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { Document } from "@langchain/core/documents";
import { PromptTemplate, BasePromptTemplate } from "@langchain/core/prompts";
import {
StringPromptValue,
BasePromptValue,
} from "@langchain/core/prompt_values";
import {
VectorStore,
VectorStoreRetriever,
VectorStoreRetrieverInput,
} from "@langchain/core/vectorstores";
import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
/**
 * A string that corresponds to a specific prompt template. Each key selects
 * one of the built-in HyDE templates (see `getPromptTemplateFromKey`).
 */
export type PromptKey =
  | "websearch"
  | "scifact"
  | "arguana"
  | "trec-covid"
  | "fiqa"
  | "dbpedia-entity"
  | "trec-news"
  | "mr-tydi";

/**
 * Options for the HydeRetriever class, which includes a BaseLanguageModel
 * instance, a VectorStore instance, and an optional promptTemplate which
 * can either be a BasePromptTemplate instance or a PromptKey.
 */
export type HydeRetrieverOptions<V extends VectorStore> =
  VectorStoreRetrieverInput<V> & {
    llm: BaseLanguageModelInterface;
    promptTemplate?: BasePromptTemplate | PromptKey;
  };
/**
 * A class for retrieving relevant documents based on a given query. It
 * extends the VectorStoreRetriever class and uses a BaseLanguageModel to
 * generate a hypothetical answer to the query, which is then used to
 * retrieve relevant documents.
 * @example
 * ```typescript
 * const retriever = new HydeRetriever({
 *   vectorStore: new MemoryVectorStore(new OpenAIEmbeddings()),
 *   llm: new ChatOpenAI(),
 *   k: 1,
 * });
 * await vectorStore.addDocuments(
 *   [
 *     "My name is John.",
 *     "My name is Bob.",
 *     "My favourite food is pizza.",
 *     "My favourite food is pasta.",
 *   ].map((pageContent) => new Document({ pageContent })),
 * );
 * const results = await retriever.getRelevantDocuments(
 *   "What is my favourite food?",
 * );
 * ```
 */
export class HydeRetriever<
  V extends VectorStore = VectorStore
> extends VectorStoreRetriever<V> {
  static lc_name() {
    return "HydeRetriever";
  }

  get lc_namespace(): string[] {
    return ["langchain", "retrievers", "hyde"];
  }

  // Model used to hallucinate a hypothetical answer to the query.
  llm: BaseLanguageModelInterface;

  // Optional custom prompt; must take exactly one variable, "question".
  promptTemplate?: BasePromptTemplate;

  /**
   * @param fields - Retriever options; `promptTemplate` may be a template
   *   instance or a `PromptKey` naming a built-in template.
   * @throws Error if the template's input variables are not exactly
   *   `["question"]`.
   */
  constructor(fields: HydeRetrieverOptions<V>) {
    super(fields);
    this.llm = fields.llm;
    this.promptTemplate =
      typeof fields.promptTemplate === "string"
        ? getPromptTemplateFromKey(fields.promptTemplate)
        : fields.promptTemplate;
    if (this.promptTemplate) {
      const { inputVariables } = this.promptTemplate;
      // Reject any template that does not take exactly the single variable
      // "question". (Previously `&&` was used here, which let invalid
      // templates — e.g. one variable named differently, or several
      // variables starting with "question" — slip through validation.)
      if (inputVariables.length !== 1 || inputVariables[0] !== "question") {
        throw new Error(
          `Prompt template must accept a single input variable 'question'. Invalid input variables for prompt template: ${inputVariables}`
        );
      }
    }
  }

  async _getRelevantDocuments(
    query: string,
    runManager?: CallbackManagerForRetrieverRun
  ): Promise<Document[]> {
    // Default to using the raw query as the prompt.
    let value: BasePromptValue = new StringPromptValue(query);
    // Use a custom template if provided
    if (this.promptTemplate) {
      value = await this.promptTemplate.formatPromptValue({ question: query });
    }
    // Get a hypothetical answer from the LLM
    const res = await this.llm.generatePrompt([value]);
    const answer = res.generations[0][0].text;
    // Retrieve relevant documents based on the hypothetical answer,
    // not the original query.
    const results = await this.vectorStore.similaritySearch(
      answer,
      this.k,
      this.filter,
      runManager?.getChild("vectorstore")
    );
    return results;
  }
}
/**
 * Returns a BasePromptTemplate instance based on a given PromptKey.
 * @throws Error if the key does not name a known template.
 */
export function getPromptTemplateFromKey(key: PromptKey): BasePromptTemplate {
  // One hypothetical-document template per supported dataset/task.
  const templates: Record<PromptKey, string> = {
    websearch: `Please write a passage to answer the question
Question: {question}
Passage:`,
    scifact: `Please write a scientific paper passage to support/refute the claim
Claim: {question}
Passage:`,
    arguana: `Please write a counter argument for the passage
Passage: {question}
Counter Argument:`,
    "trec-covid": `Please write a scientific paper passage to answer the question
Question: {question}
Passage:`,
    fiqa: `Please write a financial article passage to answer the question
Question: {question}
Passage:`,
    "dbpedia-entity": `Please write a passage to answer the question.
Question: {question}
Passage:`,
    "trec-news": `Please write a news passage about the topic.
Topic: {question}
Passage:`,
    "mr-tydi": `Please write a passage in Swahili/Korean/Japanese/Bengali to answer the question in detail.
Question: {question}
Passage:`,
  };
  const template = templates[key];
  if (template === undefined) {
    throw new Error(`Invalid prompt key: ${key}`);
  }
  return PromptTemplate.fromTemplate(template);
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/ensemble.ts | import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers";
import { Document, DocumentInterface } from "@langchain/core/documents";
import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
/** Constructor options for `EnsembleRetriever`. */
export interface EnsembleRetrieverInput extends BaseRetrieverInput {
  /** A list of retrievers to ensemble. */
  retrievers: BaseRetriever[];
  /**
   * A list of weights corresponding to the retrievers. Defaults to equal
   * weighting for all retrievers. Must match `retrievers` in length.
   */
  weights?: number[];
  /**
   * A constant added to the rank, controlling the balance between the importance
   * of high-ranked items and the consideration given to lower-ranked items.
   * Default is 60.
   */
  c?: number;
}
/**
 * Ensemble retriever that aggregates and orders the results of
 * multiple retrievers by using weighted Reciprocal Rank Fusion (RRF):
 * each document scores `weight / (rank + c)` per list it appears in,
 * summed across lists, and results are returned in descending score order.
 *
 * NOTE: documents are identified by `pageContent` alone — two documents
 * with identical text but different metadata are treated as the same.
 */
export class EnsembleRetriever extends BaseRetriever {
  static lc_name() {
    return "EnsembleRetriever";
  }

  lc_namespace = ["langchain", "retrievers", "ensemble_retriever"];

  retrievers: BaseRetriever[];

  weights: number[];

  // RRF rank-smoothing constant; see EnsembleRetrieverInput.c.
  c = 60;

  constructor(args: EnsembleRetrieverInput) {
    super(args);
    this.retrievers = args.retrievers;
    this.weights =
      args.weights ??
      new Array(args.retrievers.length).fill(1 / args.retrievers.length);
    // Use `??` (not `||`) so an explicit `c: 0` — a valid RRF constant that
    // weights purely by rank — is honored instead of being replaced by 60.
    this.c = args.c ?? 60;
  }

  async _getRelevantDocuments(
    query: string,
    runManager?: CallbackManagerForRetrieverRun
  ) {
    return this._rankFusion(query, runManager);
  }

  /**
   * Runs every retriever in parallel and fuses the ranked lists.
   */
  async _rankFusion(
    query: string,
    runManager?: CallbackManagerForRetrieverRun
  ) {
    const retrieverDocs = await Promise.all(
      this.retrievers.map((retriever, i) =>
        retriever.invoke(query, {
          callbacks: runManager?.getChild(`retriever_${i + 1}`),
        })
      )
    );
    const fusedDocs = await this._weightedReciprocalRank(retrieverDocs);
    return fusedDocs;
  }

  /**
   * Scores each document by weighted reciprocal rank across all lists and
   * returns the deduplicated documents sorted by descending fused score.
   * @throws Error if the number of lists does not match the weights.
   */
  async _weightedReciprocalRank(docList: DocumentInterface[][]) {
    if (docList.length !== this.weights.length) {
      throw new Error(
        "Number of retrieved document lists must be equal to the number of weights."
      );
    }

    const rrfScoreDict = docList.reduce(
      (rffScore: Record<string, number>, retrieverDoc, idx) => {
        let rank = 1;
        const weight = this.weights[idx];
        while (rank <= retrieverDoc.length) {
          const { pageContent } = retrieverDoc[rank - 1];
          if (!rffScore[pageContent]) {
            // eslint-disable-next-line no-param-reassign
            rffScore[pageContent] = 0;
          }
          // eslint-disable-next-line no-param-reassign
          rffScore[pageContent] += weight / (rank + this.c);
          rank += 1;
        }
        return rffScore;
      },
      {}
    );

    const uniqueDocs = this._uniqueUnion(docList.flat());
    const sortedDocs = Array.from(uniqueDocs).sort(
      (a, b) => rrfScoreDict[b.pageContent] - rrfScoreDict[a.pageContent]
    );

    return sortedDocs;
  }

  // Deduplicate by pageContent, keeping the first occurrence.
  private _uniqueUnion(documents: Document[]): Document[] {
    const documentSet = new Set();
    const result = [];

    for (const doc of documents) {
      const key = doc.pageContent;
      if (!documentSet.has(key)) {
        documentSet.add(key);
        result.push(doc);
      }
    }

    return result;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/time_weighted.ts | import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers";
import type { VectorStoreInterface } from "@langchain/core/vectorstores";
import type { DocumentInterface } from "@langchain/core/documents";
import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
/**
 * Interface for the fields required to initialize a
 * TimeWeightedVectorStoreRetriever instance.
 */
export interface TimeWeightedVectorStoreRetrieverFields
  extends BaseRetrieverInput {
  vectorStore: VectorStoreInterface;
  // Top-k passed to the underlying similarity search. Defaults to 100.
  searchKwargs?: number;
  // Backing list of all documents ever added. Defaults to [].
  memoryStream?: DocumentInterface[];
  // Exponential decay rate per hour since last access. Defaults to 0.01.
  decayRate?: number;
  // Maximum number of documents to return. Defaults to 4.
  k?: number;
  // Extra metadata keys whose values are added to the score.
  otherScoreKeys?: string[];
  // Salience assigned to memories not returned by the vector search.
  defaultSalience?: number;
}

// Metadata key recording when a document was last returned (epoch seconds).
export const LAST_ACCESSED_AT_KEY = "last_accessed_at";
// Metadata key recording the document's index in the memory stream.
export const BUFFER_IDX = "buffer_idx";
/**
 * TimeWeightedVectorStoreRetriever retrieves documents based on their time-weighted relevance.
 * ref: https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/retrievers/time_weighted_retriever.py
 * @example
 * ```typescript
 * const retriever = new TimeWeightedVectorStoreRetriever({
 *   vectorStore: new MemoryVectorStore(new OpenAIEmbeddings()),
 *   memoryStream: [],
 *   searchKwargs: 2,
 * });
 * await retriever.addDocuments([
 *   { pageContent: "My name is John.", metadata: {} },
 *   { pageContent: "My favourite food is pizza.", metadata: {} },
 *
 * ]);
 * const results = await retriever.getRelevantDocuments(
 *   "What is my favourite food?",
 * );
 * ```
 */
export class TimeWeightedVectorStoreRetriever extends BaseRetriever {
  static lc_name() {
    return "TimeWeightedVectorStoreRetriever";
  }

  get lc_namespace() {
    return ["langchain", "retrievers", "time_weighted"];
  }

  /**
   * The vectorstore to store documents and determine salience.
   */
  private vectorStore: VectorStoreInterface;

  /**
   * The number of top K most relevant documents to consider when searching.
   */
  private searchKwargs: number;

  /**
   * The memory_stream of documents to search through.
   */
  private memoryStream: DocumentInterface[];

  /**
   * The exponential decay factor used as (1.0-decay_rate)**(hrs_passed).
   */
  private decayRate: number;

  /**
   * The maximum number of documents to retrieve in a given call.
   */
  private k: number;

  /**
   * Other keys in the metadata to factor into the score, e.g. 'importance'.
   */
  private otherScoreKeys: string[];

  /**
   * The salience to assign memories not retrieved from the vector store.
   */
  private defaultSalience: number | null;

  /**
   * Constructor to initialize the required fields
   * @param fields - The fields required for initializing the TimeWeightedVectorStoreRetriever
   */
  constructor(fields: TimeWeightedVectorStoreRetrieverFields) {
    super(fields);
    this.vectorStore = fields.vectorStore;
    this.searchKwargs = fields.searchKwargs ?? 100;
    this.memoryStream = fields.memoryStream ?? [];
    this.decayRate = fields.decayRate ?? 0.01;
    this.k = fields.k ?? 4;
    this.otherScoreKeys = fields.otherScoreKeys ?? [];
    this.defaultSalience = fields.defaultSalience ?? null;
  }

  /**
   * Get the memory stream of documents.
   * @returns The memory stream of documents.
   */
  getMemoryStream(): DocumentInterface[] {
    return this.memoryStream;
  }

  /**
   * Set the memory stream of documents.
   * @param memoryStream The new memory stream of documents.
   */
  setMemoryStream(memoryStream: DocumentInterface[]) {
    this.memoryStream = memoryStream;
  }

  /**
   * Get relevant documents based on time-weighted relevance
   * @param query - The query to search for
   * @returns The relevant documents
   */
  async _getRelevantDocuments(
    query: string,
    runManager?: CallbackManagerForRetrieverRun
  ): Promise<DocumentInterface[]> {
    // All timestamps in this retriever are epoch seconds, not milliseconds.
    const now = Math.floor(Date.now() / 1000);
    const memoryDocsAndScores = this.getMemoryDocsAndScores();
    const salientDocsAndScores = await this.getSalientDocuments(
      query,
      runManager
    );
    // Salient (vector-searched) scores override the default-salience entries.
    const docsAndScores = { ...memoryDocsAndScores, ...salientDocsAndScores };
    return this.computeResults(docsAndScores, now);
  }

  /**
   * NOTE: When adding documents to a vector store, use addDocuments
   * via retriever instead of directly to the vector store.
   * This is because it is necessary to process the document
   * in prepareDocuments.
   *
   * @param docs - The documents to add to vector store in the retriever
   */
  async addDocuments(docs: DocumentInterface[]): Promise<void> {
    const now = Math.floor(Date.now() / 1000);
    const savedDocs = this.prepareDocuments(docs, now);
    this.memoryStream.push(...savedDocs);
    await this.vectorStore.addDocuments(savedDocs);
  }

  /**
   * Get the most recent `k` memory documents, scored with the default
   * salience (or 0 when none is configured).
   * @returns An object keyed by buffer index containing documents and scores
   */
  private getMemoryDocsAndScores(): Record<
    number,
    { doc: DocumentInterface; score: number }
  > {
    const memoryDocsAndScores: Record<
      number,
      { doc: DocumentInterface; score: number }
    > = {};
    for (const doc of this.memoryStream.slice(-this.k)) {
      const bufferIdx = doc.metadata[BUFFER_IDX];
      if (bufferIdx === undefined) {
        throw new Error(
          `Found a document in the vector store that is missing required metadata. This retriever only supports vector stores with documents that have been added through the "addDocuments" method on a TimeWeightedVectorStoreRetriever, not directly added or loaded into the backing vector store.`
        );
      }
      memoryDocsAndScores[bufferIdx] = {
        doc,
        score: this.defaultSalience ?? 0,
      };
    }
    return memoryDocsAndScores;
  }

  /**
   * Get salient documents and their scores based on the query
   * @param query - The query to search for
   * @returns An object keyed by buffer index containing documents and scores
   */
  private async getSalientDocuments(
    query: string,
    runManager?: CallbackManagerForRetrieverRun
  ): Promise<Record<number, { doc: DocumentInterface; score: number }>> {
    const docAndScores: [DocumentInterface, number][] =
      await this.vectorStore.similaritySearchWithScore(
        query,
        this.searchKwargs,
        undefined,
        runManager?.getChild()
      );
    const results: Record<number, { doc: DocumentInterface; score: number }> =
      {};
    for (const [fetchedDoc, score] of docAndScores) {
      const bufferIdx = fetchedDoc.metadata[BUFFER_IDX];
      if (bufferIdx === undefined) {
        throw new Error(
          `Found a document in the vector store that is missing required metadata. This retriever only supports vector stores with documents that have been added through the "addDocuments" method on a TimeWeightedVectorStoreRetriever, not directly added or loaded into the backing vector store.`
        );
      }
      // Return the canonical copy from the memory stream so last-accessed
      // updates are visible on subsequent calls.
      const doc = this.memoryStream[bufferIdx];
      results[bufferIdx] = { doc, score };
    }
    return results;
  }

  /**
   * Compute the final result set of documents based on the combined scores.
   * Side effect: stamps each returned document's LAST_ACCESSED_AT_KEY
   * metadata with `now` (in the shared memory stream).
   * @param docsAndScores - An object containing documents and their scores
   * @param now - The current timestamp in epoch seconds
   * @returns At most `k` documents, highest combined score first
   */
  private computeResults(
    docsAndScores: Record<number, { doc: DocumentInterface; score: number }>,
    now: number
  ): DocumentInterface[] {
    const recordedDocs = Object.values(docsAndScores)
      .map(({ doc, score }) => ({
        doc,
        score: this.getCombinedScore(doc, score, now),
      }))
      .sort((a, b) => b.score - a.score);
    const results: DocumentInterface[] = [];
    for (const { doc } of recordedDocs) {
      const bufferedDoc = this.memoryStream[doc.metadata[BUFFER_IDX]];
      bufferedDoc.metadata[LAST_ACCESSED_AT_KEY] = now;
      results.push(bufferedDoc);
      // Stop once `k` documents have been collected. (The previous
      // `results.length > this.k` check broke one iteration too late and
      // returned up to k + 1 documents, contradicting the contract of `k`.)
      if (results.length >= this.k) {
        break;
      }
    }
    return results;
  }

  /**
   * Prepare documents with necessary metadata before saving
   * @param docs - The documents to prepare
   * @param now - The current timestamp in epoch seconds
   * @returns The prepared documents
   */
  private prepareDocuments(
    docs: DocumentInterface[],
    now: number
  ): DocumentInterface[] {
    return docs.map((doc, i) => ({
      ...doc,
      metadata: {
        ...doc.metadata,
        [LAST_ACCESSED_AT_KEY]: doc.metadata[LAST_ACCESSED_AT_KEY] ?? now,
        created_at: doc.metadata.created_at ?? now,
        [BUFFER_IDX]: this.memoryStream.length + i,
      },
    }));
  }

  /**
   * Calculate the combined score based on vector relevance and other factors
   * @param doc - The document to calculate the score for
   * @param vectorRelevance - The relevance score from the vector store
   * @param now - The current timestamp in epoch seconds (the previous
   *   parameter name `nowMsec` was misleading: callers pass seconds)
   * @returns The combined score for the document
   */
  private getCombinedScore(
    doc: DocumentInterface,
    vectorRelevance: number | null,
    now: number
  ): number {
    const hoursPassed = this.getHoursPassed(
      now,
      doc.metadata[LAST_ACCESSED_AT_KEY]
    );
    let score = (1.0 - this.decayRate) ** hoursPassed;
    for (const key of this.otherScoreKeys) {
      score += doc.metadata[key];
    }
    if (vectorRelevance !== null) {
      score += vectorRelevance;
    }
    return score;
  }

  /**
   * Calculate the hours passed between two time points
   * @param time - The current time in seconds
   * @param refTime - The reference time in seconds
   * @returns The number of hours passed between the two time points
   */
  private getHoursPassed(time: number, refTime: number): number {
    return (time - refTime) / 3600;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/matryoshka_retriever.ts | import { DocumentInterface } from "@langchain/core/documents";
import { Embeddings } from "@langchain/core/embeddings";
import {
cosineSimilarity,
euclideanDistance,
innerProduct,
} from "@langchain/core/utils/math";
import {
VectorStore,
VectorStoreRetriever,
VectorStoreRetrieverInput,
} from "@langchain/core/vectorstores";
/**
 * Type for options when adding a document to the VectorStore.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type AddDocumentOptions = Record<string, any>;

/** Constructor options for `MatryoshkaRetriever`. */
export interface MatryoshkaRetrieverFields {
  /**
   * The number of documents to retrieve from the small store.
   * @default 50
   */
  smallK?: number;
  /**
   * The number of documents to retrieve from the large store.
   * @default 8
   */
  largeK?: number;
  /**
   * The metadata key to store the larger embeddings.
   * @default "lc_large_embedding"
   */
  largeEmbeddingKey?: string;
  /**
   * The embedding model to use when generating the large
   * embeddings.
   */
  largeEmbeddingModel: Embeddings;
  /**
   * The type of search to perform using the large embeddings.
   * @default "cosine"
   */
  searchType?: "cosine" | "innerProduct" | "euclidean";
}
/**
 * A retriever that uses two sets of embeddings to perform adaptive retrieval. Based
 * off of the "Matryoshka embeddings: faster OpenAI vector search using Adaptive Retrieval"
 * blog post {@link https://supabase.com/blog/matryoshka-embeddings}.
 *
 *
 * This class performs "Adaptive Retrieval" for searching text embeddings efficiently using the
 * Matryoshka Representation Learning (MRL) technique. It retrieves documents similar to a query
 * embedding in two steps:
 *
 * First-pass: Uses a lower dimensional sub-vector from the MRL embedding for an initial, fast,
 * but less accurate search.
 *
 * Second-pass: Re-ranks the top results from the first pass using the full, high-dimensional
 * embedding for higher accuracy.
 *
 *
 * This code implements MRL embeddings for efficient vector search by combining faster,
 * lower-dimensional initial search with accurate, high-dimensional re-ranking.
 */
export class MatryoshkaRetriever<
  Store extends VectorStore = VectorStore
> extends VectorStoreRetriever<Store> {
  smallK = 50;

  largeK = 8;

  largeEmbeddingKey = "lc_large_embedding";

  largeEmbeddingModel: Embeddings;

  searchType: "cosine" | "innerProduct" | "euclidean" = "cosine";

  constructor(
    fields: MatryoshkaRetrieverFields & VectorStoreRetrieverInput<Store>
  ) {
    super(fields);
    this.smallK = fields.smallK ?? this.smallK;
    this.largeK = fields.largeK ?? this.largeK;
    this.largeEmbeddingKey = fields.largeEmbeddingKey ?? this.largeEmbeddingKey;
    this.largeEmbeddingModel = fields.largeEmbeddingModel;
    this.searchType = fields.searchType ?? this.searchType;
  }

  /**
   * Ranks documents based on their similarity to a query embedding using larger embeddings.
   *
   * This method takes a query embedding and a list of documents (smallResults) as input. Each document
   * in the smallResults array has previously been associated with a large embedding stored in its metadata.
   * Depending on the `searchType` (cosine, innerProduct, or euclidean), it calculates the similarity scores
   * between the query embedding and each document's large embedding. It then ranks the documents based on
   * these similarity scores, from the most similar to the least similar.
   *
   * The method returns the top `largeK` documents, where `largeK` is a class property defining the
   * number of documents to return.
   *
   * @param {number[]} embeddedQuery The embedding of the query, represented as an array of numbers.
   * @param {DocumentInterface[]} smallResults An array of documents, each with metadata that includes a large embedding for similarity comparison.
   * @returns {DocumentInterface[]} The top `largeK` ranked documents based on their similarity to the query embedding.
   */
  private _rankByLargeEmbeddings(
    embeddedQuery: number[],
    smallResults: DocumentInterface[]
  ): DocumentInterface[] {
    const largeEmbeddings: Array<number[]> = smallResults.map((doc) =>
      JSON.parse(doc.metadata[this.largeEmbeddingKey])
    );
    let func: () => Array<number[]>;
    // For cosine similarity and inner product, LARGER scores mean more
    // similar, so we sort descending. Euclidean is a DISTANCE: SMALLER
    // values mean more similar, so it must be sorted ascending. (Sorting
    // descending for euclidean — as this method previously did — returned
    // the least similar documents.)
    let ascending = false;
    switch (this.searchType) {
      case "cosine":
        func = () => cosineSimilarity([embeddedQuery], largeEmbeddings);
        break;
      case "innerProduct":
        func = () => innerProduct([embeddedQuery], largeEmbeddings);
        break;
      case "euclidean":
        func = () => euclideanDistance([embeddedQuery], largeEmbeddings);
        ascending = true;
        break;
      default:
        throw new Error(`Unknown search type: ${this.searchType}`);
    }
    // Calculate the similarity scores between the query embedding and the large embeddings
    const [similarityScores] = func();
    // Create an array of indices from 0 to N-1, where N is the number of documents
    let indices = Array.from(
      { length: smallResults.length },
      (_, index) => index
    );
    indices = indices
      .map((v, i) => [similarityScores[i], v])
      .sort(([a], [b]) => (ascending ? a - b : b - a))
      .slice(0, this.largeK)
      .map(([, i]) => i);
    return indices.map((i) => smallResults[i]);
  }

  async _getRelevantDocuments(query: string): Promise<DocumentInterface[]> {
    // First pass (small embeddings in the vector store) and the query's
    // large embedding are computed in parallel.
    const [embeddedQuery, smallResults] = await Promise.all([
      this.largeEmbeddingModel.embedQuery(query),
      this.vectorStore.similaritySearch(query, this.smallK, this.filter),
    ]);
    // Second pass: re-rank the candidates with the large embeddings.
    return this._rankByLargeEmbeddings(embeddedQuery, smallResults);
  }

  /**
   * Override the default `addDocuments` method to embed the documents twice,
   * once using the larger embeddings model, and then again using the default
   * embedding model linked to the vector store.
   *
   * @param {DocumentInterface[]} documents - An array of documents to add to the vector store.
   * @param {AddDocumentOptions} options - An optional object containing additional options for adding documents.
   * @returns {Promise<string[] | void>} A promise that resolves to an array of the document IDs that were added to the vector store.
   */
  override addDocuments = async (
    documents: DocumentInterface[],
    options?: AddDocumentOptions
  ): Promise<string[] | void> => {
    // Ensure documents metadata does not contain the large embedding key
    if (documents.some((doc) => this.largeEmbeddingKey in doc.metadata)) {
      throw new Error(
        `All documents must not contain the large embedding key: ${this.largeEmbeddingKey} in their metadata.`
      );
    }
    const allDocPageContent = documents.map((doc) => doc.pageContent);
    const allDocLargeEmbeddings = await this.largeEmbeddingModel.embedDocuments(
      allDocPageContent
    );
    const newDocuments: Array<DocumentInterface> = documents.map(
      (doc, idx) => ({
        ...doc,
        metadata: {
          ...doc.metadata,
          [this.largeEmbeddingKey]: JSON.stringify(allDocLargeEmbeddings[idx]),
        },
      })
    );
    return this.vectorStore.addDocuments(newDocuments, options);
  };
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/parent_document.ts | import * as uuid from "uuid";
import {
type VectorStoreInterface,
type VectorStoreRetrieverInterface,
} from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";
import type { BaseDocumentCompressor } from "./document_compressors/index.js";
import {
TextSplitter,
TextSplitterChunkHeaderOptions,
} from "../text_splitter.js";
import {
MultiVectorRetriever,
type MultiVectorRetrieverInput,
} from "./multi_vector.js";
// Child documents with arbitrary metadata, as stored in the vector store.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type SubDocs = Document<Record<string, any>>[];

/**
 * Interface for the fields required to initialize a
 * ParentDocumentRetriever instance.
 */
export type ParentDocumentRetrieverFields = MultiVectorRetrieverInput & {
  // Splitter producing the small child chunks that get embedded.
  childSplitter: TextSplitter;
  // Optional splitter applied first to produce the stored parent documents.
  parentSplitter?: TextSplitter;
  /**
   * A custom retriever to use when retrieving instead of
   * the `.similaritySearch` method of the vectorstore.
   */
  childDocumentRetriever?: VectorStoreRetrieverInterface<VectorStoreInterface>;
  // Optional compressor applied to the retrieved child chunks.
  documentCompressor?: BaseDocumentCompressor | undefined;
  // Optional filter applied after compression.
  documentCompressorFilteringFn?: (docs: SubDocs) => SubDocs;
};
/**
* A type of document retriever that splits input documents into smaller chunks
* while separately storing and preserving the original documents.
* The small chunks are embedded, then on retrieval, the original
* "parent" documents are retrieved.
*
* This strikes a balance between better targeted retrieval with small documents
* and the more context-rich larger documents.
* @example
* ```typescript
* const retriever = new ParentDocumentRetriever({
* vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),
* byteStore: new InMemoryStore<Uint8Array>(),
* parentSplitter: new RecursiveCharacterTextSplitter({
* chunkOverlap: 0,
* chunkSize: 500,
* }),
* childSplitter: new RecursiveCharacterTextSplitter({
* chunkOverlap: 0,
* chunkSize: 50,
* }),
* childK: 20,
* parentK: 5,
* });
*
* const parentDocuments = await getDocuments();
* await retriever.addDocuments(parentDocuments);
* const retrievedDocs = await retriever.getRelevantDocuments("justice breyer");
* ```
*/
export class ParentDocumentRetriever extends MultiVectorRetriever {
  static lc_name() {
    return "ParentDocumentRetriever";
  }
  lc_namespace = ["langchain", "retrievers", "parent_document"];
  // Vector store holding the embedded child chunks.
  vectorstore: VectorStoreInterface;
  // Splitter that produces the small "child" chunks to be embedded.
  protected childSplitter: TextSplitter;
  // Optional splitter applied first to derive the "parent" documents.
  protected parentSplitter?: TextSplitter;
  // Metadata key linking a child chunk back to its parent's docstore id.
  protected idKey = "doc_id";
  // How many child chunks to fetch per query (passed to similaritySearch).
  protected childK?: number;
  // Maximum number of parent documents to return per query.
  protected parentK?: number;
  // Optional retriever used for child-chunk lookup instead of the vectorstore.
  childDocumentRetriever:
    | VectorStoreRetrieverInterface<VectorStoreInterface>
    | undefined;
  // Optional compressor applied to the retrieved child chunks.
  documentCompressor: BaseDocumentCompressor | undefined;
  // Optional post-filter applied to the compressor's output.
  documentCompressorFilteringFn?: ParentDocumentRetrieverFields["documentCompressorFilteringFn"];
  constructor(fields: ParentDocumentRetrieverFields) {
    super(fields);
    this.vectorstore = fields.vectorstore;
    this.childSplitter = fields.childSplitter;
    this.parentSplitter = fields.parentSplitter;
    this.idKey = fields.idKey ?? this.idKey;
    this.childK = fields.childK;
    this.parentK = fields.parentK;
    this.childDocumentRetriever = fields.childDocumentRetriever;
    this.documentCompressor = fields.documentCompressor;
    this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;
  }
  /**
   * Finds child chunks relevant to `query`, then returns their parent
   * documents (deduplicated, in child-relevance order) from the docstore.
   * @param query The user query.
   * @returns At most `parentK` parent documents (all of them when `parentK` is unset).
   */
  async _getRelevantDocuments(query: string): Promise<Document[]> {
    let subDocs: SubDocs = [];
    if (this.childDocumentRetriever) {
      subDocs = await this.childDocumentRetriever.getRelevantDocuments(query);
    } else {
      subDocs = await this.vectorstore.similaritySearch(query, this.childK);
    }
    // Optionally compress the child chunks, then apply the user post-filter.
    if (this.documentCompressor && subDocs.length) {
      subDocs = await this.documentCompressor.compressDocuments(subDocs, query);
      if (this.documentCompressorFilteringFn) {
        subDocs = this.documentCompressorFilteringFn(subDocs);
      }
    }
    // Maintain order: deduplicate parent ids while preserving the order in
    // which their child chunks were returned.
    const parentDocIds: string[] = [];
    for (const doc of subDocs) {
      if (!parentDocIds.includes(doc.metadata[this.idKey])) {
        parentDocIds.push(doc.metadata[this.idKey]);
      }
    }
    const parentDocs: Document[] = [];
    const storedParentDocs = await this.docstore.mget(parentDocIds);
    // mget yields `undefined` for ids missing from the docstore; drop those.
    const retrievedDocs: Document[] = storedParentDocs.filter(
      (doc?: Document): doc is Document => doc !== undefined
    );
    parentDocs.push(...retrievedDocs);
    // slice(0, undefined) returns every parent when parentK is unset.
    return parentDocs.slice(0, this.parentK);
  }
  /**
   * Indexes child chunks (via the child retriever when set, otherwise the
   * vectorstore) and optionally writes the parent documents to the docstore.
   * @param parentDoc Map of parent id -> parent document.
   * @param childDocs Child chunks already tagged with their parent's id.
   * @param addToDocstore Whether to persist `parentDoc` entries to the docstore.
   */
  async _storeDocuments(
    parentDoc: Record<string, Document>,
    childDocs: Document[],
    addToDocstore: boolean
  ) {
    if (this.childDocumentRetriever) {
      await this.childDocumentRetriever.addDocuments(childDocs);
    } else {
      await this.vectorstore.addDocuments(childDocs);
    }
    if (addToDocstore) {
      await this.docstore.mset(Object.entries(parentDoc));
    }
  }
  /**
   * Adds documents to the docstore and vectorstores.
   * If a retriever is provided, it will be used to add documents instead of the vectorstore.
   * @param docs The documents to add
   * @param config.ids Optional list of ids for documents. If provided should be the same
   *     length as the list of documents. Can be provided if parent documents
   *     are already in the document store and you don't want to re-add
   *     to the docstore. If not provided, random UUIDs will be used as ids.
   * @param config.addToDocstore Boolean of whether to add documents to docstore.
   *     This can be false if and only if `ids` are provided. You may want
   *     to set this to False if the documents are already in the docstore
   *     and you don't want to re-add them.
   * @param config.childDocChunkHeaderOptions Object with options for adding contextual chunk headers
   */
  async addDocuments(
    docs: Document[],
    config?: {
      ids?: string[];
      addToDocstore?: boolean;
      childDocChunkHeaderOptions?: TextSplitterChunkHeaderOptions;
    }
  ): Promise<void> {
    const {
      ids,
      addToDocstore = true,
      childDocChunkHeaderOptions = {},
    } = config ?? {};
    // Optionally split the inputs into parent documents first.
    const parentDocs = this.parentSplitter
      ? await this.parentSplitter.splitDocuments(docs)
      : docs;
    let parentDocIds;
    if (ids === undefined) {
      // Without caller-supplied ids the parents MUST be stored here,
      // otherwise the freshly generated UUIDs would reference nothing.
      if (!addToDocstore) {
        throw new Error(
          `If ids are not passed in, "config.addToDocstore" MUST be true`
        );
      }
      parentDocIds = parentDocs.map((_doc: Document) => uuid.v4());
    } else {
      parentDocIds = ids;
    }
    if (parentDocs.length !== parentDocIds.length) {
      throw new Error(
        `Got uneven list of documents and ids.\nIf "ids" is provided, should be same length as "documents".`
      );
    }
    for (let i = 0; i < parentDocs.length; i += 1) {
      const parentDoc = parentDocs[i];
      const parentDocId = parentDocIds[i];
      // Split each parent into child chunks and tag each chunk with its
      // parent's id so retrieval can map back to the parent document.
      const subDocs = await this.childSplitter.splitDocuments(
        [parentDoc],
        childDocChunkHeaderOptions
      );
      const taggedSubDocs = subDocs.map(
        (subDoc: Document) =>
          new Document({
            pageContent: subDoc.pageContent,
            metadata: { ...subDoc.metadata, [this.idKey]: parentDocId },
          })
      );
      await this._storeDocuments(
        { [parentDocId]: parentDoc },
        taggedSubDocs,
        addToDocstore
      );
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/retrievers/contextual_compression.ts | import {
BaseRetriever,
type BaseRetrieverInput,
type BaseRetrieverInterface,
} from "@langchain/core/retrievers";
import type { DocumentInterface } from "@langchain/core/documents";
import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
import { BaseDocumentCompressor } from "./document_compressors/index.js";
/**
* Interface for the arguments required to construct a
* ContextualCompressionRetriever. It extends the BaseRetrieverInput
* interface with two additional fields: baseCompressor and baseRetriever.
*/
export interface ContextualCompressionRetrieverArgs extends BaseRetrieverInput {
  /** Compressor applied to the documents returned by `baseRetriever`. */
  baseCompressor: BaseDocumentCompressor;
  /** Retriever producing the initial candidate documents. */
  baseRetriever: BaseRetrieverInterface;
}
/**
* A retriever that wraps a base retriever and compresses the results. It
* retrieves relevant documents based on a given query and then compresses
* these documents using a specified document compressor.
* @example
* ```typescript
* const retriever = new ContextualCompressionRetriever({
* baseCompressor: new LLMChainExtractor(),
* baseRetriever: new HNSWLib().asRetriever(),
* });
* const retrievedDocs = await retriever.getRelevantDocuments(
* "What did the speaker say about Justice Breyer?",
* );
* ```
*/
export class ContextualCompressionRetriever extends BaseRetriever {
  static lc_name() {
    return "ContextualCompressionRetriever";
  }
  lc_namespace = ["langchain", "retrievers", "contextual_compression"];
  /** Compressor applied to the documents produced by the base retriever. */
  baseCompressor: BaseDocumentCompressor;
  /** Retriever that produces the initial candidate documents. */
  baseRetriever: BaseRetrieverInterface;
  constructor(fields: ContextualCompressionRetrieverArgs) {
    super(fields);
    const { baseCompressor, baseRetriever } = fields;
    this.baseCompressor = baseCompressor;
    this.baseRetriever = baseRetriever;
  }
  /**
   * Fetches candidates from the wrapped retriever, then hands them to the
   * compressor. Both steps report to child callback runs.
   */
  async _getRelevantDocuments(
    query: string,
    runManager?: CallbackManagerForRetrieverRun
  ): Promise<DocumentInterface[]> {
    const rawDocs = await this.baseRetriever.getRelevantDocuments(
      query,
      runManager?.getChild("base_retriever")
    );
    return this.baseCompressor.compressDocuments(
      rawDocs,
      query,
      runManager?.getChild("base_compressor")
    );
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/document_compressors/chain_extract_prompt.ts | export const PROMPT_TEMPLATE = (
noOutputStr: string
) => `Given the following question and context, extract any part of the context *AS IS* that is relevant to answer the question. If none of the context is relevant return ${noOutputStr}.
Remember, *DO NOT* edit the extracted parts of the context.
> Question: {question}
> Context:
>>>
{context}
>>>
Extracted relevant parts:`;
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/document_compressors/index.ts | import type { DocumentInterface } from "@langchain/core/documents";
import { BaseDocumentTransformer } from "@langchain/core/documents";
import { Callbacks } from "@langchain/core/callbacks/manager";
/**
* Base Document Compression class. All compressors should extend this class.
*/
export abstract class BaseDocumentCompressor {
  /**
   * Compress the given documents in the context of `query`.
   * Implementations may drop, shorten, or otherwise rewrite documents.
   * @param documents Documents to be compressed.
   * @param query Query the compression is performed against.
   * @param callbacks Optional callbacks forwarded to sub-runs.
   * @returns A Promise resolving to the compressed documents.
   */
  abstract compressDocuments(
    documents: DocumentInterface[],
    query: string,
    callbacks?: Callbacks
  ): Promise<DocumentInterface[]>;
  /**
   * Duck-typed runtime check: any value exposing a `compressDocuments`
   * member is treated as a document compressor.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static isBaseDocumentCompressor(x: any): x is BaseDocumentCompressor {
    return x != null && x.compressDocuments !== undefined;
  }
}
/**
* Document compressor that uses a pipeline of Transformers.
* @example
* ```typescript
* const compressorPipeline = new DocumentCompressorPipeline({
* transformers: [
* new RecursiveCharacterTextSplitter({
* chunkSize: 200,
* chunkOverlap: 0,
* }),
* new EmbeddingsFilter({
* embeddings: new OpenAIEmbeddings(),
* similarityThreshold: 0.8,
* k: 5,
* }),
* ],
* });
* const retriever = new ContextualCompressionRetriever({
* baseCompressor: compressorPipeline,
* baseRetriever: new TavilySearchAPIRetriever({
* includeRawContent: true,
* }),
* });
* const retrievedDocs = await retriever.getRelevantDocuments(
* "What did the speaker say about Justice Breyer in the 2022 State of the Union?",
* );
* console.log({ retrievedDocs });
* ```
*/
export class DocumentCompressorPipeline extends BaseDocumentCompressor {
  /** Stages applied in order; each may be a compressor or a plain transformer. */
  transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];
  constructor(fields: {
    transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];
  }) {
    super();
    this.transformers = fields.transformers;
  }
  /**
   * Runs the documents through every stage sequentially. Compressor stages
   * receive the query and callbacks; plain document transformers do not.
   */
  async compressDocuments(
    documents: DocumentInterface[],
    query: string,
    callbacks?: Callbacks
  ): Promise<DocumentInterface[]> {
    let current = documents;
    for (let i = 0; i < this.transformers.length; i += 1) {
      const stage = this.transformers[i];
      current = BaseDocumentCompressor.isBaseDocumentCompressor(stage)
        ? await stage.compressDocuments(current, query, callbacks)
        : await stage.transformDocuments(current);
    }
    return current;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/document_compressors/chain_extract.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { type DocumentInterface, Document } from "@langchain/core/documents";
import { PromptTemplate } from "@langchain/core/prompts";
import { BaseOutputParser } from "@langchain/core/output_parsers";
import { LLMChain } from "../../chains/llm_chain.js";
import { BaseDocumentCompressor } from "./index.js";
import { PROMPT_TEMPLATE } from "./chain_extract_prompt.js";
/** Default chain-input builder: maps the query/document pair onto the
 * `question` and `context` prompt variables. */
function defaultGetInput(
  query: string,
  doc: DocumentInterface
): Record<string, unknown> {
  const context = doc.pageContent;
  return { question: query, context };
}
/**
 * Output parser that maps the model's "NO_OUTPUT" sentinel to an empty
 * string and passes any other (trimmed) text through unchanged.
 */
class NoOutputParser extends BaseOutputParser<string> {
  lc_namespace = [
    "langchain",
    "retrievers",
    "document_compressors",
    "chain_extract",
  ];
  noOutputStr = "NO_OUTPUT";
  async parse(text: string): Promise<string> {
    const trimmed = text.trim();
    // The sentinel means "no relevant context" and becomes an empty string.
    return trimmed === this.noOutputStr ? "" : trimmed;
  }
  getFormatInstructions(): string {
    throw new Error("Method not implemented.");
  }
}
/**
 * Builds the default extraction prompt, wired to a NoOutputParser so the
 * model's "no relevant context" sentinel is parsed to an empty string.
 */
function getDefaultChainPrompt(): PromptTemplate {
  const outputParser = new NoOutputParser();
  return new PromptTemplate({
    template: PROMPT_TEMPLATE(outputParser.noOutputStr),
    inputVariables: ["question", "context"],
    outputParser,
  });
}
/**
* Interface for the arguments required to create an instance of
* LLMChainExtractor.
*/
export interface LLMChainExtractorArgs {
llmChain: LLMChain;
getInput: (query: string, doc: DocumentInterface) => Record<string, unknown>;
}
/**
* A class that uses an LLM chain to extract relevant parts of documents.
* It extends the BaseDocumentCompressor class.
*/
export class LLMChainExtractor extends BaseDocumentCompressor {
  /** Chain invoked once per document to extract its relevant parts. */
  llmChain: LLMChain;
  /** Builds the chain input from the query and a document. */
  getInput: (query: string, doc: DocumentInterface) => Record<string, unknown> =
    defaultGetInput;
  constructor({ llmChain, getInput }: LLMChainExtractorArgs) {
    super();
    this.llmChain = llmChain;
    // Fall back to the default input builder: assigning `getInput`
    // unconditionally would overwrite the field initializer with
    // `undefined` when a (type-violating) caller omits it.
    this.getInput = getInput ?? defaultGetInput;
  }
  /**
   * Compresses a list of documents based on the output of an LLM chain.
   * Documents for which the chain returns an empty extraction are dropped.
   * @param documents The list of documents to be compressed.
   * @param query The query to be used for document compression.
   * @returns A list of compressed documents.
   */
  async compressDocuments(
    documents: DocumentInterface[],
    query: string
  ): Promise<DocumentInterface[]> {
    // One chain call per document, executed in parallel.
    const compressedDocs = await Promise.all(
      documents.map(async (doc) => {
        const input = this.getInput(query, doc);
        const output = await this.llmChain.predict(input);
        // An empty prediction means "nothing relevant" (see NoOutputParser).
        return output.length > 0
          ? new Document({
              pageContent: output,
              metadata: doc.metadata,
            })
          : undefined;
      })
    );
    return compressedDocs.filter((doc): doc is Document => doc !== undefined);
  }
  /**
   * Creates a new instance of LLMChainExtractor from a given LLM, prompt
   * template, and getInput function.
   * @param llm The BaseLanguageModel instance used for document extraction.
   * @param prompt The PromptTemplate instance used for document extraction.
   * @param getInput A function used for constructing the chain input from the query and a Document.
   * @returns A new instance of LLMChainExtractor.
   */
  static fromLLM(
    llm: BaseLanguageModelInterface,
    prompt?: PromptTemplate,
    getInput?: (
      query: string,
      doc: DocumentInterface
    ) => Record<string, unknown>
  ): LLMChainExtractor {
    const llmChain = new LLMChain({
      llm,
      prompt: prompt ?? getDefaultChainPrompt(),
    });
    return new LLMChainExtractor({
      llmChain,
      getInput: getInput ?? defaultGetInput,
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/document_compressors/embeddings_filter.ts | import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import type { DocumentInterface } from "@langchain/core/documents";
import { cosineSimilarity } from "@langchain/core/utils/math";
import { BaseDocumentCompressor } from "./index.js";
/**
* Interface for the parameters of the `EmbeddingsFilter` class.
*/
export interface EmbeddingsFilterParams {
  /** Embeddings used for both the documents and the query. */
  embeddings: EmbeddingsInterface;
  /** Pairwise similarity function; defaults to cosine similarity. */
  similarityFn?: (x: number[][], y: number[][]) => number[][];
  /** Keep only documents with similarity strictly greater than this value. */
  similarityThreshold?: number;
  /** Number of top documents to keep; at least one of `k` / `similarityThreshold` must be set. */
  k?: number;
}
/**
* Class that represents a document compressor that uses embeddings to
* drop documents unrelated to the query.
* @example
* ```typescript
* const embeddingsFilter = new EmbeddingsFilter({
* embeddings: new OpenAIEmbeddings(),
* similarityThreshold: 0.8,
* k: 5,
* });
* const retrievedDocs = await embeddingsFilter.filterDocuments(
* getDocuments(),
* "What did the speaker say about Justice Breyer in the 2022 State of the Union?",
* );
* console.log({ retrievedDocs });
* ```
*/
export class EmbeddingsFilter extends BaseDocumentCompressor {
  /**
   * Embeddings to use for embedding document contents and queries.
   */
  embeddings: EmbeddingsInterface;
  /**
   * Similarity function for comparing documents.
   */
  similarityFn = cosineSimilarity;
  /**
   * Threshold for determining when two documents are similar enough
   * to be considered redundant. Must be specified if `k` is not set.
   */
  similarityThreshold?: number;
  /**
   * The number of relevant documents to return. Can be explicitly set to undefined, in which case
   * `similarityThreshold` must be specified. Defaults to 20.
   */
  k? = 20;
  constructor(params: EmbeddingsFilterParams) {
    super();
    this.embeddings = params.embeddings;
    this.similarityFn = params.similarityFn ?? this.similarityFn;
    this.similarityThreshold = params.similarityThreshold;
    // `"k" in params` distinguishes an explicit `k: undefined` (threshold-only
    // filtering) from an omitted `k` (keep the default of 20).
    this.k = "k" in params ? params.k : this.k;
    if (this.k === undefined && this.similarityThreshold === undefined) {
      throw new Error(`Must specify one of "k" or "similarity_threshold".`)
    }
  }
  /**
   * Embeds all documents and the query, then keeps (in original document
   * order is NOT preserved — see below) the `k` most similar documents and,
   * if `similarityThreshold` is also set, further drops any of those whose
   * similarity is not strictly greater than the threshold.
   * Note: top-k selection runs BEFORE threshold filtering when both are set.
   */
  async compressDocuments(
    documents: DocumentInterface[],
    query: string
  ): Promise<DocumentInterface[]> {
    const embeddedDocuments = await this.embeddings.embedDocuments(
      documents.map((doc) => doc.pageContent)
    );
    const embeddedQuery = await this.embeddings.embedQuery(query);
    // Single row of query-vs-document similarities, indexed by document.
    const similarity = this.similarityFn([embeddedQuery], embeddedDocuments)[0];
    let includedIdxs = Array.from(
      { length: embeddedDocuments.length },
      (_, i) => i
    );
    if (this.k !== undefined) {
      // Pair each document index with its similarity, sort descending,
      // and keep the top-k document indices.
      includedIdxs = includedIdxs
        .map((v, i) => [similarity[i], v])
        .sort(([a], [b]) => b - a)
        .slice(0, this.k)
        .map(([, i]) => i);
    }
    if (this.similarityThreshold !== undefined) {
      const threshold = this.similarityThreshold;
      // Strictly-greater comparison: documents exactly at the threshold drop.
      includedIdxs = includedIdxs.filter((i) => similarity[i] > threshold);
    }
    return includedIdxs.map((i) => documents[i]);
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers/document_compressors | lc_public_repos/langchainjs/langchain/src/retrievers/document_compressors/test/document_compressor.int.test.ts | import { test, expect } from "@jest/globals";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { RecursiveCharacterTextSplitter } from "../../../text_splitter.js";
import { EmbeddingsFilter } from "../embeddings_filter.js";
import { DocumentCompressorPipeline } from "../index.js";
// Integration test: chains a text splitter with an embeddings-based relevance
// filter and checks that only the on-topic sentence survives the pipeline.
test("Test DocumentCompressorPipeline", async () => {
  const embeddings = new OpenAIEmbeddings();
  const splitter = new RecursiveCharacterTextSplitter({
    chunkSize: 30,
    chunkOverlap: 0,
    separators: [". "],
  });
  // Keeps only chunks whose similarity to the query exceeds 0.8.
  const relevantFilter = new EmbeddingsFilter({
    embeddings,
    similarityThreshold: 0.8,
  });
  const pipelineFilter = new DocumentCompressorPipeline({
    transformers: [splitter, relevantFilter],
  });
  const texts = ["This sentence is about cows", "foo bar baz"];
  const docs = [new Document({ pageContent: texts.join(". ") })];
  const actual = await pipelineFilter.compressDocuments(
    docs,
    "Tell me about farm animals"
  );
  expect(actual.length).toBe(1);
  // The splitter may trim separators, so assert containment, not equality.
  expect(texts[0].includes(actual[0].pageContent)).toBeTruthy();
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/self_query/index.ts | import { RunnableInterface } from "@langchain/core/runnables";
import {
BaseRetriever,
type BaseRetrieverInput,
} from "@langchain/core/retrievers";
import { Document } from "@langchain/core/documents";
import { VectorStore } from "@langchain/core/vectorstores";
import {
BaseTranslator,
BasicTranslator,
FunctionalTranslator,
StructuredQuery,
} from "@langchain/core/structured_query";
import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
import {
loadQueryConstructorRunnable,
QueryConstructorRunnableOptions,
} from "../../chains/query_constructor/index.js";
export { BaseTranslator, BasicTranslator, FunctionalTranslator };
/**
* Interface for the arguments required to create a SelfQueryRetriever
* instance. It extends the BaseRetrieverInput interface.
*/
export interface SelfQueryRetrieverArgs<T extends VectorStore>
  extends BaseRetrieverInput {
  /** Vector store searched with the generated query and merged filter. */
  vectorStore: T;
  /** Translates a StructuredQuery filter into the store's filter format. */
  structuredQueryTranslator: BaseTranslator<T>;
  /** Runnable turning a natural-language query into a StructuredQuery. */
  queryConstructor: RunnableInterface<{ query: string }, StructuredQuery>;
  verbose?: boolean;
  /** When true, search with the user's original query instead of the LLM-rewritten one. */
  useOriginalQuery?: boolean;
  searchParams?: {
    /** Number of documents to fetch (defaults to 4). */
    k?: number;
    /** Default filter merged with the generated filter. */
    filter?: T["FilterType"];
    /** How the default and generated filters are combined by `mergeFilters`. */
    mergeFiltersOperator?: "or" | "and" | "replace";
    // NOTE(review): presumably makes the default filter apply even when no
    // filter was generated — confirm against the translator's mergeFilters.
    forceDefaultFilter?: boolean;
  };
}
/**
* Class for question answering over an index. It retrieves relevant
* documents based on a query. It extends the BaseRetriever class and
* implements the SelfQueryRetrieverArgs interface.
* @example
* ```typescript
* const selfQueryRetriever = SelfQueryRetriever.fromLLM({
* llm: new ChatOpenAI(),
* vectorStore: await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()),
* documentContents: "Brief summary of a movie",
* attributeInfo: attributeInfo,
* structuredQueryTranslator: new FunctionalTranslator(),
* });
* const relevantDocuments = await selfQueryRetriever.getRelevantDocuments(
* "Which movies are directed by Greta Gerwig?",
* );
* ```
*/
export class SelfQueryRetriever<T extends VectorStore>
  extends BaseRetriever
  implements SelfQueryRetrieverArgs<T>
{
  static lc_name() {
    return "SelfQueryRetriever";
  }
  get lc_namespace() {
    return ["langchain", "retrievers", "self_query"];
  }
  // Vector store searched with the (possibly rewritten) query and merged filter.
  vectorStore: T;
  // Runnable turning a natural-language query into a StructuredQuery.
  queryConstructor: RunnableInterface<{ query: string }, StructuredQuery>;
  verbose?: boolean;
  // Translates StructuredQuery filters into the store's filter format.
  structuredQueryTranslator: BaseTranslator<T>;
  // When true, search with the original query instead of the generated one.
  useOriginalQuery = false;
  searchParams?: {
    k?: number;
    filter?: T["FilterType"];
    mergeFiltersOperator?: "or" | "and" | "replace";
    forceDefaultFilter?: boolean;
  } = { k: 4, forceDefaultFilter: false };
  constructor(options: SelfQueryRetrieverArgs<T>) {
    super(options);
    this.vectorStore = options.vectorStore;
    this.queryConstructor = options.queryConstructor;
    this.verbose = options.verbose ?? false;
    this.searchParams = options.searchParams ?? this.searchParams;
    this.useOriginalQuery = options.useOriginalQuery ?? this.useOriginalQuery;
    this.structuredQueryTranslator = options.structuredQueryTranslator;
  }
  /**
   * Pipeline: (1) build a StructuredQuery from the user query, (2) translate
   * its filter and merge it with the default filter, (3) search the vector
   * store with the merged filter and the chosen query string.
   */
  async _getRelevantDocuments(
    query: string,
    runManager?: CallbackManagerForRetrieverRun
  ): Promise<Document<Record<string, unknown>>[]> {
    // LLM-driven step: extract a filter and a rewritten query from the input.
    const generatedStructuredQuery = await this.queryConstructor.invoke(
      { query },
      {
        callbacks: runManager?.getChild("query_constructor"),
        runName: "query_constructor",
      }
    );
    const nextArg = this.structuredQueryTranslator.visitStructuredQuery(
      generatedStructuredQuery
    );
    // Combine the configured default filter with the generated one per
    // mergeFiltersOperator / forceDefaultFilter.
    const filter = this.structuredQueryTranslator.mergeFilters(
      this.searchParams?.filter,
      nextArg.filter,
      this.searchParams?.mergeFiltersOperator,
      this.searchParams?.forceDefaultFilter
    );
    const generatedQuery = generatedStructuredQuery.query;
    let myQuery = query;
    // Prefer the generated query unless disabled or it came back empty.
    if (!this.useOriginalQuery && generatedQuery && generatedQuery.length > 0) {
      myQuery = generatedQuery;
    }
    return this.vectorStore
      .asRetriever({
        k: this.searchParams?.k,
        filter,
      })
      .invoke(myQuery, { callbacks: runManager?.getChild("retriever") });
  }
  /**
   * Static method to create a new SelfQueryRetriever instance from a
   * BaseLanguageModel and a VectorStore. It first loads a query constructor
   * chain using the loadQueryConstructorChain function, then creates a new
   * SelfQueryRetriever instance with the loaded chain and the provided
   * options.
   * @param options The options used to create the SelfQueryRetriever instance. It includes the QueryConstructorChainOptions and all the SelfQueryRetrieverArgs except 'llmChain'.
   * @returns A new instance of SelfQueryRetriever.
   */
  static fromLLM<T extends VectorStore>(
    options: QueryConstructorRunnableOptions &
      Omit<SelfQueryRetrieverArgs<T>, "queryConstructor">
  ): SelfQueryRetriever<T> {
    const {
      structuredQueryTranslator,
      allowedComparators,
      allowedOperators,
      llm,
      documentContents,
      attributeInfo,
      examples,
      vectorStore,
      ...rest
    } = options;
    // The translator's allowed comparators/operators act as defaults so the
    // constructor LLM only emits filters the translator can handle.
    const queryConstructor = loadQueryConstructorRunnable({
      llm,
      documentContents,
      attributeInfo,
      examples,
      allowedComparators:
        allowedComparators ?? structuredQueryTranslator.allowedComparators,
      allowedOperators:
        allowedOperators ?? structuredQueryTranslator.allowedOperators,
    });
    return new SelfQueryRetriever<T>({
      ...rest,
      queryConstructor,
      vectorStore,
      structuredQueryTranslator,
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/self_query/base.ts | export {
type TranslatorOpts,
BaseTranslator,
BasicTranslator,
} from "@langchain/core/structured_query";
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/self_query/functional.ts | export {
type FunctionFilter,
FunctionalTranslator,
} from "@langchain/core/structured_query";
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers/self_query | lc_public_repos/langchainjs/langchain/src/retrievers/self_query/tests/memory_self_query.int.test.ts | import { test } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { AttributeInfo } from "../../../chains/query_constructor/index.js";
import { SelfQueryRetriever } from "../index.js";
import { FunctionalTranslator } from "../functional.js";
import { MemoryVectorStore } from "../../../vectorstores/memory.js";
// Integration test: a self-query retriever over an in-memory vector store
// must turn natural-language constraints into metadata filters.
test("Memory Vector Store Self Query Retriever Test", async () => {
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
  ];
  // Attribute schema the query-constructor LLM is allowed to filter on.
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  const embeddings = new OpenAIEmbeddings();
  const llm = new ChatOpenAI({
    model: "gpt-4o-mini",
  });
  const documentContents = "Brief summary of a movie";
  const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new FunctionalTranslator(),
  });
  // No fixture carries a "length" metadata entry, so the generated filter
  // cannot match anything.
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are less than 90 minutes?"
  );
  // console.log(query1);
  expect(query1.length).toEqual(0);
  // Exactly two fixtures have a rating above 8.5 (8.6 and 9.9).
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?"
  );
  // console.log(query2);
  expect(query2.length).toEqual(2);
  // Exactly one fixture lists "Greta Gerwig" as director.
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  // console.log(query3);
  expect(query3.length).toEqual(1);
});
test("Memory Vector Store Self Query Retriever Test With Default Filter Or Merge Operator", async () => {
const docs = [
new Document({
pageContent:
"A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata: {
year: 1993,
rating: 7.7,
genre: "science fiction",
type: "movie",
},
}),
new Document({
pageContent:
"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata: {
year: 2010,
director: "Christopher Nolan",
rating: 8.2,
type: "movie",
},
}),
new Document({
pageContent:
"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata: {
year: 2006,
director: "Satoshi Kon",
rating: 8.6,
type: "movie",
},
}),
new Document({
pageContent:
"A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata: {
year: 2019,
director: "Greta Gerwig",
rating: 8.3,
type: "movie",
},
}),
new Document({
pageContent: "Toys come alive and have a blast doing so",
metadata: { year: 1995, genre: "animated", type: "movie" },
}),
new Document({
pageContent:
"Three men walk into the Zone, three men walk out of the Zone",
metadata: {
year: 1979,
director: "Andrei Tarkovsky",
genre: "science fiction",
rating: 9.9,
type: "movie",
},
}),
new Document({
pageContent: "10x the previous gecs",
metadata: {
year: 2023,
title: "10000 gecs",
artist: "100 gecs",
rating: 9.9,
type: "album",
},
}),
];
const attributeInfo: AttributeInfo[] = [
{
name: "genre",
description: "The genre of the movie",
type: "string or array of strings",
},
{
name: "year",
description: "The year the movie was released",
type: "number",
},
{
name: "director",
description: "The director of the movie",
type: "string",
},
{
name: "rating",
description: "The rating of the movie (1-10)",
type: "number",
},
{
name: "length",
description: "The length of the movie in minutes",
type: "number",
},
];
const embeddings = new OpenAIEmbeddings();
const llm = new ChatOpenAI({
model: "gpt-4o-mini",
});
const documentContents = "Brief summary of a movie";
const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
llm,
vectorStore,
documentContents,
attributeInfo,
structuredQueryTranslator: new FunctionalTranslator(),
searchParams: {
filter: (doc: Document) => doc.metadata && doc.metadata.type === "movie",
mergeFiltersOperator: "or",
k: docs.length,
},
});
const query1 = await selfQueryRetriever.getRelevantDocuments(
"Which movies are less than 90 minutes?"
);
// console.log(query1);
expect(query1.length).toEqual(6);
const query2 = await selfQueryRetriever.getRelevantDocuments(
"Which movies are rated higher than 8.5?"
);
// console.log(query2);
expect(query2.length).toEqual(7);
const query3 = await selfQueryRetriever.getRelevantDocuments(
"Which movies are directed by Greta Gerwig?"
);
// console.log(query3);
expect(query3.length).toEqual(6);
const query4 = await selfQueryRetriever.getRelevantDocuments(
"Awawawa au au au wawawawa hello?"
);
// console.log(query4);
expect(query4.length).toEqual(6); // this one should return documents since default filter takes over
});
test("Memory Vector Store Self Query Retriever Test With Default Filter And Merge Operator", async () => {
const docs = [
new Document({
pageContent:
"A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata: {
year: 1993,
rating: 7.7,
genre: "science fiction",
type: "movie",
},
}),
new Document({
pageContent:
"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata: {
year: 2010,
director: "Christopher Nolan",
rating: 8.2,
type: "movie",
},
}),
new Document({
pageContent:
"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata: {
year: 2006,
director: "Satoshi Kon",
rating: 8.6,
type: "movie",
},
}),
new Document({
pageContent:
"A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata: {
year: 2019,
director: "Greta Gerwig",
rating: 8.3,
type: "movie",
},
}),
new Document({
pageContent: "Toys come alive and have a blast doing so",
metadata: { year: 1995, genre: "animated", type: "movie" },
}),
new Document({
pageContent:
"Three men walk into the Zone, three men walk out of the Zone",
metadata: {
year: 1979,
director: "Andrei Tarkovsky",
genre: "science fiction",
rating: 9.9,
type: "movie",
},
}),
new Document({
pageContent: "10x the previous gecs",
metadata: {
year: 2023,
title: "10000 gecs",
artist: "100 gecs",
rating: 9.9,
type: "album",
},
}),
];
const attributeInfo: AttributeInfo[] = [
{
name: "genre",
description: "The genre of the movie",
type: "string or array of strings",
},
{
name: "year",
description: "The year the movie was released",
type: "number",
},
{
name: "director",
description: "The director of the movie",
type: "string",
},
{
name: "rating",
description: "The rating of the movie (1-10)",
type: "number",
},
{
name: "length",
description: "The length of the movie in minutes",
type: "number",
},
];
const embeddings = new OpenAIEmbeddings();
const llm = new ChatOpenAI({
model: "gpt-4o-mini",
});
const documentContents = "Brief summary of a movie";
const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
llm,
vectorStore,
documentContents,
attributeInfo,
structuredQueryTranslator: new FunctionalTranslator(),
searchParams: {
filter: (doc: Document) => doc.metadata && doc.metadata.type === "movie",
mergeFiltersOperator: "and",
k: docs.length,
},
});
const query1 = await selfQueryRetriever.getRelevantDocuments(
"Which movies are less than 90 minutes?"
);
// console.log(query1);
expect(query1.length).toEqual(0);
const query2 = await selfQueryRetriever.getRelevantDocuments(
"Which movies are rated higher than 8.5?"
);
// console.log(query2);
expect(query2.length).toEqual(2);
const query3 = await selfQueryRetriever.getRelevantDocuments(
"Which movies are directed by Greta Gerwig?"
);
// console.log(query3);
expect(query3.length).toEqual(1);
const query4 = await selfQueryRetriever.getRelevantDocuments(
"Awawawa au au au wawawawa hello?"
);
// console.log(query4);
expect(query4.length).toEqual(7); // this one should return documents since default filter takes over
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/score_threshold.int.test.ts | /* eslint-disable no-process-env */
import { expect, test } from "@jest/globals";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { ConversationalRetrievalQAChain } from "../../chains/conversational_retrieval_chain.js";
import { BufferMemory } from "../../memory/buffer_memory.js";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { ScoreThresholdRetriever } from "../score_threshold.js";
// Integration test (requires OpenAI credentials): ScoreThresholdRetriever
// should keep growing k by `kIncrement` until no further documents clear
// `minSimilarityScore`, so all three "Buildings" texts come back as sources.
test("ConversationalRetrievalQAChain.fromLLM should use its vector store recursively until it gets all the similar results with the minimum similarity score provided", async () => {
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
    ],
    [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],
    new OpenAIEmbeddings()
  );
  const model = new ChatOpenAI({
    modelName: "gpt-3.5-turbo",
    temperature: 0,
  });
  const chain = ConversationalRetrievalQAChain.fromLLM(
    model,
    ScoreThresholdRetriever.fromVectorStore(vectorStore, {
      minSimilarityScore: 0.9,
      kIncrement: 1,
    }),
    {
      returnSourceDocuments: true,
      memory: new BufferMemory({
        memoryKey: "chat_history",
        inputKey: "question",
        outputKey: "text",
      }),
    }
  );
  const res = await chain.invoke({
    question: "Buildings are made out of what?",
  });
  // console.log("response:", res);
  // All three building documents (ids 1-3) must be present; order is not
  // asserted, hence arrayContaining/objectContaining matchers.
  expect(res).toEqual(
    expect.objectContaining({
      text: expect.any(String),
      sourceDocuments: expect.arrayContaining([
        expect.objectContaining({
          metadata: expect.objectContaining({
            id: 1,
          }),
        }),
        expect.objectContaining({
          metadata: expect.objectContaining({
            id: 2,
          }),
        }),
        expect.objectContaining({
          metadata: expect.objectContaining({
            id: 3,
          }),
        }),
      ]),
    })
  );
});
// Integration test (requires OpenAI credentials): with `maxK: 2`, the
// ScoreThresholdRetriever must cap the number of returned documents at two
// even though three documents clear the similarity threshold.
test("ConversationalRetrievalQAChain.fromLLM should use its vector store to get up to X results that matches the provided similarity score", async () => {
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
    ],
    [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],
    new OpenAIEmbeddings()
  );
  const model = new ChatOpenAI({
    modelName: "gpt-3.5-turbo",
    temperature: 0,
  });
  const chain = ConversationalRetrievalQAChain.fromLLM(
    model,
    ScoreThresholdRetriever.fromVectorStore(vectorStore, {
      minSimilarityScore: 0.9,
      maxK: 2,
    }),
    {
      returnSourceDocuments: true,
      memory: new BufferMemory({
        memoryKey: "chat_history",
        inputKey: "question",
        outputKey: "text",
      }),
    }
  );
  // Use `invoke` (modern Runnable API) instead of the deprecated `call`,
  // matching the sibling test above.
  const res = await chain.invoke({
    question: "Buildings are made out of what?",
  });
  expect(res.sourceDocuments).toHaveLength(2);
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/parent_document.int.test.ts | import { expect, test } from "@jest/globals";
import { OpenAIEmbeddings } from "@langchain/openai";
import { InMemoryDocstore } from "../../stores/doc/in_memory.js";
import { TextLoader } from "../../document_loaders/fs/text.js";
import { InMemoryStore } from "../../storage/in_memory.js";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { ParentDocumentRetriever } from "../parent_document.js";
import { RecursiveCharacterTextSplitter } from "../../text_splitter.js";
import { ScoreThresholdRetriever } from "../score_threshold.js";
// Integration test: when no parentSplitter is configured, the retriever must
// hand back the original (unsplit) parent document in full.
test("Should return the full document if an unsplit parent document has been added", async () => {
  const vectorstore = new MemoryVectorStore(new OpenAIEmbeddings());
  const retriever = new ParentDocumentRetriever({
    vectorstore,
    docstore: new InMemoryStore(),
    childSplitter: new RecursiveCharacterTextSplitter({
      chunkOverlap: 0,
      chunkSize: 100,
    }),
  });
  const sourceDocs = await new TextLoader(
    "../examples/state_of_the_union.txt"
  ).load();
  await retriever.addDocuments(sourceDocs);
  const hits = await retriever.getRelevantDocuments("justice breyer");
  // Exactly one parent document, much larger than any child chunk.
  expect(hits.length).toEqual(1);
  expect(hits[0].pageContent.length).toBeGreaterThan(1000);
});
// Integration test: with a parentSplitter configured, the retriever should
// return 500-char parent chunks (not the whole source document).
test("Should return a part of a document if a parent splitter is passed", async () => {
  const vectorstore = new MemoryVectorStore(new OpenAIEmbeddings());
  const docstore = new InMemoryStore();
  const retriever = new ParentDocumentRetriever({
    vectorstore,
    docstore,
    parentSplitter: new RecursiveCharacterTextSplitter({
      chunkOverlap: 0,
      chunkSize: 500,
    }),
    childSplitter: new RecursiveCharacterTextSplitter({
      chunkOverlap: 0,
      chunkSize: 50,
    }),
  });
  const docs = await new TextLoader(
    "../examples/state_of_the_union.txt"
  ).load();
  await retriever.addDocuments(docs);
  const query = "justice breyer";
  const retrievedDocs = await retriever.getRelevantDocuments(query);
  // Kept only for manual inspection while debugging; intentionally unused.
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const vectorstoreRetreivedDocs = await vectorstore.similaritySearch(
    "justice breyer"
  );
  // console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length);
  // console.log(retrievedDocs);
  // Multiple parent chunks, each larger than a 50-char child chunk.
  expect(retrievedDocs.length).toBeGreaterThan(1);
  expect(retrievedDocs[0].pageContent.length).toBeGreaterThan(100);
});
// Integration test: the retriever should accept the legacy InMemoryDocstore
// (pre-BaseStore interface) and behave identically to the modern store.
test("Should work with a backwards compatible docstore too", async () => {
  const vectorstore = new MemoryVectorStore(new OpenAIEmbeddings());
  const retriever = new ParentDocumentRetriever({
    vectorstore,
    docstore: new InMemoryDocstore(),
    childSplitter: new RecursiveCharacterTextSplitter({
      chunkOverlap: 0,
      chunkSize: 100,
    }),
  });
  const loadedDocs = await new TextLoader(
    "../examples/state_of_the_union.txt"
  ).load();
  await retriever.addDocuments(loadedDocs);
  const matches = await retriever.getRelevantDocuments("justice breyer");
  // One full parent document is expected, well beyond child-chunk size.
  expect(matches.length).toEqual(1);
  expect(matches[0].pageContent.length).toBeGreaterThan(1000);
});
// NOTE(review): this test is a verbatim duplicate of the earlier
// "Should return a part of a document if a parent splitter is passed" test
// (same title, same body). Consider removing one copy or renaming this one
// to cover a distinct scenario.
test("Should return a part of a document if a parent splitter is passed", async () => {
  const vectorstore = new MemoryVectorStore(new OpenAIEmbeddings());
  const docstore = new InMemoryStore();
  const retriever = new ParentDocumentRetriever({
    vectorstore,
    docstore,
    parentSplitter: new RecursiveCharacterTextSplitter({
      chunkOverlap: 0,
      chunkSize: 500,
    }),
    childSplitter: new RecursiveCharacterTextSplitter({
      chunkOverlap: 0,
      chunkSize: 50,
    }),
  });
  const docs = await new TextLoader(
    "../examples/state_of_the_union.txt"
  ).load();
  await retriever.addDocuments(docs);
  const query = "justice breyer";
  const retrievedDocs = await retriever.getRelevantDocuments(query);
  // Kept only for manual inspection while debugging; intentionally unused.
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const vectorstoreRetreivedDocs = await vectorstore.similaritySearch(
    "justice breyer"
  );
  // console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length);
  // console.log(retrievedDocs);
  expect(retrievedDocs.length).toBeGreaterThan(1);
  expect(retrievedDocs[0].pageContent.length).toBeGreaterThan(100);
});
// Integration test: a custom childDocumentRetriever (ScoreThresholdRetriever
// capped at maxK = 1) should limit the parent lookup to a single result.
test("Should use a custom retriever to retrieve one doc", async () => {
  const vectorstore = new MemoryVectorStore(new OpenAIEmbeddings());
  const docstore = new InMemoryStore();
  const childDocumentRetriever = ScoreThresholdRetriever.fromVectorStore(
    vectorstore,
    {
      minSimilarityScore: 0.01, // Essentially no threshold
      maxK: 1, // Only return the top result
    }
  );
  const retriever = new ParentDocumentRetriever({
    vectorstore,
    docstore,
    childDocumentRetriever,
    parentSplitter: new RecursiveCharacterTextSplitter({
      chunkOverlap: 0,
      chunkSize: 500,
    }),
    childSplitter: new RecursiveCharacterTextSplitter({
      chunkOverlap: 0,
      chunkSize: 50,
    }),
  });
  const docs = await new TextLoader(
    "../examples/state_of_the_union.txt"
  ).load();
  await retriever.addDocuments(docs);
  const query = "justice breyer";
  const retrievedDocs = await retriever.getRelevantDocuments(query);
  // Kept only for manual inspection while debugging; intentionally unused.
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const vectorstoreRetreivedDocs = await vectorstore.similaritySearch(
    "justice breyer"
  );
  // console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length);
  // console.log(retrievedDocs);
  // maxK = 1 on the child retriever maps to exactly one parent document.
  expect(retrievedDocs).toHaveLength(1);
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/ensemble_retriever.int.test.ts | import { expect, test } from "@jest/globals";
import { CohereEmbeddings } from "@langchain/cohere";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { EnsembleRetriever } from "../ensemble.js";
// Integration test (requires Cohere credentials): a single-retriever ensemble
// should behave like the underlying retriever for a natural-language question.
test("Should work with a question input", async () => {
  // Fix: provide one metadata entry per text. Previously only 5 entries were
  // given for 7 texts, leaving the two mitochondria documents with undefined
  // metadata.
  const vectorstore = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
      "mitochondria is the powerhouse of the cell",
      "mitochondria is made of lipids",
    ],
    [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }, { id: 6 }, { id: 7 }],
    new CohereEmbeddings({ model: "embed-english-v3.0" })
  );
  const retriever = new EnsembleRetriever({
    retrievers: [vectorstore.asRetriever()],
  });
  const query = "What are mitochondria made of?";
  const retrievedDocs = await retriever.invoke(query);
  expect(retrievedDocs[0].pageContent).toContain("mitochondria");
});
// Integration test (requires Cohere credentials): an ensemble over two stores
// holding the same texts should surface "Cars" documents from both.
test("Should work with multiple retriever", async () => {
  // Fix: one metadata entry per text (7 texts need 7 ids); ids are kept
  // unique across the two stores. Previously only 5 entries were supplied,
  // leaving the mitochondria documents with undefined metadata.
  const vectorstore = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
      "mitochondria is the powerhouse of the cell",
      "mitochondria is made of lipids",
    ],
    [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }, { id: 6 }, { id: 7 }],
    new CohereEmbeddings({ model: "embed-english-v3.0" })
  );
  const vectorstore2 = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
      "mitochondria is the powerhouse of the cell",
      "mitochondria is made of lipids",
    ],
    [{ id: 8 }, { id: 9 }, { id: 10 }, { id: 11 }, { id: 12 }, { id: 13 }, { id: 14 }],
    new CohereEmbeddings({ model: "embed-english-v3.0" })
  );
  const retriever = new EnsembleRetriever({
    retrievers: [vectorstore.asRetriever(), vectorstore2.asRetriever()],
  });
  const query = "cars";
  const retrievedDocs = await retriever.invoke(query);
  expect(
    retrievedDocs.filter((item) => item.pageContent.includes("Cars")).length
  ).toBe(2);
});
// Integration test (requires Cohere credentials): per-retriever weights must
// be accepted and still yield "Cars" documents from both stores.
test("Should work with weights", async () => {
  // Fix: one metadata entry per text (7 texts need 7 ids); ids are kept
  // unique across the two stores. Previously only 5 entries were supplied,
  // leaving the mitochondria documents with undefined metadata.
  const vectorstore = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
      "mitochondria is the powerhouse of the cell",
      "mitochondria is made of lipids",
    ],
    [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }, { id: 6 }, { id: 7 }],
    new CohereEmbeddings({ model: "embed-english-v3.0" })
  );
  const vectorstore2 = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
      "mitochondria is the powerhouse of the cell",
      "mitochondria is made of lipids",
    ],
    [{ id: 8 }, { id: 9 }, { id: 10 }, { id: 11 }, { id: 12 }, { id: 13 }, { id: 14 }],
    new CohereEmbeddings({ model: "embed-english-v3.0" })
  );
  const retriever = new EnsembleRetriever({
    retrievers: [vectorstore.asRetriever(), vectorstore2.asRetriever()],
    weights: [0.5, 0.9],
  });
  const query = "cars";
  const retrievedDocs = await retriever.invoke(query);
  expect(
    retrievedDocs.filter((item) => item.pageContent.includes("Cars")).length
  ).toBe(2);
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/matryoshka_retriever.int.test.ts | import { test, expect } from "@jest/globals";
import { OpenAIEmbeddings } from "@langchain/openai";
import { faker } from "@faker-js/faker";
import { Document } from "@langchain/core/documents";
import { v4 as uuidV4 } from "uuid";
import { SyntheticEmbeddings } from "@langchain/core/utils/testing";
import { MatryoshkaRetriever } from "../matryoshka_retriever.js";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
// Integration test (requires OpenAI credentials): the Matryoshka retriever
// does a coarse pass with small (512-dim) embeddings, then re-ranks with
// large (3072-dim) embeddings; it should pick the 5 relevant documents out of
// 255 total.
test("MatryoshkaRetriever can retrieve", async () => {
  const smallEmbeddings = new OpenAIEmbeddings({
    modelName: "text-embedding-3-small",
    dimensions: 512, // Min num for small
  });
  const largeEmbeddings = new OpenAIEmbeddings({
    modelName: "text-embedding-3-large",
    dimensions: 3072, // Max num for large
  });
  const vectorStore = new MemoryVectorStore(smallEmbeddings);
  const retriever = new MatryoshkaRetriever({
    largeEmbeddingModel: largeEmbeddings,
    vectorStore,
    largeK: 5,
  });
  // 250 random-noise documents act as distractors.
  const irrelevantDocs = Array.from({ length: 250 }).map(
    () =>
      new Document({
        pageContent: faker.lorem.words(7),
        metadata: { id: uuidV4() },
      })
  );
  const relevantDocContents = [
    "LangChain is an open source github repo",
    "There are JS and PY versions of the LangChain github repos",
    "LangGraph is a new open source library by the LangChain team",
    "LangChain announced GA of LangSmith last week!",
    "I heart LangChain",
  ];
  const relevantDocs = [
    new Document({
      pageContent: relevantDocContents[0],
      metadata: { id: uuidV4() },
    }),
    new Document({
      pageContent: relevantDocContents[1],
      metadata: { id: uuidV4() },
    }),
    new Document({
      pageContent: relevantDocContents[2],
      metadata: { id: uuidV4() },
    }),
    new Document({
      pageContent: relevantDocContents[3],
      metadata: { id: uuidV4() },
    }),
    new Document({
      pageContent: relevantDocContents[4],
      metadata: { id: uuidV4() },
    }),
  ];
  const allDocs = [...irrelevantDocs, ...relevantDocs];
  await retriever.addDocuments(allDocs);
  const query = "What is LangChain?";
  // console.log("Querying documents");
  const results = await retriever.getRelevantDocuments(query);
  // Compare as sets: only membership matters, not ranking order.
  const retrieverResultContents = new Set(
    results.map((doc) => doc.pageContent)
  );
  // console.log([...retrieverResultContents]);
  expect(results.length).toBe(5);
  expect(retrieverResultContents).toEqual(new Set(relevantDocContents));
});
// Unit-style test with synthetic embeddings (no network): `largeK` controls
// how many documents the retriever returns.
test("Can change number of docs returned (largeK)", async () => {
  const smallEmbeddings = new SyntheticEmbeddings({
    vectorSize: 512,
  });
  const largeEmbeddings = new SyntheticEmbeddings({
    vectorSize: 3072,
  });
  const vectorStore = new MemoryVectorStore(smallEmbeddings);
  // Seed ten identical documents, each tagged with a unique id.
  const seedDocs: Document[] = [];
  for (let i = 0; i < 10; i += 1) {
    seedDocs.push(
      new Document({
        pageContent: "hello world",
        metadata: { id: uuidV4() },
      })
    );
  }
  const retriever = new MatryoshkaRetriever({
    largeEmbeddingModel: largeEmbeddings,
    vectorStore,
    largeK: 10,
  });
  await retriever.addDocuments(seedDocs);
  const results = await retriever.getRelevantDocuments("hello world");
  expect(results.length).toBe(10);
});
// Integration test (requires OpenAI credentials): addDocuments must stash the
// large (3072-dim) embedding on each document under `largeEmbeddingKey`, as a
// JSON-serialized array, alongside the caller's own metadata.
// Fix: corrected the typo "AddDocunents" in the test title.
test("addDocuments adds large embeddings metadata field", async () => {
  const testId = uuidV4();
  const doc = new Document({
    pageContent: "hello world",
    metadata: { id: testId },
  });
  const smallEmbeddings = new OpenAIEmbeddings({
    modelName: "text-embedding-3-small",
    dimensions: 512, // Min num for small
  });
  const largeEmbeddings = new OpenAIEmbeddings({
    modelName: "text-embedding-3-large",
    dimensions: 3072, // Max num for large
  });
  const vectorStore = new MemoryVectorStore(smallEmbeddings);
  const retriever = new MatryoshkaRetriever({
    largeEmbeddingModel: largeEmbeddings,
    vectorStore,
  });
  await retriever.addDocuments([doc]);
  const relevantDocs = await retriever.getRelevantDocuments("hello world");
  // Original metadata survives and the serialized large vector is attached.
  expect(relevantDocs[0].metadata.id).toBe(testId);
  expect(relevantDocs[0].metadata[retriever.largeEmbeddingKey]).toBeDefined();
  expect(
    JSON.parse(relevantDocs[0].metadata[retriever.largeEmbeddingKey]).length
  ).toBe(3072);
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/time_weighted.test.ts | import { describe, expect, jest, test } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import {
BUFFER_IDX,
LAST_ACCESSED_AT_KEY,
TimeWeightedVectorStoreRetriever,
} from "../time_weighted.js";
jest.useFakeTimers();
const mockNow = new Date("2023-04-18 15:30");
jest.setSystemTime(mockNow);
/** Converts a Date to whole seconds since the Unix epoch (sub-second part floored). */
const getSec = (date: Date): number => {
  const epochMs = date.valueOf();
  return Math.floor(epochMs / 1000);
};
const getMemoryStream = (): Document[] => [
{
pageContent: "foo",
metadata: {
[BUFFER_IDX]: 0,
[LAST_ACCESSED_AT_KEY]: getSec(new Date("2023-04-18 12:00")),
created_at: getSec(new Date("2023-04-18 12:00")),
},
},
{
pageContent: "bar",
metadata: {
[BUFFER_IDX]: 1,
[LAST_ACCESSED_AT_KEY]: getSec(new Date("2023-04-18 13:00")),
created_at: getSec(new Date("2023-04-18 13:00")),
},
},
{
pageContent: "baz",
metadata: {
[BUFFER_IDX]: 2,
[LAST_ACCESSED_AT_KEY]: getSec(new Date("2023-04-18 11:00")),
created_at: getSec(new Date("2023-04-18 11:00")),
},
},
];
describe("Test getRelevantDocuments", () => {
test("Should fail on a vector store with documents that have not been added through the addDocuments method on the retriever", async () => {
const vectorStore = new MemoryVectorStore(new FakeEmbeddings());
const retriever = new TimeWeightedVectorStoreRetriever({
vectorStore,
memoryStream: [],
searchKwargs: 2,
});
await vectorStore.addDocuments([
{ pageContent: "aaa", metadata: {} },
{ pageContent: "aaaa", metadata: {} },
{ pageContent: "bbb", metadata: {} },
]);
const query = "aaa";
await expect(() => retriever.getRelevantDocuments(query)).rejects.toThrow();
});
  // All docs share the mocked "now" timestamp, so the time-decay term ties and
  // ordering falls back to embedding similarity against the query.
  test("For different pageContent with the same lastAccessedAt, return in descending order of similar words.", async () => {
    const retriever = new TimeWeightedVectorStoreRetriever({
      vectorStore: new MemoryVectorStore(new FakeEmbeddings()),
      memoryStream: [],
      searchKwargs: 2,
    });
    await retriever.addDocuments([
      { pageContent: "aaa", metadata: {} },
      { pageContent: "aaaa", metadata: {} },
      { pageContent: "bbb", metadata: {} },
    ]);
    const query = "aaa";
    const resultsDocs = await retriever.getRelevantDocuments(query);
    // addDocuments stamps BUFFER_IDX sequentially and both timestamps with
    // the (mocked) current time.
    const expected = [
      {
        pageContent: "aaa",
        metadata: {
          [BUFFER_IDX]: 0,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: "aaaa",
        metadata: {
          [BUFFER_IDX]: 1,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: "bbb",
        metadata: {
          [BUFFER_IDX]: 2,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
    ];
    expect(resultsDocs).toStrictEqual(expected);
  });
  // With identical pageContent everywhere, similarity ties and ordering is
  // driven purely by recency (created_at): now > 13:00 > 12:00 > 11:00.
  test("Return in descending order of lastAccessedAt when memoryStream of the same pageContent", async () => {
    const samePageContent = "Test query";
    const samePageContentMemoryStream = getMemoryStream().map((doc) => ({
      ...doc,
      pageContent: samePageContent,
    }));
    const retriever = new TimeWeightedVectorStoreRetriever({
      vectorStore: new MemoryVectorStore(new FakeEmbeddings()),
      memoryStream: samePageContentMemoryStream,
    });
    await retriever.addDocuments([
      { pageContent: samePageContent, metadata: {} },
    ]);
    const query = "Test query";
    const resultsDocs = await retriever.getRelevantDocuments(query);
    // Retrieval refreshes LAST_ACCESSED_AT_KEY to "now" on every returned
    // doc; created_at keeps the original insertion time.
    const expected = [
      {
        pageContent: samePageContent,
        metadata: {
          [BUFFER_IDX]: 3,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: samePageContent,
        metadata: {
          [BUFFER_IDX]: 1,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 13:00")),
        },
      },
      {
        pageContent: samePageContent,
        metadata: {
          [BUFFER_IDX]: 0,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 12:00")),
        },
      },
      {
        pageContent: samePageContent,
        metadata: {
          [BUFFER_IDX]: 2,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 11:00")),
        },
      },
    ];
    expect(resultsDocs).toStrictEqual(expected);
  });
  // Different contents but FakeEmbeddings make similarity uniform, so ordering
  // is by recency: the newly added "qux", then bar (13:00), foo (12:00),
  // baz (11:00).
  test("Return in descending order of lastAccessedAt when memoryStream of different pageContent", async () => {
    const retriever = new TimeWeightedVectorStoreRetriever({
      vectorStore: new MemoryVectorStore(new FakeEmbeddings()),
      memoryStream: getMemoryStream(),
    });
    await retriever.addDocuments([{ pageContent: "qux", metadata: {} }]);
    const query = "Test query";
    const resultsDocs = await retriever.getRelevantDocuments(query);
    const expected = [
      {
        pageContent: "qux",
        metadata: {
          [BUFFER_IDX]: 3,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: "bar",
        metadata: {
          [BUFFER_IDX]: 1,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 13:00")),
        },
      },
      {
        pageContent: "foo",
        metadata: {
          [BUFFER_IDX]: 0,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 12:00")),
        },
      },
      {
        pageContent: "baz",
        metadata: {
          [BUFFER_IDX]: 2,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 11:00")),
        },
      },
    ];
    expect(resultsDocs).toStrictEqual(expected);
  });
  // Same scenario as above but with an explicit decayRate of 0.5; the
  // recency-based ordering is expected to be unchanged.
  test("Return in descending order of lastAccessedAt when memoryStream of different pageContent and decayRate", async () => {
    const decayRate = 0.5;
    const retriever = new TimeWeightedVectorStoreRetriever({
      vectorStore: new MemoryVectorStore(new FakeEmbeddings()),
      memoryStream: getMemoryStream(),
      decayRate,
    });
    await retriever.addDocuments([{ pageContent: "qux", metadata: {} }]);
    const query = "Test query";
    const resultsDocs = await retriever.getRelevantDocuments(query);
    const expected = [
      {
        pageContent: "qux",
        metadata: {
          [BUFFER_IDX]: 3,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: "bar",
        metadata: {
          [BUFFER_IDX]: 1,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 13:00")),
        },
      },
      {
        pageContent: "foo",
        metadata: {
          [BUFFER_IDX]: 0,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 12:00")),
        },
      },
      {
        pageContent: "baz",
        metadata: {
          [BUFFER_IDX]: 2,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 11:00")),
        },
      },
    ];
    expect(resultsDocs).toStrictEqual(expected);
  });
  // With k = 3 the result set is capped at three documents; note "foo"
  // (12:00) is the one dropped, not the oldest "baz".
  test("Return in descending order of lastAccessedAt when memoryStream of different pageContent and k = 3", async () => {
    const retriever = new TimeWeightedVectorStoreRetriever({
      vectorStore: new MemoryVectorStore(new FakeEmbeddings()),
      memoryStream: getMemoryStream(),
      k: 3,
    });
    await retriever.addDocuments([{ pageContent: "qux", metadata: {} }]);
    const query = "Test query";
    const resultsDocs = await retriever.getRelevantDocuments(query);
    const expected = [
      {
        pageContent: "qux",
        metadata: {
          [BUFFER_IDX]: 3,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: "bar",
        metadata: {
          [BUFFER_IDX]: 1,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 13:00")),
        },
      },
      {
        pageContent: "baz",
        metadata: {
          [BUFFER_IDX]: 2,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 11:00")),
        },
      },
    ];
    expect(resultsDocs).toStrictEqual(expected);
  });
  // searchKwargs = 2 limits how many docs come from the salient (vector)
  // search; the three freshly added docs plus one salient hit are expected.
  test("Return in descending order of lastAccessedAt when memoryStream of different pageContent and searchKwargs = 2", async () => {
    const retriever = new TimeWeightedVectorStoreRetriever({
      vectorStore: new MemoryVectorStore(new FakeEmbeddings()),
      memoryStream: getMemoryStream(),
      searchKwargs: 2,
    });
    await retriever.addDocuments([
      { pageContent: "qux", metadata: {} },
      { pageContent: "quux", metadata: {} },
      { pageContent: "corge", metadata: {} },
    ]);
    const query = "Test query";
    const resultsDocs = await retriever.getRelevantDocuments(query);
    const expected = [
      {
        pageContent: "qux",
        metadata: {
          [BUFFER_IDX]: 3,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: "quux",
        metadata: {
          [BUFFER_IDX]: 4,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: "corge",
        metadata: {
          [BUFFER_IDX]: 5,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(mockNow),
        },
      },
      {
        pageContent: "baz",
        metadata: {
          [BUFFER_IDX]: 2,
          [LAST_ACCESSED_AT_KEY]: getSec(mockNow),
          created_at: getSec(new Date("2023-04-18 11:00")),
        },
      },
    ];
    // console.log(resultsDocs);
    expect(resultsDocs).toStrictEqual(expected);
  });
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/multi_query.int.test.ts | import { expect, test } from "@jest/globals";
import { CohereEmbeddings } from "@langchain/cohere";
import { ChatOpenAI } from "@langchain/openai";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { MultiQueryRetriever } from "../multi_query.js";
// Integration test (requires Cohere + OpenAI credentials): the multi-query
// retriever should answer a natural-language question with a mitochondria doc.
test("Should work with a question input", async () => {
  // Fix: provide one metadata entry per text. Previously only 5 entries were
  // given for 7 texts, leaving the two mitochondria documents with undefined
  // metadata.
  const vectorstore = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
      "mitochondria is the powerhouse of the cell",
      "mitochondria is made of lipids",
    ],
    [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }, { id: 6 }, { id: 7 }],
    new CohereEmbeddings({ model: "embed-english-v3.0" })
  );
  const model = new ChatOpenAI({});
  const retriever = MultiQueryRetriever.fromLLM({
    llm: model,
    retriever: vectorstore.asRetriever(),
    verbose: true,
  });
  const query = "What are mitochondria made of?";
  const retrievedDocs = await retriever.getRelevantDocuments(query);
  expect(retrievedDocs[0].pageContent).toContain("mitochondria");
});
// Integration test (requires Cohere + OpenAI credentials): a single keyword
// query should still surface the matching "Cars" documents first.
test("Should work with a keyword", async () => {
  // Fix: provide one metadata entry per text (7 texts need 7 ids); see the
  // matching fix in the question-input test above.
  const vectorstore = await MemoryVectorStore.fromTexts(
    [
      "Buildings are made out of brick",
      "Buildings are made out of wood",
      "Buildings are made out of stone",
      "Cars are made out of metal",
      "Cars are made out of plastic",
      "mitochondria is the powerhouse of the cell",
      "mitochondria is made of lipids",
    ],
    [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }, { id: 6 }, { id: 7 }],
    new CohereEmbeddings({ model: "embed-english-v3.0" })
  );
  const model = new ChatOpenAI({});
  const retriever = MultiQueryRetriever.fromLLM({
    llm: model,
    retriever: vectorstore.asRetriever(),
    verbose: true,
  });
  const query = "cars";
  const retrievedDocs = await retriever.getRelevantDocuments(query);
  expect(retrievedDocs[0].pageContent).toContain("Cars");
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/vectorstores.test.ts | import { Document } from "@langchain/core/documents";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
// Verifies that retriever-scoped callbacks fire exactly once per retrieval:
// handleRetrieverStart with the query, handleRetrieverEnd with the results.
test("Test Memory Retriever with Callback", async () => {
  const pageContent = "Hello world";
  const embeddings = new FakeEmbeddings();
  const vectorStore = new MemoryVectorStore(embeddings);
  expect(vectorStore).toBeDefined();
  await vectorStore.addDocuments([
    { pageContent, metadata: { a: 1 } },
    { pageContent, metadata: { a: 1 } },
    { pageContent, metadata: { a: 1 } },
    { pageContent, metadata: { a: 1 } },
  ]);
  const queryStr = "testing testing";
  let startRun = 0;
  let endRun = 0;
  // Callbacks may run asynchronously after getRelevantDocuments resolves, so
  // each handler resolves a promise the test awaits before asserting counts.
  let startPromiseResolve: (v?: unknown) => void;
  const startPromise = new Promise((resolve) => {
    startPromiseResolve = resolve;
  });
  let endPromiseResolve: (v?: unknown) => void;
  const endPromise = new Promise((resolve) => {
    endPromiseResolve = resolve;
  });
  const retriever = vectorStore.asRetriever({
    k: 1,
    vectorStore,
    callbacks: [
      {
        handleRetrieverStart: async (_, query) => {
          expect(query).toBe(queryStr);
          startRun += 1;
          startPromiseResolve();
        },
        handleRetrieverEnd: async (documents) => {
          expect(documents[0].pageContent).toBe(pageContent);
          endRun += 1;
          endPromiseResolve();
        },
      },
    ],
  });
  const results = await retriever.getRelevantDocuments(queryStr);
  expect(results).toEqual([new Document({ metadata: { a: 1 }, pageContent })]);
  await startPromise;
  await endPromise;
  expect(startRun).toBe(1);
  expect(endRun).toBe(1);
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/hyde.int.test.ts | import { expect, test } from "@jest/globals";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { HydeRetriever } from "../hyde.js";
// Integration test (requires OpenAI credentials): HyDE generates a
// hypothetical answer with the LLM, then retrieves by its embedding; k = 1
// caps the result at a single document.
test("Hyde retriever", async () => {
  const embeddings = new OpenAIEmbeddings();
  const vectorStore = new MemoryVectorStore(embeddings);
  const llm = new OpenAI();
  const retriever = new HydeRetriever({ vectorStore, llm, k: 1 });
  const statements = [
    "My name is John.",
    "My name is Bob.",
    "My favourite food is pizza.",
    "My favourite food is pasta.",
  ];
  await vectorStore.addDocuments(
    statements.map((pageContent) => new Document({ pageContent }))
  );
  const results = await retriever.getRelevantDocuments(
    "What is my favourite food?"
  );
  expect(results.length).toBe(1);
  // console.log(results);
});
// Integration test (requires OpenAI credentials): same as above, but selects
// the built-in "websearch" prompt template by name.
test("Hyde retriever with default prompt template", async () => {
  const embeddings = new OpenAIEmbeddings();
  const vectorStore = new MemoryVectorStore(embeddings);
  const llm = new OpenAI();
  const retriever = new HydeRetriever({
    vectorStore,
    llm,
    k: 1,
    promptTemplate: "websearch",
  });
  const statements = [
    "My name is John.",
    "My name is Bob.",
    "My favourite food is pizza.",
    "My favourite food is pasta.",
  ];
  await vectorStore.addDocuments(
    statements.map((pageContent) => new Document({ pageContent }))
  );
  const results = await retriever.getRelevantDocuments(
    "What is my favourite food?"
  );
  expect(results.length).toBe(1);
  // console.log(results);
});
|
0 | lc_public_repos/langchainjs/langchain/src/retrievers | lc_public_repos/langchainjs/langchain/src/retrievers/tests/chain_extract.int.test.ts | import { test, expect } from "@jest/globals";
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { LLMChain } from "../../chains/llm_chain.js";
import { StuffDocumentsChain } from "../../chains/combine_docs_chain.js";
import { ConversationalRetrievalQAChain } from "../../chains/conversational_retrieval_chain.js";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { ContextualCompressionRetriever } from "../contextual_compression.js";
import { LLMChainExtractor } from "../document_compressors/chain_extract.js";
// Integration test (requires OpenAI credentials): wires an LLMChainExtractor
// compressor into a ContextualCompressionRetriever and runs it through a
// ConversationalRetrievalQAChain end to end.
test("Test LLMChainExtractor", async () => {
  const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
  const prompt = PromptTemplate.fromTemplate(
    "Print {question}, and ignore {chat_history}"
  );
  const baseCompressor = LLMChainExtractor.fromLLM(model);
  expect(baseCompressor).toBeDefined();
  const retriever = new ContextualCompressionRetriever({
    baseCompressor,
    baseRetriever: await MemoryVectorStore.fromTexts(
      ["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
      [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
      new OpenAIEmbeddings()
    ).then((vectorStore) => vectorStore.asRetriever()),
  });
  const llmChain = new LLMChain({ prompt, llm: model });
  const combineDocsChain = new StuffDocumentsChain({
    llmChain,
    documentVariableName: "foo",
  });
  const chain = new ConversationalRetrievalQAChain({
    retriever,
    combineDocumentsChain: combineDocsChain,
    questionGeneratorChain: llmChain,
  });
  // Use `invoke` (modern Runnable API) instead of the deprecated `call`.
  const res = await chain.invoke({ question: "foo", chat_history: "bar" });
  expect(res.text.length).toBeGreaterThan(0);
  // console.log({ res });
});
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/document_transformers/openai_functions.ts | import { z } from "zod";
import {
zodToJsonSchema,
type JsonSchema7ObjectType,
} from "zod-to-json-schema";
import {
Document,
MappingDocumentTransformer,
} from "@langchain/core/documents";
import { ChatOpenAI } from "@langchain/openai";
import { BaseChain } from "../chains/base.js";
import {
TaggingChainOptions,
createTaggingChain,
} from "../chains/openai_functions/index.js";
/**
 * Document transformer that enriches each document's metadata with fields
 * extracted by a tagging chain run over the document's page content.
 */
export class MetadataTagger extends MappingDocumentTransformer {
  static lc_name() {
    return "MetadataTagger";
  }

  // Chain with exactly one input key and one output key; the single output
  // is expected to be an object of metadata fields.
  protected taggingChain: BaseChain;

  constructor(fields: { taggingChain: BaseChain }) {
    super();
    this.taggingChain = fields.taggingChain;
    const { inputKeys, outputKeys } = this.taggingChain;
    if (inputKeys.length !== 1) {
      throw new Error(
        "Invalid input chain. The input chain must have exactly one input."
      );
    }
    if (outputKeys.length !== 1) {
      throw new Error(
        "Invalid input chain. The input chain must have exactly one output."
      );
    }
  }

  /**
   * Runs the tagging chain over the document's content and returns a new
   * document whose metadata merges the extracted fields with the original
   * metadata (original metadata wins on key collisions).
   */
  async _transformDocument(document: Document): Promise<Document> {
    const [inputKey] = this.taggingChain.inputKeys;
    const [outputKey] = this.taggingChain.outputKeys;
    const response = await this.taggingChain.call({
      [inputKey]: document.pageContent,
    });
    return new Document({
      pageContent: document.pageContent,
      metadata: { ...response[outputKey], ...document.metadata },
    });
  }
}
export function createMetadataTagger(
schema: JsonSchema7ObjectType,
options: TaggingChainOptions & { llm?: ChatOpenAI }
) {
const { llm = new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613" }), ...rest } =
options;
const taggingChain = createTaggingChain(schema, llm, rest);
return new MetadataTagger({ taggingChain });
}
export function createMetadataTaggerFromZod(
schema: z.AnyZodObject,
options: TaggingChainOptions & { llm?: ChatOpenAI }
) {
return createMetadataTagger(
zodToJsonSchema(schema) as JsonSchema7ObjectType,
options
);
}
|
0 | lc_public_repos/langchainjs/langchain/src/document_transformers | lc_public_repos/langchainjs/langchain/src/document_transformers/tests/openai_functions.int.test.ts | import { z } from "zod";
import { expect, test } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { createMetadataTaggerFromZod } from "../openai_functions.js";
const taggingChainTemplate = `Extract the desired information from the following passage.
Anonymous critics are actually Roger Ebert.
Passage:
{input}
`;
// Integration test: requires OPENAI_API_KEY and makes a live chat-completion
// call, so the metadata assertions depend on the model following the prompt.
test("Test OpenAIFunctions MetadataTagger", async () => {
  // Schema of the metadata fields the tagging chain should extract.
  const zodSchema = z.object({
    movie_title: z.string(),
    critic: z.string(),
    tone: z.enum(["positive", "negative"]),
    rating: z
      .optional(z.number())
      .describe("The number of stars the critic rated the movie"),
  });
  const metadataTagger = createMetadataTaggerFromZod(zodSchema, {
    llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo" }),
    prompt: PromptTemplate.fromTemplate(taggingChainTemplate),
  });
  // Second document carries pre-existing metadata to check merge behavior.
  const documents = [
    new Document({
      pageContent:
        "Review of The Bee Movie\nBy Roger Ebert\nThis is the greatest movie ever made. 4 out of 5 stars.",
    }),
    new Document({
      pageContent:
        "Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars.",
      metadata: { reliable: false },
    }),
  ];
  const newDocuments = await metadataTagger.transformDocuments(documents);
  // console.log(newDocuments);
  expect(newDocuments.length).toBe(2);
  expect(newDocuments[0].metadata.movie_title).toBe("The Bee Movie");
  expect(newDocuments[1].metadata.movie_title).toBe("The Godfather");
});
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/document_loaders/base.ts | export * from "@langchain/core/document_loaders/base";
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/fs/buffer.ts | import type { readFile as ReadFileT } from "node:fs/promises";
import { Document } from "@langchain/core/documents";
import { getEnv } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "../base.js";
/**
 * Abstract loader that reads its input (a file path or a `Blob`) into a
 * `Buffer` and delegates to a subclass `parse()` to turn the bytes into
 * `Document`s. The metadata passed to `parse()` records the source: the
 * file path, or `"blob"` plus the blob's MIME type.
 */
export abstract class BufferLoader extends BaseDocumentLoader {
  constructor(public filePathOrBlob: string | Blob) {
    super();
  }

  /**
   * Subclass hook: convert raw bytes into documents.
   * @param raw The buffer to be parsed.
   * @param metadata Metadata describing where the bytes came from.
   * @returns Promise resolving to the parsed `Document`s.
   */
  protected abstract parse(
    raw: Buffer,
    metadata: Document["metadata"]
  ): Promise<Document[]>;

  /**
   * Reads the file or blob into a buffer and parses it.
   * @returns Promise resolving to the parsed `Document`s.
   */
  public async load(): Promise<Document[]> {
    const source = this.filePathOrBlob;
    if (typeof source === "string") {
      // File-path branch: read via dynamically imported fs/promises.
      const { readFile } = await BufferLoader.imports();
      const buffer = await readFile(source);
      return this.parse(buffer, { source });
    }
    // Blob branch: materialize the blob's bytes as a Buffer.
    const arrayBuffer = await source.arrayBuffer();
    return this.parse(Buffer.from(arrayBuffer), {
      source: "blob",
      blobType: source.type,
    });
  }

  /**
   * Dynamically imports `readFile` from `fs/promises`; throws a descriptive
   * error on non-Node environments where that module is unavailable.
   * @returns Promise resolving to an object containing the `readFile` function.
   */
  static async imports(): Promise<{
    readFile: typeof ReadFileT;
  }> {
    try {
      const fsPromises = await import("node:fs/promises");
      return { readFile: fsPromises.readFile };
    } catch (e) {
      console.error(e);
      throw new Error(
        `Failed to load fs/promises. TextLoader available only on environment 'node'. It appears you are running environment '${getEnv()}'. See https://<link to docs> for alternatives.`
      );
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/fs/multi_file.ts | import { extname, resolve } from "node:path";
import { stat } from "node:fs/promises";
import { Document } from "@langchain/core/documents";
import { BaseDocumentLoader } from "../base.js";
import { type LoadersMapping, UnknownHandling } from "./directory.js";
/**
 * Loads documents from an explicit list of file paths, dispatching each
 * path to a loader chosen by its file extension.
 * @example
 * ```typescript
 *
 * const multiFileLoader = new MultiFileLoader(
 *   ["path/to/file1.pdf", "path/to/file2.txt"],
 *   {
 *     ".pdf": (path: string) => new PDFLoader(path),
 *   },
 * );
 *
 * const docs = await multiFileLoader.load();
 * console.log({ docs });
 *
 * ```
 */
export class MultiFileLoader extends BaseDocumentLoader {
  constructor(
    public filePaths: string[],
    public loaders: LoadersMapping,
    public unknown: UnknownHandling = UnknownHandling.Warn
  ) {
    super();
    const extensions = Object.keys(loaders);
    if (extensions.length === 0) {
      throw new Error("Must provide at least one loader");
    }
    // Extensions must match what `extname()` returns, i.e. start with a dot.
    for (const extension of extensions) {
      if (!extension.startsWith(".")) {
        throw new Error(`Extension must start with a dot: ${extension}`);
      }
    }
  }

  /**
   * Loads documents from the configured paths. Directories are skipped with
   * a warning. Files whose extension has no registered loader are handled
   * per `this.unknown`: ignored, warned about, or treated as an error.
   * @returns A promise that resolves to an array of loaded documents.
   */
  public async load(): Promise<Document[]> {
    const documents: Document[] = [];
    for (const filePath of this.filePaths) {
      const fullPath = resolve(filePath);
      if ((await stat(fullPath)).isDirectory()) {
        console.warn(`Ignoring directory: ${fullPath}`);
        continue;
      }
      const makeLoader = this.loaders[extname(fullPath)];
      if (!makeLoader) {
        if (this.unknown === UnknownHandling.Warn) {
          console.warn(`Unknown file type: ${fullPath}`);
        } else if (this.unknown === UnknownHandling.Error) {
          throw new Error(`Unknown file type: ${fullPath}`);
        } else if (this.unknown !== UnknownHandling.Ignore) {
          // Defensive: `unknown` held a value outside the enumeration.
          throw new Error(`Unknown unknown handling: ${this.unknown}`);
        }
        continue;
      }
      const loaded = await makeLoader(fullPath).load();
      documents.push(...loaded);
    }
    return documents;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/fs/text.ts | import type { readFile as ReadFileT } from "node:fs/promises";
import { Document } from "@langchain/core/documents";
import { getEnv } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "../base.js";
/**
 * Loads a text file (or `Blob`) into `Document`s. The raw text is run
 * through `parse()` (one string per output document); metadata records the
 * source (file path, or `"blob"` + MIME type) and, when `parse()` yields
 * multiple pages, a 1-based `line` number per page.
 * @example
 * ```typescript
 * const loader = new TextLoader("src/document_loaders/example_data/example.txt");
 * const docs = await loader.load();
 * ```
 */
export class TextLoader extends BaseDocumentLoader {
  constructor(public filePathOrBlob: string | Blob) {
    super();
  }

  /**
   * Default parse: the whole text becomes a single page. Subclasses
   * override this to split the text differently.
   * @param raw The raw text to be parsed.
   * @returns A promise resolving to the page strings.
   */
  protected async parse(raw: string): Promise<string[]> {
    return [raw];
  }

  /**
   * Reads the text, parses it into pages, validates each page is a string,
   * and wraps every page in a `Document`.
   * @returns A promise that resolves to an array of `Document` instances.
   */
  public async load(): Promise<Document[]> {
    const source = this.filePathOrBlob;
    let text: string;
    let metadata: Record<string, string>;
    if (typeof source === "string") {
      const { readFile } = await TextLoader.imports();
      text = await readFile(source, "utf8");
      metadata = { source };
    } else {
      text = await source.text();
      metadata = { source: "blob", blobType: source.type };
    }

    const pages = await this.parse(text);
    // Guard against subclasses returning non-string pages.
    pages.forEach((pageContent, i) => {
      if (typeof pageContent !== "string") {
        throw new Error(
          `Expected string, at position ${i} got ${typeof pageContent}`
        );
      }
    });

    // Only attach a `line` number when there is more than one page.
    const singlePage = pages.length === 1;
    return pages.map(
      (pageContent, i) =>
        new Document({
          pageContent,
          metadata: singlePage ? metadata : { ...metadata, line: i + 1 },
        })
    );
  }

  /**
   * Dynamically imports `readFile` from `node:fs/promises`; throws a
   * descriptive error on non-Node environments.
   * @returns A promise that resolves to an object containing the `readFile` function from the `node:fs/promises` module.
   */
  static async imports(): Promise<{
    readFile: typeof ReadFileT;
  }> {
    try {
      const fsPromises = await import("node:fs/promises");
      return { readFile: fsPromises.readFile };
    } catch (e) {
      console.error(e);
      throw new Error(
        `Failed to load fs/promises. TextLoader available only on environment 'node'. It appears you are running environment '${getEnv()}'. See https://<link to docs> for alternatives.`
      );
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/fs/json.ts | import jsonpointer from "jsonpointer";
import { TextLoader } from "./text.js";
/**
 * Class that extends the `TextLoader` class. It represents a document
 * loader that loads documents from JSON files. It has a constructor that
 * takes a `filePathOrBlob` parameter representing the path to the JSON
 * file or a `Blob` object, and an optional `pointers` parameter that
 * specifies the JSON pointers to extract.
 */
export class JSONLoader extends TextLoader {
  // JSON pointers (RFC 6901, e.g. "/from") selecting which subtrees to
  // extract strings from; empty means "extract every string in the file".
  public pointers: string[];

  constructor(filePathOrBlob: string | Blob, pointers: string | string[] = []) {
    super(filePathOrBlob);
    // Normalize so a single pointer may be passed as a bare string.
    this.pointers = Array.isArray(pointers) ? pointers : [pointers];
  }

  /**
   * Parses the raw JSON text and extracts string values. With no pointers
   * configured, every string anywhere in the document is returned; with
   * pointers, only strings under (or matching) a pointer are returned.
   * @param raw The raw JSON string to parse.
   * @returns A promise that resolves to an array of strings.
   */
  protected async parse(raw: string): Promise<string[]> {
    const json = JSON.parse(raw.trim());
    // If there is no pointers specified we extract all strings we found
    const extractAllStrings = !(this.pointers.length > 0);
    const compiledPointers = this.pointers.map((pointer) =>
      jsonpointer.compile(pointer)
    );
    return this.extractArrayStringsFromObject(
      json,
      compiledPointers,
      extractAllStrings
    );
  }

  /**
   * If JSON pointers are specified, return all strings below any of them
   * and exclude all other nodes expect if they match a JSON pointer (to allow to extract strings from different levels)
   *
   * If no JSON pointer is specified then return all string in the object
   *
   * Recursion flags:
   * - `extractAllStrings`: when true, every string reachable from `json`
   *   is collected (set once a pointer-targeted subtree has been entered,
   *   or when no pointers were configured at all).
   * - `keyHasBeenFound`: set after the first level at which a pointer
   *   matched; prevents descending into untargeted siblings below that
   *   level while still allowing pointers to match at deeper levels first.
   */
  private extractArrayStringsFromObject(
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    json: any,
    pointers: jsonpointer[],
    extractAllStrings = false,
    keyHasBeenFound = false
  ): string[] {
    // null/undefined/""/0/false all yield nothing.
    if (!json) {
      return [];
    }
    if (typeof json === "string" && extractAllStrings) {
      return [json];
    }
    // Arrays in collect-all mode: concatenate results per element.
    if (Array.isArray(json) && extractAllStrings) {
      let extractedString: string[] = [];
      for (const element of json) {
        extractedString = extractedString.concat(
          this.extractArrayStringsFromObject(element, pointers, true)
        );
      }
      return extractedString;
    }
    if (typeof json === "object") {
      if (extractAllStrings) {
        // Collect-all mode: recurse over all values (handled by the array
        // branch above after Object.values).
        return this.extractArrayStringsFromObject(
          Object.values(json),
          pointers,
          true
        );
      }
      // Split this level's values into pointer-targeted vs the rest
      // (comparison is by reference identity).
      const targetedEntries = this.getTargetedEntries(json, pointers);
      const thisLevelEntries = Object.values(json) as object[];
      const notTargetedEntries = thisLevelEntries.filter(
        (entry: object) => !targetedEntries.includes(entry)
      );

      let extractedStrings: string[] = [];
      // If we found a targeted entry, we extract all strings from it
      if (targetedEntries.length > 0) {
        for (const oneEntry of targetedEntries) {
          extractedStrings = extractedStrings.concat(
            this.extractArrayStringsFromObject(oneEntry, pointers, true, true)
          );
        }
        // Untargeted siblings may still contain deeper pointer matches, so
        // keep descending but without collecting (extractAllStrings=false).
        for (const oneEntry of notTargetedEntries) {
          extractedStrings = extractedStrings.concat(
            this.extractArrayStringsFromObject(oneEntry, pointers, false, true)
          );
        }
      } else if (extractAllStrings || !keyHasBeenFound) {
        // No match at this level: keep searching only if no match has been
        // found at a shallower level yet.
        for (const oneEntry of notTargetedEntries) {
          extractedStrings = extractedStrings.concat(
            this.extractArrayStringsFromObject(
              oneEntry,
              pointers,
              extractAllStrings
            )
          );
        }
      }

      return extractedStrings;
    }
    // Numbers, booleans, and strings outside collect-all mode are skipped.
    return [];
  }

  /**
   * Method that takes a `json` object and an array of `pointers` as
   * parameters and returns an array of targeted entries. It iterates over
   * the JSON pointers and uses the `jsonpointer.get()` function to get the
   * targeted entries from the JSON object.
   * @param json The JSON object to get targeted entries from.
   * @param pointers The JSON pointers to get targeted entries.
   * @returns An array of targeted entries.
   */
  private getTargetedEntries(json: object, pointers: jsonpointer[]): object[] {
    const targetEntries = [];
    for (const pointer of pointers) {
      const targetedEntry = pointer.get(json);
      // NOTE(review): falsy-but-valid targets ("" or 0) are dropped here —
      // presumably intentional since only strings are extracted downstream.
      if (targetedEntry) {
        targetEntries.push(targetedEntry);
      }
    }
    return targetEntries;
  }
}
/**
 * Loads documents from JSON Lines input: one JSON value per line, with a
 * single JSON pointer selecting the field to extract from each record.
 */
export class JSONLinesLoader extends TextLoader {
  constructor(filePathOrBlob: string | Blob, public pointer: string) {
    super(filePathOrBlob);
  }

  /**
   * Splits the raw text into lines, parses each non-blank line as JSON,
   * then applies the configured JSON pointer to every record.
   * @param raw The raw JSON Lines string to parse.
   * @returns A promise that resolves to an array of extracted values.
   */
  protected async parse(raw: string): Promise<string[]> {
    // Parse all lines first, skipping blank/whitespace-only ones.
    const records: unknown[] = [];
    for (const line of raw.split("\n")) {
      const trimmed = line.trim();
      if (trimmed.length > 0) {
        records.push(JSON.parse(trimmed));
      }
    }
    const compiled = jsonpointer.compile(this.pointer);
    return records.map((record) => compiled.get(record));
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/fs/directory.ts | import type { extname as ExtnameT, resolve as ResolveT } from "node:path";
import type { readdir as ReaddirT } from "node:fs/promises";
import { Document } from "@langchain/core/documents";
import { getEnv } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "../base.js";
// TypeScript enums are not tree-shakeable, so doing this instead
// See https://bargsten.org/jsts/enums/
export const UnknownHandling = {
  Ignore: "ignore",
  Warn: "warn",
  Error: "error",
} as const;

/**
 * An enumeration of possible handling strategies for unknown file types.
 */
// eslint-disable-next-line @typescript-eslint/no-redeclare
export type UnknownHandling =
  (typeof UnknownHandling)[keyof typeof UnknownHandling];

/**
 * A mapping of file extensions to loader functions. Each loader function
 * takes a file path as a parameter and returns a `BaseDocumentLoader`
 * instance.
 */
export interface LoadersMapping {
  [extension: string]: (filePath: string) => BaseDocumentLoader;
}
/**
 * Loads documents from every file in a directory, dispatching each file to
 * a loader chosen by its extension and (optionally) recursing into
 * subdirectories.
 * @example
 * ```typescript
 *
 * const directoryLoader = new DirectoryLoader(
 *   "src/document_loaders/example_data/",
 *   {
 *     ".pdf": (path: string) => new PDFLoader(path),
 *   },
 * );
 *
 * const docs = await directoryLoader.load();
 * console.log({ docs });
 *
 * ```
 */
export class DirectoryLoader extends BaseDocumentLoader {
  constructor(
    public directoryPath: string,
    public loaders: LoadersMapping,
    public recursive: boolean = true,
    public unknown: UnknownHandling = UnknownHandling.Warn
  ) {
    super();
    const extensions = Object.keys(loaders);
    if (extensions.length === 0) {
      throw new Error("Must provide at least one loader");
    }
    // Extensions must match what `extname()` returns, i.e. start with a dot.
    for (const extension of extensions) {
      if (!extension.startsWith(".")) {
        throw new Error(`Extension must start with a dot: ${extension}`);
      }
    }
  }

  /**
   * Walks the directory. Subdirectories are recursed into when
   * `this.recursive` is set; files are dispatched by extension; files with
   * no registered loader are handled per `this.unknown` (ignore / warn /
   * error).
   * @returns A promise that resolves to an array of loaded documents.
   */
  public async load(): Promise<Document[]> {
    const { readdir, extname, resolve } = await DirectoryLoader.imports();
    const entries = await readdir(this.directoryPath, { withFileTypes: true });
    const documents: Document[] = [];

    for (const entry of entries) {
      const fullPath = resolve(this.directoryPath, entry.name);

      if (entry.isDirectory()) {
        if (!this.recursive) continue;
        const subLoader = new DirectoryLoader(
          fullPath,
          this.loaders,
          this.recursive,
          this.unknown
        );
        documents.push(...(await subLoader.load()));
        continue;
      }

      // Non-file entries (sockets, symlinks, …) fall through to the
      // "unknown" handling below along with unrecognized extensions.
      const makeLoader = this.loaders[extname(entry.name)];
      if (makeLoader) {
        documents.push(...(await makeLoader(fullPath).load()));
      } else if (this.unknown === UnknownHandling.Warn) {
        console.warn(`Unknown file type: ${entry.name}`);
      } else if (this.unknown === UnknownHandling.Error) {
        throw new Error(`Unknown file type: ${entry.name}`);
      } else if (this.unknown !== UnknownHandling.Ignore) {
        // Defensive: `unknown` held a value outside the enumeration.
        throw new Error(`Unknown unknown handling: ${this.unknown}`);
      }
    }
    return documents;
  }

  /**
   * Dynamically imports the path/fs helpers; throws a descriptive error on
   * non-Node environments where they are unavailable.
   * @returns A promise that resolves to an object containing the imported functions.
   */
  static async imports(): Promise<{
    readdir: typeof ReaddirT;
    extname: typeof ExtnameT;
    resolve: typeof ResolveT;
  }> {
    try {
      const { extname, resolve } = await import("node:path");
      const { readdir } = await import("node:fs/promises");
      return { readdir, extname, resolve };
    } catch (e) {
      console.error(e);
      throw new Error(
        `Failed to load fs/promises. DirectoryLoader available only on environment 'node'. It appears you are running environment '${getEnv()}'. See https://<link to docs> for alternatives.`
      );
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/jsonl.test.ts | import * as url from "node:url";
import * as path from "node:path";
import { test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { JSONLinesLoader } from "../fs/json.js";
// Loads the JSONL fixture by path and checks count, metadata, and the
// pointer-extracted content of the first record.
test("Test JSON loader from file", async () => {
  const dir = path.dirname(url.fileURLToPath(import.meta.url));
  const filePath = path.resolve(
    dir,
    "./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.jsonl"
  );
  const docs = await new JSONLinesLoader(filePath, "/html").load();
  expect(docs.length).toBe(32);
  expect(docs[0]).toEqual(
    new Document({
      metadata: { source: filePath, line: 1 },
      pageContent:
        "<i>Corruption discovered at the core of the Banking Clan!</i>",
    })
  );
});
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/jsonl-blob.test.ts | import * as url from "node:url";
import * as path from "node:path";
import * as fs from "node:fs/promises";
import { test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { JSONLinesLoader } from "../fs/json.js";
// Loads the JSONL fixture through a Blob (instead of a file path) and
// checks that blob-derived metadata (source: "blob", blobType) is attached.
test("Test JSONL loader from blob", async () => {
  const filePath = path.resolve(
    path.dirname(url.fileURLToPath(import.meta.url)),
    "./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.jsonl"
  );
  const loader = new JSONLinesLoader(
    new Blob([await fs.readFile(filePath)], { type: "application/jsonl+json" }),
    "/html"
  );
  const docs = await loader.load();
  expect(docs.length).toBe(32);
  expect(docs[0]).toEqual(
    new Document({
      metadata: { source: "blob", blobType: "application/jsonl+json", line: 1 },
      pageContent:
        "<i>Corruption discovered at the core of the Banking Clan!</i>",
    })
  );
});

// Same loader fed inline JSONL text; the inline snapshots pin the exact
// Document shape, so the assertion bodies are left unchanged.
// NOTE(review): both tests in this file share the title
// "Test JSONL loader from blob" — consider renaming one for clearer reports.
test("Test JSONL loader from blob", async () => {
  const loader = new JSONLinesLoader(
    new Blob(
      [
        `{"html": "This is a sentence."}
{"html": "This is another sentence."}`,
      ],
      { type: "application/jsonl+json" }
    ),
    "/html"
  );
  const docs = await loader.load();
  expect(docs.length).toBe(2);
  expect(docs[0]).toMatchInlineSnapshot(`
Document {
  "id": undefined,
  "metadata": {
    "blobType": "application/jsonl+json",
    "line": 1,
    "source": "blob",
  },
  "pageContent": "This is a sentence.",
}
`);
  expect(docs[1]).toMatchInlineSnapshot(`
Document {
  "id": undefined,
  "metadata": {
    "blobType": "application/jsonl+json",
    "line": 2,
    "source": "blob",
  },
  "pageContent": "This is another sentence.",
}
`);
});
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/json.test.ts | import * as url from "node:url";
import * as path from "node:path";
import { test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { JSONLoader } from "../fs/json.js";
// All five tests read one of the same two fixture files, so the paths are
// resolved once here instead of per test.
const exampleDataDir = path.resolve(
  path.dirname(url.fileURLToPath(import.meta.url)),
  "./example_data"
);
const starWarsPath = path.resolve(
  exampleDataDir,
  "Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json"
);
const complexPath = path.resolve(exampleDataDir, "complex.json");

// No pointers: every string in the JSON array is extracted.
test("Test JSON loader", async () => {
  const docs = await new JSONLoader(starWarsPath).load();
  expect(docs.length).toBe(32);
  expect(docs[0]).toEqual(
    new Document({
      metadata: { source: starWarsPath, line: 1 },
      pageContent:
        "<i>Corruption discovered at the core of the Banking Clan!</i>",
    })
  );
});

// No pointers on a nested object: all strings at every level come out,
// in document order.
test("Test JSON loader for complex json without keys", async () => {
  const docs = await new JSONLoader(complexPath).load();
  expect(docs.length).toBe(10);
  expect(docs[0]).toEqual(
    new Document({
      metadata: { source: complexPath, line: 1 },
      pageContent: "BD 2023 SUMMER",
    })
  );
  expect(docs[1]).toEqual(
    new Document({
      metadata: { source: complexPath, line: 2 },
      pageContent: "LinkedIn Job",
    })
  );
  expect(docs[2]).toEqual(
    new Document({
      metadata: { source: complexPath, line: 3 },
      pageContent: "IMPORTANT",
    })
  );
});

// A pointer that matches nothing yields no documents.
test("Test JSON loader for complex json with one key that points nothing", async () => {
  const docs = await new JSONLoader(complexPath, ["/plop"]).load();
  expect(docs.length).toBe(0);
});

// A single matching pointer restricts extraction to that field.
test("Test JSON loader for complex json with one key that exists", async () => {
  const docs = await new JSONLoader(complexPath, ["/from"]).load();
  expect(docs.length).toBe(2);
  expect(docs[1]).toEqual(
    new Document({
      metadata: { source: complexPath, line: 2 },
      pageContent: "LinkedIn Job2",
    })
  );
});

// Multiple pointers at the same level are all honored.
test("Test JSON loader for complex json with two keys that exists", async () => {
  const docs = await new JSONLoader(complexPath, ["/from", "/labels"]).load();
  expect(docs.length).toBe(6);
  expect(docs[3]).toEqual(
    new Document({
      metadata: { source: complexPath, line: 4 },
      pageContent: "INBOX",
    })
  );
});

// Pointers may match at different nesting depths.
test("Test JSON loader for complex json with two existing keys on different level", async () => {
  const docs = await new JSONLoader(complexPath, ["/from", "/surname"]).load();
  expect(docs.length).toBe(3);
  expect(docs[2]).toEqual(
    new Document({
      metadata: { source: complexPath, line: 3 },
      pageContent: "bob",
    })
  );
});
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/directory.test.ts | import * as url from "node:url";
import * as path from "node:path";
import { test, expect } from "@jest/globals";
import { DirectoryLoader, UnknownHandling } from "../fs/directory.js";
import { TextLoader } from "../fs/text.js";
import { JSONLoader } from "../fs/json.js";
// Non-recursive directory load over the fixture directory; unknown file
// types are silently ignored. Sources are compared after sorting because
// readdir order is platform-dependent.
test("Test Directory loader", async () => {
  const directoryPath = path.resolve(
    path.dirname(url.fileURLToPath(import.meta.url)),
    "./example_data"
  );
  const loader = new DirectoryLoader(
    directoryPath,
    {
      ".txt": (p) => new TextLoader(p),
      ".json": (p) => new JSONLoader(p),
    },
    false,
    UnknownHandling.Ignore
  );
  const docs = await loader.load();
  expect(docs.length).toBe(43);

  const starWarsPath = path.resolve(
    directoryPath,
    "Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json"
  );
  const complexPath = path.resolve(directoryPath, "complex.json");
  const expectedSources = [
    // JSON fixtures produce one document per extracted string.
    ...new Array(32).fill(starWarsPath),
    ...new Array(10).fill(complexPath),
    // TXT fixture produces a single document.
    path.resolve(directoryPath, "example.txt"),
  ];
  expect(docs.map((d) => d.metadata.source).sort()).toEqual(expectedSources);
});
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/json-blob.test.ts | import * as url from "node:url";
import * as path from "node:path";
import * as fs from "node:fs/promises";
import { test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { JSONLoader } from "../fs/json.js";
// Loads the JSON fixture through a Blob and checks blob-derived metadata.
// NOTE(review): all three tests in this file share the title
// "Test JSON loader from blob" — consider distinct names for clearer reports.
test("Test JSON loader from blob", async () => {
  const filePath = path.resolve(
    path.dirname(url.fileURLToPath(import.meta.url)),
    "./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json"
  );
  const loader = new JSONLoader(
    new Blob([await fs.readFile(filePath)], { type: "application/json" })
  );
  const docs = await loader.load();
  expect(docs.length).toBe(32);
  expect(docs[0]).toEqual(
    new Document({
      metadata: { source: "blob", blobType: "application/json", line: 1 },
      pageContent:
        "<i>Corruption discovered at the core of the Banking Clan!</i>",
    })
  );
});

// Inline JSON with a string array; snapshots pin the exact Document shape,
// so the assertion bodies are left unchanged.
test("Test JSON loader from blob", async () => {
  const loader = new JSONLoader(
    new Blob(
      [
        `{
  "texts": ["This is a sentence.", "This is another sentence."]
}`,
      ],
      { type: "application/json" }
    )
  );
  const docs = await loader.load();
  expect(docs.length).toBe(2);
  expect(docs[0]).toMatchInlineSnapshot(`
Document {
  "id": undefined,
  "metadata": {
    "blobType": "application/json",
    "line": 1,
    "source": "blob",
  },
  "pageContent": "This is a sentence.",
}
`);
  expect(docs[1]).toMatchInlineSnapshot(`
Document {
  "id": undefined,
  "metadata": {
    "blobType": "application/json",
    "line": 2,
    "source": "blob",
  },
  "pageContent": "This is another sentence.",
}
`);
});

// Inline nested JSON with no pointers: every string at every level is
// extracted in document order.
test("Test JSON loader from blob", async () => {
  const loader = new JSONLoader(
    new Blob(
      [
        `{
  "1": {
    "body": "BD 2023 SUMMER",
    "from": "LinkedIn Job",
    "labels": ["IMPORTANT", "CATEGORY_UPDATES", "INBOX"]
  },
  "2": {
    "body": "Intern, Treasury and other roles are available",
    "from": "LinkedIn Job2",
    "labels": ["IMPORTANT"],
    "other": {
      "name": "plop",
      "surname": "bob"
    }
  }
}`,
      ],
      { type: "application/json" }
    )
  );
  const docs = await loader.load();
  expect(docs.length).toBe(10);
  expect(docs[0]).toMatchInlineSnapshot(`
Document {
  "id": undefined,
  "metadata": {
    "blobType": "application/json",
    "line": 1,
    "source": "blob",
  },
  "pageContent": "BD 2023 SUMMER",
}
`);
  expect(docs[1]).toMatchInlineSnapshot(`
Document {
  "id": undefined,
  "metadata": {
    "blobType": "application/json",
    "line": 2,
    "source": "blob",
  },
  "pageContent": "LinkedIn Job",
}
`);
});
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/multi_file.test.ts | import * as url from "node:url";
import * as path from "node:path";
import { test, expect } from "@jest/globals";
import { MultiFileLoader } from "../fs/multi_file.js";
import { TextLoader } from "../fs/text.js";
import { JSONLoader } from "../fs/json.js";
import { UnknownHandling } from "../fs/directory.js";
test("Test MultiFileLoader", async () => {
const baseDirectory = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data"
);
const filePaths = [
path.resolve(
baseDirectory,
"Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json"
),
path.resolve(baseDirectory, "complex.json"),
path.resolve(baseDirectory, "example.txt"),
];
const loader = new MultiFileLoader(
filePaths,
{
".txt": (p) => new TextLoader(p),
".json": (p) => new JSONLoader(p),
},
UnknownHandling.Ignore
);
const docs = await loader.load();
expect(docs.length).toBe(43);
const expectedSources = [
// JSON
...Array.from({ length: 32 }, (_) =>
path.resolve(
baseDirectory,
"Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json"
)
),
...Array.from({ length: 10 }, (_) =>
path.resolve(baseDirectory, "complex.json")
),
// TXT
path.resolve(baseDirectory, "example.txt"),
];
expect(docs.map((d) => d.metadata.source).sort()).toEqual(expectedSources);
});
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/text-blob.test.ts | import { test, expect } from "@jest/globals";
import { TextLoader } from "../fs/text.js";
test("Test Text loader from blob", async () => {
const loader = new TextLoader(
new Blob(["Hello, world!"], { type: "text/plain" })
);
const docs = await loader.load();
expect(docs.length).toBe(1);
expect(docs[0].pageContent).toBe("Hello, world!");
expect(docs[0].metadata).toMatchInlineSnapshot(`
{
"blobType": "text/plain",
"source": "blob",
}
`);
});
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/text.test.ts | import { test, expect } from "@jest/globals";
import * as url from "node:url";
import * as path from "node:path";
import { TextLoader } from "../fs/text.js";
test("Test Text loader from file", async () => {
const filePath = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data/example.txt"
);
const loader = new TextLoader(filePath);
const docs = await loader.load();
expect(docs.length).toBe(1);
expect(docs[0].pageContent).toMatchInlineSnapshot(`
"Foo
Bar
Baz
"
`);
expect(docs[0].metadata).toMatchInlineSnapshot(`
{
"source": "${filePath}",
}
`);
});
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders/tests | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/example_data/complex.json | {
"1": {
"body": "BD 2023 SUMMER",
"from": "LinkedIn Job",
"labels": ["IMPORTANT", "CATEGORY_UPDATES", "INBOX"]
},
"2": {
"body": "Intern, Treasury and other roles are available",
"from": "LinkedIn Job2",
"labels": ["IMPORTANT"],
"other": {
"name": "plop",
"surname": "bob"
}
}
}
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders/tests | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/example_data/example.txt | Foo
Bar
Baz
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders/tests | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json | [
"<i>Corruption discovered at the core of the Banking Clan!</i>",
"<i>Reunited, Rush Clovis and Senator Amidala</i>",
"<i>discover the full extent of the deception.</i>",
"<i>Anakin Skywalker is sent to the rescue!</i>",
"<i>He refuses to trust Clovis and asks Padm not to work with him.</i>",
"<i>Determined to save the banks, she refuses her husband's request,</i>",
"<i>throwing their relationship into turmoil.</i>",
"<i>Voted for by both the Separatists and the Republic,</i>",
"<i>Rush Clovis is elected new leader of the Galactic Banking Clan.</i>",
"<i>Now, all attention is focused on Scipio</i>",
"<i>as the important transfer of power begins.</i>",
"Welcome back to Scipio, Rush Clovis.",
"Our Separatist government has great hopes for you.",
"Thank you, Senator.",
"Only you and Senator Amidala",
"will be allowed to monitor the exchange proceedings.",
"No forces on either side",
"will be allowed into the Neutral Zone.",
"Senator Amidala, we will be right here",
"if you should need us.",
"Thank you, Commander.",
"It is with great disappointment",
"that I implement the following verdict.",
"By decree of the Muun people,",
"the five representatives standing before me",
"are found guilty of embezzlement.",
"They shall be imprisoned forthwith,",
"and control of the banks shall transfer immediately",
"to Rush Clovis",
"under the guidance of the Muun government.",
"We are grateful to you, Clovis,",
"for everything you have done for the Muun people."
]
|
0 | lc_public_repos/langchainjs/langchain/src/document_loaders/tests | lc_public_repos/langchainjs/langchain/src/document_loaders/tests/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.jsonl | { "id": 1, "timestamp": "00:00:17,580 --> 00:00:21,920", "html": "<i>Corruption discovered at the core of the Banking Clan!</i>" }
{ "id": 2, "timestamp": "00:00:21,950 --> 00:00:24,620", "html": "<i>Reunited, Rush Clovis and Senator Amidala</i>" }
{ "id": 3, "timestamp": "00:00:24,660 --> 00:00:27,830", "html": "<i>discover the full extent of the deception.</i>" }
{ "id": 4, "timestamp": "00:00:27,870 --> 00:00:30,960", "html": "<i>Anakin Skywalker is sent to the rescue!</i>" }
{ "id": 5, "timestamp": "00:00:31,000 --> 00:00:35,050", "html": "<i>He refuses to trust Clovis and asks Padm not to work with him.</i>" }
{ "id": 6, "timestamp": "00:00:35,090 --> 00:00:39,050", "html": "<i>Determined to save the banks, she refuses her husband's request,</i>" }
{ "id": 7, "timestamp": "00:00:39,090 --> 00:00:42,800", "html": "<i>throwing their relationship into turmoil.</i>" }
{ "id": 8, "timestamp": "00:00:42,840 --> 00:00:45,890", "html": "<i>Voted for by both the Separatists and the Republic,</i>" }
{ "id": 9, "timestamp": "00:00:45,930 --> 00:00:50,260", "html": "<i>Rush Clovis is elected new leader of the Galactic Banking Clan.</i>" }
{ "id": 10, "timestamp": "00:00:50,310 --> 00:00:53,320", "html": "<i>Now, all attention is focused on Scipio</i>" }
{ "id": 11, "timestamp": "00:00:53,350 --> 00:00:56,350", "html": "<i>as the important transfer of power begins.</i>" }
{ "id": 12, "timestamp": "00:01:20,410 --> 00:01:24,330", "html": "Welcome back to Scipio, Rush Clovis." }
{ "id": 13, "timestamp": "00:01:24,370 --> 00:01:27,240", "html": "Our Separatist government has great hopes for you." }
{ "id": 14, "timestamp": "00:01:27,290 --> 00:01:30,080", "html": "Thank you, Senator." }
{ "id": 15, "timestamp": "00:01:30,120 --> 00:01:31,750", "html": "Only you and Senator Amidala" }
{ "id": 16, "timestamp": "00:01:31,790 --> 00:01:34,330", "html": "will be allowed to monitor the exchange proceedings." }
{ "id": 17, "timestamp": "00:01:34,380 --> 00:01:36,050", "html": "No forces on either side" }
{ "id": 18, "timestamp": "00:01:36,080 --> 00:01:38,540", "html": "will be allowed into the Neutral Zone." }
{ "id": 19, "timestamp": "00:01:38,590 --> 00:01:40,750", "html": "Senator Amidala, we will be right here" }
{ "id": 20, "timestamp": "00:01:40,800 --> 00:01:41,850", "html": "if you should need us." }
{ "id": 21, "timestamp": "00:01:41,880 --> 00:01:43,210", "html": "Thank you, Commander." }
{ "id": 22, "timestamp": "00:02:06,600 --> 00:02:09,190", "html": "It is with great disappointment" }
{ "id": 23, "timestamp": "00:02:09,230 --> 00:02:13,020", "html": "that I implement the following verdict." }
{ "id": 24, "timestamp": "00:02:13,070 --> 00:02:15,490", "html": "By decree of the Muun people," }
{ "id": 25, "timestamp": "00:02:15,530 --> 00:02:18,570", "html": "the five representatives standing before me" }
{ "id": 26, "timestamp": "00:02:18,610 --> 00:02:21,280", "html": "are found guilty of embezzlement." }
{ "id": 27, "timestamp": "00:02:21,320 --> 00:02:24,450", "html": "They shall be imprisoned forthwith," }
{ "id": 28, "timestamp": "00:02:24,490 --> 00:02:27,660", "html": "and control of the banks shall transfer immediately" }
{ "id": 29, "timestamp": "00:02:27,700 --> 00:02:29,580", "html": "to Rush Clovis" }
{ "id": 30, "timestamp": "00:02:29,620 --> 00:02:33,080", "html": "under the guidance of the Muun government." }
{ "id": 31, "timestamp": "00:02:41,210 --> 00:02:43,250", "html": "We are grateful to you, Clovis," }
{ "id": 32, "timestamp": "00:02:43,290 --> 00:02:46,630", "html": "for everything you have done for the Muun people." } |
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/callbacks/promises.ts | export * from "@langchain/core/callbacks/promises";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/callbacks/index.ts | export { type Run, type RunType, BaseTracer } from "./handlers/tracer.js";
export { ConsoleCallbackHandler } from "./handlers/console.js";
export { RunCollectorCallbackHandler } from "./handlers/run_collector.js";
export { LangChainTracer } from "./handlers/tracer_langchain.js";
export { awaitAllCallbacks, consumeCallback } from "./promises.js";
|
0 | lc_public_repos/langchainjs/langchain/src/callbacks | lc_public_repos/langchainjs/langchain/src/callbacks/handlers/tracer.ts | export * from "@langchain/core/tracers/base";
|
0 | lc_public_repos/langchainjs/langchain/src/callbacks | lc_public_repos/langchainjs/langchain/src/callbacks/handlers/tracer_langchain.ts | export * from "@langchain/core/tracers/tracer_langchain";
|
0 | lc_public_repos/langchainjs/langchain/src/callbacks | lc_public_repos/langchainjs/langchain/src/callbacks/handlers/console.ts | export * from "@langchain/core/tracers/console";
|
0 | lc_public_repos/langchainjs/langchain/src/callbacks | lc_public_repos/langchainjs/langchain/src/callbacks/handlers/run_collector.ts | export * from "@langchain/core/tracers/run_collector";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/chains/retrieval.ts | import type { BaseRetrieverInterface } from "@langchain/core/retrievers";
import {
type Runnable,
RunnableSequence,
type RunnableInterface,
RunnablePassthrough,
} from "@langchain/core/runnables";
import type { BaseMessage } from "@langchain/core/messages";
import type { DocumentInterface, Document } from "@langchain/core/documents";
/**
 * Parameters for the createRetrievalChain method.
 */
export type CreateRetrievalChainParams<RunOutput> = {
  /**
   * Retriever-like object that returns a list of documents. Should
   * either be a subclass of BaseRetriever or a Runnable that returns
   * a list of documents. If a subclass of BaseRetriever, then it
   * is expected that an `input` key be passed in - this is what
   * will be used to pass into the retriever. If this is NOT a
   * subclass of BaseRetriever, then all the inputs will be passed
   * into this runnable, meaning that the runnable should take an object
   * as input.
   */
  retriever:
    | BaseRetrieverInterface
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    | RunnableInterface<Record<string, any>, DocumentInterface[]>;
  /**
   * Runnable that takes inputs and produces a string output.
   * The inputs to this will be any original inputs to this chain, a new
   * context key with the retrieved documents, and chat_history (if not present
   * in the inputs) with a value of `[]` (to easily enable conversational
   * retrieval).
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  combineDocsChain: RunnableInterface<Record<string, any>, RunOutput>;
};
/**
 * Structural type guard: any non-nullish value exposing a
 * `getRelevantDocuments` function is treated as a BaseRetrieverInterface.
 */
function isBaseRetriever(x: unknown): x is BaseRetrieverInterface {
  if (!x) {
    return false;
  }
  const candidate = x as BaseRetrieverInterface;
  return typeof candidate.getRelevantDocuments === "function";
}
/**
 * Create a retrieval chain that retrieves documents and then passes them on.
 * @param {CreateRetrievalChainParams} params A params object
 * containing a retriever and a combineDocsChain.
 * @returns An LCEL Runnable which returns an object
 * containing at least `context` and `answer` keys.
 * @example
 * ```typescript
 * // yarn add langchain @langchain/openai
 *
 * import { ChatOpenAI } from "@langchain/openai";
 * import { pull } from "langchain/hub";
 * import { createRetrievalChain } from "langchain/chains/retrieval";
 * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
 *
 * const retrievalQAChatPrompt = await pull("langchain-ai/retrieval-qa-chat");
 * const llm = new ChatOpenAI({});
 * const retriever = ...
 * const combineDocsChain = await createStuffDocumentsChain(...);
 * const retrievalChain = await createRetrievalChain({
 *   retriever,
 *   combineDocsChain,
 * });
 * const response = await retrievalChain.invoke({ input: "..." });
 * ```
 */
export async function createRetrievalChain<RunOutput>({
  retriever,
  combineDocsChain,
}: CreateRetrievalChainParams<RunOutput>): Promise<
  Runnable<
    { input: string; chat_history?: BaseMessage[] | string } & {
      [key: string]: unknown;
    },
    { context: Document[]; answer: RunOutput } & { [key: string]: unknown }
  >
> {
  let retrieveDocumentsChain: Runnable<{ input: string }, DocumentInterface[]>;
  if (isBaseRetriever(retriever)) {
    // BaseRetriever-style retrievers take a plain string query, so pluck the
    // `input` key from the chain inputs before invoking it.
    retrieveDocumentsChain = RunnableSequence.from([
      (input) => input.input,
      retriever,
    ]);
  } else {
    // Runnable retrievers receive the full input object as-is.
    // TODO: Fix typing by adding withConfig to core RunnableInterface
    retrieveDocumentsChain = retriever as Runnable;
  }
  const retrievalChain = RunnableSequence.from<{
    input: string;
    chat_history?: BaseMessage[] | string;
  }>([
    // First stage: add `context` (the retrieved documents) and default
    // `chat_history` to [] so downstream prompts can rely on its presence.
    RunnablePassthrough.assign({
      context: retrieveDocumentsChain.withConfig({
        runName: "retrieve_documents",
      }),
      chat_history: (input) => input.chat_history ?? [],
    }),
    // Second stage: run the combine-docs chain over the augmented inputs and
    // expose its result under `answer`, preserving all other keys.
    RunnablePassthrough.assign({
      answer: combineDocsChain,
    }),
  ]).withConfig({ runName: "retrieval_chain" });
  return retrievalChain;
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/chains/openai_moderation.ts | import { type ClientOptions, OpenAIClient } from "@langchain/openai";
import { ChainValues } from "@langchain/core/utils/types";
import {
AsyncCaller,
AsyncCallerParams,
} from "@langchain/core/utils/async_caller";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { BaseChain, ChainInputs } from "./base.js";
/**
 * Interface for the input parameters of the OpenAIModerationChain class.
 */
export interface OpenAIModerationChainInput
  extends ChainInputs,
    AsyncCallerParams {
  /** OpenAI API key. Falls back to the OPENAI_API_KEY environment variable. */
  apiKey?: string;
  /** @deprecated Use "apiKey" instead. */
  openAIApiKey?: string;
  /** OpenAI organization ID, forwarded to the OpenAI client. */
  openAIOrganization?: string;
  /** When true, flagged content throws instead of returning an error string. */
  throwError?: boolean;
  /** Extra client options passed through to the underlying OpenAI client. */
  configuration?: ClientOptions;
}
/**
 * Class representing a chain for moderating text using the OpenAI
 * Moderation API. It extends the BaseChain class and implements the
 * OpenAIModerationChainInput interface.
 * @example
 * ```typescript
 * const moderation = new ChatOpenAIModerationChain({ throwError: true });
 *
 * const badString = "Bad naughty words from user";
 *
 * try {
 *   const { output: moderatedContent, results } = await moderation.call({
 *     input: badString,
 *   });
 *
 *   if (results[0].category_scores["harassment/threatening"] > 0.01) {
 *     throw new Error("Harassment detected!");
 *   }
 *
 *   const model = new OpenAI({ temperature: 0 });
 *   const promptTemplate = "Hello, how are you today {person}?";
 *   const prompt = new PromptTemplate({
 *     template: promptTemplate,
 *     inputVariables: ["person"],
 *   });
 *   const chain = new LLMChain({ llm: model, prompt });
 *   const response = await chain.call({ person: moderatedContent });
 *   console.log({ response });
 * } catch (error) {
 *   console.error("Naughty words detected!");
 * }
 * ```
 */
export class OpenAIModerationChain
  extends BaseChain
  implements OpenAIModerationChainInput
{
  static lc_name() {
    return "OpenAIModerationChain";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    // NOTE(review): only the deprecated `openAIApiKey` field is mapped to the
    // OPENAI_API_KEY secret here; the newer `apiKey` alias is not listed —
    // confirm whether serialized chains constructed with `apiKey` should also
    // have it masked.
    return {
      openAIApiKey: "OPENAI_API_KEY",
    };
  }

  // Key under which the text to moderate is read from the chain inputs.
  inputKey = "input";

  // Key under which the (possibly replaced) text is returned.
  outputKey = "output";

  openAIApiKey?: string;

  openAIOrganization?: string;

  // Options the OpenAI client was constructed with (configuration + key + org).
  clientConfig: ClientOptions;

  client: OpenAIClient;

  // When true, flagged content throws; otherwise a policy-violation string is
  // returned in place of the original text.
  throwError: boolean;

  // Wraps API calls to provide retry/concurrency handling.
  caller: AsyncCaller;

  constructor(fields?: OpenAIModerationChainInput) {
    super(fields);
    this.throwError = fields?.throwError ?? false;
    // Resolution order: new `apiKey` field, then deprecated `openAIApiKey`,
    // then the OPENAI_API_KEY environment variable.
    this.openAIApiKey =
      fields?.apiKey ??
      fields?.openAIApiKey ??
      getEnvironmentVariable("OPENAI_API_KEY");

    if (!this.openAIApiKey) {
      throw new Error("OpenAI API key not found");
    }

    this.openAIOrganization = fields?.openAIOrganization;
    this.clientConfig = {
      ...fields?.configuration,
      apiKey: this.openAIApiKey,
      organization: this.openAIOrganization,
    };
    this.client = new OpenAIClient(this.clientConfig);
    this.caller = new AsyncCaller(fields ?? {});
  }

  /**
   * Maps a moderation result onto the output text: if the text was flagged,
   * either throw (when `throwError` is set) or substitute a policy-violation
   * message for the text. Returns the text unchanged when not flagged.
   */
  _moderate(text: string, results: OpenAIClient.Moderation): string {
    if (results.flagged) {
      const errorStr = "Text was found that violates OpenAI's content policy.";
      if (this.throwError) {
        throw new Error(errorStr);
      } else {
        return errorStr;
      }
    }
    return text;
  }

  /**
   * Sends the input text to the OpenAI Moderation API and returns the
   * moderated output plus the raw moderation results.
   */
  async _call(values: ChainValues): Promise<ChainValues> {
    const text = values[this.inputKey];
    const moderationRequest: OpenAIClient.ModerationCreateParams = {
      input: text,
    };
    let mod;
    try {
      // Route through AsyncCaller so the API call gets retry handling.
      mod = await this.caller.call(() =>
        this.client.moderations.create(moderationRequest)
      );
    } catch (error) {
      // Normalize non-Error throwables into real Error instances.
      // eslint-disable-next-line no-instanceof/no-instanceof
      if (error instanceof Error) {
        throw error;
      } else {
        throw new Error(error as string);
      }
    }
    // A single input string is sent, so only the first result applies.
    const output = this._moderate(text, mod.results[0]);
    return {
      [this.outputKey]: output,
      results: mod.results,
    };
  }

  _chainType() {
    return "moderation_chain";
  }

  get inputKeys(): string[] {
    return [this.inputKey];
  }

  get outputKeys(): string[] {
    return [this.outputKey];
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/chains/vector_db_qa.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { VectorStoreInterface } from "@langchain/core/vectorstores";
import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
import { ChainValues } from "@langchain/core/utils/types";
import { BaseChain, ChainInputs } from "./base.js";
import { SerializedVectorDBQAChain } from "./serde.js";
import { loadQAStuffChain } from "./question_answering/load.js";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type LoadValues = Record<string, any>;
/**
 * Interface that extends the `ChainInputs` interface and defines the
 * input fields required for a VectorDBQAChain. It includes properties
 * such as `vectorstore`, `combineDocumentsChain`,
 * `returnSourceDocuments`, `k`, and `inputKey`.
 *
 * @deprecated
 * Switch to {@link https://js.langchain.com/docs/modules/chains/ | createRetrievalChain}
 * Will be removed in 0.2.0
 */
export interface VectorDBQAChainInput extends Omit<ChainInputs, "memory"> {
  /** Vector store used for the similarity search. */
  vectorstore: VectorStoreInterface;
  /** Chain used to combine the retrieved documents into an answer. */
  combineDocumentsChain: BaseChain;
  /** When true, retrieved documents are returned under `sourceDocuments`. */
  returnSourceDocuments?: boolean;
  /** Number of documents to retrieve (defaults to 4). */
  k?: number;
  /** Input key holding the question (defaults to "query"). */
  inputKey?: string;
}
/**
 * Class that represents a VectorDBQAChain. It extends the `BaseChain`
 * class and implements the `VectorDBQAChainInput` interface. It performs
 * a similarity search using a vector store and combines the search
 * results using a specified combine documents chain.
 *
 * @deprecated
 * Switch to {@link https://js.langchain.com/docs/modules/chains/ | createRetrievalChain}
 * Will be removed in 0.2.0
 */
export class VectorDBQAChain extends BaseChain implements VectorDBQAChainInput {
  static lc_name() {
    return "VectorDBQAChain";
  }

  // Number of documents fetched by the similarity search.
  k = 4;

  // Chain-input key the question is read from.
  inputKey = "query";

  get inputKeys() {
    return [this.inputKey];
  }

  get outputKeys() {
    // sourceDocuments is only exposed when returnSourceDocuments is enabled.
    return this.combineDocumentsChain.outputKeys.concat(
      this.returnSourceDocuments ? ["sourceDocuments"] : []
    );
  }

  vectorstore: VectorStoreInterface;

  combineDocumentsChain: BaseChain;

  returnSourceDocuments = false;

  constructor(fields: VectorDBQAChainInput) {
    super(fields);
    this.vectorstore = fields.vectorstore;
    this.combineDocumentsChain = fields.combineDocumentsChain;
    this.inputKey = fields.inputKey ?? this.inputKey;
    this.k = fields.k ?? this.k;
    this.returnSourceDocuments =
      fields.returnSourceDocuments ?? this.returnSourceDocuments;
  }

  /**
   * Runs the similarity search for the question, then delegates to the
   * combine-documents chain; optionally attaches the retrieved documents.
   * @ignore
   */
  async _call(
    values: ChainValues,
    runManager?: CallbackManagerForChainRun
  ): Promise<ChainValues> {
    if (!(this.inputKey in values)) {
      throw new Error(`Question key ${this.inputKey} not found.`);
    }
    const question: string = values[this.inputKey];
    // values.filter, when present, is forwarded to the vector store's search.
    const docs = await this.vectorstore.similaritySearch(
      question,
      this.k,
      values.filter,
      runManager?.getChild("vectorstore")
    );
    const inputs = { question, input_documents: docs };
    const result = await this.combineDocumentsChain.call(
      inputs,
      runManager?.getChild("combine_documents")
    );
    if (this.returnSourceDocuments) {
      return {
        ...result,
        sourceDocuments: docs,
      };
    }
    return result;
  }

  _chainType() {
    return "vector_db_qa" as const;
  }

  /**
   * Rebuilds a VectorDBQAChain from serialized data; the vector store cannot
   * be serialized and must be supplied via `values`.
   * NOTE(review): only `k` and the combine-documents chain round-trip through
   * serialize()/deserialize(); `inputKey` and `returnSourceDocuments` are
   * dropped and reset to their defaults — confirm this is intentional.
   */
  static async deserialize(
    data: SerializedVectorDBQAChain,
    values: LoadValues
  ) {
    if (!("vectorstore" in values)) {
      throw new Error(
        `Need to pass in a vectorstore to deserialize VectorDBQAChain`
      );
    }
    const { vectorstore } = values;
    if (!data.combine_documents_chain) {
      throw new Error(
        `VectorDBQAChain must have combine_documents_chain in serialized data`
      );
    }

    return new VectorDBQAChain({
      combineDocumentsChain: await BaseChain.deserialize(
        data.combine_documents_chain
      ),
      k: data.k,
      vectorstore,
    });
  }

  serialize(): SerializedVectorDBQAChain {
    return {
      _type: this._chainType(),
      combine_documents_chain: this.combineDocumentsChain.serialize(),
      k: this.k,
    };
  }

  /**
   * Static method that creates a VectorDBQAChain instance from a
   * BaseLanguageModel and a vector store. It also accepts optional options
   * to customize the chain.
   * @param llm The BaseLanguageModel instance.
   * @param vectorstore The vector store used for similarity search.
   * @param options Optional options to customize the chain.
   * @returns A new instance of VectorDBQAChain.
   */
  static fromLLM(
    llm: BaseLanguageModelInterface,
    vectorstore: VectorStoreInterface,
    options?: Partial<
      Omit<VectorDBQAChainInput, "combineDocumentsChain" | "vectorstore">
    >
  ): VectorDBQAChain {
    // Default combine strategy: "stuff" all retrieved docs into one prompt.
    const qaChain = loadQAStuffChain(llm);
    return new this({
      vectorstore,
      combineDocumentsChain: qaChain,
      ...options,
    });
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.