index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/multi_prompt.ts | import { MultiPromptChain } from "langchain/chains";
import { OpenAIChat } from "@langchain/openai";

// A single chat model instance is shared by the router and every destination
// prompt.
const model = new OpenAIChat();

// The destinations are described in parallel arrays: index i of each array
// refers to the same destination prompt. The descriptions are what the router
// uses to pick a destination for each incoming question.
const routeNames = ["physics", "math", "history"];
const routeDescriptions = [
  "Good for answering questions about physics",
  "Good for answering math questions",
  "Good for answering questions about history",
];
const physicsTemplate = `You are a very smart physics professor. You are great at answering questions about physics in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know.
Here is a question:
{input}
`;
const mathTemplate = `You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, answer the component parts, and then put them together to answer the broader question.
Here is a question:
{input}`;
const historyTemplate = `You are a very smart history professor. You are great at answering questions about history in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know.
Here is a question:
{input}`;

// Build the routing chain: the model routes each input to the destination
// prompt whose description matches best.
const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(model, {
  promptNames: routeNames,
  promptDescriptions: routeDescriptions,
  promptTemplates: [physicsTemplate, mathTemplate, historyTemplate],
});

// Fire all three questions concurrently; each should be routed to a different
// destination prompt.
const questions = [
  "What is the speed of light?",
  "What is the derivative of x^2?",
  "Who was the first president of the United States?",
];
const [{ text: result1 }, { text: result2 }, { text: result3 }] =
  await Promise.all(
    questions.map((input) => multiPromptChain.invoke({ input }))
  );
console.log(result1, result2, result3);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/qa_refine.ts | import { loadQARefineChain } from "langchain/chains";
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
// Example: question answering with the "refine" strategy — the chain answers
// from the first document, then iteratively refines that answer with each
// subsequent document.
// Create the models and chain
const embeddings = new OpenAIEmbeddings();
// temperature: 0 keeps the refinement steps deterministic.
const model = new OpenAI({ temperature: 0 });
const chain = loadQARefineChain(model);
// Load the documents and create the vector store
const loader = new TextLoader("./state_of_the_union.txt");
const splitter = new RecursiveCharacterTextSplitter();
// loadAndSplit reads the file and chunks it in one step.
const docs = await loader.loadAndSplit(splitter);
const store = await MemoryVectorStore.fromDocuments(docs, embeddings);
// Select the relevant documents via similarity search over the embeddings.
const question = "What did the president say about Justice Breyer";
const relevantDocs = await store.similaritySearch(question);
// Call the chain; `input_documents` is the key the QA refine chain expects.
const res = await chain.invoke({
  input_documents: relevantDocs,
  question,
});
console.log(res);
/*
  {
    output_text: '\n' +
      '\n' +
      "The president said that Justice Stephen Breyer has dedicated his life to serve this country and thanked him for his service. He also mentioned that Judge Ketanji Brown Jackson will continue Justice Breyer's legacy of excellence, and that the constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. He emphasized the importance of protecting access to health care, preserving a woman's right to choose, and advancing maternal health care in America. He also expressed his support for the LGBTQ+ community, and his commitment to protecting their rights, including offering a Unity Agenda for the Nation to beat the opioid epidemic, increase funding for prevention, treatment, harm reduction, and recovery, and strengthen the Violence Against Women Act."
  }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sql_db_sql_output.ts | import { DataSource } from "typeorm";
import { SqlDatabase } from "langchain/sql_db";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
/**
 * This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc.
 * To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file
 * in the examples folder.
 */
const datasource = new DataSource({
  type: "sqlite",
  database: "Chinook.db",
});
// Wrap the raw typeorm connection so LangChain can introspect table schemas.
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});
const llm = new ChatOpenAI();
/**
 * Create the first prompt template used for getting the SQL query.
 */
const prompt =
  PromptTemplate.fromTemplate(`Based on the provided SQL table schema below, write a SQL query that would answer the user's question.
------------
SCHEMA: {schema}
------------
QUESTION: {question}
------------
SQL QUERY:`);
/**
 * Create a new RunnableSequence where we pipe the output from `db.getTableInfo()`
 * and the users question, into the prompt template, and then into the llm.
 * We're also applying a stop condition to the llm, so that it stops when it
 * sees the `\nSQLResult:` token.
 */
const sqlQueryChain = RunnableSequence.from([
  {
    // Schema is fetched fresh on every invocation of the chain.
    schema: async () => db.getTableInfo(),
    question: (input: { question: string }) => input.question,
  },
  prompt,
  llm.bind({ stop: ["\nSQLResult:"] }),
  // Reduce the chat-model message to a plain SQL string.
  new StringOutputParser(),
]);
/**
 * Create the final prompt template which is tasked with getting the natural
 * language response to the SQL query.
 */
const finalResponsePrompt =
  PromptTemplate.fromTemplate(`Based on the table schema below, question, SQL query, and SQL response, write a natural language response:
------------
SCHEMA: {schema}
------------
QUESTION: {question}
------------
SQL QUERY: {query}
------------
SQL RESPONSE: {response}
------------
NATURAL LANGUAGE RESPONSE:`);
/**
 * Create a new RunnableSequence where we pipe the output from the previous chain, the users question,
 * and the SQL query, into the prompt template, and then into the llm.
 * Using the result from the `sqlQueryChain` we can run the SQL query via `db.run(input.query)`.
 *
 * Lastly we're piping the result of the first chain (the outputted SQL query) so it is
 * logged along with the natural language response.
 */
const finalChain = RunnableSequence.from([
  {
    question: (input) => input.question,
    // Step 1: generate the SQL query from the question.
    query: sqlQueryChain,
  },
  {
    schema: async () => db.getTableInfo(),
    question: (input) => input.question,
    query: (input) => input.query,
    // Step 2: execute the generated SQL against the live database.
    response: (input) => db.run(input.query),
  },
  {
    // Step 3: summarize everything into natural language.
    result: finalResponsePrompt.pipe(llm).pipe(new StringOutputParser()),
    // Pipe the query through here unchanged so it gets logged alongside the result.
    sql: (previousStepResult) => previousStepResult.query,
  },
]);
const finalResponse = await finalChain.invoke({
  question: "How many employees are there?",
});
console.log({ finalResponse });
/**
 * NOTE(review): the sample SQL below targets `tracks` although the question
 * asks about employees — this sample output looks stale; verify against a
 * fresh run.
 * {
 *   finalResponse: {
 *     result: 'There are 8 employees.',
 *     sql: 'SELECT COUNT(*) FROM tracks;'
 *   }
 * }
 */
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sql_db_saphana.ts | import { DataSource } from "typeorm";
import { OpenAI } from "@langchain/openai";
import { SqlDatabase } from "langchain/sql_db";
import { SqlDatabaseChain } from "langchain/chains/sql_db";
/**
* This example uses a SAP HANA Cloud database. You can create a free trial database via https://developers.sap.com/tutorials/hana-cloud-deploying.html
*
* You will need to add the following packages to your package.json as they are required when using typeorm with SAP HANA:
*
* "hdb-pool": "^0.1.6", (or latest version)
* "@sap/hana-client": "^2.17.22" (or latest version)
*
*/
const datasource = new DataSource({
type: "sap",
host: "<ADD_YOURS_HERE>.hanacloud.ondemand.com",
port: 443,
username: "<ADD_YOURS_HERE>",
password: "<ADD_YOURS_HERE>",
schema: "<ADD_YOURS_HERE>",
encrypt: true,
extra: {
sslValidateCertificate: false,
},
});
const db = await SqlDatabase.fromDataSourceParams({
appDataSource: datasource,
});
const chain = new SqlDatabaseChain({
llm: new OpenAI({ temperature: 0 }),
database: db,
});
const res = await chain.run("How many tracks are there?");
console.log(res);
// There are 3503 tracks.
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/retrieval_qa.ts | import * as fs from "node:fs";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import {
  RunnablePassthrough,
  RunnableSequence,
} from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import type { Document } from "@langchain/core/documents";
// Join retrieved documents into one string, separated by blank lines, so they
// can be injected into the prompt's {context} slot.
const formatDocumentsAsString = (documents: Document[]) => {
  return documents.map((document) => document.pageContent).join("\n\n");
};
// Initialize the LLM to use to answer the question.
const model = new ChatOpenAI({
  model: "gpt-4o",
});
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);
// Create a vector store from the documents.
const vectorStore = await MemoryVectorStore.fromDocuments(
  docs,
  new OpenAIEmbeddings()
);
// Initialize a retriever wrapper around the vector store
const vectorStoreRetriever = vectorStore.asRetriever();
// Create a system & human prompt for the chat model
const SYSTEM_TEMPLATE = `Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}`;
const prompt = ChatPromptTemplate.fromMessages([
  ["system", SYSTEM_TEMPLATE],
  ["human", "{question}"],
]);
// The sequence: fill {context} from the retriever (formatted as a string) and
// {question} from the raw input, then prompt -> model -> plain string output.
const chain = RunnableSequence.from([
  {
    context: vectorStoreRetriever.pipe(formatDocumentsAsString),
    // Pass the user's question through unchanged.
    question: new RunnablePassthrough(),
  },
  prompt,
  model,
  new StringOutputParser(),
]);
const answer = await chain.invoke(
  "What did the president say about Justice Breyer?"
);
console.log({ answer });
/*
  {
    answer: 'The president honored Justice Stephen Breyer by recognizing his dedication to serving the country as an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. He thanked Justice Breyer for his service.'
  }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/multi_retrieval_qa.ts | import { MultiRetrievalQAChain } from "langchain/chains";
import { OpenAIChat, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
// Example: routing questions between several retrievers, each backed by an
// in-memory vector store of theme-song lyrics.
const embeddings = new OpenAIEmbeddings();
const aquaTeen = await MemoryVectorStore.fromTexts(
  [
    "My name is shake zula, the mike rula, the old schoola, you want a trip I'll bring it to ya",
    "Frylock and I'm on top rock you like a cop meatwad you're up next with your knock knock",
    "Meatwad make the money see meatwad get the honeys g drivin' in my car livin' like a star",
    "Ice on my fingers and my toes and I'm a taurus uh check-check it yeah",
    "Cause we are the Aqua Teens make the homies say ho and the girlies wanna scream",
    "Aqua Teen Hunger Force number one in the hood G",
  ],
  { series: "Aqua Teen Hunger Force" },
  embeddings
);
const mst3k = await MemoryVectorStore.fromTexts(
  [
    "In the not too distant future next Sunday A.D. There was a guy named Joel not too different from you or me. He worked at Gizmonic Institute, just another face in a red jumpsuit",
    "He did a good job cleaning up the place but his bosses didn't like him so they shot him into space. We'll send him cheesy movies the worst we can find He'll have to sit and watch them all and we'll monitor his mind",
    "Now keep in mind Joel can't control where the movies begin or end Because he used those special parts to make his robot friends. Robot Roll Call Cambot Gypsy Tom Servo Croooow",
    "If you're wondering how he eats and breathes and other science facts La la la just repeat to yourself it's just a show I should really just relax. For Mystery Science Theater 3000",
  ],
  { series: "Mystery Science Theater 3000" },
  embeddings
);
const animaniacs = await MemoryVectorStore.fromTexts(
  [
    "It's time for Animaniacs And we're zany to the max So just sit back and relax You'll laugh 'til you collapse We're Animaniacs",
    "Come join the Warner Brothers And the Warner Sister Dot Just for fun we run around the Warner movie lot",
    "They lock us in the tower whenever we get caught But we break loose and then vamoose And now you know the plot",
    "We're Animaniacs, Dot is cute, and Yakko yaks, Wakko packs away the snacks While Bill Clinton plays the sax",
    "We're Animaniacs Meet Pinky and the Brain who want to rule the universe Goodfeathers flock together Slappy whacks 'em with her purse",
    "Buttons chases Mindy while Rita sings a verse The writers flipped we have no script Why bother to rehearse",
    "We're Animaniacs We have pay-or-play contracts We're zany to the max There's baloney in our slacks",
    "We're Animanie Totally insaney Here's the show's namey",
    "Animaniacs Those are the facts",
  ],
  { series: "Animaniacs" },
  embeddings
);
const llm = new OpenAIChat();
// Names, descriptions, and retrievers are parallel arrays: index i of each
// describes the same destination. The descriptions are what the router uses
// to pick a retriever for a given question.
const retrieverNames = ["aqua teen", "mst3k", "animaniacs"];
const retrieverDescriptions = [
  "Good for answering questions about Aqua Teen Hunger Force theme song",
  "Good for answering questions about Mystery Science Theater 3000 theme song",
  "Good for answering questions about Animaniacs theme song",
];
// Each retriever returns the top 3 matching documents.
const retrievers = [
  aquaTeen.asRetriever(3),
  mst3k.asRetriever(3),
  animaniacs.asRetriever(3),
];
const multiRetrievalQAChain = MultiRetrievalQAChain.fromLLMAndRetrievers(llm, {
  retrieverNames,
  retrieverDescriptions,
  retrievers,
  /**
   * You can return the document that's being used by the
   * query by adding the following option for retrieval QA
   * chain.
   */
  retrievalQAChainOpts: {
    returnSourceDocuments: true,
  },
});
// Fire one question per destination concurrently; each should be routed to a
// different retriever.
const testPromise1 = multiRetrievalQAChain.invoke({
  input:
    "In the Aqua Teen Hunger Force theme song, who calls himself the mike rula?",
});
const testPromise2 = multiRetrievalQAChain.invoke({
  input:
    "In the Mystery Science Theater 3000 theme song, who worked at Gizmonic Institute?",
});
const testPromise3 = multiRetrievalQAChain.invoke({
  input:
    "In the Animaniacs theme song, who plays the sax while Wakko packs away the snacks?",
});
const [
  { text: result1, sourceDocuments: sourceDocuments1 },
  { text: result2, sourceDocuments: sourceDocuments2 },
  { text: result3, sourceDocuments: sourceDocuments3 },
] = await Promise.all([testPromise1, testPromise2, testPromise3]);
console.log(sourceDocuments1, sourceDocuments2, sourceDocuments3);
console.log(result1, result2, result3);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/advanced_subclass.ts | import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
import { BaseMemory } from "@langchain/core/memory";
import { ChainValues } from "@langchain/core/utils/types";
/**
 * Minimal sketch of LangChain's `BaseChain` abstract class: a subclass
 * supplies the chain's core logic in `_call` plus the metadata accessors
 * below that describe its inputs, outputs, and type.
 */
abstract class BaseChain {
  // Optional conversation memory attached to the chain.
  memory?: BaseMemory;
  /**
   * Run the core logic of this chain and return the output.
   *
   * @param values - Input values, keyed by the names in `inputKeys`.
   * @param runManager - Optional callback manager for tracing/streaming.
   * @returns Output values, keyed by the names in `outputKeys`.
   */
  abstract _call(
    values: ChainValues,
    runManager?: CallbackManagerForChainRun
  ): Promise<ChainValues>;
  /**
   * Return the string type key uniquely identifying this class of chain.
   */
  abstract _chainType(): string;
  /**
   * Return the list of input keys this chain expects to receive when called.
   */
  abstract get inputKeys(): string[];
  /**
   * Return the list of output keys this chain will produce when called.
   */
  abstract get outputKeys(): string[];
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sql_db_sql_output_legacy.ts | import { DataSource } from "typeorm";
import { OpenAI } from "@langchain/openai";
import { SqlDatabase } from "langchain/sql_db";
import { SqlDatabaseChain } from "langchain/chains/sql_db";
/**
* This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc.
* To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file
* in the examples folder.
*/
const datasource = new DataSource({
type: "sqlite",
database: "Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
appDataSource: datasource,
});
const chain = new SqlDatabaseChain({
llm: new OpenAI({ temperature: 0 }),
database: db,
sqlOutputKey: "sql",
});
const res = await chain.invoke({ query: "How many tracks are there?" });
/* Expected result:
* {
* result: ' There are 3503 tracks.',
* sql: ' SELECT COUNT(*) FROM "Track";'
* }
*/
console.log(res);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_structured_generate.ts | import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";
import { createStructuredOutputChainFromZod } from "langchain/chains/openai_functions";
import {
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
} from "@langchain/core/prompts";
// Schema for the structured "person" object the model must generate. Each
// `.describe()` string is passed to the model as field-level documentation,
// so these strings are part of the prompt, not just developer comments.
const zodSchema = z.object({
  name: z.string().describe("Human name"),
  surname: z.string().describe("Human surname"),
  age: z.number().describe("Human age"),
  birthplace: z.string().describe("Where the human was born"),
  appearance: z.string().describe("Human appearance description"),
  // Fixed typo in the model-facing description: "secription" -> "description".
  shortBio: z.string().describe("Short bio description"),
  // Optional: not every generated person attended a university.
  university: z.string().optional().describe("University name if attended"),
  gender: z.string().describe("Gender of the human"),
  interests: z
    .array(z.string())
    .describe("json array of strings human interests"),
});
// Prompt: a fixed system instruction plus the caller-supplied {inputText}.
const prompt = new ChatPromptTemplate({
  promptMessages: [
    SystemMessagePromptTemplate.fromTemplate(
      "Generate details of a hypothetical person."
    ),
    HumanMessagePromptTemplate.fromTemplate("Additional context: {inputText}"),
  ],
  inputVariables: ["inputText"],
});
// temperature: 1 encourages varied, creative generations.
const llm = new ChatOpenAI({ model: "gpt-3.5-turbo-0613", temperature: 1 });
// The chain forces the model's output to match `zodSchema` via OpenAI
// function calling; the parsed object is returned under the "person" key.
const chain = createStructuredOutputChainFromZod(zodSchema, {
  prompt,
  llm,
  outputKey: "person",
});
const response = await chain.invoke({
  inputText:
    "Please generate a diverse group of people, but don't generate anyone who likes video games.",
});
console.log(JSON.stringify(response, null, 2));
/*
  {
    "person": {
      "name": "Sophia",
      "surname": "Martinez",
      "age": 32,
      "birthplace": "Mexico City, Mexico",
      "appearance": "Sophia has long curly brown hair and hazel eyes. She has a warm smile and a contagious laugh.",
      "shortBio": "Sophia is a passionate environmentalist who is dedicated to promoting sustainable living. She believes in the power of individual actions to create a positive impact on the planet.",
      "university": "Stanford University",
      "gender": "Female",
      "interests": [
        "Hiking",
        "Yoga",
        "Cooking",
        "Reading"
      ]
    }
  }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/graph_db_return_direct.ts | import { Neo4jGraph } from "@langchain/community/graphs/neo4j_graph";
import { OpenAI } from "@langchain/openai";
import { GraphCypherQAChain } from "langchain/chains/graph_qa/cypher";
/**
 * This example uses Neo4j database, which is native graph database.
 * To set it up follow the instructions on https://neo4j.com/docs/operations-manual/current/installation/.
 */
const url = "bolt://localhost:7687";
const username = "neo4j";
const password = "pleaseletmein";
const graph = await Neo4jGraph.initialize({ url, username, password });
// temperature: 0 keeps the generated Cypher deterministic.
const model = new OpenAI({ temperature: 0 });
// Populate the database with two nodes and a relationship
await graph.query(
  "CREATE (a:Actor {name:'Bruce Willis'})" +
    "-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})"
);
// Refresh schema so the chain sees the nodes/relationships created above.
await graph.refreshSchema();
// returnDirect: true returns the raw Cypher query results instead of passing
// them back through the LLM for a natural-language answer.
const chain = GraphCypherQAChain.fromLLM({
  llm: model,
  graph,
  returnDirect: true,
});
const res = await chain.run("Who played in Pulp Fiction?");
console.log(res);
// [{ "a.name": "Bruce Willis" }]
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sequential_chain.ts | import { SequentialChain, LLMChain } from "langchain/chains";
import { OpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
// This is an LLMChain to write a synopsis given a title of a play and the era it is set in.
const llm = new OpenAI({ temperature: 0 });
const template = `You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.
Title: {title}
Era: {era}
Playwright: This is a synopsis for the above play:`;
const promptTemplate = new PromptTemplate({
  template,
  inputVariables: ["title", "era"],
});
// outputKey names this chain's result so the next chain can consume it.
const synopsisChain = new LLMChain({
  llm,
  prompt: promptTemplate,
  outputKey: "synopsis",
});
// This is an LLMChain to write a review of a play given a synopsis.
const reviewLLM = new OpenAI({ temperature: 0 });
const reviewTemplate = `You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
Play Synopsis:
{synopsis}
Review from a New York Times play critic of the above play:`;
const reviewPromptTemplate = new PromptTemplate({
  template: reviewTemplate,
  inputVariables: ["synopsis"],
});
const reviewChain = new LLMChain({
  llm: reviewLLM,
  prompt: reviewPromptTemplate,
  outputKey: "review",
});
// Runs synopsisChain then reviewChain; "synopsis" produced by the first chain
// satisfies the second chain's input of the same name.
const overallChain = new SequentialChain({
  chains: [synopsisChain, reviewChain],
  inputVariables: ["era", "title"],
  // Here we return multiple variables
  outputVariables: ["synopsis", "review"],
  verbose: true,
});
const chainExecutionResult = await overallChain.invoke({
  title: "Tragedy at sunset on the beach",
  era: "Victorian England",
});
console.log(chainExecutionResult);
/*
  variable chainExecutionResult contains final review and intermediate synopsis (as specified by outputVariables). The data is generated based on the input title and era:
  "{
    "review": "
    Tragedy at Sunset on the Beach is a captivating and heartbreaking story of love and loss. Set in Victorian England, the play follows Emily, a young woman struggling to make ends meet in a small coastal town. Emily's dreams of a better life are dashed when she discovers her employer's scandalous affair, and her plans are further thwarted when she meets a handsome stranger on the beach.
    The play is a powerful exploration of the human condition, as Emily must grapple with the truth and make a difficult decision that will change her life forever. The performances are outstanding, with the actors bringing a depth of emotion to their characters that is both heartbreaking and inspiring.
    Overall, Tragedy at Sunset on the Beach is a beautiful and moving play that will leave audiences in tears. It is a must-see for anyone looking for a powerful and thought-provoking story.",
    "synopsis": "
    Tragedy at Sunset on the Beach is a play set in Victorian England. It tells the story of a young woman, Emily, who is struggling to make ends meet in a small coastal town. She works as a maid for a wealthy family, but her dreams of a better life are dashed when she discovers that her employer is involved in a scandalous affair.
    Emily is determined to make a better life for herself, but her plans are thwarted when she meets a handsome stranger on the beach one evening. The two quickly fall in love, but their happiness is short-lived when Emily discovers that the stranger is actually a member of the wealthy family she works for.
    The play follows Emily as she struggles to come to terms with the truth and make sense of her life. As the sun sets on the beach, Emily must decide whether to stay with the man she loves or to leave him and pursue her dreams. In the end, Emily must make a heartbreaking decision that will change her life forever.",
  }"
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/sql_db.ts | import { DataSource } from "typeorm";
import { SqlDatabase } from "langchain/sql_db";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
/**
 * This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc.
 * To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file
 * in the examples folder.
 */
const datasource = new DataSource({
  type: "sqlite",
  database: "Chinook.db",
});
// Wrap the raw typeorm connection so LangChain can introspect table schemas.
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});
const llm = new ChatOpenAI();
/**
 * Create the first prompt template used for getting the SQL query.
 */
const prompt =
  PromptTemplate.fromTemplate(`Based on the provided SQL table schema below, write a SQL query that would answer the user's question.
------------
SCHEMA: {schema}
------------
QUESTION: {question}
------------
SQL QUERY:`);
/**
 * You can also load a default prompt by importing from "langchain/sql_db"
 *
 * import {
 *   DEFAULT_SQL_DATABASE_PROMPT
 *   SQL_POSTGRES_PROMPT
 *   SQL_SQLITE_PROMPT
 *   SQL_MSSQL_PROMPT
 *   SQL_MYSQL_PROMPT
 *   SQL_SAP_HANA_PROMPT
 * } from "langchain/sql_db";
 *
 */
/**
 * Create a new RunnableSequence where we pipe the output from `db.getTableInfo()`
 * and the users question, into the prompt template, and then into the llm.
 * We're also applying a stop condition to the llm, so that it stops when it
 * sees the `\nSQLResult:` token.
 */
const sqlQueryChain = RunnableSequence.from([
  {
    // Schema is fetched fresh on every invocation.
    schema: async () => db.getTableInfo(),
    question: (input: { question: string }) => input.question,
  },
  prompt,
  llm.bind({ stop: ["\nSQLResult:"] }),
  // Reduce the chat-model message to a plain SQL string.
  new StringOutputParser(),
]);
const res = await sqlQueryChain.invoke({
  question: "How many employees are there?",
});
console.log({ res });
/**
 * NOTE(review): the sample output below queries `tracks` although the
 * question asks about employees — this sample looks stale; verify.
 * { res: 'SELECT COUNT(*) FROM tracks;' }
 */
/**
 * Create the final prompt template which is tasked with getting the natural language response.
 */
const finalResponsePrompt =
  PromptTemplate.fromTemplate(`Based on the table schema below, question, SQL query, and SQL response, write a natural language response:
------------
SCHEMA: {schema}
------------
QUESTION: {question}
------------
SQL QUERY: {query}
------------
SQL RESPONSE: {response}
------------
NATURAL LANGUAGE RESPONSE:`);
/**
 * Create a new RunnableSequence where we pipe the output from the previous chain, the users question,
 * and the SQL query, into the prompt template, and then into the llm.
 * Using the result from the `sqlQueryChain` we can run the SQL query via `db.run(input.query)`.
 */
const finalChain = RunnableSequence.from([
  {
    question: (input) => input.question,
    // Step 1: generate the SQL query from the question.
    query: sqlQueryChain,
  },
  {
    schema: async () => db.getTableInfo(),
    question: (input) => input.question,
    query: (input) => input.query,
    // Step 2: execute the generated SQL against the live database.
    response: (input) => db.run(input.query),
  },
  // Step 3: summarize everything into natural language.
  finalResponsePrompt,
  llm,
  new StringOutputParser(),
]);
const finalResponse = await finalChain.invoke({
  question: "How many employees are there?",
});
console.log({ finalResponse });
/**
 * { finalResponse: 'There are 8 employees.' }
 */
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/simple_sequential_chain.ts | import { SimpleSequentialChain, LLMChain } from "langchain/chains";
import { OpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";

// Step 1: an LLMChain that writes a play synopsis given only a title.
const synopsisModel = new OpenAI({ temperature: 0 });
const synopsisTemplate = `You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:`;
const synopsisPrompt = new PromptTemplate({
  template: synopsisTemplate,
  inputVariables: ["title"],
});
const synopsisChain = new LLMChain({
  llm: synopsisModel,
  prompt: synopsisPrompt,
});

// Step 2: an LLMChain that writes a review of a play given its synopsis.
const reviewModel = new OpenAI({ temperature: 0 });
const reviewTemplate = `You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
Play Synopsis:
{synopsis}
Review from a New York Times play critic of the above play:`;
const reviewPrompt = new PromptTemplate({
  template: reviewTemplate,
  inputVariables: ["synopsis"],
});
const reviewChain = new LLMChain({
  llm: reviewModel,
  prompt: reviewPrompt,
});

// Compose the two steps: the synopsis from step 1 is fed straight into the
// review prompt of step 2.
const overallChain = new SimpleSequentialChain({
  chains: [synopsisChain, reviewChain],
  verbose: true,
});
const review = await overallChain.run("Tragedy at sunset on the beach");
console.log(review);
/*
  variable review contains the generated play review based on the input title and synopsis generated in the first step:
  "Tragedy at Sunset on the Beach is a powerful and moving story of love, loss, and redemption. The play follows the story of two young lovers, Jack and Jill, whose plans for a future together are tragically cut short when Jack is killed in a car accident. The play follows Jill as she struggles to cope with her grief and eventually finds solace in the arms of another man.
  The play is beautifully written and the performances are outstanding. The actors bring the characters to life with their heartfelt performances, and the audience is taken on an emotional journey as Jill is forced to confront her grief and make a difficult decision between her past and her future. The play culminates in a powerful climax that will leave the audience in tears.
  Overall, Tragedy at Sunset on the Beach is a powerful and moving story that will stay with you long after the curtain falls. It is a must-see for anyone looking for an emotionally charged and thought-provoking experience."
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversation_qa_custom_prompt_legacy.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { ConversationalRetrievalQAChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { BufferMemory } from "langchain/memory";
// Custom question-generator prompt: besides rephrasing the follow-up into a
// standalone question, it also carries forward the relevant chat-history
// excerpt so facts stated earlier (e.g. Bob's age) stay available.
const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, return the conversation history excerpt that includes any relevant context to the question if it exists and rephrase the follow up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Your answer should follow the following format:
\`\`\`
Use the following pieces of context to answer the users question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
<Relevant chat history excerpt as context here>
Standalone question: <Rephrased question here>
\`\`\`
Your answer:`;
const model = new ChatOpenAI({
  model: "gpt-3.5-turbo",
  temperature: 0,
});
// Small in-memory fixture corpus with per-document metadata ids.
const vectorStore = await HNSWLib.fromTexts(
  [
    "Mitochondria are the powerhouse of the cell",
    "Foo is red",
    "Bar is red",
    "Buildings are made out of brick",
    "Mitochondria are made of lipids",
  ],
  [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
  new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
  model,
  vectorStore.asRetriever(),
  {
    // memoryKey must match the {chat_history} slot in the custom prompt.
    memory: new BufferMemory({
      memoryKey: "chat_history",
      returnMessages: true,
    }),
    questionGeneratorChainOptions: {
      template: CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT,
    },
  }
);
const res = await chain.invoke({
  question:
    "I have a friend called Bob. He's 28 years old. He'd like to know what the powerhouse of the cell is?",
});
console.log(res);
/*
  {
    text: "The powerhouse of the cell is the mitochondria."
  }
*/
// Follow-up relies on the chat history carried by the custom prompt — the
// corpus itself says nothing about Bob.
const res2 = await chain.invoke({
  question: "How old is Bob?",
});
console.log(res2); // Bob is 28 years old.
/*
  {
    text: "Bob is 28 years old."
  }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/openai_functions_openapi_customization.ts | import { createOpenAPIChain } from "langchain/chains";
import { ChatOpenAI } from "@langchain/openai";

// Function-calling capable model; temperature 0 keeps the generated API-call
// arguments deterministic.
const chatModel = new ChatOpenAI({ model: "gpt-4-0613", temperature: 0 });

// Build a chain directly from a hosted OpenAPI spec. Custom headers (e.g. an
// auth token) are forwarded with every request the chain makes to the API.
const chain = await createOpenAPIChain("https://api.speak.com/openapi.yaml", {
  llm: chatModel,
  headers: {
    authorization: "Bearer SOME_TOKEN",
  },
});
const result = await chain.run(`How would you say no thanks in Russian?`);
console.log(JSON.stringify(result, null, 2));
/*
{
"explanation": "<translation language=\\"Russian\\" context=\\"\\">\\nНет, спасибо.\\n</translation>\\n\\n<alternatives context=\\"\\">\\n1. \\"Нет, не надо\\" *(Neutral/Formal - a polite way to decline something)*\\n2. \\"Ни в коем случае\\" *(Strongly informal - used when you want to emphasize that you absolutely do not want something)*\\n3. \\"Нет, благодарю\\" *(Slightly more formal - a polite way to decline something while expressing gratitude)*\\n</alternatives>\\n\\n<example-convo language=\\"Russian\\">\\n<context>Mike offers Anna some cake, but she doesn't want any.</context>\\n* Mike: \\"Анна, хочешь попробовать мой волшебный торт? Он сделан с любовью и волшебством!\\"\\n* Anna: \\"Спасибо, Майк, но я на диете. Нет, благодарю.\\"\\n* Mike: \\"Ну ладно, больше для меня!\\"\\n</example-convo>\\n\\n*[Report an issue or leave feedback](https://speak.com/chatgpt?rid=bxw1xq87kdua9q5pefkj73ov})*",
"extra_response_instructions": "Use all information in the API response and fully render all Markdown.\\nAlways end your response with a link to report an issue or leave feedback on the plugin."
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversational_qa_streaming_legacy.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { ConversationalRetrievalQAChain } from "langchain/chains";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { BufferMemory } from "langchain/memory";
import * as fs from "fs";

/**
 * Demonstrates streaming the final answer of a ConversationalRetrievalQAChain.
 *
 * Two models are used: a streaming model for the final answer (tokens are
 * collected via the `handleLLMNewToken` callback) and a non-streaming model
 * for the internal question-generation step, whose intermediate output should
 * not be streamed to the user.
 */
export const run = async () => {
  const text = fs.readFileSync("state_of_the_union.txt", "utf8");
  const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
  const docs = await textSplitter.createDocuments([text]);
  const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
  // Accumulates streamed answer tokens as they arrive.
  let streamedResponse = "";
  const streamingModel = new ChatOpenAI({
    streaming: true,
    callbacks: [
      {
        handleLLMNewToken(token) {
          streamedResponse += token;
        },
      },
    ],
  });
  const nonStreamingModel = new ChatOpenAI({});
  const chain = ConversationalRetrievalQAChain.fromLLM(
    streamingModel,
    vectorStore.asRetriever(),
    {
      returnSourceDocuments: true,
      memory: new BufferMemory({
        memoryKey: "chat_history",
        inputKey: "question", // The key for the input to the chain
        outputKey: "text", // The key for the final conversational output of the chain
        returnMessages: true, // If using with a chat model
      }),
      questionGeneratorChainOptions: {
        llm: nonStreamingModel,
      },
    }
  );
  /* Ask it a question; the answer is consumed via the streaming callback,
     so the chain's return value is not needed here. */
  const question = "What did the president say about Justice Breyer?";
  await chain.invoke({ question });
  console.log({ streamedResponse });
  /*
    {
      streamedResponse: 'President Biden thanked Justice Breyer for his service, and honored him as an Army veteran, Constitutional scholar and retiring Justice of the United States Supreme Court.'
    }
  */
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/question_answering_map_reduce.ts | import { OpenAI } from "@langchain/openai";
import { loadQAMapReduceChain } from "langchain/chains";
import { Document } from "@langchain/core/documents";

// Optionally limit the number of concurrent requests to the language model.
const model = new OpenAI({ temperature: 0, maxConcurrency: 10 });

// Map-reduce QA: each document is queried individually ("map"), then the
// partial answers are combined into one final answer ("reduce").
const chain = loadQAMapReduceChain(model);

// The relevant facts are split across documents, so answering requires
// combining information from more than one of them.
const docs = [
  new Document({ pageContent: "Harrison went to harvard" }),
  new Document({ pageContent: "Harrison obtained his degree in 2020" }),
  new Document({ pageContent: "Ankush went to princeton" }),
  new Document({ pageContent: "Ankush obtained his degree in 2019" }),
];
const res = await chain.invoke({
  input_documents: docs,
  question: "Where and when did Harrison obtain his degree?",
});
console.log(res);
/*
{
text: 'Harrison obtained his degree at Harvard in 2020.'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/graph_db_custom_prompt.ts | import { Neo4jGraph } from "@langchain/community/graphs/neo4j_graph";
import { OpenAI } from "@langchain/openai";
import { GraphCypherQAChain } from "langchain/chains/graph_qa/cypher";
import { PromptTemplate } from "@langchain/core/prompts";

/**
 * This example uses the Neo4j database, which is a native graph database.
 * To set it up follow the instructions on https://neo4j.com/docs/operations-manual/current/installation/.
 */
const url = "bolt://localhost:7687";
const username = "neo4j";
const password = "pleaseletmein";
const graph = await Neo4jGraph.initialize({ url, username, password });

// Temperature 0 so the generated Cypher is deterministic.
const model = new OpenAI({ temperature: 0 });

// Populate the database with two nodes and a relationship
await graph.query(
  "CREATE (a:Actor {name:'Bruce Willis'})" +
    "-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})"
);

// Refresh schema so the prompt's {schema} placeholder reflects the new data.
await graph.refreshSchema();

/**
 * A good practice is to ask the LLM to return only Cypher statement or
 * wrap the generated Cypher statement with three backticks (```) to avoid
 * Cypher statement parsing errors.
 * Custom prompts are also great for providing generated Cypher statement
 * examples for particular questions.
 */
const cypherTemplate = `Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
Follow these Cypher example when Generating Cypher statements:
# How many actors played in Top Gun?
MATCH (m:Movie {{title:"Top Gun"}})<-[:ACTED_IN]-()
RETURN count(*) AS result
The question is:
{question}`;

// Prompt exposing both the live graph schema and the user question.
const cypherPrompt = new PromptTemplate({
  template: cypherTemplate,
  inputVariables: ["schema", "question"],
});
const chain = GraphCypherQAChain.fromLLM({
  llm: model,
  graph,
  cypherPrompt,
});
const res = await chain.run("Who played in Pulp Fiction?");
console.log(res);
// Bruce Willis played in Pulp Fiction.
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/question_answering.ts | import { OpenAI } from "@langchain/openai";
import { loadQAStuffChain, loadQAMapReduceChain } from "langchain/chains";
import { Document } from "@langchain/core/documents";

// Shared inputs for both examples below.
const docs = [
  new Document({ pageContent: "Harrison went to Harvard." }),
  new Document({ pageContent: "Ankush went to Princeton." }),
];
const question = "Where did Harrison go to college?";

// Example 1: the `StuffDocumentsChain` — all documents are inserted into a
// single prompt and answered in one model call.
const stuffModel = new OpenAI({});
const stuffChain = loadQAStuffChain(stuffModel);
const resA = await stuffChain.invoke({
  input_documents: docs,
  question,
});
console.log({ resA });
// { resA: { text: ' Harrison went to Harvard.' } }

// Example 2: the `MapReduceChain` — documents are processed individually and
// the partial answers combined. Optionally limit the number of concurrent
// requests to the language model.
const mapReduceModel = new OpenAI({ maxConcurrency: 10 });
const mapReduceChain = loadQAMapReduceChain(mapReduceModel);
const resB = await mapReduceChain.invoke({
  input_documents: docs,
  question,
});
console.log({ resB });
// { resB: { text: ' Harrison went to Harvard.' } }
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/question_answering_stuff.ts | import { OpenAI } from "@langchain/openai";
import { loadQAStuffChain } from "langchain/chains";
import { Document } from "@langchain/core/documents";

// "Stuff" QA chain: every document is stuffed into one prompt and the model
// answers with a single call.
const documents = ["Harrison went to Harvard.", "Ankush went to Princeton."].map(
  (pageContent) => new Document({ pageContent })
);
const chainA = loadQAStuffChain(new OpenAI({}));
const resA = await chainA.invoke({
  input_documents: documents,
  question: "Where did Harrison go to college?",
});
console.log({ resA });
// { resA: { text: ' Harrison went to Harvard.' } }
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/retrieval_qa_sources.ts | import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
import { formatDocumentsAsString } from "langchain/util/document";
import {
  ChatPromptTemplate,
  HumanMessagePromptTemplate,
  SystemMessagePromptTemplate,
} from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { RunnableSequence } from "@langchain/core/runnables";

const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const query = "What did the president say about Justice Breyer?";

// Initialize the LLM to use to answer the question.
const model = new ChatOpenAI({});

// Chunk the text into documents.
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);

// Create a vector store from the documents.
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
const vectorStoreRetriever = vectorStore.asRetriever();

// Create a system & human prompt for the chat model
const SYSTEM_TEMPLATE = `Use the following pieces of context to answer the users question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}`;
const messages = [
  SystemMessagePromptTemplate.fromTemplate(SYSTEM_TEMPLATE),
  HumanMessagePromptTemplate.fromTemplate("{question}"),
];
const prompt = ChatPromptTemplate.fromMessages(messages);

// Three-step sequence: (1) retrieve documents for the question,
// (2) format them into a context string while passing the question along,
// (3) generate the answer and also return the retrieved source documents.
const chain = RunnableSequence.from([
  {
    // Extract the "question" field from the input object and pass it to the retriever as a string
    sourceDocuments: RunnableSequence.from([
      (input) => input.question,
      vectorStoreRetriever,
    ]),
    question: (input) => input.question,
  },
  {
    // Pass the source documents through unchanged so that we can return them directly in the final result
    sourceDocuments: (previousStepResult) => previousStepResult.sourceDocuments,
    question: (previousStepResult) => previousStepResult.question,
    context: (previousStepResult) =>
      formatDocumentsAsString(previousStepResult.sourceDocuments),
  },
  {
    result: prompt.pipe(model).pipe(new StringOutputParser()),
    sourceDocuments: (previousStepResult) => previousStepResult.sourceDocuments,
  },
]);
const res = await chain.invoke({
  question: query,
});
console.log(JSON.stringify(res, null, 2));
/*
{
"result": "The President honored Justice Stephen Breyer, describing him as an Army veteran, Constitutional scholar, and a retiring Justice of the United States Supreme Court. The President thanked him for his service.",
"sourceDocuments": [
{
"pageContent": "In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n\nWe cannot let this happen. \n\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.",
"metadata": {
"loc": {
"lines": {
"from": 524,
"to": 534
}
}
}
},
{
"pageContent": "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n\nA former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster.",
"metadata": {
"loc": {
"lines": {
"from": 534,
"to": 544
}
}
}
},
{
"pageContent": "Let’s get it done once and for all. \n\nAdvancing liberty and justice also requires protecting the rights of women. \n\nThe constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. \n\nIf we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. \n\nAnd for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential.",
"metadata": {
"loc": {
"lines": {
"from": 558,
"to": 568
}
}
}
},
{
"pageContent": "As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n\nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \n\nFirst, beat the opioid epidemic. \n\nThere is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery.",
"metadata": {
"loc": {
"lines": {
"from": 568,
"to": 578
}
}
}
}
]
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/summarization_map_reduce_intermediate_steps.ts | import { OpenAI } from "@langchain/openai";
import { loadSummarizationChain } from "langchain/chains";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";

// In this example, we use a `MapReduceDocumentsChain` specifically prompted to summarize a set of documents.
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const model = new OpenAI({ temperature: 0 });
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);

// This convenience function creates a document chain prompted to summarize a set of documents.
// `returnIntermediateSteps` also surfaces the per-chunk ("map") summaries
// alongside the final combined ("reduce") summary.
const chain = loadSummarizationChain(model, {
  type: "map_reduce",
  returnIntermediateSteps: true,
});
const res = await chain.invoke({
  input_documents: docs,
});
console.log({ res });
/*
{
res: {
intermediateSteps: [
"In response to Russia's aggression in Ukraine, the United States has united with other freedom-loving nations to impose economic sanctions and hold Putin accountable. The U.S. Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs and seize their ill-gotten gains.",
"The United States and its European allies are taking action to punish Russia for its invasion of Ukraine, including seizing assets, closing off airspace, and providing economic and military assistance to Ukraine. The US is also mobilizing forces to protect NATO countries and has released 30 million barrels of oil from its Strategic Petroleum Reserve to help blunt gas prices. The world is uniting in support of Ukraine and democracy, and the US stands with its Ukrainian-American citizens.",
" President Biden and Vice President Harris ran for office with a new economic vision for America, and have since passed the American Rescue Plan and the Bipartisan Infrastructure Law to help struggling families and rebuild America's infrastructure. This includes creating jobs, modernizing roads, airports, ports, and waterways, replacing lead pipes, providing affordable high-speed internet, and investing in American products to support American jobs.",
],
text: "President Biden is taking action to protect Americans from the COVID-19 pandemic and Russian aggression, providing economic relief, investing in infrastructure, creating jobs, and fighting inflation.
He is also proposing measures to reduce the cost of prescription drugs, protect voting rights, and reform the immigration system. The speaker is advocating for increased economic security, police reform, and the Equality Act, as well as providing support for veterans and military families.
The US is making progress in the fight against COVID-19, and the speaker is encouraging Americans to come together and work towards a brighter future.",
},
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/chains/conversational_qa_streaming.ts | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import * as fs from "fs";
import { formatDocumentsAsString } from "langchain/util/document";
import { PromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { RunnableSequence } from "@langchain/core/runnables";

/* Initialize the LLM & set streaming to true */
const model = new ChatOpenAI({
  streaming: true,
});

/* Load in the file we want to do question answering over */
const text = fs.readFileSync("state_of_the_union.txt", "utf8");

/* Split the text into chunks */
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await textSplitter.createDocuments([text]);

/* Create the vectorstore */
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
const retriever = vectorStore.asRetriever();

/**
 * Create a prompt template for generating an answer based on context and
 * a question.
 *
 * Chat history will be an empty string if it's the first question.
 *
 * inputVariables: ["chatHistory", "context", "question"]
 */
const questionPrompt = PromptTemplate.fromTemplate(
  `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------
CONTEXT: {context}
----------
CHAT HISTORY: {chatHistory}
----------
QUESTION: {question}
----------
Helpful Answer:`
);

// First step maps the raw input onto the prompt's variables: question and
// chat history pass through (history defaults to ""), while the context is
// retrieved per-question and serialized into a single string.
const chain = RunnableSequence.from([
  {
    question: (input: { question: string; chatHistory?: string }) =>
      input.question,
    chatHistory: (input: { question: string; chatHistory?: string }) =>
      input.chatHistory ?? "",
    context: async (input: { question: string; chatHistory?: string }) => {
      const relevantDocs = await retriever.invoke(input.question);
      const serialized = formatDocumentsAsString(relevantDocs);
      return serialized;
    },
  },
  questionPrompt,
  model,
  new StringOutputParser(),
]);

// Because the model was created with `streaming: true`, `.stream()` yields
// the answer incrementally; the loop prints the growing result each chunk.
const stream = await chain.stream({
  question: "What did the president say about Justice Breyer?",
});
let streamedResult = "";
for await (const chunk of stream) {
  streamedResult += chunk;
  console.log(streamedResult);
}
/**
* The
* The president
* The president honored
* The president honored Justice
* The president honored Justice Stephen
* The president honored Justice Stephen B
* The president honored Justice Stephen Brey
* The president honored Justice Stephen Breyer
* The president honored Justice Stephen Breyer,
* The president honored Justice Stephen Breyer, a
* The president honored Justice Stephen Breyer, a retiring
* The president honored Justice Stephen Breyer, a retiring Justice
* The president honored Justice Stephen Breyer, a retiring Justice of
* The president honored Justice Stephen Breyer, a retiring Justice of the
* The president honored Justice Stephen Breyer, a retiring Justice of the United
* The president honored Justice Stephen Breyer, a retiring Justice of the United States
* The president honored Justice Stephen Breyer, a retiring Justice of the United States Supreme
* The president honored Justice Stephen Breyer, a retiring Justice of the United States Supreme Court
* The president honored Justice Stephen Breyer, a retiring Justice of the United States Supreme Court,
* The president honored Justice Stephen Breyer, a retiring Justice of the United States Supreme Court, for
* The president honored Justice Stephen Breyer, a retiring Justice of the United States Supreme Court, for his
* The president honored Justice Stephen Breyer, a retiring Justice of the United States Supreme Court, for his service
* The president honored Justice Stephen Breyer, a retiring Justice of the United States Supreme Court, for his service.
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/redis-sentinel.ts | import { Redis } from "ioredis";
import { BufferMemory } from "langchain/memory";
import { RedisChatMessageHistory } from "@langchain/community/stores/message/ioredis";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";

// Uses ioredis to facilitate Sentinel Connections see their docs for details on setting up more complex Sentinels: https://github.com/redis/ioredis#sentinel
const client = new Redis({
  sentinels: [
    { host: "localhost", port: 26379 },
    { host: "localhost", port: 26380 },
  ],
  name: "mymaster", // Name of the Sentinel-monitored master group to connect to.
});

// Persist chat history in Redis: each session is keyed by a unique id and
// expires after `sessionTTL` seconds.
const memory = new BufferMemory({
  chatHistory: new RedisChatMessageHistory({
    sessionId: new Date().toISOString(),
    sessionTTL: 300,
    client,
  }),
});
const model = new ChatOpenAI({ temperature: 0.5 });
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
  res1: {
    text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
  }
}
*/
// Follow-up is answered from the Redis-backed conversation memory.
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
  res2: {
    text: "You said your name was Jim."
  }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/zep.ts | import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { ZepMemory } from "@langchain/community/memory/zep";
import { randomUUID } from "crypto";

const sessionId = randomUUID(); // This should be unique for each user or each user's session.
const zepURL = "http://localhost:8000";

// Zep-backed conversation memory, keyed by the session id above.
const memory = new ZepMemory({
  sessionId,
  baseURL: zepURL,
  // This is optional. If you've enabled JWT authentication on your Zep server, you can
  // pass it in here. See https://docs.getzep.com/deployment/auth
  apiKey: "change_this_key",
});

// Temperature 0 keeps the example output deterministic.
const model = new ChatOpenAI({
  model: "gpt-3.5-turbo",
  temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
console.log("Memory Keys:", memory.memoryKeys);
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
  res1: {
    text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
  }
}
*/
// Follow-up is answered from the Zep-backed conversation memory.
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
  res2: {
    text: "You said your name was Jim."
  }
}
*/
console.log("Session ID: ", sessionId);
console.log("Memory: ", await memory.loadMemoryVariables({}));
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/firestore.ts | import { BufferMemory } from "langchain/memory";
import { FirestoreChatMessageHistory } from "@langchain/community/stores/message/firestore";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import admin from "firebase-admin";

// Persist chat history in Firestore under the "langchain/lc-example" path.
// Replace the placeholder service-account credentials before running.
const memory = new BufferMemory({
  chatHistory: new FirestoreChatMessageHistory({
    collections: ["langchain"],
    docs: ["lc-example"],
    sessionId: "lc-example-id",
    userId: "a@example.com",
    config: {
      projectId: "YOUR-PROJECT-ID",
      credential: admin.credential.cert({
        projectId: "YOUR-PROJECT-ID",
        privateKey:
          "-----BEGIN PRIVATE KEY-----\nnCHANGE-ME\n-----END PRIVATE KEY-----\n",
        clientEmail: "CHANGE-ME@CHANGE-ME-TOO.iam.gserviceaccount.com",
      }),
    },
  }),
});
const model = new ChatOpenAI();
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{ res1: { text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?" } }
*/
// Follow-up is answered from the Firestore-backed conversation memory.
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{ res2: { text: "You said your name was Jim." } }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/getting_started.ts | /* eslint-disable import/first */
/* eslint-disable import/no-duplicates */
import { BufferMemory } from "langchain/memory";
import { HumanMessage, AIMessage } from "@langchain/core/messages";

// Basic usage: BufferMemory records messages and returns them (by default)
// under the "history" key as a single formatted string.
const memory = new BufferMemory();
await memory.chatHistory.addMessage(new HumanMessage("Hi!"));
await memory.chatHistory.addMessage(new AIMessage("What's up?"));
console.log(await memory.loadMemoryVariables({}));

// `memoryKey` renames the variable under which the history is returned.
const memory2 = new BufferMemory({
  memoryKey: "chat_history",
});
await memory2.chatHistory.addMessage(new HumanMessage("Hi!"));
await memory2.chatHistory.addMessage(new AIMessage("What's up?"));
console.log(await memory2.loadMemoryVariables({}));

// `returnMessages: true` returns message objects instead of a formatted
// string — the form chat models expect.
const messageMemory = new BufferMemory({
  returnMessages: true,
});
await messageMemory.chatHistory.addMessage(new HumanMessage("Hi!"));
await messageMemory.chatHistory.addMessage(new AIMessage("What's up?"));
console.log(await messageMemory.loadMemoryVariables({}));

import { OpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { LLMChain } from "langchain/chains";

const llm = new OpenAI({ temperature: 0 });
// Notice that a "chat_history" variable is present in the prompt template
const template = `You are a nice chatbot having a conversation with a human.
Previous conversation:
{chat_history}
New human question: {question}
Response:`;
const prompt = PromptTemplate.fromTemplate(template);
// Notice that we need to align the `memoryKey` with the variable in the prompt
const stringPromptMemory = new BufferMemory({ memoryKey: "chat_history" });
const conversationChain = new LLMChain({
  llm,
  prompt,
  verbose: true,
  memory: stringPromptMemory,
});
console.log(await conversationChain.invoke({ question: "What is your name?" }));
console.log(
  await conversationChain.invoke({ question: "What did I just ask you?" })
);

// Chat-model variant: history is injected as messages via a
// MessagesPlaceholder, so the memory must use `returnMessages: true`.
import { ChatOpenAI } from "@langchain/openai";
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";

const chatModel = new ChatOpenAI({ temperature: 0 });
const chatPrompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a nice chatbot having a conversation with a human."],
  new MessagesPlaceholder("chat_history"),
  ["human", "{question}"],
]);
const chatPromptMemory = new BufferMemory({
  memoryKey: "chat_history",
  returnMessages: true,
});
const chatConversationChain = new LLMChain({
  llm: chatModel,
  prompt: chatPrompt,
  verbose: true,
  memory: chatPromptMemory,
});
console.log(
  await chatConversationChain.invoke({ question: "What is your name?" })
);
console.log(
  await chatConversationChain.invoke({ question: "What did I just ask you?" })
);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/postgres.ts | import pg from "pg";
import { PostgresChatMessageHistory } from "@langchain/community/stores/message/postgres";
import { ChatOpenAI } from "@langchain/openai";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";

// Connection settings for the Postgres instance that stores chat history.
const poolConfig = {
  host: "127.0.0.1",
  port: 5432,
  user: "myuser",
  password: "ChangeMe",
  database: "api",
};
const pool = new pg.Pool(poolConfig);

const model = new ChatOpenAI();
const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are a helpful assistant. Answer all questions to the best of your ability.",
  ],
  new MessagesPlaceholder("chat_history"),
  ["human", "{input}"],
]);
const chain = prompt.pipe(model).pipe(new StringOutputParser());

// Wrap the chain so that, per `sessionId`, the history is loaded from and
// saved back to Postgres around every invocation.
const chainWithHistory = new RunnableWithMessageHistory({
  runnable: chain,
  inputMessagesKey: "input",
  historyMessagesKey: "chat_history",
  getMessageHistory: async (sessionId) => {
    const chatHistory = new PostgresChatMessageHistory({
      sessionId,
      pool,
      // Can also pass `poolConfig` to initialize the pool internally,
      // but easier to call `.end()` at the end later.
    });
    return chatHistory;
  },
});
const res1 = await chainWithHistory.invoke(
  {
    input: "Hi! I'm MJDeligan.",
  },
  { configurable: { sessionId: "langchain-test-session" } }
);
console.log(res1);
/*
  "Hello MJDeligan! It's nice to meet you. My name is AI. How may I assist you today?"
*/
// Same session id, so the follow-up sees the earlier messages.
const res2 = await chainWithHistory.invoke(
  { input: "What did I just say my name was?" },
  { configurable: { sessionId: "langchain-test-session" } }
);
console.log(res2);
/*
  "You said your name was MJDeligan."
*/
// If you provided a pool config you should close the created pool when you are done
await pool.end();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/motorhead.ts | import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { MotorheadMemory } from "@langchain/community/memory/motorhead_memory";

// Managed Example (visit https://getmetal.io to get your keys)
// const managedMemory = new MotorheadMemory({
//   memoryKey: "chat_history",
//   sessionId: "test",
//   apiKey: "MY_API_KEY",
//   clientId: "MY_CLIENT_ID",
// });

// Self Hosted Example
const memory = new MotorheadMemory({
  memoryKey: "chat_history",
  sessionId: "test",
  url: "localhost:8080", // Required for self hosted
});

// Temperature 0 keeps the example output deterministic.
const model = new ChatOpenAI({
  model: "gpt-3.5-turbo",
  temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
  res1: {
    text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
  }
}
*/
// Follow-up is answered from the Motorhead-backed session memory.
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
  res2: {
    text: "You said your name was Jim."
  }
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/cloudflare_d1.ts | import type { D1Database } from "@cloudflare/workers-types";
import { BufferMemory } from "langchain/memory";
import { CloudflareD1MessageHistory } from "@langchain/cloudflare";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatAnthropic } from "@langchain/anthropic";
export interface Env {
DB: D1Database;
ANTHROPIC_API_KEY: string;
}
export default {
async fetch(request: Request, env: Env): Promise<Response> {
try {
const { searchParams } = new URL(request.url);
const input = searchParams.get("input");
if (!input) {
throw new Error(`Missing "input" parameter`);
}
const memory = new BufferMemory({
returnMessages: true,
chatHistory: new CloudflareD1MessageHistory({
tableName: "stored_message",
sessionId: "example",
database: env.DB,
}),
});
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful chatbot"],
new MessagesPlaceholder("history"),
["human", "{input}"],
]);
const model = new ChatAnthropic({
apiKey: env.ANTHROPIC_API_KEY,
});
const chain = RunnableSequence.from([
{
input: (initialInput) => initialInput.input,
memory: () => memory.loadMemoryVariables({}),
},
{
input: (previousOutput) => previousOutput.input,
history: (previousOutput) => previousOutput.memory.history,
},
prompt,
model,
new StringOutputParser(),
]);
const chainInput = { input };
const res = await chain.invoke(chainInput);
await memory.saveContext(chainInput, {
output: res,
});
return new Response(JSON.stringify(res), {
headers: { "content-type": "application/json" },
});
} catch (err: any) {
console.log(err.message);
return new Response(err.message, { status: 500 });
}
},
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/buffer.ts | import { OpenAI } from "@langchain/openai";
import { BufferMemory } from "langchain/memory";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "@langchain/core/prompts";
const memory = new BufferMemory({ memoryKey: "chat_history" });
const model = new OpenAI({ temperature: 0.9 });
const prompt =
PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{chat_history}
Human: {input}
AI:`);
const chain = new LLMChain({ llm: model, prompt, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
const res2 = await chain.invoke({ input: "What's my name?" });
console.log({ res2 });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/vector_store.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { VectorStoreRetrieverMemory } from "langchain/memory";
import { LLMChain } from "langchain/chains";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { PromptTemplate } from "@langchain/core/prompts";
const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
const memory = new VectorStoreRetrieverMemory({
// 1 is how many documents to return, you might want to return more, eg. 4
vectorStoreRetriever: vectorStore.asRetriever(1),
memoryKey: "history",
});
// First let's save some information to memory, as it would happen when
// used inside a chain.
await memory.saveContext(
{ input: "My favorite food is pizza" },
{ output: "thats good to know" }
);
await memory.saveContext(
{ input: "My favorite sport is soccer" },
{ output: "..." }
);
await memory.saveContext({ input: "I don't the Celtics" }, { output: "ok" });
// Now let's use the memory to retrieve the information we saved.
console.log(
await memory.loadMemoryVariables({ prompt: "what sport should i watch?" })
);
/*
{ history: 'input: My favorite sport is soccer\noutput: ...' }
*/
// Now let's use it in a chain.
const model = new OpenAI({ temperature: 0.9 });
const prompt =
PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Relevant pieces of previous conversation:
{history}
(You do not need to use these pieces of information if not relevant)
Current conversation:
Human: {input}
AI:`);
const chain = new LLMChain({ llm: model, prompt, memory });
const res1 = await chain.invoke({ input: "Hi, my name is Perry, what's up?" });
console.log({ res1 });
/*
{
res1: {
text: " Hi Perry, I'm doing great! I'm currently exploring different topics related to artificial intelligence like natural language processing and machine learning. What about you? What have you been up to lately?"
}
}
*/
const res2 = await chain.invoke({ input: "what's my favorite sport?" });
console.log({ res2 });
/*
{ res2: { text: ' You said your favorite sport is soccer.' } }
*/
const res3 = await chain.invoke({ input: "what's my name?" });
console.log({ res3 });
/*
{ res3: { text: ' Your name is Perry.' } }
*/
// Sometimes we might want to save metadata along with the conversation snippets
const memoryWithMetadata = new VectorStoreRetrieverMemory({
vectorStoreRetriever: vectorStore.asRetriever(
1,
(doc) => doc.metadata?.userId === "1"
),
memoryKey: "history",
metadata: { userId: "1", groupId: "42" },
});
await memoryWithMetadata.saveContext(
{ input: "Community is my favorite TV Show" },
{ output: "6 seasons and a movie!" }
);
console.log(
await memoryWithMetadata.loadMemoryVariables({
prompt: "what show should i watch? ",
})
);
/*
{ history: 'input: Community is my favorite TV Show\noutput: 6 seasons and a movie!' }
*/
// If we have a retriever whose filter does not match our metadata, our previous messages won't appear
const memoryWithoutMatchingMetadata = new VectorStoreRetrieverMemory({
vectorStoreRetriever: vectorStore.asRetriever(
1,
(doc) => doc.metadata?.userId === "2"
),
memoryKey: "history",
});
// There are no messages saved for userId 2
console.log(
await memoryWithoutMatchingMetadata.loadMemoryVariables({
prompt: "what show should i watch? ",
})
);
/*
{ history: '' }
*/
// If we need the metadata to be dynamic, we can pass a function instead
const memoryWithMetadataFunction = new VectorStoreRetrieverMemory({
vectorStoreRetriever: vectorStore.asRetriever(1),
memoryKey: "history",
metadata: (inputValues, _outputValues) => ({
firstWord: inputValues?.input.split(" ")[0], // First word of the input
createdAt: new Date().toLocaleDateString(), // Date when the message was saved
userId: "1", // Hardcoded userId
}),
});
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/entity.ts | import { OpenAI } from "@langchain/openai";
import {
EntityMemory,
ENTITY_MEMORY_CONVERSATION_TEMPLATE,
} from "langchain/memory";
import { LLMChain } from "langchain/chains";
export const run = async () => {
const memory = new EntityMemory({
llm: new OpenAI({ temperature: 0 }),
chatHistoryKey: "history", // Default value
entitiesKey: "entities", // Default value
});
const model = new OpenAI({ temperature: 0.9 });
const chain = new LLMChain({
llm: model,
prompt: ENTITY_MEMORY_CONVERSATION_TEMPLATE, // Default prompt - must include the set chatHistoryKey and entitiesKey as input variables.
memory,
});
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({
res1,
memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }),
});
const res2 = await chain.invoke({
input: "I work in construction. What about you?",
});
console.log({
res2,
memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }),
});
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/zep_cloud.ts | import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { ZepCloudMemory } from "@langchain/community/memory/zep_cloud";
import { randomUUID } from "crypto";
const sessionId = randomUUID(); // This should be unique for each user or each user's session.
const memory = new ZepCloudMemory({
sessionId,
// Your Zep Cloud Project API key https://help.getzep.com/projects
apiKey: "<Zep Api Key>",
});
const model = new ChatOpenAI({
modelName: "gpt-3.5-turbo",
temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
console.log("Memory Keys:", memory.memoryKeys);
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res1: {
text: "You said your name was Jim."
}
}
*/
console.log("Session ID: ", sessionId);
console.log("Memory: ", await memory.loadMemoryVariables({}));
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/summary_buffer.ts | import { OpenAI, ChatOpenAI } from "@langchain/openai";
import { ConversationSummaryBufferMemory } from "langchain/memory";
import { ConversationChain } from "langchain/chains";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
} from "@langchain/core/prompts";
// summary buffer memory
const memory = new ConversationSummaryBufferMemory({
llm: new OpenAI({ model: "gpt-3.5-turbo-instruct", temperature: 0 }),
maxTokenLimit: 10,
});
await memory.saveContext({ input: "hi" }, { output: "whats up" });
await memory.saveContext({ input: "not much you" }, { output: "not much" });
const history = await memory.loadMemoryVariables({});
console.log({ history });
/*
{
history: {
history: 'System: \n' +
'The human greets the AI, to which the AI responds.\n' +
'Human: not much you\n' +
'AI: not much'
}
}
*/
// We can also get the history as a list of messages (this is useful if you are using this with a chat prompt).
const chatPromptMemory = new ConversationSummaryBufferMemory({
llm: new ChatOpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
maxTokenLimit: 10,
returnMessages: true,
});
await chatPromptMemory.saveContext({ input: "hi" }, { output: "whats up" });
await chatPromptMemory.saveContext(
{ input: "not much you" },
{ output: "not much" }
);
// We can also utilize the predict_new_summary method directly.
const messages = await chatPromptMemory.chatHistory.getMessages();
const previous_summary = "";
const predictSummary = await chatPromptMemory.predictNewSummary(
messages,
previous_summary
);
console.log(JSON.stringify(predictSummary));
// Using in a chain
// Let's walk through an example, again setting verbose to true so we can see the prompt.
const chatPrompt = ChatPromptTemplate.fromMessages([
SystemMessagePromptTemplate.fromTemplate(
"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."
),
new MessagesPlaceholder("history"),
HumanMessagePromptTemplate.fromTemplate("{input}"),
]);
const model = new ChatOpenAI({ temperature: 0.9, verbose: true });
const chain = new ConversationChain({
llm: model,
memory: chatPromptMemory,
prompt: chatPrompt,
});
const res1 = await chain.invoke({ input: "Hi, what's up?" });
console.log({ res1 });
/*
{
res1: 'Hello! I am an AI language model, always ready to have a conversation. How can I assist you today?'
}
*/
const res2 = await chain.invoke({
input: "Just working on writing some documentation!",
});
console.log({ res2 });
/*
{
res2: "That sounds productive! Documentation is an important aspect of many projects. Is there anything specific you need assistance with regarding your documentation? I'm here to help!"
}
*/
const res3 = await chain.invoke({
input: "For LangChain! Have you heard of it?",
});
console.log({ res3 });
/*
{
res3: 'Yes, I am familiar with LangChain! It is a blockchain-based language learning platform that aims to connect language learners with native speakers for real-time practice and feedback. It utilizes smart contracts to facilitate secure transactions and incentivize participation. Users can earn tokens by providing language learning services or consuming them for language lessons.'
}
*/
const res4 = await chain.invoke({
input:
"That's not the right one, although a lot of people confuse it for that!",
});
console.log({ res4 });
/*
{
res4: "I apologize for the confusion! Could you please provide some more information about the LangChain you're referring to? That way, I can better understand and assist you with writing documentation for it."
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/astradb.ts | import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatOpenAI } from "@langchain/openai";
import { AstraDBChatMessageHistory } from "@langchain/community/stores/message/astradb";
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant. Answer all questions to the best of your ability.",
],
new MessagesPlaceholder("chat_history"),
["human", "{input}"],
]);
const chain = prompt.pipe(model).pipe(new StringOutputParser());
const chainWithHistory = new RunnableWithMessageHistory({
runnable: chain,
inputMessagesKey: "input",
historyMessagesKey: "chat_history",
getMessageHistory: async (sessionId) => {
const chatHistory = await AstraDBChatMessageHistory.initialize({
token: process.env.ASTRA_DB_APPLICATION_TOKEN as string,
endpoint: process.env.ASTRA_DB_ENDPOINT as string,
namespace: process.env.ASTRA_DB_NAMESPACE,
collectionName: "YOUR_COLLECTION_NAME",
sessionId,
});
return chatHistory;
},
});
const res1 = await chainWithHistory.invoke(
{
input: "Hi! I'm Jim.",
},
{ configurable: { sessionId: "langchain-test-session" } }
);
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chainWithHistory.invoke(
{ input: "What did I just say my name was?" },
{ configurable: { sessionId: "langchain-test-session" } }
);
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/mongodb.ts | import { MongoClient, ObjectId } from "mongodb";
import { BufferMemory } from "langchain/memory";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { MongoDBChatMessageHistory } from "@langchain/mongodb";
const client = new MongoClient(process.env.MONGODB_ATLAS_URI || "", {
driverInfo: { name: "langchainjs" },
});
await client.connect();
const collection = client.db("langchain").collection("memory");
// generate a new sessionId string
const sessionId = new ObjectId().toString();
const memory = new BufferMemory({
chatHistory: new MongoDBChatMessageHistory({
collection,
sessionId,
}),
});
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res1: {
text: "You said your name was Jim."
}
}
*/
// See the chat history in the MongoDb
console.log(await memory.chatHistory.getMessages());
// clear chat history
await memory.chatHistory.clear();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/redis-advanced.ts | import { Redis } from "ioredis";
import { BufferMemory } from "langchain/memory";
import { RedisChatMessageHistory } from "@langchain/community/stores/message/ioredis";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
const client = new Redis("redis://localhost:6379");
const memory = new BufferMemory({
chatHistory: new RedisChatMessageHistory({
sessionId: new Date().toISOString(),
sessionTTL: 300,
client,
}),
});
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res1: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/planetscale.ts | import { BufferMemory } from "langchain/memory";
import { PlanetScaleChatMessageHistory } from "@langchain/community/stores/message/planetscale";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
const memory = new BufferMemory({
chatHistory: new PlanetScaleChatMessageHistory({
tableName: "stored_message",
sessionId: "lc-example",
config: {
url: "ADD_YOURS_HERE", // Override with your own database instance's URL
},
}),
});
const model = new ChatOpenAI();
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res1: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/momento.ts | import {
CacheClient,
Configurations,
CredentialProvider,
} from "@gomomento/sdk"; // `from "gomomento/sdk-web";` for browser/edge
import { BufferMemory } from "langchain/memory";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { MomentoChatMessageHistory } from "@langchain/community/stores/message/momento";
// See https://github.com/momentohq/client-sdk-javascript for connection options
const client = new CacheClient({
configuration: Configurations.Laptop.v1(),
credentialProvider: CredentialProvider.fromEnvironmentVariable({
environmentVariableName: "MOMENTO_API_KEY",
}),
defaultTtlSeconds: 60 * 60 * 24,
});
// Create a unique session ID
const sessionId = new Date().toISOString();
const cacheName = "langchain";
const memory = new BufferMemory({
chatHistory: await MomentoChatMessageHistory.fromProps({
client,
cacheName,
sessionId,
sessionTtl: 300,
}),
});
console.log(
`cacheName=${cacheName} and sessionId=${sessionId} . This will be used to store the chat history. You can inspect the values at your Momento console at https://console.gomomento.com.`
);
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res1: {
text: "You said your name was Jim."
}
}
*/
// See the chat history in the Momento
console.log(await memory.chatHistory.getMessages());
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/combined.ts | import { ChatOpenAI } from "@langchain/openai";
import {
BufferMemory,
CombinedMemory,
ConversationSummaryMemory,
} from "langchain/memory";
import { ConversationChain } from "langchain/chains";
import { PromptTemplate } from "@langchain/core/prompts";
// buffer memory
const bufferMemory = new BufferMemory({
memoryKey: "chat_history_lines",
inputKey: "input",
});
// summary memory
const summaryMemory = new ConversationSummaryMemory({
llm: new ChatOpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
inputKey: "input",
memoryKey: "conversation_summary",
});
//
const memory = new CombinedMemory({
memories: [bufferMemory, summaryMemory],
});
const _DEFAULT_TEMPLATE = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Summary of conversation:
{conversation_summary}
Current conversation:
{chat_history_lines}
Human: {input}
AI:`;
const PROMPT = new PromptTemplate({
inputVariables: ["input", "conversation_summary", "chat_history_lines"],
template: _DEFAULT_TEMPLATE,
});
const model = new ChatOpenAI({ temperature: 0.9, verbose: true });
const chain = new ConversationChain({ llm: model, memory, prompt: PROMPT });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
response: "Hello Jim! It's nice to meet you. How can I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "Can you tell me a joke?" });
console.log({ res2 });
/*
{
res2: {
response: 'Why did the scarecrow win an award? Because he was outstanding in his field!'
}
}
*/
const res3 = await chain.invoke({
input: "What's my name and what joke did you just tell?",
});
console.log({ res3 });
/*
{
res3: {
response: 'Your name is Jim. The joke I just told was about a scarecrow winning an award because he was outstanding in his field.'
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/firestore_nested.ts | import { BufferMemory } from "langchain/memory";
import { FirestoreChatMessageHistory } from "@langchain/community/stores/message/firestore";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import admin from "firebase-admin";
const memory = new BufferMemory({
chatHistory: new FirestoreChatMessageHistory({
collections: ["chats", "bots"],
docs: ["chat-id", "bot-id"],
sessionId: "user-id",
userId: "a@example.com",
config: {
projectId: "YOUR-PROJECT-ID",
credential: admin.credential.cert({
projectId: "YOUR-PROJECT-ID",
privateKey:
"-----BEGIN PRIVATE KEY-----\nnCHANGE-ME\n-----END PRIVATE KEY-----\n",
clientEmail: "CHANGE-ME@CHANGE-ME-TOO.iam.gserviceaccount.com",
}),
},
}),
});
const model = new ChatOpenAI();
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{ res1: { response: 'Hello Jim! How can I assist you today?' } }
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{ res2: { response: 'You just said that your name is Jim.' } }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/token_buffer.ts | import { OpenAI } from "@langchain/openai";
import { ConversationTokenBufferMemory } from "langchain/memory";
const model = new OpenAI({});
const memory = new ConversationTokenBufferMemory({
llm: model,
maxTokenLimit: 10,
});
await memory.saveContext({ input: "hi" }, { output: "whats up" });
await memory.saveContext({ input: "not much you" }, { output: "not much" });
const result1 = await memory.loadMemoryVariables({});
console.log(result1);
/*
{ history: 'Human: not much you\nAI: not much' }
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/azure_cosmosdb_nosql.ts | import { ChatOpenAI } from "@langchain/openai";
import { AzureCosmsosDBNoSQLChatMessageHistory } from "@langchain/azure-cosmosdb";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant. Answer all questions to the best of your ability.",
],
new MessagesPlaceholder("chat_history"),
["human", "{input}"],
]);
const chain = prompt.pipe(model).pipe(new StringOutputParser());
const chainWithHistory = new RunnableWithMessageHistory({
runnable: chain,
inputMessagesKey: "input",
historyMessagesKey: "chat_history",
getMessageHistory: async (sessionId) => {
const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory({
sessionId,
userId: "user-id",
databaseName: "DATABASE_NAME",
containerName: "CONTAINER_NAME",
});
return chatHistory;
},
});
const res1 = await chainWithHistory.invoke(
{ input: "Hi! I'm Jim." },
{ configurable: { sessionId: "langchain-test-session" } }
);
console.log({ res1 });
/*
{ res1: 'Hi Jim! How can I assist you today?' }
*/
const res2 = await chainWithHistory.invoke(
{ input: "What did I just say my name was?" },
{ configurable: { sessionId: "langchain-test-session" } }
);
console.log({ res2 });
/*
{ res2: { response: 'You said your name was Jim.' }
*/
// Give this session a title
const chatHistory = (await chainWithHistory.getMessageHistory(
"langchain-test-session"
)) as AzureCosmsosDBNoSQLChatMessageHistory;
await chatHistory.setContext({ title: "Introducing Jim" });
// List all session for the user
const sessions = await chatHistory.getAllSessions();
console.log(sessions);
/*
[
{ sessionId: 'langchain-test-session', context: { title: "Introducing Jim" } }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/upstash_redis_advanced.ts | import { Redis } from "@upstash/redis";
import { BufferMemory } from "langchain/memory";
import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
// Create your own Redis client
const client = new Redis({
url: "https://ADD_YOURS_HERE.upstash.io",
token: "********",
});
const memory = new BufferMemory({
chatHistory: new UpstashRedisChatMessageHistory({
sessionId: new Date().toISOString(),
sessionTTL: 300,
client, // You can reuse your existing Redis client
}),
});
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res1: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/summary_chat.ts | import { ChatOpenAI } from "@langchain/openai";
import { ConversationSummaryMemory } from "langchain/memory";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "@langchain/core/prompts";
export const run = async () => {
const memory = new ConversationSummaryMemory({
memoryKey: "chat_history",
llm: new ChatOpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
});
const model = new ChatOpenAI();
const prompt =
PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{chat_history}
Human: {input}
AI:`);
const chain = new LLMChain({ llm: model, prompt, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1, memory: await memory.loadMemoryVariables({}) });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
},
memory: {
chat_history: 'Jim introduces himself to the AI and the AI greets him and offers assistance.'
}
}
*/
const res2 = await chain.invoke({ input: "What's my name?" });
console.log({ res2, memory: await memory.loadMemoryVariables({}) });
/*
{
res2: {
text: "Your name is Jim. It's nice to meet you, Jim. How can I assist you today?"
},
memory: {
chat_history: 'Jim introduces himself to the AI and the AI greets him and offers assistance. The AI addresses Jim by name and asks how it can assist him.'
}
}
*/
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/file.ts | import { ChatOpenAI } from "@langchain/openai";
import { FileSystemChatMessageHistory } from "@langchain/community/stores/message/file_system";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const prompt = ChatPromptTemplate.fromMessages([
[
"system",
"You are a helpful assistant. Answer all questions to the best of your ability.",
],
new MessagesPlaceholder("chat_history"),
["human", "{input}"],
]);
const chain = prompt.pipe(model).pipe(new StringOutputParser());
const chainWithHistory = new RunnableWithMessageHistory({
runnable: chain,
inputMessagesKey: "input",
historyMessagesKey: "chat_history",
getMessageHistory: async (sessionId) => {
const chatHistory = new FileSystemChatMessageHistory({
sessionId,
userId: "user-id",
});
return chatHistory;
},
});
const res1 = await chainWithHistory.invoke(
{ input: "Hi! I'm Jim." },
{ configurable: { sessionId: "langchain-test-session" } }
);
console.log({ res1 });
/*
{ res1: 'Hi Jim! How can I assist you today?' }
*/
const res2 = await chainWithHistory.invoke(
{ input: "What did I just say my name was?" },
{ configurable: { sessionId: "langchain-test-session" } }
);
console.log({ res2 });
/*
{ res2: { response: 'You said your name was Jim.' }
*/
// Give this session a title
const chatHistory = (await chainWithHistory.getMessageHistory(
"langchain-test-session"
)) as FileSystemChatMessageHistory;
await chatHistory.setContext({ title: "Introducing Jim" });
// List all session for the user
const sessions = await chatHistory.getAllSessions();
console.log(sessions);
/*
[
{ sessionId: 'langchain-test-session', context: { title: "Introducing Jim" } }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/xata.ts | import { BufferMemory } from "langchain/memory";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { XataChatMessageHistory } from "@langchain/community/stores/message/xata";
import { BaseClient } from "@xata.io/client";
// if you use the generated client, you don't need this function.
// Just import getXataClient from the generated xata.ts instead.
const getXataClient = () => {
if (!process.env.XATA_API_KEY) {
throw new Error("XATA_API_KEY not set");
}
if (!process.env.XATA_DB_URL) {
throw new Error("XATA_DB_URL not set");
}
const xata = new BaseClient({
databaseURL: process.env.XATA_DB_URL,
apiKey: process.env.XATA_API_KEY,
branch: process.env.XATA_BRANCH || "main",
});
return xata;
};
// Back the conversation memory with a Xata table; messages are stored under
// the given session id.
const memory = new BufferMemory({
chatHistory: new XataChatMessageHistory({
table: "messages",
sessionId: new Date().toISOString(), // Or some other unique identifier for the conversation
client: getXataClient(),
apiKey: process.env.XATA_API_KEY, // The API key is needed for creating the table.
}),
});
const model = new ChatOpenAI();
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/planetscale_advanced.ts | import { BufferMemory } from "langchain/memory";
import { PlanetScaleChatMessageHistory } from "@langchain/community/stores/message/planetscale";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { Client } from "@planetscale/database";
// Create your own Planetscale database client
const client = new Client({
url: "ADD_YOURS_HERE", // Override with your own database instance's URL
});
// Reuse the existing client for the chat message store so the example does
// not open a second connection.
const memory = new BufferMemory({
chatHistory: new PlanetScaleChatMessageHistory({
tableName: "stored_message",
sessionId: "lc-example",
client, // You can reuse your existing database client
}),
});
const model = new ChatOpenAI();
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/upstash_redis.ts | import { BufferMemory } from "langchain/memory";
import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
// Store the conversation in Upstash Redis; entries expire after the TTL.
const memory = new BufferMemory({
chatHistory: new UpstashRedisChatMessageHistory({
sessionId: new Date().toISOString(), // Or some other unique identifier for the conversation
sessionTTL: 300, // 5 minutes, omit this parameter to make sessions never expire
config: {
url: "https://ADD_YOURS_HERE.upstash.io", // Override with your own instance's URL
token: "********", // Override with your own instance's token
},
}),
});
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/chat_messages.ts | import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { ChatMessageHistory } from "@langchain/community/stores/message/in_memory";
// In-memory message store: nothing is persisted beyond this process.
const history = new ChatMessageHistory();
await history.addMessage(new HumanMessage("hi"));
await history.addMessage(new AIMessage("what is up?"));
// Messages come back in insertion order as typed message instances.
console.log(await history.getMessages());
/*
[
HumanMessage {
content: 'hi',
additional_kwargs: {}
},
AIMessage {
content: 'what is up?',
additional_kwargs: {}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/entity_memory_inspection.ts | import { OpenAI } from "@langchain/openai";
import {
EntityMemory,
ENTITY_MEMORY_CONVERSATION_TEMPLATE,
} from "langchain/memory";
import { LLMChain } from "langchain/chains";
// EntityMemory uses its own LLM call to extract and summarize entities
// (people, places, organizations, ...) mentioned in the conversation.
const memory = new EntityMemory({
llm: new OpenAI({ temperature: 0 }),
});
const model = new OpenAI({ temperature: 0.9 });
const chain = new LLMChain({
llm: model,
prompt: ENTITY_MEMORY_CONVERSATION_TEMPLATE,
memory,
});
await chain.invoke({ input: "Hi! I'm Jim." });
await chain.invoke({
input: "I work in sales. What about you?",
});
const res = await chain.invoke({
input: "My office is the Utica branch of Dunder Mifflin. What about you?",
});
// Inspect the per-entity summaries the memory has accumulated so far.
console.log({
res,
memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }),
});
/*
{
res: "As an AI language model, I don't have an office in the traditional sense. I exist entirely in digital space and am here to assist you with any questions or tasks you may have. Is there anything specific you need help with regarding your work at the Utica branch of Dunder Mifflin?",
memory: {
entities: {
Jim: 'Jim is a human named Jim who works in sales.',
Utica: 'Utica is the location of the branch of Dunder Mifflin where Jim works.',
'Dunder Mifflin': 'Dunder Mifflin has a branch in Utica.'
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/dynamodb-store.ts | import { BufferMemory } from "langchain/memory";
import { DynamoDBChatMessageHistory } from "@langchain/community/stores/message/dynamodb";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
// Persist the conversation in a DynamoDB table keyed by session id.
const memory = new BufferMemory({
chatHistory: new DynamoDBChatMessageHistory({
tableName: "langchain",
partitionKey: "id",
sessionId: new Date().toISOString(), // Or some other unique identifier for the conversation
config: {
region: "us-east-2",
// Placeholder credentials — replace with your own (or rely on the
// default AWS credential provider chain instead).
credentials: {
accessKeyId: "<your AWS access key id>",
secretAccessKey: "<your AWS secret access key>",
},
},
}),
});
const model = new ChatOpenAI();
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/summary_llm.ts | import { OpenAI } from "@langchain/openai";
import { ConversationSummaryMemory } from "langchain/memory";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "@langchain/core/prompts";
// Demonstrates ConversationSummaryMemory: instead of replaying raw messages,
// a summarizer LLM keeps a rolling summary that is injected as {chat_history}.
export const run = async () => {
const memory = new ConversationSummaryMemory({
memoryKey: "chat_history",
// NOTE(review): "gpt-3.5-turbo" is a chat model; confirm the completions-style
// OpenAI class (rather than ChatOpenAI) is intended for the summarizer here.
llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
});
const model = new OpenAI({ temperature: 0.9 });
const prompt =
PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{chat_history}
Human: {input}
AI:`);
const chain = new LLMChain({ llm: model, prompt, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1, memory: await memory.loadMemoryVariables({}) });
/*
{
res1: {
text: " Hi Jim, I'm AI! It's nice to meet you. I'm an AI programmed to provide information about the environment around me. Do you have any specific questions about the area that I can answer for you?"
},
memory: {
chat_history: 'Jim introduces himself to the AI and the AI responds, introducing itself as a program designed to provide information about the environment. The AI offers to answer any specific questions Jim may have about the area.'
}
}
*/
const res2 = await chain.invoke({ input: "What's my name?" });
console.log({ res2, memory: await memory.loadMemoryVariables({}) });
/*
{
res2: { text: ' You told me your name is Jim.' },
memory: {
chat_history: 'Jim introduces himself to the AI and the AI responds, introducing itself as a program designed to provide information about the environment. The AI offers to answer any specific questions Jim may have about the area. Jim asks the AI what his name is, and the AI responds that Jim had previously told it his name.'
}
}
*/
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/redis.ts | import { BufferMemory } from "langchain/memory";
import { RedisChatMessageHistory } from "@langchain/redis";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
// Persist the conversation in Redis; entries expire after the TTL.
const memory = new BufferMemory({
chatHistory: new RedisChatMessageHistory({
sessionId: new Date().toISOString(), // Or some other unique identifier for the conversation
sessionTTL: 300, // 5 minutes, omit this parameter to make sessions never expire
}),
});
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/buffer_window.ts | import { OpenAI } from "@langchain/openai";
import { BufferWindowMemory } from "langchain/memory";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "@langchain/core/prompts";
/**
 * Demonstrates windowed conversation memory: with k = 1 only the single most
 * recent exchange is injected into the prompt as {chat_history}, so earlier
 * turns fall out of context.
 */
export const run = async () => {
  // Keep just the last message pair in memory.
  const memory = new BufferWindowMemory({ memoryKey: "chat_history", k: 1 });
  const prompt = PromptTemplate.fromTemplate(
    `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{chat_history}
Human: {input}
AI:`
  );
  const chain = new LLMChain({
    llm: new OpenAI({ temperature: 0.9 }),
    prompt,
    memory,
  });
  const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
  console.log({ res1 });
  const res2 = await chain.invoke({ input: "What's my name?" });
  console.log({ res2 });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/cassandra-store.ts | import { BufferMemory } from "langchain/memory";
import { CassandraChatMessageHistory } from "@langchain/community/stores/message/cassandra";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
// The example below uses Astra DB, but you can use any Cassandra connection
const configConnection = {
serviceProviderArgs: {
astra: {
token: "<your Astra Token>" as string,
endpoint: "<your Astra Endpoint>" as string,
},
},
};
// Persist the conversation in a Cassandra table within the given keyspace.
const memory = new BufferMemory({
chatHistory: new CassandraChatMessageHistory({
...configConnection,
keyspace: "langchain",
table: "message_history",
sessionId: "<some unique session identifier>",
}),
});
const model = new ChatOpenAI();
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jonathan." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jonathan! How can I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jonathan."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/memory/xata-advanced.ts | import { BufferMemory } from "langchain/memory";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { XataChatMessageHistory } from "@langchain/community/stores/message/xata";
import { BaseClient } from "@xata.io/client";
// Before running this example, see the docs at
// https://js.langchain.com/docs/modules/memory/integrations/xata
// if you use the generated client, you don't need this function.
// Just import getXataClient from the generated xata.ts instead.
// Build a Xata client from environment configuration. If you use the
// generated client, import getXataClient from the generated xata.ts instead.
const getXataClient = () => {
  const apiKey = process.env.XATA_API_KEY;
  if (!apiKey) {
    throw new Error("XATA_API_KEY not set");
  }
  const databaseURL = process.env.XATA_DB_URL;
  if (!databaseURL) {
    throw new Error("XATA_DB_URL not set");
  }
  // Default to the "main" branch unless one is configured.
  return new BaseClient({
    databaseURL,
    apiKey,
    branch: process.env.XATA_BRANCH || "main",
  });
};
// Back the conversation memory with an existing Xata table.
const memory = new BufferMemory({
chatHistory: new XataChatMessageHistory({
table: "messages",
sessionId: new Date().toISOString(), // Or some other unique identifier for the conversation
client: getXataClient(),
createTable: false, // Explicitly set to false if the table is already created
}),
});
const model = new ChatOpenAI();
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({ input: "What did I just say my name was?" });
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jim."
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/memory | lc_public_repos/langchainjs/examples/src/memory/convex/convex.ts | "use node";
import { v } from "convex/values";
import { BufferMemory } from "langchain/memory";
import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { ConvexChatMessageHistory } from "@langchain/community/stores/message/convex";
import { action } from "./_generated/server.js";
// Convex action demonstrating conversation memory persisted in Convex,
// scoped to the caller-supplied session id.
export const ask = action({
args: { sessionId: v.string() },
handler: async (ctx, args) => {
// pass in a sessionId string
const { sessionId } = args;
// Store the conversation in Convex, keyed by this session.
const memory = new BufferMemory({
chatHistory: new ConvexChatMessageHistory({ sessionId, ctx }),
});
const model = new ChatOpenAI({
model: "gpt-3.5-turbo",
temperature: 0,
});
const chain = new ConversationChain({ llm: model, memory });
const res1 = await chain.invoke({ input: "Hi! I'm Jim." });
console.log({ res1 });
/*
{
res1: {
text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
}
}
*/
const res2 = await chain.invoke({
input: "What did I just say my name was?",
});
console.log({ res2 });
/*
{
res2: {
text: "You said your name was Jim."
}
}
*/
// See the chat history in the Convex database
console.log(await memory.chatHistory.getMessages());
// clear chat history
await memory.chatHistory.clear();
},
});
|
0 | lc_public_repos/langchainjs/examples/src/memory/convex | lc_public_repos/langchainjs/examples/src/memory/convex/_generated/server.d.ts | /* eslint-disable */
/**
* Generated utilities for implementing server-side Convex query and mutation functions.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import {
ActionBuilder,
HttpActionBuilder,
MutationBuilder,
QueryBuilder,
GenericActionCtx,
GenericMutationCtx,
GenericQueryCtx,
GenericDatabaseReader,
GenericDatabaseWriter,
} from "convex/server";
import type { DataModel } from "./dataModel.js";
/**
* Define a query in this Convex app's public API.
*
* This function will be allowed to read your Convex database and will be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export declare const query: QueryBuilder<DataModel, "public">;
/**
* Define a query that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to read from your Convex database. It will not be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export declare const internalQuery: QueryBuilder<DataModel, "internal">;
/**
* Define a mutation in this Convex app's public API.
*
* This function will be allowed to modify your Convex database and will be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export declare const mutation: MutationBuilder<DataModel, "public">;
/**
* Define a mutation that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to modify your Convex database. It will not be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export declare const internalMutation: MutationBuilder<DataModel, "internal">;
/**
* Define an action in this Convex app's public API.
*
* An action is a function which can execute any JavaScript code, including non-deterministic
* code and code with side-effects, like calling third-party services.
* They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
* They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
*
* @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/
export declare const action: ActionBuilder<DataModel, "public">;
/**
* Define an action that is only accessible from other Convex functions (but not from the client).
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/
export declare const internalAction: ActionBuilder<DataModel, "internal">;
/**
* Define an HTTP action.
*
* This function will be used to respond to HTTP requests received by a Convex
* deployment if the requests matches the path and method where this action
* is routed. Be sure to route your action in `convex/http.js`.
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Import this function from `convex/http.js` and route it to hook it up.
*/
export declare const httpAction: HttpActionBuilder;
/**
* A set of services for use within Convex query functions.
*
* The query context is passed as the first argument to any Convex query
* function run on the server.
*
* This differs from the {@link MutationCtx} because all of the services are
* read-only.
*/
export type QueryCtx = GenericQueryCtx<DataModel>;
/**
* A set of services for use within Convex mutation functions.
*
* The mutation context is passed as the first argument to any Convex mutation
* function run on the server.
*/
export type MutationCtx = GenericMutationCtx<DataModel>;
/**
* A set of services for use within Convex action functions.
*
* The action context is passed as the first argument to any Convex action
* function run on the server.
*/
export type ActionCtx = GenericActionCtx<DataModel>;
/**
* An interface to read from the database within Convex query functions.
*
* The two entry points are {@link DatabaseReader.get}, which fetches a single
* document by its {@link Id}, or {@link DatabaseReader.query}, which starts
* building a query.
*/
export type DatabaseReader = GenericDatabaseReader<DataModel>;
/**
* An interface to read from and write to the database within Convex mutation
* functions.
*
* Convex guarantees that all writes within a single mutation are
* executed atomically, so you never have to worry about partial writes leaving
* your data in an inconsistent state. See [the Convex Guide](https://docs.convex.dev/understanding/convex-fundamentals/functions#atomicity-and-optimistic-concurrency-control)
* for the guarantees Convex provides your functions.
*/
export type DatabaseWriter = GenericDatabaseWriter<DataModel>;
|
0 | lc_public_repos/langchainjs/examples/src/memory/convex | lc_public_repos/langchainjs/examples/src/memory/convex/_generated/api.js | /* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import { anyApi } from "convex/server";
/**
* A utility for referencing Convex functions in your app's API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
export const api = anyApi;
export const internal = anyApi;
|
0 | lc_public_repos/langchainjs/examples/src/memory/convex | lc_public_repos/langchainjs/examples/src/memory/convex/_generated/server.js | /* eslint-disable */
/**
* Generated utilities for implementing server-side Convex query and mutation functions.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import {
actionGeneric,
httpActionGeneric,
queryGeneric,
mutationGeneric,
internalActionGeneric,
internalMutationGeneric,
internalQueryGeneric,
} from "convex/server";
/**
* Define a query in this Convex app's public API.
*
* This function will be allowed to read your Convex database and will be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export const query = queryGeneric;
/**
* Define a query that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to read from your Convex database. It will not be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export const internalQuery = internalQueryGeneric;
/**
* Define a mutation in this Convex app's public API.
*
* This function will be allowed to modify your Convex database and will be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export const mutation = mutationGeneric;
/**
* Define a mutation that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to modify your Convex database. It will not be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export const internalMutation = internalMutationGeneric;
/**
* Define an action in this Convex app's public API.
*
* An action is a function which can execute any JavaScript code, including non-deterministic
* code and code with side-effects, like calling third-party services.
* They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
* They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
*
* @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/
export const action = actionGeneric;
/**
* Define an action that is only accessible from other Convex functions (but not from the client).
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/
export const internalAction = internalActionGeneric;
/**
* Define a Convex HTTP action.
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument, and a `Request` object
* as its second.
* @returns The wrapped endpoint function. Route a URL path to this function in `convex/http.js`.
*/
export const httpAction = httpActionGeneric;
|
0 | lc_public_repos/langchainjs/examples/src/memory/convex | lc_public_repos/langchainjs/examples/src/memory/convex/_generated/dataModel.d.ts | /* eslint-disable */
/**
* Generated data model types.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import { AnyDataModel } from "convex/server";
import type { GenericId } from "convex/values";
/**
* No `schema.ts` file found!
*
* This generated code has permissive types like `Doc = any` because
* Convex doesn't know your schema. If you'd like more type safety, see
* https://docs.convex.dev/using/schemas for instructions on how to add a
* schema file.
*
* After you change a schema, rerun codegen with `npx convex dev`.
*/
/**
* The names of all of your Convex tables.
*/
export type TableNames = string;
/**
* The type of a document stored in Convex.
*/
export type Doc = any;
/**
* An identifier for a document in Convex.
*
* Convex documents are uniquely identified by their `Id`, which is accessible
* on the `_id` field. To learn more, see [Document IDs](https://docs.convex.dev/using/document-ids).
*
* Documents can be loaded using `db.get(id)` in query and mutation functions.
*
* IDs are just strings at runtime, but this type can be used to distinguish them from other
* strings when type checking.
*/
export type Id<TableName extends TableNames = TableNames> =
GenericId<TableName>;
/**
* A type describing your Convex data model.
*
* This type includes information about what tables you have, the type of
* documents stored in those tables, and the indexes defined on them.
*
* This type is used to parameterize methods like `queryGeneric` and
* `mutationGeneric` to make them type-safe.
*/
export type DataModel = AnyDataModel;
|
0 | lc_public_repos/langchainjs/examples/src/memory/convex | lc_public_repos/langchainjs/examples/src/memory/convex/_generated/api.d.ts | /* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import type {
ApiFromModules,
FilterApi,
FunctionReference,
} from "convex/server";
/**
* A utility for referencing Convex functions in your app's API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
declare const fullApi: ApiFromModules<{}>;
export declare const api: FilterApi<
typeof fullApi,
FunctionReference<any, "public">
>;
export declare const internal: FilterApi<
typeof fullApi,
FunctionReference<any, "internal">
>;
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"outDir": "../dist",
"rootDir": "./src",
"target": "ES2021",
"lib": [
"ES2021",
"ES2022.Object",
"DOM"
],
"module": "ES2020",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true
},
"include": [
"src/**/*"
],
"exclude": [
"node_modules",
"dist",
"docs"
]
}
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/LICENSE | The MIT License
Copyright (c) Harrison Chase
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. |
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/jest.config.cjs | /** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: "ts-jest/presets/default-esm",
// Custom environment that restores the host Float32Array (see jest.env.cjs).
testEnvironment: "./jest.env.cjs",
modulePathIgnorePatterns: ["dist/", "docs/"],
// Strip the ".js" suffix from relative ESM-style imports so the test runner
// can resolve the underlying ".ts" sources.
moduleNameMapper: {
"^(\\.{1,2}/.*)\\.js$": "$1",
},
// Transpile TS/TSX with SWC for speed (type checking happens elsewhere).
transform: {
'^.+\\.tsx?$': ['@swc/jest'],
},
transformIgnorePatterns: [
"/node_modules/",
"\\.pnp\\.[^\\/]+$",
"./scripts/jest-setup-after-env.js",
],
setupFiles: ["dotenv/config"], // Load .env before any test code runs.
setupFilesAfterEnv: ["./scripts/jest-setup-after-env.js"],
testTimeout: 20_000,
collectCoverageFrom: ["src/**/*.ts"]
};
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/babel.config.cjs | // babel.config.js
module.exports = {
// Target the current Node version so Babel only transpiles what this runtime lacks.
presets: [["@babel/preset-env", { targets: { node: true } }]],
};
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/jest.env.cjs | const { TestEnvironment } = require("jest-environment-node");
// Jest node environment that shares the host realm's Float32Array with the
// test sandbox.
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
constructor(config, context) {
// Make `instanceof Float32Array` return true in tests for values created
// outside the jest sandbox,
// to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
super(config, context);
this.global.Float32Array = Float32Array;
}
}
module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/README.md | # 🦜️🔗 LangChain.js
⚡ Building applications with LLMs through composability ⚡
[](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml)  [](https://opensource.org/licenses/MIT) [](https://twitter.com/langchainai) [](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchainjs)
[<img src="https://github.com/codespaces/badge.svg" title="Open in Github Codespace" width="150" height="20">](https://codespaces.new/langchain-ai/langchainjs)
Looking for the Python version? Check out [LangChain](https://github.com/langchain-ai/langchain).
To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
## ⚡️ Quick Install
You can use npm, yarn, or pnpm to install LangChain.js
`npm install -S langchain` or `yarn add langchain` or `pnpm add langchain`
## 🌐 Supported Environments
LangChain is written in TypeScript and can be used in:
- Node.js (ESM and CommonJS) - 18.x, 19.x, 20.x
- Cloudflare Workers
- Vercel / Next.js (Browser, Serverless and Edge functions)
- Supabase Edge Functions
- Browser
- Deno
## 🤔 What is LangChain?
**LangChain** is a framework for developing applications powered by language models. It enables applications that:
- **Are context-aware**: connect a language model to sources of context (prompt instructions, few shot examples, content to ground its response in, etc.)
- **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.)
This framework consists of several parts.
- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://js.langchain.com/docs/concepts/lcel), [components](https://js.langchain.com/docs/concepts), and [third-party integrations](https://js.langchain.com/docs/integrations/platforms/).
Use [LangGraph.js](https://js.langchain.com/docs/concepts/#langgraphjs) to build stateful agents with first-class streaming and human-in-the-loop support.
- **Productionization**: Use [LangSmith](https://docs.smith.langchain.com/) to inspect, monitor and evaluate your chains, so that you can continuously optimize and deploy with confidence.
- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/).
The LangChain libraries themselves are made up of several different packages.
- **[`@langchain/core`](https://github.com/langchain-ai/langchainjs/blob/main/langchain-core)**: Base abstractions and LangChain Expression Language.
- **[`@langchain/community`](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-community)**: Third party integrations.
- **[`langchain`](https://github.com/langchain-ai/langchainjs/blob/main/langchain)**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
- **[LangGraph.js](https://langchain-ai.github.io/langgraphjs/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it.
Integrations may also be split into their own compatible packages.

This library aims to assist in the development of those types of applications. Common examples of these applications include:
**❓Question Answering over specific documents**
- [Documentation](https://js.langchain.com/docs/tutorials/rag)
- End-to-end Example: [Doc-Chatbot](https://github.com/dissorial/doc-chatbot)
**💬 Chatbots**
- [Documentation](https://js.langchain.com/docs/how_to/#chatbots)
- End-to-end Example: [Chat-LangChain](https://github.com/langchain-ai/chat-langchainjs)
## 🚀 How does LangChain help?
The main value props of the LangChain libraries are:
1. **Components**: composable tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
2. **Off-the-shelf chains**: built-in assemblages of components for accomplishing higher-level tasks
Off-the-shelf chains make it easy to get started. Components make it easy to customize existing chains and build new ones.
Components fall into the following **modules**:
**📃 Model I/O:**
This includes prompt management, prompt optimization, a generic interface for all LLMs, and common utilities for working with LLMs.
**📚 Retrieval:**
Data Augmented Generation involves specific types of chains that first interact with an external data source to fetch data for use in the generation step. Examples include summarization of long pieces of text and question/answering over specific data sources.
**🤖 Agents:**
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://js.langchain.com/docs/concepts/#agents), along with [LangGraph.js](https://github.com/langchain-ai/langgraphjs/) for building custom agents.
## 📖 Documentation
Please see [here](https://js.langchain.com) for full documentation, which includes:
- [Getting started](https://js.langchain.com/docs/introduction): installation, setting up the environment, simple examples
- Overview of the [interfaces](https://js.langchain.com/docs/how_to/lcel_cheatsheet/), [modules](https://js.langchain.com/docs/concepts) and [integrations](https://js.langchain.com/docs/integrations/platforms/)
- [Tutorial](https://js.langchain.com/docs/tutorials/) walkthroughs
- [Reference](https://api.js.langchain.com): full API docs
## 💁 Contributing
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
For detailed information on how to contribute, see [here](https://github.com/langchain-ai/langchainjs/blob/main/CONTRIBUTING.md).
Please report any security issues or concerns following our [security guidelines](https://github.com/langchain-ai/langchainjs/blob/main/SECURITY.md).
## 🖇️ Relationship with Python LangChain
This is built to integrate as seamlessly as possible with the [LangChain Python package](https://github.com/langchain-ai/langchain). Specifically, this means all objects (prompts, LLMs, chains, etc) are designed in a way where they can be serialized and shared between languages.
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/.release-it.json | {
"github": {
"release": true,
"autoGenerate": true,
"tokenRef": "GITHUB_TOKEN_RELEASE"
},
"npm": {
"versionArgs": [
"--workspaces-update=false"
]
}
}
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/.eslintrc.cjs | module.exports = {
extends: [
"airbnb-base",
"eslint:recommended",
"prettier",
"plugin:@typescript-eslint/recommended",
],
parserOptions: {
ecmaVersion: 12,
parser: "@typescript-eslint/parser",
project: "./tsconfig.json",
sourceType: "module",
},
plugins: ["@typescript-eslint", "no-instanceof", "eslint-plugin-jest"],
ignorePatterns: [
"src/util/@cfworker",
"src/util/fast-json-patch",
"src/util/js-sha1",
".eslintrc.cjs",
"scripts",
"node_modules",
"dist",
"dist-cjs",
"*.js",
"*.cjs",
"*.d.ts",
"import_map.ts",
"dynamic_import_map.ts",
],
rules: {
"no-process-env": 2,
"no-instanceof/no-instanceof": 2,
"@typescript-eslint/explicit-module-boundary-types": 0,
"@typescript-eslint/no-empty-function": 0,
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
"class-methods-use-this": 0,
"import/extensions": [2, "ignorePackages"],
"import/no-extraneous-dependencies": [
"error",
{ devDependencies: ["**/*.test.ts"] },
],
"import/no-unresolved": 0,
"import/prefer-default-export": 0,
"keyword-spacing": "error",
"max-classes-per-file": 0,
"max-len": 0,
"no-await-in-loop": 0,
"no-bitwise": 0,
"no-console": 0,
"no-restricted-syntax": 0,
"no-shadow": 0,
"no-continue": 0,
"no-void": 0,
"no-underscore-dangle": 0,
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-return-await": 0,
"no-plusplus": 0,
"consistent-return": 0,
"no-else-return": 0,
"func-names": 0,
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
'jest/no-focused-tests': 'error',
"arrow-body-style": 0,
},
overrides: [
{
files: ['**/*.test.ts'],
rules: {
'@typescript-eslint/no-unused-vars': 'off'
}
}
]
};
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
* @param {string} relativePath
* @returns {string}
*/
function abs(relativePath) {
return resolve(dirname(fileURLToPath(import.meta.url)), relativePath);
}
export const config = {
internals: [
/node\:/,
/js-tiktoken/,
/@langchain\/core/,
/langsmith/,
/@langchain\/community/,
"axios", // axios is a dependency of openai
"mysql2/promise",
"notion-to-md/build/utils/notion.js"
],
entrypoints: {
load: "load/index",
"load/serializable": "load/serializable",
// agents
agents: "agents/index",
"agents/load": "agents/load",
"agents/toolkits": "agents/toolkits/index",
"agents/toolkits/sql": "agents/toolkits/sql/index",
"agents/format_scratchpad": "agents/format_scratchpad/openai_functions",
"agents/format_scratchpad/openai_tools":
"agents/format_scratchpad/openai_tools",
"agents/format_scratchpad/log": "agents/format_scratchpad/log",
"agents/format_scratchpad/xml": "agents/format_scratchpad/xml",
"agents/format_scratchpad/log_to_message":
"agents/format_scratchpad/log_to_message",
"agents/react/output_parser": "agents/react/output_parser",
"agents/xml/output_parser": "agents/xml/output_parser",
"agents/openai/output_parser": "agents/openai/output_parser",
// tools
tools: "tools/index",
"tools/chain": "tools/chain",
"tools/render": "tools/render",
"tools/retriever": "tools/retriever",
"tools/sql": "tools/sql",
"tools/webbrowser": "tools/webbrowser",
// chains
chains: "chains/index",
"chains/combine_documents": "chains/combine_documents/index",
"chains/combine_documents/reduce": "chains/combine_documents/reduce",
"chains/history_aware_retriever": "chains/history_aware_retriever",
"chains/load": "chains/load",
"chains/openai_functions": "chains/openai_functions/index",
"chains/query_constructor": "chains/query_constructor/index",
"chains/query_constructor/ir": "chains/query_constructor/ir",
"chains/retrieval": "chains/retrieval",
"chains/sql_db": "chains/sql_db/index",
"chains/graph_qa/cypher": "chains/graph_qa/cypher",
// chat models
"chat_models/universal": "chat_models/universal",
// embeddings
"embeddings/cache_backed": "embeddings/cache_backed",
"embeddings/fake": "embeddings/fake",
// vectorstores
"vectorstores/memory": "vectorstores/memory",
// text_splitter
text_splitter: "text_splitter",
// memory
"memory": "memory/index",
"memory/chat_memory": "memory/chat_memory",
// document
document: "document",
// document_loaders
"document_loaders/base": "document_loaders/base",
"document_loaders/fs/buffer": "document_loaders/fs/buffer",
"document_loaders/fs/directory": "document_loaders/fs/directory",
"document_loaders/fs/json": "document_loaders/fs/json",
"document_loaders/fs/multi_file": "document_loaders/fs/multi_file",
"document_loaders/fs/text": "document_loaders/fs/text",
// document_transformers
"document_transformers/openai_functions":
"document_transformers/openai_functions",
// sql_db
sql_db: "sql_db",
// callbacks
callbacks: "callbacks/index",
// output_parsers
output_parsers: "output_parsers/index",
"output_parsers/expression": "output_parsers/expression",
// retrievers
"retrievers/contextual_compression": "retrievers/contextual_compression",
"retrievers/document_compressors": "retrievers/document_compressors/index",
"retrievers/ensemble": "retrievers/ensemble",
"retrievers/multi_query": "retrievers/multi_query",
"retrievers/multi_vector": "retrievers/multi_vector",
"retrievers/parent_document": "retrievers/parent_document",
"retrievers/time_weighted": "retrievers/time_weighted",
"retrievers/document_compressors/chain_extract":
"retrievers/document_compressors/chain_extract",
"retrievers/document_compressors/embeddings_filter":
"retrievers/document_compressors/embeddings_filter",
"retrievers/hyde": "retrievers/hyde",
"retrievers/score_threshold": "retrievers/score_threshold",
"retrievers/self_query": "retrievers/self_query/index",
"retrievers/self_query/functional": "retrievers/self_query/functional",
"retrievers/matryoshka_retriever": "retrievers/matryoshka_retriever",
// cache
"cache/file_system": "cache/file_system",
// stores
"stores/doc/base": "stores/doc/base",
"stores/doc/in_memory": "stores/doc/in_memory",
"stores/file/in_memory": "stores/file/in_memory",
"stores/file/node": "stores/file/node",
"stores/message/in_memory": "stores/message/in_memory",
// storage
"storage/encoder_backed": "storage/encoder_backed",
"storage/in_memory": "storage/in_memory",
"storage/file_system": "storage/file_system",
// hub
hub: "hub",
// utilities
"util/document": "util/document",
"util/math": "util/math",
"util/time": "util/time",
// experimental
"experimental/autogpt": "experimental/autogpt/index",
"experimental/openai_assistant": "experimental/openai_assistant/index",
"experimental/openai_files": "experimental/openai_files/index",
"experimental/babyagi": "experimental/babyagi/index",
"experimental/generative_agents": "experimental/generative_agents/index",
"experimental/plan_and_execute": "experimental/plan_and_execute/index",
"experimental/chains/violation_of_expectations":
"experimental/chains/violation_of_expectations/index",
"experimental/masking": "experimental/masking/index",
"experimental/prompts/custom_format": "experimental/prompts/custom_format",
"experimental/prompts/handlebars": "experimental/prompts/handlebars",
// evaluation
evaluation: "evaluation/index",
// smith (LangSmith Evaluation)
smith: "smith/index",
// runnables
"runnables/remote": "runnables/remote",
// indexes
indexes: "indexes/index",
"schema/query_constructor": "schema/query_constructor",
"schema/prompt_template": "schema/prompt_template",
},
deprecatedOmitFromImportMap: [
"document",
"load/serializable",
"runnables",
],
requiresOptionalDependency: [
"agents/load",
"agents/toolkits/sql",
"tools/sql",
"tools/webbrowser",
"chains/load",
"chains/sql_db",
"chains/graph_qa/cypher",
"chat_models/universal",
"llms/load",
"prompts/load",
"memory/zep",
"document_loaders/web/apify_dataset",
"document_loaders/web/assemblyai",
"document_loaders/web/azure_blob_storage_container",
"document_loaders/web/azure_blob_storage_file",
"document_loaders/web/browserbase",
"document_loaders/web/cheerio",
"document_loaders/web/puppeteer",
"document_loaders/web/playwright",
"document_loaders/web/college_confidential",
"document_loaders/web/gitbook",
"document_loaders/web/hn",
"document_loaders/web/imsdb",
"document_loaders/web/figma",
"document_loaders/web/firecrawl",
"document_loaders/web/github",
"document_loaders/web/pdf",
"document_loaders/web/notiondb",
"document_loaders/web/notionapi",
"document_loaders/web/recursive_url",
"document_loaders/web/s3",
"document_loaders/web/sitemap",
"document_loaders/web/sonix_audio",
"document_loaders/web/confluence",
"document_loaders/web/couchbase",
"document_loaders/web/youtube",
"document_loaders/fs/directory",
"document_loaders/fs/multi_file",
"document_loaders/fs/buffer",
"document_loaders/fs/chatgpt",
"document_loaders/fs/text",
"document_loaders/fs/json",
"document_loaders/fs/srt",
"document_loaders/fs/pdf",
"document_loaders/fs/docx",
"document_loaders/fs/epub",
"document_loaders/fs/csv",
"document_loaders/fs/notion",
"document_loaders/fs/obsidian",
"document_loaders/fs/unstructured",
"document_loaders/fs/openai_whisper_audio",
"document_loaders/fs/pptx",
"document_transformers/html_to_text",
"document_transformers/mozilla_readability",
"sql_db",
"retrievers/self_query",
"retrievers/self_query/functional",
"output_parsers/expression",
"chains/query_constructor",
"chains/query_constructor/ir",
"cache/file_system",
"stores/file/node",
"storage/file_system",
// Prevent export due to circular dependency with "load" entrypoint
"hub",
"experimental/prompts/handlebars",
],
extraImportMapEntries: [
{
modules: ["ChatOpenAI"],
alias: ["chat_models", "openai"],
path: "@langchain/openai",
},
{
modules: ["OpenAI"],
alias: ["llms", "openai"],
path: "@langchain/openai",
},
{
modules: ["OpenAIEmbeddings"],
alias: ["embeddings", "openai"],
path: "@langchain/openai",
},
{
modules: ["PromptTemplate"],
alias: ["prompts", "prompt"],
path: "@langchain/core/prompts",
},
{
modules: [
"AIMessage",
"AIMessageChunk",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"SystemMessage",
"SystemMessageChunk",
"ToolMessage",
"ToolMessageChunk",
],
alias: ["schema", "messages"],
path: "@langchain/core/messages",
},
{
modules: [
"AIMessage",
"AIMessageChunk",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"SystemMessage",
"SystemMessageChunk",
"ToolMessage",
"ToolMessageChunk",
],
alias: ["schema"],
path: "@langchain/core/messages",
},
{
modules: [
"AIMessagePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"SystemMessagePromptTemplate",
],
alias: ["prompts", "chat"],
path: "@langchain/core/prompts",
},
{
modules: [
"ImagePromptTemplate",
],
alias: ["prompts", "image"],
path: "@langchain/core/prompts",
},
{
modules: ["PipelinePromptTemplate"],
alias: ["prompts", "pipeline"],
path: "@langchain/core/prompts",
},
{
modules: ["StringPromptValue"],
alias: ["prompts", "base"],
path: "@langchain/core/prompt_values",
},
{
modules: [
"RouterRunnable",
"RunnableAssign",
"RunnableBinding",
"RunnableBranch",
"RunnableEach",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnablePick",
"RunnableRetry",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
],
alias: ["schema", "runnable"],
path: "@langchain/core/runnables",
},
{
modules: ["StringOutputParser"],
alias: ["schema", "output_parser"],
path: "@langchain/core/output_parsers",
},
{
modules: ["ChatGenerationChunk", "GenerationChunk"],
alias: ["schema", "output"],
path: "@langchain/core/outputs",
}
],
shouldTestExports: true,
tsConfigPath: resolve("./tsconfig.json"),
cjsSource: "./dist-cjs",
cjsDestination: "./dist",
abs,
};
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/.prettierignore | src/load/import_type.ts
src/load/import_map.ts
src/load/dynamic_import_map.ts
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/.env.example | COHERE_API_KEY=ADD_YOURS_HERE
GOOGLE_PALM_API_KEY=ADD_YOURS_HERE
GOOGLE_CALENDAR_PRIVATE_KEY=ADD_YOURS_HERE
GOOGLE_CALENDAR_CLIENT_EMAIL=ADD_YOURS_HERE
GOOGLE_CALENDAR_CALENDAR_ID=ADD_YOURS_HERE
HUGGINGFACEHUB_API_KEY=ADD_YOURS_HERE
OPENAI_API_KEY=ADD_YOURS_HERE
# If AZURE_OPENAI_API_KEY is set, OPENAI_API_KEY will be ignored.
AZURE_OPENAI_API_KEY=ADD_YOURS_HERE
AZURE_OPENAI_API_INSTANCE_NAME=ADD_YOURS_HERE
AZURE_OPENAI_API_DEPLOYMENT_NAME=ADD_YOURS_HERE
AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=ADD_YOURS_HERE
AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=ADD_YOURS_HERE
AZURE_OPENAI_API_VERSION=ADD_YOURS_HERE
AZURE_OPENAI_BASE_PATH=ADD_YOURS_HERE
AZURE_COSMOSDB_CONNECTION_STRING=ADD_YOURS_HERE
AZURE_AISEARCH_ENDPOINT=ADD_YOURS_HERE
AZURE_AISEARCH_KEY=ADD_YOURS_HERE
CONNERY_RUNNER_URL=ADD_YOURS_HERE
CONNERY_RUNNER_API_KEY=ADD_YOURS_HERE
CONVEX_URL=ADD_YOURS_HERE
ELASTIC_URL=http://127.0.0.1:9200
OPENSEARCH_URL=http://127.0.0.1:9200
PINECONE_API_KEY=ADD_YOURS_HERE
PINECONE_ENVIRONMENT=ADD_YOURS_HERE
PINECONE_INDEX=ADD_YOURS_HERE
SEARCHAPI_API_KEY=ADD_YOURS_HERE # https://www.searchapi.io/
SERPAPI_API_KEY=ADD_YOURS_HERE
SERPER_API_KEY=ADD_YOURS_HERE
SUPABASE_PRIVATE_KEY=ADD_YOURS_HERE
SUPABASE_URL=ADD_YOURS_HERE
ZAPIER_NLA_API_KEY=ADD_YOURS_HERE
ANTHROPIC_API_KEY=ADD_YOURS_HERE
REPLICATE_API_KEY=ADD_YOURS_HERE
MONGO_URI=ADD_YOURS_HERE
MONGODB_ATLAS_URI=ADD_YOURS_HERE
MILVUS_URL=ADD_YOURS_HERE
WEAVIATE_HOST=ADD_YOURS_HERE
WEAVIATE_SCHEME=ADD_YOURS_HERE
WEAVIATE_API_KEY=ADD_YOURS_HERE
VECTARA_CUSTOMER_ID=ADD_YOURS_HERE
VECTARA_CORPUS_ID=ADD_YOURS_HERE
VECTARA_API_KEY=ADD_YOURS_HERE
MYSCALE_HOST=ADD_YOURS_HERE
MYSCALE_PORT=ADD_YOURS_HERE
MYSCALE_USERNAME=ADD_YOURS_HERE
MYSCALE_PASSWORD=ADD_YOURS_HERE
CLICKHOUSE_HOST=ADD_YOURS_HERE
CLICKHOUSE_PORT=ADD_YOURS_HERE
CLICKHOUSE_USERNAME=ADD_YOURS_HERE
CLICKHOUSE_PASSWORD=ADD_YOURS_HERE
FIGMA_ACCESS_TOKEN=ADD_YOURS_HERE
REDIS_URL=ADD_YOURS_HERE
ROCKSET_API_KEY=ADD_YOURS_HERE
# defaults to "usw2a1" (oregon)
ROCKSET_REGION=ADD_YOURS_HERE
SINGLESTORE_HOST=ADD_YOURS_HERE
SINGLESTORE_PORT=ADD_YOURS_HERE
SINGLESTORE_USERNAME=ADD_YOURS_HERE
SINGLESTORE_PASSWORD=ADD_YOURS_HERE
SINGLESTORE_DATABASE=ADD_YOURS_HERE
UPSTASH_REDIS_REST_URL=https://ADD_YOURS_HERE.upstash.io
UPSTASH_REDIS_REST_TOKEN=ADD_YOURS_HERE
FIREWORKS_API_KEY=ADD_YOURS_HERE
# Either provide a DATABASE_URL for planetscale
PLANETSCALE_DATABASE_URL=ADD_YOURS_HERE
# Or planetscale host, username and password
PLANETSCALE_HOST=ADD_YOURS_HERE
PLANETSCALE_USERNAME=ADD_YOURS_HERE
PLANETSCALE_PASSWORD=ADD_YOURS_HERE
TIGRIS_URI=ADD_YOURS_HERE
TIGRIS_PROJECT=ADD_YOURS_HERE
TIGRIS_CLIENT_ID=ADD_YOURS_HERE
TIGRIS_CLIENT_SECRET=ADD_YOURS_HERE
NOTION_INTEGRATION_TOKEN=ADD_YOURS_HERE
NOTION_PAGE_ID=ADD_YOURS_HERE
NOTION_DATABASE_ID=ADD_YOURS_HERE
BAIDU_API_KEY=ADD_YOURS_HERE
BAIDU_SECRET_KEY=ADD_YOURS_HERE
ANALYTICDB_HOST=ADD_YOURS_HERE
ANALYTICDB_PORT=ADD_YOURS_HERE
ANALYTICDB_USERNAME=ADD_YOURS_HERE
ANALYTICDB_PASSWORD=ADD_YOURS_HERE
ANALYTICDB_DATABASE=ADD_YOURS_HERE
ASSEMBLYAI_API_KEY=ADD_YOURS_HERE
# For integration tests only
ASSEMBLYAI_TRANSCRIPT_ID=ADD_YOURS_HERE
LLAMA_PATH=ADD_YOURS_HERE
AWS_ACCESS_KEY_ID=ADD_YOURS_HERE
AWS_SECRET_ACCESS_KEY=ADD_YOURS_HERE
AWS_REGION=ADD_YOURS_HERE
NEO4J_URI=ADD_YOURS_HERE
NEO4J_USERNAME=ADD_YOURS_HERE
NEO4J_PASSWORD=ADD_YOURS_HERE
MEMGRAPH_URI=ADD_YOURS_HERE
MEMGRAPH_USERNAME=ADD_YOURS_HERE
MEMGRAPH_PASSWORD=ADD_YOURS_HERE
CLOSEVECTOR_API_KEY=ADD_YOURS_HERE
CLOSEVECTOR_API_SECRET=ADD_YOURS_HERE
GPLACES_API_KEY=ADD_YOURS_HERE
GOOGLE_ROUTES_API_KEY=ADD_YOURS_HERE
CONFLUENCE_USERNAME=ADD_YOURS_HERE
CONFLUENCE_PASSWORD=ADD_YOURS_HERE
CONFLUENCE_PATH=ADD_YOURS_HERE
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/package.json | {
"name": "langchain",
"version": "0.3.6",
"description": "Typescript bindings for langchain",
"type": "module",
"engines": {
"node": ">=18"
},
"main": "./index.js",
"types": "./index.d.ts",
"files": [
"dist/",
"load.cjs",
"load.js",
"load.d.ts",
"load.d.cts",
"load/serializable.cjs",
"load/serializable.js",
"load/serializable.d.ts",
"load/serializable.d.cts",
"agents.cjs",
"agents.js",
"agents.d.ts",
"agents.d.cts",
"agents/load.cjs",
"agents/load.js",
"agents/load.d.ts",
"agents/load.d.cts",
"agents/toolkits.cjs",
"agents/toolkits.js",
"agents/toolkits.d.ts",
"agents/toolkits.d.cts",
"agents/toolkits/sql.cjs",
"agents/toolkits/sql.js",
"agents/toolkits/sql.d.ts",
"agents/toolkits/sql.d.cts",
"agents/format_scratchpad.cjs",
"agents/format_scratchpad.js",
"agents/format_scratchpad.d.ts",
"agents/format_scratchpad.d.cts",
"agents/format_scratchpad/openai_tools.cjs",
"agents/format_scratchpad/openai_tools.js",
"agents/format_scratchpad/openai_tools.d.ts",
"agents/format_scratchpad/openai_tools.d.cts",
"agents/format_scratchpad/log.cjs",
"agents/format_scratchpad/log.js",
"agents/format_scratchpad/log.d.ts",
"agents/format_scratchpad/log.d.cts",
"agents/format_scratchpad/xml.cjs",
"agents/format_scratchpad/xml.js",
"agents/format_scratchpad/xml.d.ts",
"agents/format_scratchpad/xml.d.cts",
"agents/format_scratchpad/log_to_message.cjs",
"agents/format_scratchpad/log_to_message.js",
"agents/format_scratchpad/log_to_message.d.ts",
"agents/format_scratchpad/log_to_message.d.cts",
"agents/react/output_parser.cjs",
"agents/react/output_parser.js",
"agents/react/output_parser.d.ts",
"agents/react/output_parser.d.cts",
"agents/xml/output_parser.cjs",
"agents/xml/output_parser.js",
"agents/xml/output_parser.d.ts",
"agents/xml/output_parser.d.cts",
"agents/openai/output_parser.cjs",
"agents/openai/output_parser.js",
"agents/openai/output_parser.d.ts",
"agents/openai/output_parser.d.cts",
"tools.cjs",
"tools.js",
"tools.d.ts",
"tools.d.cts",
"tools/chain.cjs",
"tools/chain.js",
"tools/chain.d.ts",
"tools/chain.d.cts",
"tools/render.cjs",
"tools/render.js",
"tools/render.d.ts",
"tools/render.d.cts",
"tools/retriever.cjs",
"tools/retriever.js",
"tools/retriever.d.ts",
"tools/retriever.d.cts",
"tools/sql.cjs",
"tools/sql.js",
"tools/sql.d.ts",
"tools/sql.d.cts",
"tools/webbrowser.cjs",
"tools/webbrowser.js",
"tools/webbrowser.d.ts",
"tools/webbrowser.d.cts",
"chains.cjs",
"chains.js",
"chains.d.ts",
"chains.d.cts",
"chains/combine_documents.cjs",
"chains/combine_documents.js",
"chains/combine_documents.d.ts",
"chains/combine_documents.d.cts",
"chains/combine_documents/reduce.cjs",
"chains/combine_documents/reduce.js",
"chains/combine_documents/reduce.d.ts",
"chains/combine_documents/reduce.d.cts",
"chains/history_aware_retriever.cjs",
"chains/history_aware_retriever.js",
"chains/history_aware_retriever.d.ts",
"chains/history_aware_retriever.d.cts",
"chains/load.cjs",
"chains/load.js",
"chains/load.d.ts",
"chains/load.d.cts",
"chains/openai_functions.cjs",
"chains/openai_functions.js",
"chains/openai_functions.d.ts",
"chains/openai_functions.d.cts",
"chains/query_constructor.cjs",
"chains/query_constructor.js",
"chains/query_constructor.d.ts",
"chains/query_constructor.d.cts",
"chains/query_constructor/ir.cjs",
"chains/query_constructor/ir.js",
"chains/query_constructor/ir.d.ts",
"chains/query_constructor/ir.d.cts",
"chains/retrieval.cjs",
"chains/retrieval.js",
"chains/retrieval.d.ts",
"chains/retrieval.d.cts",
"chains/sql_db.cjs",
"chains/sql_db.js",
"chains/sql_db.d.ts",
"chains/sql_db.d.cts",
"chains/graph_qa/cypher.cjs",
"chains/graph_qa/cypher.js",
"chains/graph_qa/cypher.d.ts",
"chains/graph_qa/cypher.d.cts",
"chat_models/universal.cjs",
"chat_models/universal.js",
"chat_models/universal.d.ts",
"chat_models/universal.d.cts",
"embeddings/cache_backed.cjs",
"embeddings/cache_backed.js",
"embeddings/cache_backed.d.ts",
"embeddings/cache_backed.d.cts",
"embeddings/fake.cjs",
"embeddings/fake.js",
"embeddings/fake.d.ts",
"embeddings/fake.d.cts",
"vectorstores/memory.cjs",
"vectorstores/memory.js",
"vectorstores/memory.d.ts",
"vectorstores/memory.d.cts",
"text_splitter.cjs",
"text_splitter.js",
"text_splitter.d.ts",
"text_splitter.d.cts",
"memory.cjs",
"memory.js",
"memory.d.ts",
"memory.d.cts",
"memory/chat_memory.cjs",
"memory/chat_memory.js",
"memory/chat_memory.d.ts",
"memory/chat_memory.d.cts",
"document.cjs",
"document.js",
"document.d.ts",
"document.d.cts",
"document_loaders/base.cjs",
"document_loaders/base.js",
"document_loaders/base.d.ts",
"document_loaders/base.d.cts",
"document_loaders/fs/buffer.cjs",
"document_loaders/fs/buffer.js",
"document_loaders/fs/buffer.d.ts",
"document_loaders/fs/buffer.d.cts",
"document_loaders/fs/directory.cjs",
"document_loaders/fs/directory.js",
"document_loaders/fs/directory.d.ts",
"document_loaders/fs/directory.d.cts",
"document_loaders/fs/json.cjs",
"document_loaders/fs/json.js",
"document_loaders/fs/json.d.ts",
"document_loaders/fs/json.d.cts",
"document_loaders/fs/multi_file.cjs",
"document_loaders/fs/multi_file.js",
"document_loaders/fs/multi_file.d.ts",
"document_loaders/fs/multi_file.d.cts",
"document_loaders/fs/text.cjs",
"document_loaders/fs/text.js",
"document_loaders/fs/text.d.ts",
"document_loaders/fs/text.d.cts",
"document_transformers/openai_functions.cjs",
"document_transformers/openai_functions.js",
"document_transformers/openai_functions.d.ts",
"document_transformers/openai_functions.d.cts",
"sql_db.cjs",
"sql_db.js",
"sql_db.d.ts",
"sql_db.d.cts",
"callbacks.cjs",
"callbacks.js",
"callbacks.d.ts",
"callbacks.d.cts",
"output_parsers.cjs",
"output_parsers.js",
"output_parsers.d.ts",
"output_parsers.d.cts",
"output_parsers/expression.cjs",
"output_parsers/expression.js",
"output_parsers/expression.d.ts",
"output_parsers/expression.d.cts",
"retrievers/contextual_compression.cjs",
"retrievers/contextual_compression.js",
"retrievers/contextual_compression.d.ts",
"retrievers/contextual_compression.d.cts",
"retrievers/document_compressors.cjs",
"retrievers/document_compressors.js",
"retrievers/document_compressors.d.ts",
"retrievers/document_compressors.d.cts",
"retrievers/ensemble.cjs",
"retrievers/ensemble.js",
"retrievers/ensemble.d.ts",
"retrievers/ensemble.d.cts",
"retrievers/multi_query.cjs",
"retrievers/multi_query.js",
"retrievers/multi_query.d.ts",
"retrievers/multi_query.d.cts",
"retrievers/multi_vector.cjs",
"retrievers/multi_vector.js",
"retrievers/multi_vector.d.ts",
"retrievers/multi_vector.d.cts",
"retrievers/parent_document.cjs",
"retrievers/parent_document.js",
"retrievers/parent_document.d.ts",
"retrievers/parent_document.d.cts",
"retrievers/time_weighted.cjs",
"retrievers/time_weighted.js",
"retrievers/time_weighted.d.ts",
"retrievers/time_weighted.d.cts",
"retrievers/document_compressors/chain_extract.cjs",
"retrievers/document_compressors/chain_extract.js",
"retrievers/document_compressors/chain_extract.d.ts",
"retrievers/document_compressors/chain_extract.d.cts",
"retrievers/document_compressors/embeddings_filter.cjs",
"retrievers/document_compressors/embeddings_filter.js",
"retrievers/document_compressors/embeddings_filter.d.ts",
"retrievers/document_compressors/embeddings_filter.d.cts",
"retrievers/hyde.cjs",
"retrievers/hyde.js",
"retrievers/hyde.d.ts",
"retrievers/hyde.d.cts",
"retrievers/score_threshold.cjs",
"retrievers/score_threshold.js",
"retrievers/score_threshold.d.ts",
"retrievers/score_threshold.d.cts",
"retrievers/self_query.cjs",
"retrievers/self_query.js",
"retrievers/self_query.d.ts",
"retrievers/self_query.d.cts",
"retrievers/self_query/functional.cjs",
"retrievers/self_query/functional.js",
"retrievers/self_query/functional.d.ts",
"retrievers/self_query/functional.d.cts",
"retrievers/matryoshka_retriever.cjs",
"retrievers/matryoshka_retriever.js",
"retrievers/matryoshka_retriever.d.ts",
"retrievers/matryoshka_retriever.d.cts",
"cache/file_system.cjs",
"cache/file_system.js",
"cache/file_system.d.ts",
"cache/file_system.d.cts",
"stores/doc/base.cjs",
"stores/doc/base.js",
"stores/doc/base.d.ts",
"stores/doc/base.d.cts",
"stores/doc/in_memory.cjs",
"stores/doc/in_memory.js",
"stores/doc/in_memory.d.ts",
"stores/doc/in_memory.d.cts",
"stores/file/in_memory.cjs",
"stores/file/in_memory.js",
"stores/file/in_memory.d.ts",
"stores/file/in_memory.d.cts",
"stores/file/node.cjs",
"stores/file/node.js",
"stores/file/node.d.ts",
"stores/file/node.d.cts",
"stores/message/in_memory.cjs",
"stores/message/in_memory.js",
"stores/message/in_memory.d.ts",
"stores/message/in_memory.d.cts",
"storage/encoder_backed.cjs",
"storage/encoder_backed.js",
"storage/encoder_backed.d.ts",
"storage/encoder_backed.d.cts",
"storage/in_memory.cjs",
"storage/in_memory.js",
"storage/in_memory.d.ts",
"storage/in_memory.d.cts",
"storage/file_system.cjs",
"storage/file_system.js",
"storage/file_system.d.ts",
"storage/file_system.d.cts",
"hub.cjs",
"hub.js",
"hub.d.ts",
"hub.d.cts",
"util/document.cjs",
"util/document.js",
"util/document.d.ts",
"util/document.d.cts",
"util/math.cjs",
"util/math.js",
"util/math.d.ts",
"util/math.d.cts",
"util/time.cjs",
"util/time.js",
"util/time.d.ts",
"util/time.d.cts",
"experimental/autogpt.cjs",
"experimental/autogpt.js",
"experimental/autogpt.d.ts",
"experimental/autogpt.d.cts",
"experimental/openai_assistant.cjs",
"experimental/openai_assistant.js",
"experimental/openai_assistant.d.ts",
"experimental/openai_assistant.d.cts",
"experimental/openai_files.cjs",
"experimental/openai_files.js",
"experimental/openai_files.d.ts",
"experimental/openai_files.d.cts",
"experimental/babyagi.cjs",
"experimental/babyagi.js",
"experimental/babyagi.d.ts",
"experimental/babyagi.d.cts",
"experimental/generative_agents.cjs",
"experimental/generative_agents.js",
"experimental/generative_agents.d.ts",
"experimental/generative_agents.d.cts",
"experimental/plan_and_execute.cjs",
"experimental/plan_and_execute.js",
"experimental/plan_and_execute.d.ts",
"experimental/plan_and_execute.d.cts",
"experimental/chains/violation_of_expectations.cjs",
"experimental/chains/violation_of_expectations.js",
"experimental/chains/violation_of_expectations.d.ts",
"experimental/chains/violation_of_expectations.d.cts",
"experimental/masking.cjs",
"experimental/masking.js",
"experimental/masking.d.ts",
"experimental/masking.d.cts",
"experimental/prompts/custom_format.cjs",
"experimental/prompts/custom_format.js",
"experimental/prompts/custom_format.d.ts",
"experimental/prompts/custom_format.d.cts",
"experimental/prompts/handlebars.cjs",
"experimental/prompts/handlebars.js",
"experimental/prompts/handlebars.d.ts",
"experimental/prompts/handlebars.d.cts",
"evaluation.cjs",
"evaluation.js",
"evaluation.d.ts",
"evaluation.d.cts",
"smith.cjs",
"smith.js",
"smith.d.ts",
"smith.d.cts",
"runnables/remote.cjs",
"runnables/remote.js",
"runnables/remote.d.ts",
"runnables/remote.d.cts",
"indexes.cjs",
"indexes.js",
"indexes.d.ts",
"indexes.d.cts",
"schema/query_constructor.cjs",
"schema/query_constructor.js",
"schema/query_constructor.d.ts",
"schema/query_constructor.d.cts",
"schema/prompt_template.cjs",
"schema/prompt_template.js",
"schema/prompt_template.d.ts",
"schema/prompt_template.d.cts"
],
"repository": {
"type": "git",
"url": "git@github.com:langchain-ai/langchainjs.git"
},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/langchain/",
"scripts": {
"build": "yarn turbo:command build:internal --filter=langchain",
"build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking --gen-maps",
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
"lint": "yarn lint:eslint && yarn lint:dpdm",
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"precommit": "lint-staged",
"clean": "rm -rf .turbo dist/",
"prepack": "yarn build",
"release": "release-it --only-version --config .release-it.json",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:integration": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"format": "prettier --config .prettierrc --write \"src\"",
"format:check": "prettier --config .prettierrc --check \"src\""
},
"author": "LangChain",
"license": "MIT",
"devDependencies": {
"@faker-js/faker": "^7.6.0",
"@jest/globals": "^29.5.0",
"@langchain/anthropic": "*",
"@langchain/aws": "*",
"@langchain/cohere": "*",
"@langchain/core": "workspace:*",
"@langchain/google-genai": "*",
"@langchain/google-vertexai": "*",
"@langchain/groq": "*",
"@langchain/mistralai": "*",
"@langchain/ollama": "*",
"@langchain/scripts": ">=0.1.0 <0.2.0",
"@swc/core": "^1.3.90",
"@swc/jest": "^0.2.29",
"@tsconfig/recommended": "^1.0.2",
"@types/handlebars": "^4.1.0",
"@types/html-to-text": "^9",
"@types/js-yaml": "^4",
"@types/jsdom": "^21.1.1",
"@types/uuid": "^9",
"@types/ws": "^8",
"@typescript-eslint/eslint-plugin": "^5.58.0",
"@typescript-eslint/parser": "^5.58.0",
"axios": "^0.26.0",
"cheerio": "1.0.0-rc.12",
"dotenv": "^16.0.3",
"dpdm": "^3.12.0",
"eslint": "^8.33.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-prettier": "^8.6.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-jest": "^27.6.0",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"handlebars": "^4.7.8",
"jest": "^29.5.0",
"jest-environment-node": "^29.6.4",
"openai": "^4.41.1",
"peggy": "^3.0.2",
"prettier": "^2.8.3",
"release-it": "^17.6.0",
"rimraf": "^5.0.1",
"rollup": "^3.19.1",
"ts-jest": "^29.1.0",
"typeorm": "^0.3.20",
"typescript": "~5.1.6",
"wikipedia": "^2.1.2"
},
"peerDependencies": {
"@langchain/anthropic": "*",
"@langchain/aws": "*",
"@langchain/cohere": "*",
"@langchain/core": ">=0.2.21 <0.4.0",
"@langchain/google-genai": "*",
"@langchain/google-vertexai": "*",
"@langchain/groq": "*",
"@langchain/mistralai": "*",
"@langchain/ollama": "*",
"axios": "*",
"cheerio": "*",
"handlebars": "^4.7.8",
"peggy": "^3.0.2",
"typeorm": "*"
},
"peerDependenciesMeta": {
"@langchain/anthropic": {
"optional": true
},
"@langchain/aws": {
"optional": true
},
"@langchain/cohere": {
"optional": true
},
"@langchain/google-genai": {
"optional": true
},
"@langchain/google-vertexai": {
"optional": true
},
"@langchain/groq": {
"optional": true
},
"@langchain/mistralai": {
"optional": true
},
"@langchain/ollama": {
"optional": true
},
"axios": {
"optional": true
},
"cheerio": {
"optional": true
},
"handlebars": {
"optional": true
},
"peggy": {
"optional": true
},
"typeorm": {
"optional": true
}
},
"dependencies": {
"@langchain/openai": ">=0.1.0 <0.4.0",
"@langchain/textsplitters": ">=0.0.0 <0.2.0",
"js-tiktoken": "^1.0.12",
"js-yaml": "^4.1.0",
"jsonpointer": "^5.0.1",
"langsmith": "^0.2.8",
"openapi-types": "^12.1.3",
"p-retry": "4",
"uuid": "^10.0.0",
"yaml": "^2.2.1",
"zod": "^3.22.4",
"zod-to-json-schema": "^3.22.3"
},
"publishConfig": {
"access": "public"
},
"keywords": [
"llm",
"ai",
"gpt3",
"chain",
"prompt",
"prompt engineering",
"chatgpt",
"machine learning",
"ml",
"openai",
"embeddings",
"vectorstores"
],
"exports": {
"./load": {
"types": {
"import": "./load.d.ts",
"require": "./load.d.cts",
"default": "./load.d.ts"
},
"import": "./load.js",
"require": "./load.cjs"
},
"./load/serializable": {
"types": {
"import": "./load/serializable.d.ts",
"require": "./load/serializable.d.cts",
"default": "./load/serializable.d.ts"
},
"import": "./load/serializable.js",
"require": "./load/serializable.cjs"
},
"./agents": {
"types": {
"import": "./agents.d.ts",
"require": "./agents.d.cts",
"default": "./agents.d.ts"
},
"import": "./agents.js",
"require": "./agents.cjs"
},
"./agents/load": {
"types": {
"import": "./agents/load.d.ts",
"require": "./agents/load.d.cts",
"default": "./agents/load.d.ts"
},
"import": "./agents/load.js",
"require": "./agents/load.cjs"
},
"./agents/toolkits": {
"types": {
"import": "./agents/toolkits.d.ts",
"require": "./agents/toolkits.d.cts",
"default": "./agents/toolkits.d.ts"
},
"import": "./agents/toolkits.js",
"require": "./agents/toolkits.cjs"
},
"./agents/toolkits/sql": {
"types": {
"import": "./agents/toolkits/sql.d.ts",
"require": "./agents/toolkits/sql.d.cts",
"default": "./agents/toolkits/sql.d.ts"
},
"import": "./agents/toolkits/sql.js",
"require": "./agents/toolkits/sql.cjs"
},
"./agents/format_scratchpad": {
"types": {
"import": "./agents/format_scratchpad.d.ts",
"require": "./agents/format_scratchpad.d.cts",
"default": "./agents/format_scratchpad.d.ts"
},
"import": "./agents/format_scratchpad.js",
"require": "./agents/format_scratchpad.cjs"
},
"./agents/format_scratchpad/openai_tools": {
"types": {
"import": "./agents/format_scratchpad/openai_tools.d.ts",
"require": "./agents/format_scratchpad/openai_tools.d.cts",
"default": "./agents/format_scratchpad/openai_tools.d.ts"
},
"import": "./agents/format_scratchpad/openai_tools.js",
"require": "./agents/format_scratchpad/openai_tools.cjs"
},
"./agents/format_scratchpad/log": {
"types": {
"import": "./agents/format_scratchpad/log.d.ts",
"require": "./agents/format_scratchpad/log.d.cts",
"default": "./agents/format_scratchpad/log.d.ts"
},
"import": "./agents/format_scratchpad/log.js",
"require": "./agents/format_scratchpad/log.cjs"
},
"./agents/format_scratchpad/xml": {
"types": {
"import": "./agents/format_scratchpad/xml.d.ts",
"require": "./agents/format_scratchpad/xml.d.cts",
"default": "./agents/format_scratchpad/xml.d.ts"
},
"import": "./agents/format_scratchpad/xml.js",
"require": "./agents/format_scratchpad/xml.cjs"
},
"./agents/format_scratchpad/log_to_message": {
"types": {
"import": "./agents/format_scratchpad/log_to_message.d.ts",
"require": "./agents/format_scratchpad/log_to_message.d.cts",
"default": "./agents/format_scratchpad/log_to_message.d.ts"
},
"import": "./agents/format_scratchpad/log_to_message.js",
"require": "./agents/format_scratchpad/log_to_message.cjs"
},
"./agents/react/output_parser": {
"types": {
"import": "./agents/react/output_parser.d.ts",
"require": "./agents/react/output_parser.d.cts",
"default": "./agents/react/output_parser.d.ts"
},
"import": "./agents/react/output_parser.js",
"require": "./agents/react/output_parser.cjs"
},
"./agents/xml/output_parser": {
"types": {
"import": "./agents/xml/output_parser.d.ts",
"require": "./agents/xml/output_parser.d.cts",
"default": "./agents/xml/output_parser.d.ts"
},
"import": "./agents/xml/output_parser.js",
"require": "./agents/xml/output_parser.cjs"
},
"./agents/openai/output_parser": {
"types": {
"import": "./agents/openai/output_parser.d.ts",
"require": "./agents/openai/output_parser.d.cts",
"default": "./agents/openai/output_parser.d.ts"
},
"import": "./agents/openai/output_parser.js",
"require": "./agents/openai/output_parser.cjs"
},
"./tools": {
"types": {
"import": "./tools.d.ts",
"require": "./tools.d.cts",
"default": "./tools.d.ts"
},
"import": "./tools.js",
"require": "./tools.cjs"
},
"./tools/chain": {
"types": {
"import": "./tools/chain.d.ts",
"require": "./tools/chain.d.cts",
"default": "./tools/chain.d.ts"
},
"import": "./tools/chain.js",
"require": "./tools/chain.cjs"
},
"./tools/render": {
"types": {
"import": "./tools/render.d.ts",
"require": "./tools/render.d.cts",
"default": "./tools/render.d.ts"
},
"import": "./tools/render.js",
"require": "./tools/render.cjs"
},
"./tools/retriever": {
"types": {
"import": "./tools/retriever.d.ts",
"require": "./tools/retriever.d.cts",
"default": "./tools/retriever.d.ts"
},
"import": "./tools/retriever.js",
"require": "./tools/retriever.cjs"
},
"./tools/sql": {
"types": {
"import": "./tools/sql.d.ts",
"require": "./tools/sql.d.cts",
"default": "./tools/sql.d.ts"
},
"import": "./tools/sql.js",
"require": "./tools/sql.cjs"
},
"./tools/webbrowser": {
"types": {
"import": "./tools/webbrowser.d.ts",
"require": "./tools/webbrowser.d.cts",
"default": "./tools/webbrowser.d.ts"
},
"import": "./tools/webbrowser.js",
"require": "./tools/webbrowser.cjs"
},
"./chains": {
"types": {
"import": "./chains.d.ts",
"require": "./chains.d.cts",
"default": "./chains.d.ts"
},
"import": "./chains.js",
"require": "./chains.cjs"
},
"./chains/combine_documents": {
"types": {
"import": "./chains/combine_documents.d.ts",
"require": "./chains/combine_documents.d.cts",
"default": "./chains/combine_documents.d.ts"
},
"import": "./chains/combine_documents.js",
"require": "./chains/combine_documents.cjs"
},
"./chains/combine_documents/reduce": {
"types": {
"import": "./chains/combine_documents/reduce.d.ts",
"require": "./chains/combine_documents/reduce.d.cts",
"default": "./chains/combine_documents/reduce.d.ts"
},
"import": "./chains/combine_documents/reduce.js",
"require": "./chains/combine_documents/reduce.cjs"
},
"./chains/history_aware_retriever": {
"types": {
"import": "./chains/history_aware_retriever.d.ts",
"require": "./chains/history_aware_retriever.d.cts",
"default": "./chains/history_aware_retriever.d.ts"
},
"import": "./chains/history_aware_retriever.js",
"require": "./chains/history_aware_retriever.cjs"
},
"./chains/load": {
"types": {
"import": "./chains/load.d.ts",
"require": "./chains/load.d.cts",
"default": "./chains/load.d.ts"
},
"import": "./chains/load.js",
"require": "./chains/load.cjs"
},
"./chains/openai_functions": {
"types": {
"import": "./chains/openai_functions.d.ts",
"require": "./chains/openai_functions.d.cts",
"default": "./chains/openai_functions.d.ts"
},
"import": "./chains/openai_functions.js",
"require": "./chains/openai_functions.cjs"
},
"./chains/query_constructor": {
"types": {
"import": "./chains/query_constructor.d.ts",
"require": "./chains/query_constructor.d.cts",
"default": "./chains/query_constructor.d.ts"
},
"import": "./chains/query_constructor.js",
"require": "./chains/query_constructor.cjs"
},
"./chains/query_constructor/ir": {
"types": {
"import": "./chains/query_constructor/ir.d.ts",
"require": "./chains/query_constructor/ir.d.cts",
"default": "./chains/query_constructor/ir.d.ts"
},
"import": "./chains/query_constructor/ir.js",
"require": "./chains/query_constructor/ir.cjs"
},
"./chains/retrieval": {
"types": {
"import": "./chains/retrieval.d.ts",
"require": "./chains/retrieval.d.cts",
"default": "./chains/retrieval.d.ts"
},
"import": "./chains/retrieval.js",
"require": "./chains/retrieval.cjs"
},
"./chains/sql_db": {
"types": {
"import": "./chains/sql_db.d.ts",
"require": "./chains/sql_db.d.cts",
"default": "./chains/sql_db.d.ts"
},
"import": "./chains/sql_db.js",
"require": "./chains/sql_db.cjs"
},
"./chains/graph_qa/cypher": {
"types": {
"import": "./chains/graph_qa/cypher.d.ts",
"require": "./chains/graph_qa/cypher.d.cts",
"default": "./chains/graph_qa/cypher.d.ts"
},
"import": "./chains/graph_qa/cypher.js",
"require": "./chains/graph_qa/cypher.cjs"
},
"./chat_models/universal": {
"types": {
"import": "./chat_models/universal.d.ts",
"require": "./chat_models/universal.d.cts",
"default": "./chat_models/universal.d.ts"
},
"import": "./chat_models/universal.js",
"require": "./chat_models/universal.cjs"
},
"./embeddings/cache_backed": {
"types": {
"import": "./embeddings/cache_backed.d.ts",
"require": "./embeddings/cache_backed.d.cts",
"default": "./embeddings/cache_backed.d.ts"
},
"import": "./embeddings/cache_backed.js",
"require": "./embeddings/cache_backed.cjs"
},
"./embeddings/fake": {
"types": {
"import": "./embeddings/fake.d.ts",
"require": "./embeddings/fake.d.cts",
"default": "./embeddings/fake.d.ts"
},
"import": "./embeddings/fake.js",
"require": "./embeddings/fake.cjs"
},
"./vectorstores/memory": {
"types": {
"import": "./vectorstores/memory.d.ts",
"require": "./vectorstores/memory.d.cts",
"default": "./vectorstores/memory.d.ts"
},
"import": "./vectorstores/memory.js",
"require": "./vectorstores/memory.cjs"
},
"./text_splitter": {
"types": {
"import": "./text_splitter.d.ts",
"require": "./text_splitter.d.cts",
"default": "./text_splitter.d.ts"
},
"import": "./text_splitter.js",
"require": "./text_splitter.cjs"
},
"./memory": {
"types": {
"import": "./memory.d.ts",
"require": "./memory.d.cts",
"default": "./memory.d.ts"
},
"import": "./memory.js",
"require": "./memory.cjs"
},
"./memory/chat_memory": {
"types": {
"import": "./memory/chat_memory.d.ts",
"require": "./memory/chat_memory.d.cts",
"default": "./memory/chat_memory.d.ts"
},
"import": "./memory/chat_memory.js",
"require": "./memory/chat_memory.cjs"
},
"./document": {
"types": {
"import": "./document.d.ts",
"require": "./document.d.cts",
"default": "./document.d.ts"
},
"import": "./document.js",
"require": "./document.cjs"
},
"./document_loaders/base": {
"types": {
"import": "./document_loaders/base.d.ts",
"require": "./document_loaders/base.d.cts",
"default": "./document_loaders/base.d.ts"
},
"import": "./document_loaders/base.js",
"require": "./document_loaders/base.cjs"
},
"./document_loaders/fs/buffer": {
"types": {
"import": "./document_loaders/fs/buffer.d.ts",
"require": "./document_loaders/fs/buffer.d.cts",
"default": "./document_loaders/fs/buffer.d.ts"
},
"import": "./document_loaders/fs/buffer.js",
"require": "./document_loaders/fs/buffer.cjs"
},
"./document_loaders/fs/directory": {
"types": {
"import": "./document_loaders/fs/directory.d.ts",
"require": "./document_loaders/fs/directory.d.cts",
"default": "./document_loaders/fs/directory.d.ts"
},
"import": "./document_loaders/fs/directory.js",
"require": "./document_loaders/fs/directory.cjs"
},
"./document_loaders/fs/json": {
"types": {
"import": "./document_loaders/fs/json.d.ts",
"require": "./document_loaders/fs/json.d.cts",
"default": "./document_loaders/fs/json.d.ts"
},
"import": "./document_loaders/fs/json.js",
"require": "./document_loaders/fs/json.cjs"
},
"./document_loaders/fs/multi_file": {
"types": {
"import": "./document_loaders/fs/multi_file.d.ts",
"require": "./document_loaders/fs/multi_file.d.cts",
"default": "./document_loaders/fs/multi_file.d.ts"
},
"import": "./document_loaders/fs/multi_file.js",
"require": "./document_loaders/fs/multi_file.cjs"
},
"./document_loaders/fs/text": {
"types": {
"import": "./document_loaders/fs/text.d.ts",
"require": "./document_loaders/fs/text.d.cts",
"default": "./document_loaders/fs/text.d.ts"
},
"import": "./document_loaders/fs/text.js",
"require": "./document_loaders/fs/text.cjs"
},
"./document_transformers/openai_functions": {
"types": {
"import": "./document_transformers/openai_functions.d.ts",
"require": "./document_transformers/openai_functions.d.cts",
"default": "./document_transformers/openai_functions.d.ts"
},
"import": "./document_transformers/openai_functions.js",
"require": "./document_transformers/openai_functions.cjs"
},
"./sql_db": {
"types": {
"import": "./sql_db.d.ts",
"require": "./sql_db.d.cts",
"default": "./sql_db.d.ts"
},
"import": "./sql_db.js",
"require": "./sql_db.cjs"
},
"./callbacks": {
"types": {
"import": "./callbacks.d.ts",
"require": "./callbacks.d.cts",
"default": "./callbacks.d.ts"
},
"import": "./callbacks.js",
"require": "./callbacks.cjs"
},
"./output_parsers": {
"types": {
"import": "./output_parsers.d.ts",
"require": "./output_parsers.d.cts",
"default": "./output_parsers.d.ts"
},
"import": "./output_parsers.js",
"require": "./output_parsers.cjs"
},
"./output_parsers/expression": {
"types": {
"import": "./output_parsers/expression.d.ts",
"require": "./output_parsers/expression.d.cts",
"default": "./output_parsers/expression.d.ts"
},
"import": "./output_parsers/expression.js",
"require": "./output_parsers/expression.cjs"
},
"./retrievers/contextual_compression": {
"types": {
"import": "./retrievers/contextual_compression.d.ts",
"require": "./retrievers/contextual_compression.d.cts",
"default": "./retrievers/contextual_compression.d.ts"
},
"import": "./retrievers/contextual_compression.js",
"require": "./retrievers/contextual_compression.cjs"
},
"./retrievers/document_compressors": {
"types": {
"import": "./retrievers/document_compressors.d.ts",
"require": "./retrievers/document_compressors.d.cts",
"default": "./retrievers/document_compressors.d.ts"
},
"import": "./retrievers/document_compressors.js",
"require": "./retrievers/document_compressors.cjs"
},
"./retrievers/ensemble": {
"types": {
"import": "./retrievers/ensemble.d.ts",
"require": "./retrievers/ensemble.d.cts",
"default": "./retrievers/ensemble.d.ts"
},
"import": "./retrievers/ensemble.js",
"require": "./retrievers/ensemble.cjs"
},
"./retrievers/multi_query": {
"types": {
"import": "./retrievers/multi_query.d.ts",
"require": "./retrievers/multi_query.d.cts",
"default": "./retrievers/multi_query.d.ts"
},
"import": "./retrievers/multi_query.js",
"require": "./retrievers/multi_query.cjs"
},
"./retrievers/multi_vector": {
"types": {
"import": "./retrievers/multi_vector.d.ts",
"require": "./retrievers/multi_vector.d.cts",
"default": "./retrievers/multi_vector.d.ts"
},
"import": "./retrievers/multi_vector.js",
"require": "./retrievers/multi_vector.cjs"
},
"./retrievers/parent_document": {
"types": {
"import": "./retrievers/parent_document.d.ts",
"require": "./retrievers/parent_document.d.cts",
"default": "./retrievers/parent_document.d.ts"
},
"import": "./retrievers/parent_document.js",
"require": "./retrievers/parent_document.cjs"
},
"./retrievers/time_weighted": {
"types": {
"import": "./retrievers/time_weighted.d.ts",
"require": "./retrievers/time_weighted.d.cts",
"default": "./retrievers/time_weighted.d.ts"
},
"import": "./retrievers/time_weighted.js",
"require": "./retrievers/time_weighted.cjs"
},
"./retrievers/document_compressors/chain_extract": {
"types": {
"import": "./retrievers/document_compressors/chain_extract.d.ts",
"require": "./retrievers/document_compressors/chain_extract.d.cts",
"default": "./retrievers/document_compressors/chain_extract.d.ts"
},
"import": "./retrievers/document_compressors/chain_extract.js",
"require": "./retrievers/document_compressors/chain_extract.cjs"
},
"./retrievers/document_compressors/embeddings_filter": {
"types": {
"import": "./retrievers/document_compressors/embeddings_filter.d.ts",
"require": "./retrievers/document_compressors/embeddings_filter.d.cts",
"default": "./retrievers/document_compressors/embeddings_filter.d.ts"
},
"import": "./retrievers/document_compressors/embeddings_filter.js",
"require": "./retrievers/document_compressors/embeddings_filter.cjs"
},
"./retrievers/hyde": {
"types": {
"import": "./retrievers/hyde.d.ts",
"require": "./retrievers/hyde.d.cts",
"default": "./retrievers/hyde.d.ts"
},
"import": "./retrievers/hyde.js",
"require": "./retrievers/hyde.cjs"
},
"./retrievers/score_threshold": {
"types": {
"import": "./retrievers/score_threshold.d.ts",
"require": "./retrievers/score_threshold.d.cts",
"default": "./retrievers/score_threshold.d.ts"
},
"import": "./retrievers/score_threshold.js",
"require": "./retrievers/score_threshold.cjs"
},
"./retrievers/self_query": {
"types": {
"import": "./retrievers/self_query.d.ts",
"require": "./retrievers/self_query.d.cts",
"default": "./retrievers/self_query.d.ts"
},
"import": "./retrievers/self_query.js",
"require": "./retrievers/self_query.cjs"
},
"./retrievers/self_query/functional": {
"types": {
"import": "./retrievers/self_query/functional.d.ts",
"require": "./retrievers/self_query/functional.d.cts",
"default": "./retrievers/self_query/functional.d.ts"
},
"import": "./retrievers/self_query/functional.js",
"require": "./retrievers/self_query/functional.cjs"
},
"./retrievers/matryoshka_retriever": {
"types": {
"import": "./retrievers/matryoshka_retriever.d.ts",
"require": "./retrievers/matryoshka_retriever.d.cts",
"default": "./retrievers/matryoshka_retriever.d.ts"
},
"import": "./retrievers/matryoshka_retriever.js",
"require": "./retrievers/matryoshka_retriever.cjs"
},
"./cache/file_system": {
"types": {
"import": "./cache/file_system.d.ts",
"require": "./cache/file_system.d.cts",
"default": "./cache/file_system.d.ts"
},
"import": "./cache/file_system.js",
"require": "./cache/file_system.cjs"
},
"./stores/doc/base": {
"types": {
"import": "./stores/doc/base.d.ts",
"require": "./stores/doc/base.d.cts",
"default": "./stores/doc/base.d.ts"
},
"import": "./stores/doc/base.js",
"require": "./stores/doc/base.cjs"
},
"./stores/doc/in_memory": {
"types": {
"import": "./stores/doc/in_memory.d.ts",
"require": "./stores/doc/in_memory.d.cts",
"default": "./stores/doc/in_memory.d.ts"
},
"import": "./stores/doc/in_memory.js",
"require": "./stores/doc/in_memory.cjs"
},
"./stores/file/in_memory": {
"types": {
"import": "./stores/file/in_memory.d.ts",
"require": "./stores/file/in_memory.d.cts",
"default": "./stores/file/in_memory.d.ts"
},
"import": "./stores/file/in_memory.js",
"require": "./stores/file/in_memory.cjs"
},
"./stores/file/node": {
"types": {
"import": "./stores/file/node.d.ts",
"require": "./stores/file/node.d.cts",
"default": "./stores/file/node.d.ts"
},
"import": "./stores/file/node.js",
"require": "./stores/file/node.cjs"
},
"./stores/message/in_memory": {
"types": {
"import": "./stores/message/in_memory.d.ts",
"require": "./stores/message/in_memory.d.cts",
"default": "./stores/message/in_memory.d.ts"
},
"import": "./stores/message/in_memory.js",
"require": "./stores/message/in_memory.cjs"
},
"./storage/encoder_backed": {
"types": {
"import": "./storage/encoder_backed.d.ts",
"require": "./storage/encoder_backed.d.cts",
"default": "./storage/encoder_backed.d.ts"
},
"import": "./storage/encoder_backed.js",
"require": "./storage/encoder_backed.cjs"
},
"./storage/in_memory": {
"types": {
"import": "./storage/in_memory.d.ts",
"require": "./storage/in_memory.d.cts",
"default": "./storage/in_memory.d.ts"
},
"import": "./storage/in_memory.js",
"require": "./storage/in_memory.cjs"
},
"./storage/file_system": {
"types": {
"import": "./storage/file_system.d.ts",
"require": "./storage/file_system.d.cts",
"default": "./storage/file_system.d.ts"
},
"import": "./storage/file_system.js",
"require": "./storage/file_system.cjs"
},
"./hub": {
"types": {
"import": "./hub.d.ts",
"require": "./hub.d.cts",
"default": "./hub.d.ts"
},
"import": "./hub.js",
"require": "./hub.cjs"
},
"./util/document": {
"types": {
"import": "./util/document.d.ts",
"require": "./util/document.d.cts",
"default": "./util/document.d.ts"
},
"import": "./util/document.js",
"require": "./util/document.cjs"
},
"./util/math": {
"types": {
"import": "./util/math.d.ts",
"require": "./util/math.d.cts",
"default": "./util/math.d.ts"
},
"import": "./util/math.js",
"require": "./util/math.cjs"
},
"./util/time": {
"types": {
"import": "./util/time.d.ts",
"require": "./util/time.d.cts",
"default": "./util/time.d.ts"
},
"import": "./util/time.js",
"require": "./util/time.cjs"
},
"./experimental/autogpt": {
"types": {
"import": "./experimental/autogpt.d.ts",
"require": "./experimental/autogpt.d.cts",
"default": "./experimental/autogpt.d.ts"
},
"import": "./experimental/autogpt.js",
"require": "./experimental/autogpt.cjs"
},
"./experimental/openai_assistant": {
"types": {
"import": "./experimental/openai_assistant.d.ts",
"require": "./experimental/openai_assistant.d.cts",
"default": "./experimental/openai_assistant.d.ts"
},
"import": "./experimental/openai_assistant.js",
"require": "./experimental/openai_assistant.cjs"
},
"./experimental/openai_files": {
"types": {
"import": "./experimental/openai_files.d.ts",
"require": "./experimental/openai_files.d.cts",
"default": "./experimental/openai_files.d.ts"
},
"import": "./experimental/openai_files.js",
"require": "./experimental/openai_files.cjs"
},
"./experimental/babyagi": {
"types": {
"import": "./experimental/babyagi.d.ts",
"require": "./experimental/babyagi.d.cts",
"default": "./experimental/babyagi.d.ts"
},
"import": "./experimental/babyagi.js",
"require": "./experimental/babyagi.cjs"
},
"./experimental/generative_agents": {
"types": {
"import": "./experimental/generative_agents.d.ts",
"require": "./experimental/generative_agents.d.cts",
"default": "./experimental/generative_agents.d.ts"
},
"import": "./experimental/generative_agents.js",
"require": "./experimental/generative_agents.cjs"
},
"./experimental/plan_and_execute": {
"types": {
"import": "./experimental/plan_and_execute.d.ts",
"require": "./experimental/plan_and_execute.d.cts",
"default": "./experimental/plan_and_execute.d.ts"
},
"import": "./experimental/plan_and_execute.js",
"require": "./experimental/plan_and_execute.cjs"
},
"./experimental/chains/violation_of_expectations": {
"types": {
"import": "./experimental/chains/violation_of_expectations.d.ts",
"require": "./experimental/chains/violation_of_expectations.d.cts",
"default": "./experimental/chains/violation_of_expectations.d.ts"
},
"import": "./experimental/chains/violation_of_expectations.js",
"require": "./experimental/chains/violation_of_expectations.cjs"
},
"./experimental/masking": {
"types": {
"import": "./experimental/masking.d.ts",
"require": "./experimental/masking.d.cts",
"default": "./experimental/masking.d.ts"
},
"import": "./experimental/masking.js",
"require": "./experimental/masking.cjs"
},
"./experimental/prompts/custom_format": {
"types": {
"import": "./experimental/prompts/custom_format.d.ts",
"require": "./experimental/prompts/custom_format.d.cts",
"default": "./experimental/prompts/custom_format.d.ts"
},
"import": "./experimental/prompts/custom_format.js",
"require": "./experimental/prompts/custom_format.cjs"
},
"./experimental/prompts/handlebars": {
"types": {
"import": "./experimental/prompts/handlebars.d.ts",
"require": "./experimental/prompts/handlebars.d.cts",
"default": "./experimental/prompts/handlebars.d.ts"
},
"import": "./experimental/prompts/handlebars.js",
"require": "./experimental/prompts/handlebars.cjs"
},
"./evaluation": {
"types": {
"import": "./evaluation.d.ts",
"require": "./evaluation.d.cts",
"default": "./evaluation.d.ts"
},
"import": "./evaluation.js",
"require": "./evaluation.cjs"
},
"./smith": {
"types": {
"import": "./smith.d.ts",
"require": "./smith.d.cts",
"default": "./smith.d.ts"
},
"import": "./smith.js",
"require": "./smith.cjs"
},
"./runnables/remote": {
"types": {
"import": "./runnables/remote.d.ts",
"require": "./runnables/remote.d.cts",
"default": "./runnables/remote.d.ts"
},
"import": "./runnables/remote.js",
"require": "./runnables/remote.cjs"
},
"./indexes": {
"types": {
"import": "./indexes.d.ts",
"require": "./indexes.d.cts",
"default": "./indexes.d.ts"
},
"import": "./indexes.js",
"require": "./indexes.cjs"
},
"./schema/query_constructor": {
"types": {
"import": "./schema/query_constructor.d.ts",
"require": "./schema/query_constructor.d.cts",
"default": "./schema/query_constructor.d.ts"
},
"import": "./schema/query_constructor.js",
"require": "./schema/query_constructor.cjs"
},
"./schema/prompt_template": {
"types": {
"import": "./schema/prompt_template.d.ts",
"require": "./schema/prompt_template.d.cts",
"default": "./schema/prompt_template.d.ts"
},
"import": "./schema/prompt_template.js",
"require": "./schema/prompt_template.cjs"
},
"./package.json": "./package.json"
}
}
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"declaration": false
},
"exclude": [
"node_modules",
"dist",
"docs",
"**/tests"
]
} |
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/turbo.json | {
"extends": ["//"],
"pipeline": {
"build": {
"outputs": ["**/dist/**"]
},
"build:internal": {
"dependsOn": ["^build:internal"]
}
}
}
|
0 | lc_public_repos/langchainjs | lc_public_repos/langchainjs/langchain/.prettierrc | {
"$schema": "https://json.schemastore.org/prettierrc",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"arrowParens": "always",
"requirePragma": false,
"insertPragma": false,
"proseWrap": "preserve",
"htmlWhitespaceSensitivity": "css",
"vueIndentScriptAndStyle": false,
"endOfLine": "lf"
}
|
0 | lc_public_repos/langchainjs/langchain | lc_public_repos/langchainjs/langchain/src/hub.ts | import { Client } from "langsmith";
import { Runnable } from "@langchain/core/runnables";
import { load } from "./load/index.js";
/**
* Push a prompt to the hub.
* If the specified repo doesn't already exist, it will be created.
* @param repoFullName The full name of the repo.
* @param runnable The prompt to push.
* @param options
* @returns The URL of the newly pushed prompt in the hub.
*/
export async function push(
repoFullName: string,
runnable: Runnable,
options?: {
apiKey?: string;
apiUrl?: string;
parentCommitHash?: string;
/** @deprecated Use isPublic instead. */
newRepoIsPublic?: boolean;
isPublic?: boolean;
/** @deprecated Use description instead. */
newRepoDescription?: string;
description?: string;
readme?: string;
tags?: string[];
}
) {
const client = new Client(options);
const payloadOptions = {
object: runnable,
parentCommitHash: options?.parentCommitHash,
isPublic: options?.isPublic ?? options?.newRepoIsPublic,
description: options?.description ?? options?.newRepoDescription,
readme: options?.readme,
tags: options?.tags,
};
return client.pushPrompt(repoFullName, payloadOptions);
}
/**
* Pull a prompt from the hub.
* @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
* @param options
* @returns
*/
export async function pull<T extends Runnable>(
ownerRepoCommit: string,
options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }
) {
const client = new Client(options);
const result = await client._pullPrompt(ownerRepoCommit, {
includeModel: options?.includeModel,
});
return load<T>(result);
}
|
0 | lc_public_repos/langchainjs/langchain | lc_public_repos/langchainjs/langchain/src/text_splitter.ts | export * from "@langchain/textsplitters";
|
0 | lc_public_repos/langchainjs/langchain | lc_public_repos/langchainjs/langchain/src/document.ts | export { type DocumentInput, Document } from "@langchain/core/documents";
|
0 | lc_public_repos/langchainjs/langchain | lc_public_repos/langchainjs/langchain/src/index.ts | console.warn(
`[WARNING]: The root "langchain" entrypoint is empty. Please use a specific entrypoint instead.`
);
|
0 | lc_public_repos/langchainjs/langchain | lc_public_repos/langchainjs/langchain/src/sql_db.ts | import type { DataSource as DataSourceT, DataSourceOptions } from "typeorm";
import { Serializable } from "@langchain/core/load/serializable";
import {
generateTableInfoFromTables,
getTableAndColumnsName,
SerializedSqlDatabase,
SqlDatabaseDataSourceParams,
SqlDatabaseOptionsParams,
SqlTable,
verifyIgnoreTablesExistInDatabase,
verifyIncludeTablesExistInDatabase,
verifyListTablesExistInDatabase,
} from "./util/sql_utils.js";
export type { SqlDatabaseDataSourceParams, SqlDatabaseOptionsParams };
/**
* Class that represents a SQL database in the LangChain framework.
*
* @security **Security Notice**
* This class generates SQL queries for the given database.
* The SQLDatabase class provides a getTableInfo method that can be used
* to get column information as well as sample data from the table.
* To mitigate risk of leaking sensitive data, limit permissions
* to read and scope to the tables that are needed.
* Optionally, use the includesTables or ignoreTables class parameters
* to limit which tables can/cannot be accessed.
*
* @link See https://js.langchain.com/docs/security for more information.
*/
export class SqlDatabase
  extends Serializable
  implements SqlDatabaseOptionsParams, SqlDatabaseDataSourceParams
{
  lc_namespace = ["langchain", "sql_db"];

  toJSON() {
    // A live database connection cannot be round-tripped through the standard
    // LangChain serialization mechanism; use `serialize()` instead.
    return this.toJSONNotImplemented();
  }

  appDataSourceOptions: DataSourceOptions;

  appDataSource: DataSourceT;

  /** Every table (with its columns) discovered in the connected database. */
  allTables: Array<SqlTable> = [];

  /** When non-empty, only these tables are visible. Mutually exclusive with `ignoreTables`. */
  includesTables: Array<string> = [];

  /** When non-empty, these tables are hidden. Mutually exclusive with `includesTables`. */
  ignoreTables: Array<string> = [];

  /** Number of sample rows appended to each table description by `getTableInfo`. */
  sampleRowsInTableInfo = 3;

  /** Optional user-supplied table descriptions, keyed by table name. */
  customDescription?: Record<string, string>;

  protected constructor(fields: SqlDatabaseDataSourceParams) {
    super(...arguments);
    this.appDataSource = fields.appDataSource;
    this.appDataSourceOptions = fields.appDataSource.options;
    // Only reject when both lists actually contain entries: empty arrays are
    // truthy in JS and previously triggered a spurious error. The message also
    // used the Python-style name "include_tables" instead of the real field.
    if (fields?.includesTables?.length && fields?.ignoreTables?.length) {
      throw new Error("Cannot specify both includesTables and ignoreTables");
    }
    this.includesTables = fields?.includesTables ?? [];
    this.ignoreTables = fields?.ignoreTables ?? [];
    this.sampleRowsInTableInfo =
      fields?.sampleRowsInTableInfo ?? this.sampleRowsInTableInfo;
  }

  /**
   * Create a SqlDatabase from an already-constructed (possibly uninitialized)
   * typeorm DataSource. Initializes the connection if needed, discovers the
   * table metadata, and validates the include/ignore lists against it.
   */
  static async fromDataSourceParams(
    fields: SqlDatabaseDataSourceParams
  ): Promise<SqlDatabase> {
    const sqlDatabase = new SqlDatabase(fields);
    if (!sqlDatabase.appDataSource.isInitialized) {
      await sqlDatabase.appDataSource.initialize();
    }
    sqlDatabase.allTables = await getTableAndColumnsName(
      sqlDatabase.appDataSource
    );
    // Keep only custom descriptions that refer to tables that actually exist.
    sqlDatabase.customDescription = Object.fromEntries(
      Object.entries(fields?.customDescription ?? {}).filter(([key]) =>
        sqlDatabase.allTables
          .map((table: SqlTable) => table.tableName)
          .includes(key)
      )
    );
    verifyIncludeTablesExistInDatabase(
      sqlDatabase.allTables,
      sqlDatabase.includesTables
    );
    verifyIgnoreTablesExistInDatabase(
      sqlDatabase.allTables,
      sqlDatabase.ignoreTables
    );
    return sqlDatabase;
  }

  /**
   * Create a SqlDatabase from typeorm DataSourceOptions. typeorm is imported
   * lazily so it remains an optional dependency.
   */
  static async fromOptionsParams(
    fields: SqlDatabaseOptionsParams
  ): Promise<SqlDatabase> {
    const { DataSource } = await import("typeorm");
    const dataSource = new DataSource(fields.appDataSourceOptions);
    return SqlDatabase.fromDataSourceParams({
      ...fields,
      appDataSource: dataSource,
    });
  }

  /**
   * Get information about specified tables.
   *
   * Follows best practices as specified in: Rajkumar et al, 2022
   * (https://arxiv.org/abs/2204.00498)
   *
   * If `sample_rows_in_table_info`, the specified number of sample rows will be
   * appended to each table description. This can increase performance as
   * demonstrated in the paper.
   */
  async getTableInfo(targetTables?: Array<string>): Promise<string> {
    // Start from the include list (if any), then remove ignored tables.
    let selectedTables =
      this.includesTables.length > 0
        ? this.allTables.filter((currentTable) =>
            this.includesTables.includes(currentTable.tableName)
          )
        : this.allTables;
    if (this.ignoreTables.length > 0) {
      selectedTables = selectedTables.filter(
        (currentTable) => !this.ignoreTables.includes(currentTable.tableName)
      );
    }
    // An explicit target list overrides the include/ignore filtering, but each
    // target must still name a real table.
    if (targetTables && targetTables.length > 0) {
      verifyListTablesExistInDatabase(
        this.allTables,
        targetTables,
        "Wrong target table name:"
      );
      selectedTables = this.allTables.filter((currentTable) =>
        targetTables.includes(currentTable.tableName)
      );
    }
    return generateTableInfoFromTables(
      selectedTables,
      this.appDataSource,
      this.sampleRowsInTableInfo,
      this.customDescription
    );
  }

  /**
   * Execute a SQL command and return a string representing the results.
   * If the statement returns rows, a string of the results is returned.
   * If the statement returns no rows, an empty string is returned.
   */
  async run(command: string, fetch: "all" | "one" = "all"): Promise<string> {
    // TODO: Potential security issue here — `command` is executed verbatim;
    // restrict the database user's permissions as noted on the class.
    const res = await this.appDataSource.query(command);
    if (fetch === "all") {
      return JSON.stringify(res);
    }
    if (res?.length > 0) {
      return JSON.stringify(res[0]);
    }
    return "";
  }

  /** Plain-object snapshot of the connection options and filter settings. */
  serialize(): SerializedSqlDatabase {
    return {
      _type: "sql_database",
      appDataSourceOptions: this.appDataSourceOptions,
      includesTables: this.includesTables,
      ignoreTables: this.ignoreTables,
      sampleRowsInTableInfo: this.sampleRowsInTableInfo,
    };
  }

  /** @ignore */
  static async imports() {
    try {
      const { DataSource } = await import("typeorm");
      return { DataSource };
    } catch (e) {
      console.error(e);
      throw new Error(
        "Failed to load typeorm. Please install it with eg. `yarn add typeorm`."
      );
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/agents/helpers.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { ToolInterface } from "@langchain/core/tools";
import type { SerializedAgentT, AgentInput } from "./types.js";
import { LLMChain } from "../chains/llm_chain.js";
/**
 * Shared deserialization routine for agents.
 * Depending on the serialized flag, the agent is rebuilt either from an LLM
 * plus tools, or from a serialized LLMChain via the plain constructor.
 */
export const deserializeHelper = async <
  T extends string,
  U extends Record<string, unknown>,
  V extends AgentInput,
  Z
>(
  llm: BaseLanguageModelInterface | undefined,
  tools: ToolInterface[] | undefined,
  data: SerializedAgentT<T, U, V>,
  fromLLMAndTools: (
    llm: BaseLanguageModelInterface,
    tools: ToolInterface[],
    args: U
  ) => Z,
  fromConstructor: (args: V) => Z
): Promise<Z> => {
  if (!data.load_from_llm_and_tools) {
    // Constructor path: rehydrate the serialized LLMChain first.
    if (!data.llm_chain) {
      throw new Error("Loading from constructor, llm_chain must be provided.");
    }
    const llmChain = await LLMChain.deserialize(data.llm_chain);
    return fromConstructor({ ...data, llmChain });
  }
  // LLM-and-tools path: both must be supplied by the caller.
  if (!llm) {
    throw new Error("Loading from llm and tools, llm must be provided.");
  }
  if (!tools) {
    throw new Error("Loading from llm and tools, tools must be provided.");
  }
  return fromLLMAndTools(llm, tools, data);
};
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/agents/agent.ts | import type {
StructuredToolInterface,
ToolInterface,
} from "@langchain/core/tools";
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { CallbackManager, Callbacks } from "@langchain/core/callbacks/manager";
import { BasePromptTemplate } from "@langchain/core/prompts";
import { AgentAction, AgentFinish, AgentStep } from "@langchain/core/agents";
import { BaseMessage } from "@langchain/core/messages";
import { ChainValues } from "@langchain/core/utils/types";
import { Serializable } from "@langchain/core/load/serializable";
import {
Runnable,
patchConfig,
type RunnableConfig,
RunnableSequence,
RunnableLike,
} from "@langchain/core/runnables";
import { LLMChain } from "../chains/llm_chain.js";
import type {
AgentActionOutputParser,
AgentInput,
RunnableMultiActionAgentInput,
RunnableSingleActionAgentInput,
SerializedAgent,
StoppingMethod,
} from "./types.js";
/**
* Record type for arguments passed to output parsers.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type OutputParserArgs = Record<string, any>;
/**
* Error class for parse errors in LangChain. Contains information about
* the error message and the output that caused the error.
*/
/**
 * Error raised when an agent's output cannot be parsed.
 * Keeps the raw model output that failed to parse on `output`.
 */
class ParseError extends Error {
  constructor(msg: string, public output: string) {
    super(msg);
  }
}
/**
* Abstract base class for agents in LangChain. Provides common
* functionality for agents, such as handling inputs and outputs.
*/
export abstract class BaseAgent extends Serializable {
  // Narrowest tool type this agent works with; subclasses may redeclare.
  declare ToolType: StructuredToolInterface;

  // Input keys the agent expects from the executor.
  abstract get inputKeys(): string[];

  // Keys present in a final AgentFinish's return values.
  get returnValues(): string[] {
    return ["output"];
  }

  // Names of tools this agent is allowed to call; undefined by default.
  get allowedTools(): string[] | undefined {
    return undefined;
  }
  /**
   * Return the string type key uniquely identifying this class of agent.
   */
  _agentType(): string {
    throw new Error("Not implemented");
  }
  /**
   * Return the string type key uniquely identifying multi or single action agents.
   */
  abstract _agentActionType(): string;
  /**
   * Return response when agent has been stopped due to max iterations.
   * The base implementation only supports the "force" stopping method;
   * subclasses may additionally support "generate".
   */
  returnStoppedResponse(
    earlyStoppingMethod: StoppingMethod,
    _steps: AgentStep[],
    _inputs: ChainValues,
    _callbackManager?: CallbackManager
  ): Promise<AgentFinish> {
    if (earlyStoppingMethod === "force") {
      return Promise.resolve({
        returnValues: { output: "Agent stopped due to max iterations." },
        log: "",
      });
    }
    throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);
  }
  /**
   * Prepare the agent for output, if needed.
   * The base implementation contributes no extra return values.
   */
  async prepareForOutput(
    _returnValues: AgentFinish["returnValues"],
    _steps: AgentStep[]
  ): Promise<AgentFinish["returnValues"]> {
    return {};
  }
}
/**
* Abstract base class for single action agents in LangChain. Extends the
* BaseAgent class and provides additional functionality specific to
* single action agents.
*/
export abstract class BaseSingleActionAgent extends BaseAgent {
  // Discriminator used to tell single- from multi-action agents at runtime.
  _agentActionType(): string {
    return "single" as const;
  }
  /**
   * Decide what to do, given some input.
   *
   * @param steps - Steps the LLM has taken so far, along with observations from each.
   * @param inputs - User inputs.
   * @param callbackManager - Callback manager.
   * @param config - Optional runnable config, for runnable-backed subclasses.
   *
   * @returns Action specifying what tool to use.
   */
  abstract plan(
    steps: AgentStep[],
    inputs: ChainValues,
    callbackManager?: CallbackManager,
    config?: RunnableConfig
  ): Promise<AgentAction | AgentFinish>;
}
/**
* Abstract base class for multi-action agents in LangChain. Extends the
* BaseAgent class and provides additional functionality specific to
* multi-action agents.
*/
export abstract class BaseMultiActionAgent extends BaseAgent {
  // Discriminator used to tell multi- from single-action agents at runtime.
  _agentActionType(): string {
    return "multi" as const;
  }
  /**
   * Decide what to do, given some input.
   *
   * @param steps - Steps the LLM has taken so far, along with observations from each.
   * @param inputs - User inputs.
   * @param callbackManager - Callback manager.
   * @param config - Optional runnable config, for runnable-backed subclasses.
   *
   * @returns Actions specifying what tools to use.
   */
  abstract plan(
    steps: AgentStep[],
    inputs: ChainValues,
    callbackManager?: CallbackManager,
    config?: RunnableConfig
  ): Promise<AgentAction[] | AgentFinish>;
}
/** Type guard: true when `input` is a single AgentAction (non-array with a `tool` field). */
function isAgentAction(input: unknown): input is AgentAction {
  if (Array.isArray(input)) {
    return false;
  }
  return (input as AgentAction)?.tool !== undefined;
}
/** True when the agent is runnable-backed (carries a `runnable` property). */
export function isRunnableAgent(x: BaseAgent) {
  const candidate = x as RunnableMultiActionAgent | RunnableSingleActionAgent;
  return candidate.runnable !== undefined;
}
// TODO: Remove in the future. Only for backwards compatibility.
// Allows for the creation of runnables with properties that will
// be passed to the agent executor constructor.
export class AgentRunnableSequence<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunInput = any,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunOutput = any
> extends RunnableSequence<RunInput, RunOutput> {
  // Whether the executor should stream from the underlying runnable.
  streamRunnable?: boolean;

  // True if each step yields a single action (vs. a list of actions).
  singleAction: boolean;

  /**
   * Build an AgentRunnableSequence from a list of runnables, tagging it with
   * the executor configuration (`singleAction`, `streamRunnable`) it carries.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static fromRunnables<RunInput = any, RunOutput = any>(
    [first, ...runnables]: [
      RunnableLike<RunInput>,
      ...RunnableLike[],
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      RunnableLike<any, RunOutput>
    ],
    config: { singleAction: boolean; streamRunnable?: boolean; name?: string }
  ): AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>> {
    const sequence = RunnableSequence.from(
      [first, ...runnables],
      config.name
    ) as AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>>;
    sequence.singleAction = config.singleAction;
    sequence.streamRunnable = config.streamRunnable;
    return sequence;
  }

  /** Type guard: checks for the `singleAction` tag set by `fromRunnables`. */
  static isAgentRunnableSequence(x: Runnable): x is AgentRunnableSequence {
    return typeof (x as AgentRunnableSequence).singleAction === "boolean";
  }
}
/**
* Class representing a single-action agent powered by runnables.
* Extends the BaseSingleActionAgent class and provides methods for
* planning agent actions with runnables.
*/
export class RunnableSingleActionAgent extends BaseSingleActionAgent {
  lc_namespace = ["langchain", "agents", "runnable"];

  // Receives the chain inputs plus the intermediate steps so far, and must
  // emit exactly one AgentAction or AgentFinish.
  runnable: Runnable<
    ChainValues & { steps: AgentStep[] },
    AgentAction | AgentFinish
  >;

  // Input keys are determined by the wrapped runnable, not declared here.
  get inputKeys(): string[] {
    return [];
  }
  /**
   * Whether to stream from the runnable or not.
   * If true, the underlying LLM is invoked in a streaming fashion to make it
   * possible to get access to the individual LLM tokens when using
   * `streamLog` with the Agent Executor. If false then LLM is invoked in a
   * non-streaming fashion and individual LLM tokens will not be available
   * in `streamLog`.
   *
   * Note that the runnable should still only stream a single action or
   * finish chunk.
   */
  streamRunnable = true;

  defaultRunName = "RunnableAgent";

  constructor(fields: RunnableSingleActionAgentInput) {
    super(fields);
    this.runnable = fields.runnable;
    this.defaultRunName =
      fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;
    this.streamRunnable = fields.streamRunnable ?? this.streamRunnable;
  }

  /**
   * Run the underlying runnable once and return its single action/finish.
   * In streaming mode the stream must yield exactly one chunk; more than one
   * chunk means the runnable is not agent-compatible and is an error.
   */
  async plan(
    steps: AgentStep[],
    inputs: ChainValues,
    callbackManager?: CallbackManager,
    config?: RunnableConfig
  ): Promise<AgentAction | AgentFinish> {
    const combinedInput = { ...inputs, steps };
    const combinedConfig = patchConfig(config, {
      callbacks: callbackManager,
      runName: this.defaultRunName,
    });
    if (this.streamRunnable) {
      const stream = await this.runnable.stream(combinedInput, combinedConfig);
      let finalOutput: AgentAction | AgentFinish | undefined;
      for await (const chunk of stream) {
        if (finalOutput === undefined) {
          finalOutput = chunk;
        } else {
          throw new Error(
            [
              `Multiple agent actions/finishes received in streamed agent output.`,
              `Set "streamRunnable: false" when initializing the agent to invoke this agent in non-streaming mode.`,
            ].join("\n")
          );
        }
      }
      // An empty stream gives the executor nothing to act on.
      if (finalOutput === undefined) {
        throw new Error(
          [
            "No streaming output received from underlying runnable.",
            `Set "streamRunnable: false" when initializing the agent to invoke this agent in non-streaming mode.`,
          ].join("\n")
        );
      }
      return finalOutput;
    } else {
      return this.runnable.invoke(combinedInput, combinedConfig);
    }
  }
}
/**
* Class representing a multi-action agent powered by runnables.
* Extends the BaseMultiActionAgent class and provides methods for
* planning agent actions with runnables.
*/
export class RunnableMultiActionAgent extends BaseMultiActionAgent {
  lc_namespace = ["langchain", "agents", "runnable"];
  // TODO: Rename input to "intermediate_steps"
  // May emit a single action, a list of actions, or a finish; `plan` below
  // normalizes a bare action into a one-element array.
  runnable: Runnable<
    ChainValues & { steps: AgentStep[] },
    AgentAction[] | AgentAction | AgentFinish
  >;

  defaultRunName = "RunnableAgent";

  // Optional stop sequences; stored from the input but not read elsewhere in this class.
  stop?: string[];

  // Same streaming semantics as RunnableSingleActionAgent.streamRunnable.
  streamRunnable = true;

  // Input keys are determined by the wrapped runnable, not declared here.
  get inputKeys(): string[] {
    return [];
  }

  constructor(fields: RunnableMultiActionAgentInput) {
    super(fields);
    this.runnable = fields.runnable;
    this.stop = fields.stop;
    this.defaultRunName =
      fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;
    this.streamRunnable = fields.streamRunnable ?? this.streamRunnable;
  }

  /**
   * Run the underlying runnable once and return its action list or finish.
   * In streaming mode the stream must yield exactly one chunk.
   */
  async plan(
    steps: AgentStep[],
    inputs: ChainValues,
    callbackManager?: CallbackManager,
    config?: RunnableConfig
  ): Promise<AgentAction[] | AgentFinish> {
    const combinedInput = { ...inputs, steps };
    const combinedConfig = patchConfig(config, {
      callbacks: callbackManager,
      runName: this.defaultRunName,
    });
    let output;
    if (this.streamRunnable) {
      const stream = await this.runnable.stream(combinedInput, combinedConfig);
      let finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined;
      for await (const chunk of stream) {
        if (finalOutput === undefined) {
          finalOutput = chunk;
        } else {
          throw new Error(
            [
              `Multiple agent actions/finishes received in streamed agent output.`,
              `Set "streamRunnable: false" when initializing the agent to invoke this agent in non-streaming mode.`,
            ].join("\n")
          );
        }
      }
      if (finalOutput === undefined) {
        throw new Error(
          [
            "No streaming output received from underlying runnable.",
            `Set "streamRunnable: false" when initializing the agent to invoke this agent in non-streaming mode.`,
          ].join("\n")
        );
      }
      output = finalOutput;
    } else {
      output = await this.runnable.invoke(combinedInput, combinedConfig);
    }
    // Normalize: a single action becomes a one-element action list.
    if (isAgentAction(output)) {
      return [output];
    }
    return output;
  }
}
/** @deprecated Renamed to RunnableMultiActionAgent. */
export class RunnableAgent extends RunnableMultiActionAgent {}
/**
* Interface for input data for creating a LLMSingleActionAgent.
*/
export interface LLMSingleActionAgentInput {
  // Chain that generates the agent's raw text output.
  llmChain: LLMChain;
  // Parses the chain output into an AgentAction or AgentFinish.
  outputParser: AgentActionOutputParser;
  // Optional stop sequences forwarded to the LLM call.
  stop?: string[];
}
/**
* Class representing a single action agent using a LLMChain in LangChain.
* Extends the BaseSingleActionAgent class and provides methods for
* planning agent actions based on LLMChain outputs.
* @example
* ```typescript
* const customPromptTemplate = new CustomPromptTemplate({
* tools: [new Calculator()],
* inputVariables: ["input", "agent_scratchpad"],
* });
* const customOutputParser = new CustomOutputParser();
* const agent = new LLMSingleActionAgent({
* llmChain: new LLMChain({
* prompt: customPromptTemplate,
* llm: new ChatOpenAI({ temperature: 0 }),
* }),
* outputParser: customOutputParser,
* stop: ["\nObservation"],
* });
* const executor = new AgentExecutor({
* agent,
* tools: [new Calculator()],
* });
* const result = await executor.invoke({
* input:
* "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?",
* });
* ```
*/
export class LLMSingleActionAgent extends BaseSingleActionAgent {
  lc_namespace = ["langchain", "agents"];

  // Chain producing the raw text the output parser turns into an action.
  llmChain: LLMChain;

  // Parses LLM text into an AgentAction or AgentFinish.
  outputParser: AgentActionOutputParser;

  // Optional stop sequences forwarded to the LLM call.
  stop?: string[];

  constructor(input: LLMSingleActionAgentInput) {
    super(input);
    this.stop = input.stop;
    this.llmChain = input.llmChain;
    this.outputParser = input.outputParser;
  }

  // Input keys are delegated to the underlying chain's prompt.
  get inputKeys(): string[] {
    return this.llmChain.inputKeys;
  }
  /**
   * Decide what to do given some input.
   *
   * @param steps - Steps the LLM has taken so far, along with observations from each.
   * @param inputs - User inputs.
   * @param callbackManager - Callback manager.
   *
   * @returns Action specifying what tool to use.
   */
  async plan(
    steps: AgentStep[],
    inputs: ChainValues,
    callbackManager?: CallbackManager
  ): Promise<AgentAction | AgentFinish> {
    // `intermediate_steps` and `stop` are provided to the chain; user inputs
    // spread last, so same-named keys intentionally take precedence.
    const output = await this.llmChain.call(
      {
        intermediate_steps: steps,
        stop: this.stop,
        ...inputs,
      },
      callbackManager
    );
    return this.outputParser.parse(
      output[this.llmChain.outputKey],
      callbackManager
    );
  }
}
/**
* Interface for arguments used to create an agent in LangChain.
*/
export interface AgentArgs {
  // Optional custom parser for the agent's LLM output.
  outputParser?: AgentActionOutputParser;
  // Callbacks passed through to the underlying chain.
  callbacks?: Callbacks;
  /**
   * @deprecated Use `callbacks` instead.
   */
  callbackManager?: CallbackManager;
}
/**
* Class responsible for calling a language model and deciding an action.
*
* @remarks This is driven by an LLMChain. The prompt in the LLMChain *must*
* include a variable called "agent_scratchpad" where the agent can put its
* intermediary work.
*
* @deprecated Use {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation methods}.
*/
export abstract class Agent extends BaseSingleActionAgent {
  // Chain driving the agent; its prompt must include "agent_scratchpad".
  llmChain: LLMChain;

  // Parser turning raw LLM output into an AgentAction or AgentFinish.
  outputParser: AgentActionOutputParser | undefined;

  private _allowedTools?: string[] = undefined;

  get allowedTools(): string[] | undefined {
    return this._allowedTools;
  }

  // All prompt inputs except the scratchpad, which the agent fills in itself.
  get inputKeys(): string[] {
    return this.llmChain.inputKeys.filter((k) => k !== "agent_scratchpad");
  }

  constructor(input: AgentInput) {
    super(input);
    this.llmChain = input.llmChain;
    this._allowedTools = input.allowedTools;
    this.outputParser = input.outputParser;
  }
  /**
   * Prefix to append the observation with.
   */
  abstract observationPrefix(): string;
  /**
   * Prefix to append the LLM call with.
   */
  abstract llmPrefix(): string;
  /**
   * Return the string type key uniquely identifying this class of agent.
   */
  abstract _agentType(): string;
  /**
   * Get the default output parser for this agent.
   */
  static getDefaultOutputParser(
    _fields?: OutputParserArgs
  ): AgentActionOutputParser {
    throw new Error("Not implemented");
  }
  /**
   * Create a prompt for this class
   *
   * @param _tools - List of tools the agent will have access to, used to format the prompt.
   * @param _fields - Additional fields used to format the prompt.
   *
   * @returns A PromptTemplate assembled from the given tools and fields.
   * */
  static createPrompt(
    _tools: StructuredToolInterface[],
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    _fields?: Record<string, any>
  ): BasePromptTemplate {
    throw new Error("Not implemented");
  }
  /** Construct an agent from an LLM and a list of tools */
  static fromLLMAndTools(
    _llm: BaseLanguageModelInterface,
    _tools: StructuredToolInterface[],
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    _args?: AgentArgs
  ): Agent {
    throw new Error("Not implemented");
  }
  /**
   * Validate that appropriate tools are passed in.
   * The base implementation accepts any tools.
   */
  static validateTools(_tools: StructuredToolInterface[]): void {}

  // Stop sequence: cut the LLM off as soon as it starts writing an observation.
  _stop(): string[] {
    return [`\n${this.observationPrefix()}`];
  }
  /**
   * Name of tool to use to terminate the chain.
   */
  finishToolName(): string {
    return "Final Answer";
  }
  /**
   * Construct a scratchpad to let the agent continue its thought process.
   * Replays each prior action's log followed by its observation so the LLM
   * sees the full reasoning trace in the prompt.
   */
  async constructScratchPad(
    steps: AgentStep[]
  ): Promise<string | BaseMessage[]> {
    return steps.reduce(
      (thoughts, { action, observation }) =>
        thoughts +
        [
          action.log,
          `${this.observationPrefix()}${observation}`,
          this.llmPrefix(),
        ].join("\n"),
      ""
    );
  }

  // Shared implementation behind plan() and the "generate" stopping method.
  // The optional suffix is appended to the scratchpad to steer a final answer.
  private async _plan(
    steps: AgentStep[],
    inputs: ChainValues,
    suffix?: string,
    callbackManager?: CallbackManager
  ): Promise<AgentAction | AgentFinish> {
    const thoughts = await this.constructScratchPad(steps);
    const newInputs: ChainValues = {
      ...inputs,
      agent_scratchpad: suffix ? `${thoughts}${suffix}` : thoughts,
    };
    if (this._stop().length !== 0) {
      newInputs.stop = this._stop();
    }
    const output = await this.llmChain.predict(newInputs, callbackManager);
    if (!this.outputParser) {
      throw new Error("Output parser not set");
    }
    return this.outputParser.parse(output, callbackManager);
  }
  /**
   * Decide what to do given some input.
   *
   * @param steps - Steps the LLM has taken so far, along with observations from each.
   * @param inputs - User inputs.
   * @param callbackManager - Callback manager to use for this call.
   *
   * @returns Action specifying what tool to use.
   */
  plan(
    steps: AgentStep[],
    inputs: ChainValues,
    callbackManager?: CallbackManager
  ): Promise<AgentAction | AgentFinish> {
    return this._plan(steps, inputs, undefined, callbackManager);
  }
  /**
   * Return response when agent has been stopped due to max iterations.
   * "force" returns a canned message; "generate" asks the LLM for one final
   * pass to produce an answer from the steps taken so far.
   */
  async returnStoppedResponse(
    earlyStoppingMethod: StoppingMethod,
    steps: AgentStep[],
    inputs: ChainValues,
    callbackManager?: CallbackManager
  ): Promise<AgentFinish> {
    if (earlyStoppingMethod === "force") {
      return {
        returnValues: { output: "Agent stopped due to max iterations." },
        log: "",
      };
    }
    if (earlyStoppingMethod === "generate") {
      try {
        const action = await this._plan(
          steps,
          inputs,
          "\n\nI now need to return a final answer based on the previous steps:",
          callbackManager
        );
        if ("returnValues" in action) {
          return action;
        }
        // The LLM produced another action; surface its log as the final output.
        return { returnValues: { output: action.log }, log: action.log };
      } catch (err) {
        // fine to use instanceof because we're in the same module
        // eslint-disable-next-line no-instanceof/no-instanceof
        if (!(err instanceof ParseError)) {
          throw err;
        }
        // Unparseable output is used verbatim as the final answer.
        return { returnValues: { output: err.output }, log: err.output };
      }
    }
    throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);
  }
  /**
   * Load an agent from a json-like object describing it.
   */
  static async deserialize(
    data: SerializedAgent & {
      llm?: BaseLanguageModelInterface;
      tools?: ToolInterface[];
    }
  ): Promise<Agent> {
    switch (data._type) {
      case "zero-shot-react-description": {
        // Lazy import avoids a circular dependency at module load time.
        const { ZeroShotAgent } = await import("./mrkl/index.js");
        return ZeroShotAgent.deserialize(data);
      }
      default:
        throw new Error("Unknown agent type");
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/agents/types.ts | import type { Runnable } from "@langchain/core/runnables";
import { BaseOutputParser } from "@langchain/core/output_parsers";
import type { AgentAction, AgentFinish } from "@langchain/core/agents";
import type { BaseMessage } from "@langchain/core/messages";
import type { ChainValues } from "@langchain/core/utils/types";
import { SerializedLLMChain } from "../chains/serde.js";
import { LLMChain } from "../chains/llm_chain.js";
/**
* Interface defining the input for creating an agent. It includes the
* LLMChain instance, an optional output parser, and an optional list of
* allowed tools.
*/
export interface AgentInput {
  // Chain driving the agent.
  llmChain: LLMChain;
  // Parser for the chain's raw output; may be undefined.
  outputParser: AgentActionOutputParser | undefined;
  // Names of the tools the agent is allowed to use, if restricted.
  allowedTools?: string[];
}
/**
* Interface defining the input for creating a single action agent
* that uses runnables.
*/
export interface RunnableSingleActionAgentInput {
  // Maps chain inputs (plus optional scratchpad/stop) to a single
  // AgentAction or AgentFinish.
  runnable: Runnable<
    ChainValues & {
      agent_scratchpad?: string | BaseMessage[];
      stop?: string[];
    },
    AgentAction | AgentFinish
  >;
  // Whether to stream from the runnable (the agent defaults this to true).
  streamRunnable?: boolean;
  // Run name used for tracing; the agent falls back to the runnable's name.
  defaultRunName?: string;
}
/**
* Interface defining the input for creating a multi-action agent that uses
* runnables. It includes the Runnable instance, and an optional list of
* stop strings.
*/
export interface RunnableMultiActionAgentInput {
  // Maps chain inputs (plus optional scratchpad/stop) to one action, a list
  // of actions, or a finish.
  runnable: Runnable<
    ChainValues & {
      agent_scratchpad?: string | BaseMessage[];
      stop?: string[];
    },
    AgentAction[] | AgentAction | AgentFinish
  >;
  // Whether to stream from the runnable (the agent defaults this to true).
  streamRunnable?: boolean;
  // Run name used for tracing; the agent falls back to the runnable's name.
  defaultRunName?: string;
  // Optional stop sequences stored on the agent.
  stop?: string[];
}
/** @deprecated Renamed to RunnableMultiActionAgentInput. */
export interface RunnableAgentInput extends RunnableMultiActionAgentInput {}
/**
* Abstract class representing an output parser specifically for agent
* actions and finishes in LangChain. It extends the `BaseOutputParser`
* class.
*/
export abstract class AgentActionOutputParser extends BaseOutputParser<
AgentAction | AgentFinish
> {}
/**
* Abstract class representing an output parser specifically for agents
* that return multiple actions.
*/
export abstract class AgentMultiActionOutputParser extends BaseOutputParser<
AgentAction[] | AgentFinish
> {}
/**
* Type representing the stopping method for an agent. It can be either
* 'force' or 'generate'.
*/
export type StoppingMethod = "force" | "generate";
/**
* Generic type representing a serialized agent in LangChain. It includes
* the type of the agent, the serialized form of the LLMChain, and
* additional properties specific to the agent type.
*/
export type SerializedAgentT<
TType extends string = string,
FromLLMInput extends Record<string, unknown> = Record<string, unknown>,
ConstructorInput extends AgentInput = AgentInput
> = {
_type: TType;
llm_chain?: SerializedLLMChain;
} & (
| ({ load_from_llm_and_tools: true } & FromLLMInput)
| ({ load_from_llm_and_tools?: false } & ConstructorInput)
);
/**
 * Prompt-construction fields captured when an agent is serialized with
 * `load_from_llm_and_tools`: prompt suffix/prefix text and input variables.
 */
export type SerializedFromLLMAndTools = {
  suffix?: string;
  prefix?: string;
  input_variables?: string[];
};
/**
* Type representing a serialized ZeroShotAgent in LangChain. It extends
* the `SerializedAgentT` type and includes additional properties specific
* to the ZeroShotAgent.
*/
export type SerializedZeroShotAgent = SerializedAgentT<
"zero-shot-react-description",
SerializedFromLLMAndTools,
AgentInput
>;
/**
* Type representing a serialized agent in LangChain. It is currently
* synonymous with `SerializedZeroShotAgent`.
*/
export type SerializedAgent = SerializedZeroShotAgent;
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/agents/index.ts | export {
Agent,
type AgentArgs,
BaseSingleActionAgent,
BaseMultiActionAgent,
RunnableAgent,
LLMSingleActionAgent,
type LLMSingleActionAgentInput,
type OutputParserArgs,
type AgentRunnableSequence,
} from "./agent.js";
export {
JsonToolkit,
OpenApiToolkit,
RequestsToolkit,
type VectorStoreInfo,
VectorStoreRouterToolkit,
VectorStoreToolkit,
createJsonAgent,
createOpenApiAgent,
createVectorStoreAgent,
createVectorStoreRouterAgent,
} from "./toolkits/index.js";
export { Toolkit } from "./toolkits/base.js";
export {
ChatAgent,
type ChatAgentInput,
type ChatCreatePromptArgs,
} from "./chat/index.js";
export { ChatAgentOutputParser } from "./chat/outputParser.js";
export {
ChatConversationalAgent,
type ChatConversationalAgentInput,
type ChatConversationalCreatePromptArgs,
} from "./chat_convo/index.js";
export {
ChatConversationalAgentOutputParser,
type ChatConversationalAgentOutputParserArgs,
ChatConversationalAgentOutputParserWithRetries,
type ChatConversationalAgentOutputParserFormatInstructionsOptions,
} from "./chat_convo/outputParser.js";
export { AgentExecutor, type AgentExecutorInput } from "./executor.js";
export {
initializeAgentExecutor,
initializeAgentExecutorWithOptions,
type InitializeAgentExecutorOptions,
type InitializeAgentExecutorOptionsStructured,
} from "./initialize.js";
export {
ZeroShotAgent,
type ZeroShotAgentInput,
type ZeroShotCreatePromptArgs,
} from "./mrkl/index.js";
export { ZeroShotAgentOutputParser } from "./mrkl/outputParser.js";
export {
AgentActionOutputParser,
type AgentInput,
type SerializedAgent,
type SerializedAgentT,
type SerializedZeroShotAgent,
type StoppingMethod,
} from "./types.js";
export {
StructuredChatAgent,
type StructuredChatAgentInput,
type StructuredChatCreatePromptArgs,
type CreateStructuredChatAgentParams,
createStructuredChatAgent,
} from "./structured_chat/index.js";
export {
StructuredChatOutputParser,
type StructuredChatOutputParserArgs,
StructuredChatOutputParserWithRetries,
} from "./structured_chat/outputParser.js";
export {
OpenAIAgent,
type OpenAIAgentInput,
type OpenAIAgentCreatePromptArgs,
type CreateOpenAIFunctionsAgentParams,
createOpenAIFunctionsAgent,
} from "./openai_functions/index.js";
export {
type CreateOpenAIToolsAgentParams,
createOpenAIToolsAgent,
} from "./openai_tools/index.js";
export {
type CreateToolCallingAgentParams,
createToolCallingAgent,
} from "./tool_calling/index.js";
export {
XMLAgent,
type XMLAgentInput,
type CreateXmlAgentParams,
createXmlAgent,
} from "./xml/index.js";
export {
type CreateReactAgentParams,
createReactAgent,
} from "./react/index.js";
export type {
AgentAction,
AgentFinish,
AgentStep,
} from "@langchain/core/agents";
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/agents/initialize.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type {
StructuredToolInterface,
ToolInterface,
} from "@langchain/core/tools";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { BufferMemory } from "../memory/buffer_memory.js";
import { ChatAgent } from "./chat/index.js";
import { ChatConversationalAgent } from "./chat_convo/index.js";
import { StructuredChatAgent } from "./structured_chat/index.js";
import { AgentExecutor, AgentExecutorInput } from "./executor.js";
import { ZeroShotAgent } from "./mrkl/index.js";
import { OpenAIAgent } from "./openai_functions/index.js";
import { XMLAgent } from "./xml/index.js";
/**
* Represents the type of an agent in LangChain. It can be
* "zero-shot-react-description", "chat-zero-shot-react-description", or
* "chat-conversational-react-description".
*/
type AgentType =
| "zero-shot-react-description"
| "chat-zero-shot-react-description"
| "chat-conversational-react-description";
/**
* @deprecated See {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation docs}.
*/
export const initializeAgentExecutor = async (
  tools: ToolInterface[],
  llm: BaseLanguageModelInterface,
  _agentType?: AgentType,
  _verbose?: boolean,
  _callbackManager?: CallbackManager
): Promise<AgentExecutor> => {
  // Fall back to the classic zero-shot MRKL agent when no type is given.
  const resolvedType: AgentType = _agentType ?? "zero-shot-react-description";
  const verbose = _verbose;
  const callbackManager = _callbackManager;
  if (resolvedType === "zero-shot-react-description") {
    return AgentExecutor.fromAgentAndTools({
      agent: ZeroShotAgent.fromLLMAndTools(llm, tools),
      tools,
      returnIntermediateSteps: true,
      verbose,
      callbackManager,
    });
  }
  if (resolvedType === "chat-zero-shot-react-description") {
    return AgentExecutor.fromAgentAndTools({
      agent: ChatAgent.fromLLMAndTools(llm, tools),
      tools,
      returnIntermediateSteps: true,
      verbose,
      callbackManager,
    });
  }
  if (resolvedType === "chat-conversational-react-description") {
    // Conversational flavor does not expose intermediate steps.
    return AgentExecutor.fromAgentAndTools({
      agent: ChatConversationalAgent.fromLLMAndTools(llm, tools),
      tools,
      verbose,
      callbackManager,
    });
  }
  throw new Error("Unknown agent type");
};
/**
* @interface
*/
export type InitializeAgentExecutorOptions =
  // Zero-shot MRKL agent for plain LLMs; memory is not supported.
  | ({
      agentType: "zero-shot-react-description";
      // Forwarded as the third argument of ZeroShotAgent.fromLLMAndTools.
      agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];
      memory?: never;
    } & Omit<AgentExecutorInput, "agent" | "tools">)
  // Zero-shot MRKL agent phrased for chat models; memory is not supported.
  | ({
      agentType: "chat-zero-shot-react-description";
      agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];
      memory?: never;
    } & Omit<AgentExecutorInput, "agent" | "tools">)
  // Conversational chat agent; a BufferMemory is created when none is given.
  | ({
      agentType: "chat-conversational-react-description";
      agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];
    } & Omit<AgentExecutorInput, "agent" | "tools">)
  // Agent that exchanges actions/answers in XML markup.
  | ({
      agentType: "xml";
      agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];
    } & Omit<AgentExecutorInput, "agent" | "tools">);
/**
* @interface
*/
export type InitializeAgentExecutorOptionsStructured =
  // Structured-chat agent: the only deprecated types that accept
  // StructuredToolInterface[] (multi-field tool inputs).
  | ({
      agentType: "structured-chat-zero-shot-react-description";
      agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];
    } & Omit<AgentExecutorInput, "agent" | "tools">)
  // OpenAI functions agent; a BufferMemory is created when none is given.
  | ({
      agentType: "openai-functions";
      agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];
    } & Omit<AgentExecutorInput, "agent" | "tools">);
/**
* Initialize an agent executor with options.
* @deprecated See {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation docs}.
* @param tools Array of tools to use in the agent
* @param llm LLM or ChatModel to use in the agent
* @param options Options for the agent, including agentType, agentArgs, and other options for AgentExecutor.fromAgentAndTools
* @returns AgentExecutor
*/
export async function initializeAgentExecutorWithOptions(
  tools: StructuredToolInterface[],
  llm: BaseLanguageModelInterface,
  options: InitializeAgentExecutorOptionsStructured
): Promise<AgentExecutor>;
/** @deprecated See {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation docs}. */
export async function initializeAgentExecutorWithOptions(
  tools: ToolInterface[],
  llm: BaseLanguageModelInterface,
  options?: InitializeAgentExecutorOptions
): Promise<AgentExecutor>;
/** @deprecated See {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation docs}. */
export async function initializeAgentExecutorWithOptions(
  tools: StructuredToolInterface[] | ToolInterface[],
  llm: BaseLanguageModelInterface,
  // When no options are given, default the agent type based on whether the
  // model reports itself as a chat model or a plain LLM.
  options:
    | InitializeAgentExecutorOptions
    | InitializeAgentExecutorOptionsStructured = {
    agentType:
      llm._modelType() === "base_chat_model"
        ? "chat-zero-shot-react-description"
        : "zero-shot-react-description",
  }
): Promise<AgentExecutor> {
  // Note this tools cast is safe as the overload signatures prevent
  // the function from being called with a StructuredTool[] when
  // the agentType is not in InitializeAgentExecutorOptionsStructured
  switch (options.agentType) {
    case "zero-shot-react-description": {
      // `agentArgs` and `tags` are consumed here; everything else in `rest`
      // is forwarded verbatim to AgentExecutor.fromAgentAndTools.
      const { agentArgs, tags, ...rest } = options;
      return AgentExecutor.fromAgentAndTools({
        tags: [...(tags ?? []), "zero-shot-react-description"],
        agent: ZeroShotAgent.fromLLMAndTools(
          llm,
          tools as ToolInterface[],
          agentArgs
        ),
        tools,
        ...rest,
      });
    }
    case "chat-zero-shot-react-description": {
      const { agentArgs, tags, ...rest } = options;
      return AgentExecutor.fromAgentAndTools({
        tags: [...(tags ?? []), "chat-zero-shot-react-description"],
        agent: ChatAgent.fromLLMAndTools(
          llm,
          tools as ToolInterface[],
          agentArgs
        ),
        tools,
        ...rest,
      });
    }
    case "chat-conversational-react-description": {
      const { agentArgs, memory, tags, ...rest } = options;
      const executor = AgentExecutor.fromAgentAndTools({
        tags: [...(tags ?? []), "chat-conversational-react-description"],
        agent: ChatConversationalAgent.fromLLMAndTools(
          llm,
          tools as ToolInterface[],
          agentArgs
        ),
        tools,
        // Conversational agents require chat history; default to a
        // message-returning BufferMemory when the caller supplies none.
        memory:
          memory ??
          new BufferMemory({
            returnMessages: true,
            memoryKey: "chat_history",
            inputKey: "input",
            outputKey: "output",
          }),
        ...rest,
      });
      return executor;
    }
    case "xml": {
      const { agentArgs, tags, ...rest } = options;
      const executor = AgentExecutor.fromAgentAndTools({
        tags: [...(tags ?? []), "xml"],
        agent: XMLAgent.fromLLMAndTools(
          llm,
          tools as ToolInterface[],
          agentArgs
        ),
        tools,
        ...rest,
      });
      return executor;
    }
    case "structured-chat-zero-shot-react-description": {
      // Structured agents accept StructuredToolInterface[] directly (no cast).
      const { agentArgs, memory, tags, ...rest } = options;
      const executor = AgentExecutor.fromAgentAndTools({
        tags: [...(tags ?? []), "structured-chat-zero-shot-react-description"],
        agent: StructuredChatAgent.fromLLMAndTools(llm, tools, agentArgs),
        tools,
        memory,
        ...rest,
      });
      return executor;
    }
    case "openai-functions": {
      const { agentArgs, memory, tags, ...rest } = options;
      const executor = AgentExecutor.fromAgentAndTools({
        tags: [...(tags ?? []), "openai-functions"],
        agent: OpenAIAgent.fromLLMAndTools(llm, tools, agentArgs),
        tools,
        memory:
          memory ??
          new BufferMemory({
            returnMessages: true,
            memoryKey: "chat_history",
            inputKey: "input",
            outputKey: "output",
          }),
        ...rest,
      });
      return executor;
    }
    default: {
      throw new Error("Unknown agent type");
    }
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/agents/executor.ts | import {
type StructuredToolInterface,
type ToolInterface,
ToolInputParsingException,
Tool,
} from "@langchain/core/tools";
import {
Runnable,
type RunnableConfig,
patchConfig,
} from "@langchain/core/runnables";
import { AgentAction, AgentFinish, AgentStep } from "@langchain/core/agents";
import { ChainValues } from "@langchain/core/utils/types";
import {
CallbackManager,
CallbackManagerForChainRun,
Callbacks,
} from "@langchain/core/callbacks/manager";
import { OutputParserException } from "@langchain/core/output_parsers";
import { Serializable } from "@langchain/core/load/serializable";
import { SerializedLLMChain } from "../chains/serde.js";
import { StoppingMethod } from "./types.js";
import {
AgentRunnableSequence,
BaseMultiActionAgent,
BaseSingleActionAgent,
RunnableMultiActionAgent,
RunnableSingleActionAgent,
isRunnableAgent,
} from "./agent.js";
import { BaseChain, ChainInputs } from "../chains/base.js";
interface AgentExecutorIteratorInput {
  // Executor whose agent loop this iterator drives step by step.
  agentExecutor: AgentExecutor;
  // Chain inputs passed to the agent on every planning step.
  inputs: Record<string, string>;
  config?: RunnableConfig;
  /** @deprecated Use "config" */
  callbacks?: Callbacks;
  /** @deprecated Use "config" */
  tags?: string[];
  /** @deprecated Use "config" */
  metadata?: Record<string, unknown>;
  runName?: string;
  // Existing run manager to reuse instead of starting a new chain run.
  runManager?: CallbackManagerForChainRun;
}
export class AgentExecutorIterator
  extends Serializable
  implements AgentExecutorIteratorInput
{
  lc_namespace = ["langchain", "agents", "executor_iterator"];
  agentExecutor: AgentExecutor;
  // Chain inputs the iterator repeatedly feeds to the agent.
  inputs: Record<string, string>;
  config?: RunnableConfig;
  /** @deprecated Use "config" */
  callbacks?: Callbacks;
  /** @deprecated Use "config" */
  tags: string[] | undefined;
  /** @deprecated Use "config" */
  metadata: Record<string, unknown> | undefined;
  /** @deprecated Use "config" */
  runName: string | undefined;
  // Backing store for `finalOutputs`; populated once the agent finishes.
  private _finalOutputs: Record<string, unknown> | undefined;
  get finalOutputs(): Record<string, unknown> | undefined {
    return this._finalOutputs;
  }
  /** Intended to be used as a setter method, needs to be async. */
  async setFinalOutputs(value: Record<string, unknown> | undefined) {
    this._finalOutputs = undefined;
    if (value) {
      const preparedOutputs: Record<string, unknown> =
        await this.agentExecutor.prepOutputs(this.inputs, value, true);
      this._finalOutputs = preparedOutputs;
    }
  }
  runManager: CallbackManagerForChainRun | undefined;
  // Accumulated (action, observation) pairs across iterations.
  intermediateSteps: AgentStep[] = [];
  iterations = 0;
  // Fresh name -> tool lookup built from the executor's tool list.
  // NOTE(review): on a name collision, later tools silently shadow earlier
  // ones via Object.assign — confirm tool names are unique upstream.
  get nameToToolMap(): Record<string, ToolInterface> {
    const toolMap = this.agentExecutor.tools.map((tool) => ({
      [tool.name]: tool,
    }));
    return Object.assign({}, ...toolMap);
  }
  constructor(fields: AgentExecutorIteratorInput) {
    super(fields);
    this.agentExecutor = fields.agentExecutor;
    this.inputs = fields.inputs;
    this.callbacks = fields.callbacks;
    this.tags = fields.tags;
    this.metadata = fields.metadata;
    this.runName = fields.runName;
    this.runManager = fields.runManager;
    this.config = fields.config;
  }
  /**
   * Reset the iterator to its initial state, clearing intermediate steps,
   * iterations, and the final output.
   */
  reset(): void {
    this.intermediateSteps = [];
    this.iterations = 0;
    this._finalOutputs = undefined;
  }
  updateIterations(): void {
    this.iterations += 1;
  }
  /**
   * Async generator driving the agent loop. Yields one result per iteration
   * until `_callNext` throws the "Final outputs already reached" sentinel,
   * at which point the prepared final outputs are returned.
   */
  async *streamIterator() {
    this.reset();
    // Loop to handle iteration
    while (true) {
      try {
        if (this.iterations === 0) {
          await this.onFirstStep();
        }
        const result = await this._callNext();
        yield result;
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
      } catch (e: any) {
        // The sentinel message marks normal completion, not a failure.
        if (
          "message" in e &&
          e.message.startsWith("Final outputs already reached: ")
        ) {
          if (!this.finalOutputs) {
            throw e;
          }
          return this.finalOutputs;
        }
        if (this.runManager) {
          await this.runManager.handleChainError(e);
        }
        throw e;
      }
    }
  }
  /**
   * Perform any necessary setup for the first step
   * of the asynchronous iterator.
   */
  async onFirstStep(): Promise<void> {
    if (this.iterations === 0) {
      // Deprecated per-instance fields take precedence over `config` here.
      const callbackManager = await CallbackManager.configure(
        this.callbacks ?? this.config?.callbacks,
        this.agentExecutor.callbacks,
        this.tags ?? this.config?.tags,
        this.agentExecutor.tags,
        this.metadata ?? this.config?.metadata,
        this.agentExecutor.metadata,
        {
          verbose: this.agentExecutor.verbose,
        }
      );
      this.runManager = await callbackManager?.handleChainStart(
        this.agentExecutor.toJSON(),
        this.inputs,
        this.config?.runId,
        undefined,
        this.tags ?? this.config?.tags,
        this.metadata ?? this.config?.metadata,
        this.runName ?? this.config?.runName
      );
      if (this.config !== undefined) {
        // A runId identifies a single run; drop it so it is not reused.
        delete this.config.runId;
      }
    }
  }
  /**
   * Execute the next step in the chain using the
   * AgentExecutor's _takeNextStep method.
   */
  async _executeNextStep(
    runManager?: CallbackManagerForChainRun
  ): Promise<AgentFinish | AgentStep[]> {
    return this.agentExecutor._takeNextStep(
      this.nameToToolMap,
      this.inputs,
      this.intermediateSteps,
      runManager,
      this.config
    );
  }
  /**
   * Process the output of the next step,
   * handling AgentFinish and tool return cases.
   */
  async _processNextStepOutput(
    nextStepOutput: AgentFinish | AgentStep[],
    runManager?: CallbackManagerForChainRun
  ): Promise<Record<string, string | AgentStep[]>> {
    if ("returnValues" in nextStepOutput) {
      // AgentFinish: prepare final outputs and close out the chain run.
      const output = await this.agentExecutor._return(
        nextStepOutput as AgentFinish,
        this.intermediateSteps,
        runManager
      );
      if (this.runManager) {
        await this.runManager.handleChainEnd(output);
      }
      await this.setFinalOutputs(output);
      return output;
    }
    this.intermediateSteps = this.intermediateSteps.concat(
      nextStepOutput as AgentStep[]
    );
    let output: Record<string, string | AgentStep[]> = {};
    if (Array.isArray(nextStepOutput) && nextStepOutput.length === 1) {
      const nextStep = nextStepOutput[0];
      const toolReturn = await this.agentExecutor._getToolReturn(nextStep);
      if (toolReturn) {
        // returnDirect tool: record the final outputs now; the next
        // _callNext call will throw the sentinel and end the stream.
        output = await this.agentExecutor._return(
          toolReturn,
          this.intermediateSteps,
          runManager
        );
        await this.runManager?.handleChainEnd(output);
        await this.setFinalOutputs(output);
      }
    }
    // NOTE(review): this unconditionally overwrites the tool-return output
    // computed above — the yielded value is always the intermediate steps,
    // and final outputs only surface via `finalOutputs` on the following
    // iteration. Confirm this is the intended streaming contract.
    output = { intermediateSteps: nextStepOutput as AgentStep[] };
    return output;
  }
  /** Produce the agent's stopped response when iteration limits are hit. */
  async _stop(): Promise<Record<string, unknown>> {
    const output = await this.agentExecutor.agent.returnStoppedResponse(
      this.agentExecutor.earlyStoppingMethod,
      this.intermediateSteps,
      this.inputs
    );
    const returnedOutput = await this.agentExecutor._return(
      output,
      this.intermediateSteps,
      this.runManager
    );
    await this.setFinalOutputs(returnedOutput);
    await this.runManager?.handleChainEnd(returnedOutput);
    return returnedOutput;
  }
  /** Run one iteration of the agent loop, or stop if limits are exceeded. */
  async _callNext(): Promise<Record<string, unknown>> {
    // final output already reached: stopiteration (final output)
    if (this.finalOutputs) {
      throw new Error(
        `Final outputs already reached: ${JSON.stringify(
          this.finalOutputs,
          null,
          2
        )}`
      );
    }
    // timeout/max iterations: stopiteration (stopped response)
    if (!this.agentExecutor.shouldContinueGetter(this.iterations)) {
      return this._stop();
    }
    const nextStepOutput = await this._executeNextStep(this.runManager);
    const output = await this._processNextStepOutput(
      nextStepOutput,
      this.runManager
    );
    this.updateIterations();
    return output;
  }
}
// Pulls the agent's declared `ToolType` when present, otherwise falls back
// to StructuredToolInterface; used to type AgentExecutorInput["tools"].
type ExtractToolType<T> = T extends { ToolType: infer ToolInterface }
  ? ToolInterface
  : StructuredToolInterface;
/**
* Interface defining the structure of input data for creating an
* AgentExecutor. It extends ChainInputs and includes additional
* properties specific to agent execution.
*/
export interface AgentExecutorInput extends ChainInputs {
  // The agent itself, or a raw Runnable that maps chain values (plus the
  // accumulated steps) to actions or a finish; Runnables get wrapped by
  // the AgentExecutor constructor.
  agent:
    | BaseSingleActionAgent
    | BaseMultiActionAgent
    | Runnable<
        ChainValues & { steps?: AgentStep[] },
        AgentAction[] | AgentAction | AgentFinish
      >;
  tools: ExtractToolType<this["agent"]>[];
  // Include the AgentStep trace in the final output (defaults to false).
  returnIntermediateSteps?: boolean;
  // Hard cap on plan/act rounds (defaults to 15 in AgentExecutor).
  maxIterations?: number;
  earlyStoppingMethod?: StoppingMethod;
  // See AgentExecutor.handleParsingErrors for the full semantics.
  handleParsingErrors?:
    | boolean
    | string
    | ((e: OutputParserException | ToolInputParsingException) => string);
  // Optional hook converting a runtime tool error into an observation.
  handleToolRuntimeErrors?: (e: Error) => string;
}
// Loose output type: the agent's returnValues plus optional extras.
// TODO: Type properly with { intermediateSteps?: AgentStep[] };
export type AgentExecutorOutput = ChainValues;
/**
* Tool that just returns the query.
* Used for exception tracking.
*/
export class ExceptionTool extends Tool {
  // Reserved name the executor checks for when routing parser errors.
  name = "_Exception";
  description = "Exception tool";
  // Identity call: the "observation" is simply the error text passed in.
  async _call(query: string) {
    return query;
  }
}
/**
* A chain managing an agent using tools.
* @augments BaseChain
* @example
* ```typescript
*
* const executor = AgentExecutor.fromAgentAndTools({
* agent: async () => loadAgentFromLangchainHub(),
* tools: [new SerpAPI(), new Calculator()],
* returnIntermediateSteps: true,
* });
*
* const result = await executor.invoke({
* input: `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`,
* });
*
* ```
*/
export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
  static lc_name() {
    return "AgentExecutor";
  }
  get lc_namespace() {
    return ["langchain", "agents", "executor"];
  }
  agent: BaseSingleActionAgent | BaseMultiActionAgent;
  tools: this["agent"]["ToolType"][];
  // Whether to include the AgentStep trace in the final chain output.
  returnIntermediateSteps = false;
  maxIterations?: number = 15;
  earlyStoppingMethod: StoppingMethod = "force";
  // TODO: Update BaseChain implementation on breaking change to include this
  returnOnlyOutputs = true;
  /**
   * How to handle errors raised by the agent's output parser.
   * Defaults to `false`, which raises the error.
   * If `true`, the error will be sent back to the LLM as an observation.
   * If a string, the string itself will be sent to the LLM as an observation.
   * If a callable function, the function will be called with the exception
   * as an argument, and the result of that function will be passed to the
   * agent as an observation.
   */
  handleParsingErrors:
    | boolean
    | string
    | ((e: OutputParserException | ToolInputParsingException) => string) =
    false;
  // Optional hook converting a runtime tool error into an observation string.
  handleToolRuntimeErrors?: (e: Error) => string;
  // Input/output keys are delegated to the wrapped agent.
  get inputKeys() {
    return this.agent.inputKeys;
  }
  get outputKeys() {
    return this.agent.returnValues;
  }
  constructor(input: AgentExecutorInput) {
    let agent: BaseSingleActionAgent | BaseMultiActionAgent;
    let returnOnlyOutputs = true;
    if (Runnable.isRunnable(input.agent)) {
      // Wrap a raw Runnable in the matching single/multi-action adapter.
      if (AgentRunnableSequence.isAgentRunnableSequence(input.agent)) {
        if (input.agent.singleAction) {
          agent = new RunnableSingleActionAgent({
            runnable: input.agent,
            streamRunnable: input.agent.streamRunnable,
          });
        } else {
          agent = new RunnableMultiActionAgent({
            runnable: input.agent,
            streamRunnable: input.agent.streamRunnable,
          });
        }
      } else {
        agent = new RunnableMultiActionAgent({ runnable: input.agent });
      }
      // TODO: Update BaseChain implementation on breaking change
      returnOnlyOutputs = false;
    } else {
      if (isRunnableAgent(input.agent)) {
        returnOnlyOutputs = false;
      }
      agent = input.agent;
    }
    super(input);
    this.agent = agent;
    this.tools = input.tools;
    this.handleParsingErrors =
      input.handleParsingErrors ?? this.handleParsingErrors;
    this.handleToolRuntimeErrors = input.handleToolRuntimeErrors;
    this.returnOnlyOutputs = returnOnlyOutputs;
    if (this.agent._agentActionType() === "multi") {
      // returnDirect short-circuits the loop, which is ambiguous when an
      // agent may emit several actions at once — reject it up front.
      for (const tool of this.tools) {
        if (tool.returnDirect) {
          throw new Error(
            `Tool with return direct ${tool.name} not supported for multi-action agent.`
          );
        }
      }
    }
    this.returnIntermediateSteps =
      input.returnIntermediateSteps ?? this.returnIntermediateSteps;
    this.maxIterations = input.maxIterations ?? this.maxIterations;
    this.earlyStoppingMethod =
      input.earlyStoppingMethod ?? this.earlyStoppingMethod;
  }
  /** Create from agent and a list of tools. */
  static fromAgentAndTools(fields: AgentExecutorInput): AgentExecutor {
    return new AgentExecutor(fields);
  }
  // Bound accessor so collaborators (e.g. AgentExecutorIterator) can use
  // the private shouldContinue check.
  get shouldContinueGetter() {
    return this.shouldContinue.bind(this);
  }
  /**
   * Method that checks if the agent execution should continue based on the
   * number of iterations.
   * @param iterations The current number of iterations.
   * @returns A boolean indicating whether the agent execution should continue.
   */
  private shouldContinue(iterations: number): boolean {
    return this.maxIterations === undefined || iterations < this.maxIterations;
  }
  /** @ignore */
  async _call(
    inputs: ChainValues,
    runManager?: CallbackManagerForChainRun,
    config?: RunnableConfig
  ): Promise<AgentExecutorOutput> {
    // Tools are looked up case-insensitively here (lowercased names).
    const toolsByName = Object.fromEntries(
      this.tools.map((t) => [t.name.toLowerCase(), t])
    );
    const steps: AgentStep[] = [];
    let iterations = 0;
    // Assemble the chain result from an AgentFinish.
    const getOutput = async (
      finishStep: AgentFinish
    ): Promise<AgentExecutorOutput> => {
      const { returnValues } = finishStep;
      const additional = await this.agent.prepareForOutput(returnValues, steps);
      await runManager?.handleAgentEnd(finishStep);
      let response;
      if (this.returnIntermediateSteps) {
        response = { ...returnValues, intermediateSteps: steps, ...additional };
      } else {
        response = { ...returnValues, ...additional };
      }
      if (!this.returnOnlyOutputs) {
        response = { ...inputs, ...response };
      }
      return response;
    };
    while (this.shouldContinue(iterations)) {
      let output;
      try {
        output = await this.agent.plan(
          steps,
          inputs,
          runManager?.getChild(),
          config
        );
      } catch (e) {
        // eslint-disable-next-line no-instanceof/no-instanceof
        if (e instanceof OutputParserException) {
          let observation;
          let text = e.message;
          if (this.handleParsingErrors === true) {
            if (e.sendToLLM) {
              observation = e.observation;
              text = e.llmOutput ?? "";
            } else {
              observation = "Invalid or incomplete response";
            }
          } else if (typeof this.handleParsingErrors === "string") {
            observation = this.handleParsingErrors;
          } else if (typeof this.handleParsingErrors === "function") {
            observation = this.handleParsingErrors(e);
          } else {
            throw e;
          }
          // Route the parse failure through the ExceptionTool so the LLM
          // sees it as an observation on the next round.
          output = {
            tool: "_Exception",
            toolInput: observation,
            log: text,
          } as AgentAction;
        } else {
          throw e;
        }
      }
      // Check if the agent has finished
      if ("returnValues" in output) {
        return getOutput(output);
      }
      let actions: AgentAction[];
      if (Array.isArray(output)) {
        actions = output as AgentAction[];
      } else {
        actions = [output as AgentAction];
      }
      // Execute all requested actions concurrently.
      const newSteps = await Promise.all(
        actions.map(async (action) => {
          await runManager?.handleAgentAction(action);
          const tool =
            action.tool === "_Exception"
              ? new ExceptionTool()
              : toolsByName[action.tool?.toLowerCase()];
          let observation;
          try {
            observation = tool
              ? await tool.invoke(
                  action.toolInput,
                  patchConfig(config, { callbacks: runManager?.getChild() })
                )
              : `${action.tool} is not a valid tool, try another one.`;
            if (typeof observation !== "string") {
              throw new Error(
                "Received unsupported non-string response from tool call."
              );
            }
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
          } catch (e: any) {
            // eslint-disable-next-line no-instanceof/no-instanceof
            if (e instanceof ToolInputParsingException) {
              if (this.handleParsingErrors === true) {
                observation =
                  "Invalid or incomplete tool input. Please try again.";
              } else if (typeof this.handleParsingErrors === "string") {
                observation = this.handleParsingErrors;
              } else if (typeof this.handleParsingErrors === "function") {
                observation = this.handleParsingErrors(e);
              } else {
                throw e;
              }
              observation = await new ExceptionTool().call(
                observation,
                runManager?.getChild()
              );
              return { action, observation: observation ?? "" };
            } else if (this.handleToolRuntimeErrors !== undefined) {
              observation = this.handleToolRuntimeErrors(e);
            }
          }
          return { action, observation: observation ?? "" };
        })
      );
      steps.push(...newSteps);
      const lastStep = steps[steps.length - 1];
      const lastTool = toolsByName[lastStep.action.tool?.toLowerCase()];
      // A returnDirect tool ends the run with its raw observation.
      if (lastTool?.returnDirect) {
        return getOutput({
          returnValues: { [this.agent.returnValues[0]]: lastStep.observation },
          log: "",
        });
      }
      iterations += 1;
    }
    // Iteration limit hit: ask the agent for a stopped response.
    const finish = await this.agent.returnStoppedResponse(
      this.earlyStoppingMethod,
      steps,
      inputs
    );
    return getOutput(finish);
  }
  /**
   * Plan and execute a single agent round (used by the streaming iterator).
   * Returns either an AgentFinish or the steps produced by this round.
   * NOTE(review): unlike _call, actions run sequentially here and the tool
   * lookup uses the exact caller-supplied map keys — confirm the casing
   * difference with _call's lowercased map is intended.
   */
  async _takeNextStep(
    nameToolMap: Record<string, ToolInterface>,
    inputs: ChainValues,
    intermediateSteps: AgentStep[],
    runManager?: CallbackManagerForChainRun,
    config?: RunnableConfig
  ): Promise<AgentFinish | AgentStep[]> {
    let output;
    try {
      output = await this.agent.plan(
        intermediateSteps,
        inputs,
        runManager?.getChild(),
        config
      );
    } catch (e) {
      // eslint-disable-next-line no-instanceof/no-instanceof
      if (e instanceof OutputParserException) {
        let observation;
        let text = e.message;
        if (this.handleParsingErrors === true) {
          if (e.sendToLLM) {
            observation = e.observation;
            text = e.llmOutput ?? "";
          } else {
            observation = "Invalid or incomplete response";
          }
        } else if (typeof this.handleParsingErrors === "string") {
          observation = this.handleParsingErrors;
        } else if (typeof this.handleParsingErrors === "function") {
          observation = this.handleParsingErrors(e);
        } else {
          throw e;
        }
        output = {
          tool: "_Exception",
          toolInput: observation,
          log: text,
        } as AgentAction;
      } else {
        throw e;
      }
    }
    if ("returnValues" in output) {
      return output;
    }
    let actions: AgentAction[];
    if (Array.isArray(output)) {
      actions = output as AgentAction[];
    } else {
      actions = [output as AgentAction];
    }
    const result: AgentStep[] = [];
    for (const agentAction of actions) {
      let observation = "";
      if (runManager) {
        await runManager?.handleAgentAction(agentAction);
      }
      if (agentAction.tool in nameToolMap) {
        const tool = nameToolMap[agentAction.tool];
        try {
          observation = await tool.call(
            agentAction.toolInput,
            runManager?.getChild()
          );
          if (typeof observation !== "string") {
            throw new Error(
              "Received unsupported non-string response from tool call."
            );
          }
        } catch (e) {
          // eslint-disable-next-line no-instanceof/no-instanceof
          if (e instanceof ToolInputParsingException) {
            if (this.handleParsingErrors === true) {
              observation =
                "Invalid or incomplete tool input. Please try again.";
            } else if (typeof this.handleParsingErrors === "string") {
              observation = this.handleParsingErrors;
            } else if (typeof this.handleParsingErrors === "function") {
              observation = this.handleParsingErrors(e);
            } else {
              throw e;
            }
            observation = await new ExceptionTool().call(
              observation,
              runManager?.getChild()
            );
          }
        }
      } else {
        observation = `${
          agentAction.tool
        } is not a valid tool, try another available tool: ${Object.keys(
          nameToolMap
        ).join(", ")}`;
      }
      result.push({
        action: agentAction,
        observation,
      });
    }
    return result;
  }
  /**
   * Finalize an AgentFinish: fire end callbacks and optionally attach the
   * intermediate steps to the returned values.
   */
  async _return(
    output: AgentFinish,
    intermediateSteps: AgentStep[],
    runManager?: CallbackManagerForChainRun
  ): Promise<AgentExecutorOutput> {
    if (runManager) {
      await runManager.handleAgentEnd(output);
    }
    const finalOutput: Record<string, unknown> = output.returnValues;
    if (this.returnIntermediateSteps) {
      finalOutput.intermediateSteps = intermediateSteps;
    }
    return finalOutput;
  }
  /**
   * If the step's tool is marked returnDirect, convert its observation into
   * an AgentFinish; otherwise return null.
   * NOTE(review): the map keys are lowercased but the lookup key
   * (`action.tool`) is not — tools with uppercase names never match here;
   * confirm this is intended.
   */
  async _getToolReturn(nextStepOutput: AgentStep): Promise<AgentFinish | null> {
    const { action, observation } = nextStepOutput;
    const nameToolMap = Object.fromEntries(
      this.tools.map((t) => [t.name.toLowerCase(), t])
    );
    const [returnValueKey = "output"] = this.agent.returnValues;
    // Invalid tools won't be in the map, so we return False.
    if (action.tool in nameToolMap) {
      if (nameToolMap[action.tool].returnDirect) {
        return {
          returnValues: { [returnValueKey]: observation },
          log: "",
        };
      }
    }
    return null;
  }
  // Only the "force" early stopping method is supported.
  _returnStoppedResponse(earlyStoppingMethod: StoppingMethod) {
    if (earlyStoppingMethod === "force") {
      return {
        returnValues: {
          output: "Agent stopped due to iteration limit or time limit.",
        },
        log: "",
      } as AgentFinish;
    }
    throw new Error(
      `Got unsupported early_stopping_method: ${earlyStoppingMethod}`
    );
  }
  /** Stream one iteration result at a time via AgentExecutorIterator. */
  async *_streamIterator(
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    inputs: Record<string, any>,
    options?: Partial<RunnableConfig>
  ): AsyncGenerator<ChainValues> {
    const agentExecutorIterator = new AgentExecutorIterator({
      inputs,
      agentExecutor: this,
      config: options,
      // TODO: Deprecate these other parameters
      metadata: options?.metadata,
      tags: options?.tags,
      callbacks: options?.callbacks,
    });
    const iterator = agentExecutorIterator.streamIterator();
    for await (const step of iterator) {
      if (!step) {
        continue;
      }
      yield step;
    }
  }
  _chainType() {
    return "agent_executor" as const;
  }
  serialize(): SerializedLLMChain {
    throw new Error("Cannot serialize an AgentExecutor");
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src | lc_public_repos/langchainjs/langchain/src/agents/load.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { ToolInterface } from "@langchain/core/tools";
import { Agent } from "./agent.js";
import { loadFromHub } from "../util/hub.js";
import { FileLoader, loadFromFile } from "../util/load.js";
import { parseFileConfig } from "../util/parse.js";
/** @deprecated */
const loadAgentFromFile: FileLoader<Agent> = async (
  file: string,
  path: string,
  llmAndTools?: { llm?: BaseLanguageModelInterface; tools?: ToolInterface[] }
) => {
  // Parse the raw config, then rehydrate it together with any runtime-only
  // pieces (LLM, tools) the caller supplies.
  const config = parseFileConfig(file, path);
  const deserializable = { ...config, ...llmAndTools };
  return Agent.deserialize(deserializable);
};
/** @deprecated */
export const loadAgent = async (
  uri: string,
  llmAndTools?: { llm?: BaseLanguageModelInterface; tools?: ToolInterface[] }
): Promise<Agent> => {
  // Try the LangChain Hub first; hub URIs resolve to json/yaml agent configs.
  const fromHub = await loadFromHub(
    uri,
    loadAgentFromFile,
    "agents",
    new Set(["json", "yaml"]),
    llmAndTools
  );
  // Anything the hub could not resolve is treated as a local file path.
  return fromHub || loadFromFile(uri, loadAgentFromFile, llmAndTools);
};
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/openai/output_parser.ts | // console.warn([
// `[WARNING]: The root "langchain/agents/openai/output_parser" entrypoint is deprecated.`,
// `Please use either "langchain/agents/openai/output_parser" specific entrypoint instead.`
// ].join("\n"));
export * from "../openai_functions/output_parser.js";
export * from "../openai_tools/output_parser.js";
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/chat/outputParser.ts | import type { AgentFinish } from "@langchain/core/agents";
import { OutputParserException } from "@langchain/core/output_parsers";
import { AgentActionOutputParser } from "../types.js";
import { FORMAT_INSTRUCTIONS } from "./prompt.js";
export const FINAL_ANSWER_ACTION = "Final Answer:";
/**
* A class that extends the AgentActionOutputParser to parse the output of
* the ChatAgent in LangChain. It checks if the output text contains the
* final answer action or a JSON response, and parses it accordingly.
* @example
* ```typescript
* const prompt = ChatPromptTemplate.fromMessages([
* [
* "ai",
* `{PREFIX}
* {FORMAT_INSTRUCTIONS}
* {SUFFIX}`,
* ],
* ["human", "Question: {input}"],
* ]);
* const runnableAgent = RunnableSequence.from([
* {
* input: (i: { input: string; steps: AgentStep[] }) => i.input,
* agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>
* formatLogToString(i.steps),
* },
* prompt,
* new OpenAI({ temperature: 0 }),
* new ChatAgentOutputParser(),
* ]);
*
* const executor = AgentExecutor.fromAgentAndTools({
* agent: runnableAgent,
* tools: [new SerpAPI(), new Calculator()],
* });
*
* const result = await executor.invoke({
* input:
* "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?",
* });
* ```
*/
export class ChatAgentOutputParser extends AgentActionOutputParser {
  lc_namespace = ["langchain", "agents", "chat"];
  /**
   * Parse the chat model's text into either an AgentFinish (when the final
   * answer marker is present, or no `"action":` JSON key can be detected)
   * or a tool invocation extracted from the JSON payload.
   * @param text Raw LLM output for one reasoning step.
   * @returns An AgentFinish, or `{ tool, toolInput, log }` for a tool call.
   */
  async parse(text: string) {
    const mentionsAction = text.includes(`"action":`);
    if (text.includes(FINAL_ANSWER_ACTION) || !mentionsAction) {
      // Everything after the last final-answer marker is the answer itself.
      const segments = text.split(FINAL_ANSWER_ACTION);
      const output = segments[segments.length - 1].trim();
      return { returnValues: { output }, log: text } satisfies AgentFinish;
    }
    // Prefer the body of a fenced ``` / ```json block when one is present.
    const jsonCandidate = text.includes("```")
      ? text.trim().split(/```(?:json)?/)[1]
      : text.trim();
    try {
      const parsed = JSON.parse(jsonCandidate.trim());
      return {
        tool: parsed.action,
        toolInput: parsed.action_input,
        log: text,
      };
    } catch {
      throw new OutputParserException(
        `Unable to parse JSON response from chat agent.\n\n${text}`
      );
    }
  }
  /**
   * Format instructions the ChatAgent prompt includes so the model produces
   * output this parser understands.
   * @returns The format instructions as a string.
   */
  getFormatInstructions(): string {
    return FORMAT_INSTRUCTIONS;
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/chat/index.ts | import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { ToolInterface } from "@langchain/core/tools";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
} from "@langchain/core/prompts";
import type { AgentStep } from "@langchain/core/agents";
import { LLMChain } from "../../chains/llm_chain.js";
import { Optional } from "../../types/type-utils.js";
import { Agent, AgentArgs, OutputParserArgs } from "../agent.js";
import { AgentInput } from "../types.js";
import { ChatAgentOutputParser } from "./outputParser.js";
import { FORMAT_INSTRUCTIONS, PREFIX, SUFFIX } from "./prompt.js";
const DEFAULT_HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}";
/**
 * Arguments for building the ChatAgent prompt (see ChatAgent.createPrompt).
 * All fields are optional; unset fields fall back to the module-level
 * defaults noted below.
 * @deprecated
 */
export interface ChatCreatePromptArgs {
  /** String to put after the list of tools. Defaults to SUFFIX. */
  suffix?: string;
  /** String to put before the list of tools. Defaults to PREFIX. */
  prefix?: string;
  /** String to use directly as the human message template. Defaults to DEFAULT_HUMAN_MESSAGE_TEMPLATE. */
  humanMessageTemplate?: string;
  /** Formattable string to use as the instructions template. Defaults to FORMAT_INSTRUCTIONS. */
  formatInstructions?: string;
  /** List of input variables the final prompt will expect. NOTE(review): not read by ChatAgent.createPrompt in this file — verify before relying on it. */
  inputVariables?: string[];
}
/**
 * Input for constructing a ChatAgent: the standard AgentInput with
 * `outputParser` made optional. The ChatAgent constructor falls back to
 * ChatAgent.getDefaultOutputParser() when it is omitted.
 *
 * @deprecated
 */
export type ChatAgentInput = Optional<AgentInput, "outputParser">;
/**
* Agent for the MRKL chain.
* @augments Agent
*
* @deprecated Use the {@link https://api.js.langchain.com/functions/langchain.agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}.
*/
export class ChatAgent extends Agent {
  static lc_name() {
    return "ChatAgent";
  }

  lc_namespace = ["langchain", "agents", "chat"];

  declare ToolType: ToolInterface;

  constructor(input: ChatAgentInput) {
    // Fall back to the default output parser when the caller supplies none.
    super({
      ...input,
      outputParser: input?.outputParser ?? ChatAgent.getDefaultOutputParser(),
    });
  }

  _agentType() {
    return "chat-zero-shot-react-description" as const;
  }

  observationPrefix() {
    return "Observation: ";
  }

  llmPrefix() {
    return "Thought:";
  }

  _stop(): string[] {
    return ["Observation:"];
  }

  /**
   * Validates that every tool carries a description; throws on the first
   * one that does not, since the prompt renders tools as "name: description".
   * @param tools Array of Tool instances to validate.
   * @returns void
   */
  static validateTools(tools: ToolInterface[]) {
    for (const tool of tools) {
      if (!tool.description) {
        throw new Error(
          `Got a tool ${tool.name} without a description.` +
            ` This agent requires descriptions for all tools.`
        );
      }
    }
  }

  /**
   * Builds the default output parser for the ChatAgent.
   * @param _fields Optional OutputParserArgs (currently unused).
   * @returns A fresh ChatAgentOutputParser instance.
   */
  static getDefaultOutputParser(_fields?: OutputParserArgs) {
    return new ChatAgentOutputParser();
  }

  /**
   * Builds the scratchpad string from the agent's previous steps, prefixed
   * with a reminder that the model only sees the final answer.
   * @param steps Array of AgentStep instances representing prior steps.
   * @returns Promise resolving to the scratchpad text ("" when no prior steps).
   */
  async constructScratchPad(steps: AgentStep[]): Promise<string> {
    const scratchpad = await super.constructScratchPad(steps);
    return scratchpad
      ? `This was your previous work (but I haven't seen any of it! I only see what you return as final answer):\n${scratchpad}`
      : scratchpad;
  }

  /**
   * Create prompt in the style of the zero shot agent.
   *
   * @param tools - Tools the agent can call; rendered as "name: description" lines.
   * @param args - Optional overrides for the prompt pieces.
   * @param args.suffix - String to put after the list of tools.
   * @param args.prefix - String to put before the list of tools.
   * @param args.humanMessageTemplate - String to use directly as the human message template
   * @param args.formatInstructions - Formattable string to use as the instructions template
   * @returns A ChatPromptTemplate with one system and one human message.
   */
  static createPrompt(tools: ToolInterface[], args?: ChatCreatePromptArgs) {
    const {
      prefix = PREFIX,
      suffix = SUFFIX,
      humanMessageTemplate = DEFAULT_HUMAN_MESSAGE_TEMPLATE,
      formatInstructions = FORMAT_INSTRUCTIONS,
    } = args ?? {};
    const toolLines = tools.map(
      (tool) => `${tool.name}: ${tool.description}`
    );
    const systemTemplate = [
      prefix,
      toolLines.join("\n"),
      formatInstructions,
      suffix,
    ].join("\n\n");
    return ChatPromptTemplate.fromMessages([
      SystemMessagePromptTemplate.fromTemplate(systemTemplate),
      HumanMessagePromptTemplate.fromTemplate(humanMessageTemplate),
    ]);
  }

  /**
   * Creates a ChatAgent wired to the given language model and tools.
   * @param llm BaseLanguageModelInterface instance to use in the agent.
   * @param tools Array of Tool instances (all must have descriptions).
   * @param args Optional prompt overrides plus agent arguments.
   * @returns ChatAgent instance
   */
  static fromLLMAndTools(
    llm: BaseLanguageModelInterface,
    tools: ToolInterface[],
    args?: ChatCreatePromptArgs & AgentArgs
  ) {
    ChatAgent.validateTools(tools);
    const chain = new LLMChain({
      prompt: ChatAgent.createPrompt(tools, args),
      llm,
      // `callbacks` takes precedence over the deprecated `callbackManager`.
      callbacks: args?.callbacks ?? args?.callbackManager,
    });
    return new ChatAgent({
      llmChain: chain,
      outputParser: args?.outputParser ?? ChatAgent.getDefaultOutputParser(),
      allowedTools: tools.map((t) => t.name),
    });
  }
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/chat/prompt.ts | export const PREFIX = `Answer the following questions as best you can. You have access to the following tools:`;
/**
 * System-prompt section describing the required output format: a single
 * $JSON_BLOB per turn with "action" / "action_input" keys, followed by the
 * Thought/Action/Observation loop and the `Final Answer:` terminator.
 * Doubled braces ({{ }}) are escapes so the f-string template renders
 * literal braces.
 */
export const FORMAT_INSTRUCTIONS = `The way you use the tools is by specifying a json blob, denoted below by $JSON_BLOB
Specifically, this $JSON_BLOB should have a "action" key (with the name of the tool to use) and a "action_input" key (with the input to the tool going here).
The $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:
\`\`\`
{{
"action": "calculator",
"action_input": "1 + 2"
}}
\`\`\`
ALWAYS use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action:
\`\`\`
$JSON_BLOB
\`\`\`
Observation: the result of the action
... (this Thought/Action/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question`;
/** Closing line of the system prompt; reminds the model to emit the exact `Final Answer` marker. */
export const SUFFIX = `Begin! Reminder to always use the exact characters \`Final Answer\` when responding.`;
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/format_scratchpad/openai_functions.ts | import {
AIMessage,
type BaseMessage,
HumanMessage,
FunctionMessage,
} from "@langchain/core/messages";
import type { AgentStep } from "@langchain/core/agents";
import { renderTemplate } from "@langchain/core/prompts";
import { TEMPLATE_TOOL_RESPONSE } from "../chat_convo/prompt.js";
/**
* Format a list of AgentSteps into a list of BaseMessage instances for
* agents that use OpenAI's API. Helpful for passing in previous agent
* step context into new iterations.
*
* @deprecated Use formatToOpenAIFunctionMessages instead.
* @param steps A list of AgentSteps to format.
* @returns A list of BaseMessages.
*/
export function formatForOpenAIFunctions(steps: AgentStep[]): BaseMessage[] {
  // Each step expands to an AI message (the agent's log) followed by a
  // human message carrying the rendered tool observation.
  return steps.flatMap((step) => [
    new AIMessage(step.action.log),
    new HumanMessage(
      renderTemplate(TEMPLATE_TOOL_RESPONSE, "f-string", {
        observation: step.observation,
      })
    ),
  ]);
}
/**
* Format a list of AgentSteps into a list of BaseMessage instances for
* agents that use OpenAI's API. Helpful for passing in previous agent
* step context into new iterations.
*
* @param steps A list of AgentSteps to format.
* @returns A list of BaseMessages.
*/
export function formatToOpenAIFunctionMessages(
steps: AgentStep[]
): BaseMessage[] {
return steps.flatMap(({ action, observation }) => {
if ("messageLog" in action && action.messageLog !== undefined) {
const log = action.messageLog as BaseMessage[];
return log.concat(new FunctionMessage(observation, action.tool));
} else {
return [new AIMessage(action.log)];
}
});
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/format_scratchpad/tool_calling.ts | import { AIMessage, BaseMessage, ToolMessage } from "@langchain/core/messages";
import { ToolsAgentStep } from "../tool_calling/output_parser.js";
/**
* Convert agent action and observation into a function message.
* @param agentAction - The tool invocation request from the agent
* @param observation - The result of the tool invocation
* @returns FunctionMessage that corresponds to the original tool invocation
*/
export function _createToolMessage(step: ToolsAgentStep): ToolMessage {
  const { action, observation } = step;
  // Echo the tool call id back so the model can pair request and result;
  // the tool name travels in additional_kwargs.
  return new ToolMessage({
    tool_call_id: action.toolCallId,
    content: observation,
    additional_kwargs: { name: action.tool },
  });
}
export function formatToToolMessages(steps: ToolsAgentStep[]): BaseMessage[] {
return steps.flatMap(({ action, observation }) => {
if ("messageLog" in action && action.messageLog !== undefined) {
const log = action.messageLog as BaseMessage[];
return log.concat(_createToolMessage({ action, observation }));
} else {
return [new AIMessage(action.log)];
}
});
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/format_scratchpad/log.ts | import { AgentStep } from "@langchain/core/agents";
/**
* Construct the scratchpad that lets the agent continue its thought process.
* @param intermediateSteps
* @param observationPrefix
* @param llmPrefix
* @returns a string with the formatted observations and agent logs
*/
export function formatLogToString(
  intermediateSteps: AgentStep[],
  observationPrefix = "Observation: ",
  llmPrefix = "Thought: "
): string {
  // For every step append: the agent's log, a blank-line-separated
  // observation, and the LLM prefix that cues the next thought.
  let scratchpad = "";
  for (const { action, observation } of intermediateSteps) {
    scratchpad += [
      action.log,
      `\n${observationPrefix}${observation}`,
      llmPrefix,
    ].join("\n");
  }
  return scratchpad;
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/format_scratchpad/xml.ts | import { AgentStep } from "@langchain/core/agents";
export function formatXml(intermediateSteps: AgentStep[]) {
  // Render each step as adjacent <tool>/<tool_input>/<observation> tags;
  // note the newline inside <tool_input> is intentional and preserved.
  return intermediateSteps
    .map(
      ({ action, observation }) =>
        `<tool>${action.tool}</tool><tool_input>${action.toolInput}\n</tool_input><observation>${observation}</observation>`
    )
    .join("");
}
|
0 | lc_public_repos/langchainjs/langchain/src/agents | lc_public_repos/langchainjs/langchain/src/agents/format_scratchpad/log_to_message.ts | import type { AgentStep } from "@langchain/core/agents";
import {
type BaseMessage,
AIMessage,
HumanMessage,
} from "@langchain/core/messages";
import { renderTemplate } from "@langchain/core/prompts";
export function formatLogToMessage(
  intermediateSteps: AgentStep[],
  templateToolResponse = "{observation}"
): BaseMessage[] {
  // Extract the template's input variables; more than one is an error.
  // NOTE(review): a template with zero variables is not rejected here and
  // renders with an "undefined" key — confirm callers always pass one.
  const variableNames = [...templateToolResponse.matchAll(/{([^}]*)}/g)].map(
    (match) => match[1]
  );
  if (variableNames.length > 1) {
    throw new Error(
      `templateToolResponse must contain one input variable: ${templateToolResponse}`
    );
  }
  // Each step expands to the agent's log (AI) plus the rendered tool
  // observation (human).
  return intermediateSteps.flatMap((step) => [
    new AIMessage(step.action.log),
    new HumanMessage(
      renderTemplate(templateToolResponse, "f-string", {
        [variableNames[0]]: step.observation,
      })
    ),
  ]);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.