index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/cookbook_output_parser.ts
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { RunnableSequence } from "@langchain/core/runnables"; import { StringOutputParser } from "@langchain/core/output_parsers"; const model = new ChatOpenAI({}); const promptTemplate = PromptTemplate.fromTemplate( "Tell me a joke about {topic}" ); const outputParser = new StringOutputParser(); const chain = RunnableSequence.from([promptTemplate, model, outputParser]); const result = await chain.invoke({ topic: "bears" }); console.log(result); /* "Why don't bears wear shoes?\n\nBecause they have bear feet!" */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/cookbook_memory.ts
import { BufferMemory } from "langchain/memory"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { RunnableSequence } from "@langchain/core/runnables"; import { ChatAnthropic } from "@langchain/anthropic"; const model = new ChatAnthropic(); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful chatbot"], new MessagesPlaceholder("history"), ["human", "{input}"], ]); // Default "inputKey", "outputKey", and "memoryKey values would work here // but we specify them for clarity. const memory = new BufferMemory({ returnMessages: true, inputKey: "input", outputKey: "output", memoryKey: "history", }); console.log(await memory.loadMemoryVariables({})); /* { history: [] } */ const chain = RunnableSequence.from([ { input: (initialInput) => initialInput.input, memory: () => memory.loadMemoryVariables({}), }, { input: (previousOutput) => previousOutput.input, history: (previousOutput) => previousOutput.memory.history, }, prompt, model, ]); const inputs = { input: "Hey, I'm Bob!", }; const response = await chain.invoke(inputs); console.log(response); /* AIMessage { content: " Hi Bob, nice to meet you! I'm Claude, an AI assistant created by Anthropic to be helpful, harmless, and honest.", additional_kwargs: {} } */ await memory.saveContext(inputs, { output: response.content, }); console.log(await memory.loadMemoryVariables({})); /* { history: [ HumanMessage { content: "Hey, I'm Bob!", additional_kwargs: {} }, AIMessage { content: " Hi Bob, nice to meet you! I'm Claude, an AI assistant created by Anthropic to be helpful, harmless, and honest.", additional_kwargs: {} } ] } */ const inputs2 = { input: "What's my name?", }; const response2 = await chain.invoke(inputs2); console.log(response2); /* AIMessage { content: ' You told me your name is Bob.', additional_kwargs: {} } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/interface_batch.ts
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; const model = new ChatOpenAI({}); const promptTemplate = PromptTemplate.fromTemplate( "Tell me a joke about {topic}" ); const chain = promptTemplate.pipe(model); const result = await chain.batch([{ topic: "bears" }, { topic: "cats" }]); console.log(result); /* [ AIMessage { content: "Why don't bears wear shoes?\n\nBecause they have bear feet!", }, AIMessage { content: "Why don't cats play poker in the wild?\n\nToo many cheetahs!" } ] */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/interface_stream.ts
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; const model = new ChatOpenAI({}); const promptTemplate = PromptTemplate.fromTemplate( "Tell me a joke about {topic}" ); const chain = promptTemplate.pipe(model); const stream = await chain.stream({ topic: "bears" }); // Each chunk has the same interface as a chat message for await (const chunk of stream) { console.log(chunk?.content); } /* Why don't bears wear shoes? Because they have bear feet! */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/runnable_history.ts
import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { RunnableConfig, RunnableWithMessageHistory, } from "@langchain/core/runnables"; import { ChatMessageHistory } from "@langchain/community/stores/message/in_memory"; // Instantiate your model and prompt. const model = new ChatOpenAI({}); const prompt = ChatPromptTemplate.fromMessages([ ["ai", "You are a helpful assistant"], new MessagesPlaceholder("history"), ["human", "{input}"], ]); // Create a simple runnable which just chains the prompt to the model. const runnable = prompt.pipe(model); // Define your session history store. // This is where you will store your chat history. const messageHistory = new ChatMessageHistory(); // Create your `RunnableWithMessageHistory` object, passing in the // runnable created above. const withHistory = new RunnableWithMessageHistory({ runnable, // Optionally, you can use a function which tracks history by session ID. getMessageHistory: (_sessionId: string) => messageHistory, inputMessagesKey: "input", // This shows the runnable where to insert the history. // We set to "history" here because of our MessagesPlaceholder above. historyMessagesKey: "history", }); // Create your `configurable` object. This is where you pass in the // `sessionId` which is used to identify chat sessions in your message store. const config: RunnableConfig = { configurable: { sessionId: "1" } }; // Pass in your question, in this example we set the input key // to be "input" so we need to pass an object with an "input" key. let output = await withHistory.invoke( { input: "Hello there, I'm Archibald!" }, config ); console.log("output 1:", output); /** output 1: AIMessage { lc_namespace: [ 'langchain_core', 'messages' ], content: 'Hello, Archibald! How can I assist you today?', additional_kwargs: { function_call: undefined, tool_calls: undefined } } */ output = await withHistory.invoke({ input: "What's my name?" 
}, config); console.log("output 2:", output); /** output 2: AIMessage { lc_namespace: [ 'langchain_core', 'messages' ], content: 'Your name is Archibald, as you mentioned earlier. Is there anything specific you would like assistance with, Archibald?', additional_kwargs: { function_call: undefined, tool_calls: undefined } } */ /** * You can see the LangSmith traces here: * output 1 @link https://smith.langchain.com/public/686f061e-bef4-4b0d-a4fa-04c107b6db98/r * output 2 @link https://smith.langchain.com/public/c30ba77b-c2f4-440d-a54b-f368ced6467a/r */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/cookbook_retriever_map.ts
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { formatDocumentsAsString } from "langchain/util/document"; import { PromptTemplate } from "@langchain/core/prompts"; import { RunnableSequence } from "@langchain/core/runnables"; import { StringOutputParser } from "@langchain/core/output_parsers"; const model = new ChatOpenAI({}); const vectorStore = await HNSWLib.fromTexts( ["mitochondria is the powerhouse of the cell"], [{ id: 1 }], new OpenAIEmbeddings() ); const retriever = vectorStore.asRetriever(); const languagePrompt = PromptTemplate.fromTemplate(`Answer the question based only on the following context: {context} Question: {question} Answer in the following language: {language}`); type LanguageChainInput = { question: string; language: string; }; const languageChain = RunnableSequence.from([ { // Every property in the map receives the same input, // so we need to extract just the standalone question to pass into the retriever. // We then serialize the retrieved docs into a string to pass into the prompt. context: RunnableSequence.from([ (input: LanguageChainInput) => input.question, retriever, formatDocumentsAsString, ]), question: (input: LanguageChainInput) => input.question, language: (input: LanguageChainInput) => input.language, }, languagePrompt, model, new StringOutputParser(), ]); const result = await languageChain.invoke({ question: "What is the powerhouse of the cell?", language: "German", }); console.log(result); /* "Mitochondrien sind das Kraftwerk der Zelle." */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/runnable_history_constructor_config.ts
import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { RunnableConfig, RunnableWithMessageHistory, } from "@langchain/core/runnables"; import { ChatMessageHistory } from "@langchain/community/stores/message/in_memory"; // Construct your runnable with a prompt and chat model. const model = new ChatOpenAI({}); const prompt = ChatPromptTemplate.fromMessages([ ["ai", "You are a helpful assistant"], new MessagesPlaceholder("history"), ["human", "{input}"], ]); const runnable = prompt.pipe(model); const messageHistory = new ChatMessageHistory(); // Define a RunnableConfig object, with a `configurable` key. const config: RunnableConfig = { configurable: { sessionId: "1" } }; const withHistory = new RunnableWithMessageHistory({ runnable, getMessageHistory: (_sessionId: string) => messageHistory, inputMessagesKey: "input", historyMessagesKey: "history", // Passing config through here instead of through the invoke method config, }); const output = await withHistory.invoke({ input: "Hello there, I'm Archibald!", }); console.log("output:", output); /** output: AIMessage { lc_namespace: [ 'langchain_core', 'messages' ], content: 'Hello, Archibald! How can I assist you today?', additional_kwargs: { function_call: undefined, tool_calls: undefined } } */ /** * You can see the LangSmith traces here: * output @link https://smith.langchain.com/public/ee264a77-b767-4b5a-8573-efcbebaa5c80/r */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/message_history.ts
/* eslint-disable @typescript-eslint/no-non-null-assertion */ import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { ChatAnthropic } from "@langchain/anthropic"; import { RunnableWithMessageHistory } from "@langchain/core/runnables"; import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis"; // For demos, you can also use an in-memory store: // import { ChatMessageHistory } from "langchain/stores/message/in_memory"; const prompt = ChatPromptTemplate.fromMessages([ ["system", "You're an assistant who's good at {ability}"], new MessagesPlaceholder("history"), ["human", "{question}"], ]); const chain = prompt.pipe( new ChatAnthropic({ model: "claude-3-sonnet-20240229" }) ); const chainWithHistory = new RunnableWithMessageHistory({ runnable: chain, getMessageHistory: (sessionId) => new UpstashRedisChatMessageHistory({ sessionId, config: { url: process.env.UPSTASH_REDIS_REST_URL!, token: process.env.UPSTASH_REDIS_REST_TOKEN!, }, }), inputMessagesKey: "question", historyMessagesKey: "history", }); const result = await chainWithHistory.invoke( { ability: "math", question: "What does cosine mean?", }, { configurable: { sessionId: "foobarbaz", }, } ); console.log(result); const result2 = await chainWithHistory.invoke( { ability: "math", question: "What's its inverse?", }, { configurable: { sessionId: "foobarbaz", }, } ); console.log(result2);
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/cookbook_sql_db.ts
import { DataSource } from "typeorm"; import { SqlDatabase } from "langchain/sql_db"; import { ChatOpenAI } from "@langchain/openai"; import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import { PromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; const datasource = new DataSource({ type: "sqlite", database: "Chinook.db", }); const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource, }); const prompt = PromptTemplate.fromTemplate(`Based on the table schema below, write a SQL query that would answer the user's question: {schema} Question: {question} SQL Query:`); const model = new ChatOpenAI(); // The `RunnablePassthrough.assign()` is used here to passthrough the input from the `.invoke()` // call (in this example it's the question), along with any inputs passed to the `.assign()` method. // In this case, we're passing the schema. const sqlQueryGeneratorChain = RunnableSequence.from([ RunnablePassthrough.assign({ schema: async () => db.getTableInfo(), }), prompt, model.bind({ stop: ["\nSQLResult:"] }), new StringOutputParser(), ]); const result = await sqlQueryGeneratorChain.invoke({ question: "How many employees are there?", }); console.log({ result, }); /* { result: "SELECT COUNT(EmployeeId) AS TotalEmployees FROM Employee" } */ const finalResponsePrompt = PromptTemplate.fromTemplate(`Based on the table schema below, question, sql query, and sql response, write a natural language response: {schema} Question: {question} SQL Query: {query} SQL Response: {response}`); const fullChain = RunnableSequence.from([ RunnablePassthrough.assign({ query: sqlQueryGeneratorChain, }), { schema: async () => db.getTableInfo(), question: (input) => input.question, query: (input) => input.query, response: (input) => db.run(input.query), }, finalResponsePrompt, model, ]); const finalResponse = await fullChain.invoke({ question: "How many employees are there?", }); 
console.log(finalResponse); /* AIMessage { content: 'There are 8 employees.', additional_kwargs: { function_call: undefined } } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/cookbook_tools.ts
import { PromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { ChatAnthropic } from "@langchain/anthropic"; import { SerpAPI } from "@langchain/community/tools/serpapi"; const search = new SerpAPI(); const prompt = PromptTemplate.fromTemplate(`Turn the following user input into a search query for a search engine: {input}`); const model = new ChatAnthropic({}); const chain = prompt.pipe(model).pipe(new StringOutputParser()).pipe(search); const result = await chain.invoke({ input: "Who is the current prime minister of Malaysia?", }); console.log(result); /* Anwar Ibrahim */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/interface_invoke.ts
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { RunnableSequence } from "@langchain/core/runnables"; const model = new ChatOpenAI({}); const promptTemplate = PromptTemplate.fromTemplate( "Tell me a joke about {topic}" ); // You can also create a chain using an array of runnables const chain = RunnableSequence.from([promptTemplate, model]); const result = await chain.invoke({ topic: "bears" }); console.log(result); /* AIMessage { content: "Why don't bears wear shoes?\n\nBecause they have bear feet!", } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/interface_stream_log.ts
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { formatDocumentsAsString } from "langchain/util/document"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; // Initialize the LLM to use to answer the question. const model = new ChatOpenAI({}); const vectorStore = await HNSWLib.fromTexts( [ "mitochondria is the powerhouse of the cell", "mitochondria is made of lipids", ], [{ id: 1 }, { id: 2 }], new OpenAIEmbeddings() ); // Initialize a retriever wrapper around the vector store const vectorStoreRetriever = vectorStore.asRetriever(); // Create a system & human prompt for the chat model const SYSTEM_TEMPLATE = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. 
---------------- {context}`; const messages = [ SystemMessagePromptTemplate.fromTemplate(SYSTEM_TEMPLATE), HumanMessagePromptTemplate.fromTemplate("{question}"), ]; const prompt = ChatPromptTemplate.fromMessages(messages); const chain = RunnableSequence.from([ { context: vectorStoreRetriever.pipe(formatDocumentsAsString), question: new RunnablePassthrough(), }, prompt, model, new StringOutputParser(), ]); const logStream = await chain.streamLog("What is the powerhouse of the cell?"); let state; for await (const logPatch of logStream) { console.log(JSON.stringify(logPatch)); if (!state) { state = logPatch; } else { state = state.concat(logPatch); } } console.log("aggregate", state); /* {"ops":[{"op":"replace","path":"","value":{"id":"5a79d2e7-171a-4034-9faa-63af88e5a451","streamed_output":[],"logs":{}}}]} {"ops":[{"op":"add","path":"/logs/RunnableMap","value":{"id":"5948dd9f-b827-45f8-9fa6-74e5cc972a56","name":"RunnableMap","type":"chain","tags":["seq:step:1"],"metadata":{},"start_time":"2023-12-23T00:20:46.664Z","streamed_output_str":[]}}]} {"ops":[{"op":"add","path":"/logs/RunnableSequence","value":{"id":"e9e9ef5e-3a04-4110-9a24-517c929b9137","name":"RunnableSequence","type":"chain","tags":["context"],"metadata":{},"start_time":"2023-12-23T00:20:46.804Z","streamed_output_str":[]}}]} {"ops":[{"op":"add","path":"/logs/RunnablePassthrough","value":{"id":"4c79d835-87e5-4ff8-b560-987aea83c0e4","name":"RunnablePassthrough","type":"chain","tags":["question"],"metadata":{},"start_time":"2023-12-23T00:20:46.805Z","streamed_output_str":[]}}]} {"ops":[{"op":"add","path":"/logs/RunnablePassthrough/final_output","value":{"output":"What is the powerhouse of the cell?"}},{"op":"add","path":"/logs/RunnablePassthrough/end_time","value":"2023-12-23T00:20:46.947Z"}]} 
{"ops":[{"op":"add","path":"/logs/VectorStoreRetriever","value":{"id":"1e169f18-711e-47a3-910e-ee031f70b6e0","name":"VectorStoreRetriever","type":"retriever","tags":["seq:step:1","hnswlib"],"metadata":{},"start_time":"2023-12-23T00:20:47.082Z","streamed_output_str":[]}}]} {"ops":[{"op":"add","path":"/logs/VectorStoreRetriever/final_output","value":{"documents":[{"pageContent":"mitochondria is the powerhouse of the cell","metadata":{"id":1}},{"pageContent":"mitochondria is made of lipids","metadata":{"id":2}}]}},{"op":"add","path":"/logs/VectorStoreRetriever/end_time","value":"2023-12-23T00:20:47.398Z"}]} {"ops":[{"op":"add","path":"/logs/RunnableLambda","value":{"id":"a0d61a88-8282-42be-8949-fb0e8f8f67cd","name":"RunnableLambda","type":"chain","tags":["seq:step:2"],"metadata":{},"start_time":"2023-12-23T00:20:47.495Z","streamed_output_str":[]}}]} {"ops":[{"op":"add","path":"/logs/RunnableLambda/final_output","value":{"output":"mitochondria is the powerhouse of the cell\n\nmitochondria is made of lipids"}},{"op":"add","path":"/logs/RunnableLambda/end_time","value":"2023-12-23T00:20:47.604Z"}]} {"ops":[{"op":"add","path":"/logs/RunnableSequence/final_output","value":{"output":"mitochondria is the powerhouse of the cell\n\nmitochondria is made of lipids"}},{"op":"add","path":"/logs/RunnableSequence/end_time","value":"2023-12-23T00:20:47.690Z"}]} {"ops":[{"op":"add","path":"/logs/RunnableMap/final_output","value":{"question":"What is the powerhouse of the cell?","context":"mitochondria is the powerhouse of the cell\n\nmitochondria is made of lipids"}},{"op":"add","path":"/logs/RunnableMap/end_time","value":"2023-12-23T00:20:47.780Z"}]} {"ops":[{"op":"add","path":"/logs/ChatPromptTemplate","value":{"id":"5b6cff77-0c52-4218-9bde-d92c33ad12f3","name":"ChatPromptTemplate","type":"prompt","tags":["seq:step:2"],"metadata":{},"start_time":"2023-12-23T00:20:47.864Z","streamed_output_str":[]}}]} 
{"ops":[{"op":"add","path":"/logs/ChatPromptTemplate/final_output","value":{"lc":1,"type":"constructor","id":["langchain_core","prompt_values","ChatPromptValue"],"kwargs":{"messages":[{"lc":1,"type":"constructor","id":["langchain_core","messages","SystemMessage"],"kwargs":{"content":"Use the following pieces of context to answer the question at the end.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\nmitochondria is the powerhouse of the cell\n\nmitochondria is made of lipids","additional_kwargs":{}}},{"lc":1,"type":"constructor","id":["langchain_core","messages","HumanMessage"],"kwargs":{"content":"What is the powerhouse of the cell?","additional_kwargs":{}}}]}}},{"op":"add","path":"/logs/ChatPromptTemplate/end_time","value":"2023-12-23T00:20:47.956Z"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI","value":{"id":"0cc3b220-ca7f-4fd3-88d5-bea1f7417c3d","name":"ChatOpenAI","type":"llm","tags":["seq:step:3"],"metadata":{},"start_time":"2023-12-23T00:20:48.126Z","streamed_output_str":[]}}]} {"ops":[{"op":"add","path":"/logs/StrOutputParser","value":{"id":"47d9bd52-c14a-420d-8d52-1106d751581c","name":"StrOutputParser","type":"parser","tags":["seq:step:4"],"metadata":{},"start_time":"2023-12-23T00:20:48.666Z","streamed_output_str":[]}}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":""}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":""}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":"The"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":"The"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":" mitochond"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":" mitochond"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":"ria"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":"ria"}]} 
{"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":" is"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":" is"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":" the"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":" the"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":" powerhouse"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":" powerhouse"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":" of"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":" of"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":" the"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":" the"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":" cell"}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":" cell"}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":"."}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":"."}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/streamed_output_str/-","value":""}]} {"ops":[{"op":"add","path":"/streamed_output/-","value":""}]} {"ops":[{"op":"add","path":"/logs/ChatOpenAI/final_output","value":{"generations":[[{"text":"The mitochondria is the powerhouse of the cell.","generationInfo":{"prompt":0,"completion":0},"message":{"lc":1,"type":"constructor","id":["langchain_core","messages","AIMessageChunk"],"kwargs":{"content":"The mitochondria is the powerhouse of the cell.","additional_kwargs":{}}}}]]}},{"op":"add","path":"/logs/ChatOpenAI/end_time","value":"2023-12-23T00:20:48.841Z"}]} {"ops":[{"op":"add","path":"/logs/StrOutputParser/final_output","value":{"output":"The mitochondria is the powerhouse of the cell."}},{"op":"add","path":"/logs/StrOutputParser/end_time","value":"2023-12-23T00:20:48.945Z"}]} {"ops":[{"op":"replace","path":"/final_output","value":{"output":"The mitochondria is the 
powerhouse of the cell."}}]} */ // Aggregate /** aggregate { id: '1ed678b9-e1cf-4ef9-bb8b-2fa083b81725', streamed_output: [ '', 'The', ' powerhouse', ' of', ' the', ' cell', ' is', ' the', ' mitochond', 'ria', '.', '' ], final_output: { output: 'The powerhouse of the cell is the mitochondria.' }, logs: { RunnableMap: { id: 'ff268fa1-a621-41b5-a832-4f23eae99d8e', name: 'RunnableMap', type: 'chain', tags: [Array], metadata: {}, start_time: '2024-01-04T20:21:33.851Z', streamed_output_str: [], final_output: [Object], end_time: '2024-01-04T20:21:35.000Z' }, RunnablePassthrough: { id: '62b54982-edb3-4101-a53e-1d4201230668', name: 'RunnablePassthrough', type: 'chain', tags: [Array], metadata: {}, start_time: '2024-01-04T20:21:34.073Z', streamed_output_str: [], final_output: [Object], end_time: '2024-01-04T20:21:34.226Z' }, RunnableSequence: { id: 'a8893fb5-63ec-4b13-bb49-e6d4435cc5e4', name: 'RunnableSequence', type: 'chain', tags: [Array], metadata: {}, start_time: '2024-01-04T20:21:34.074Z', streamed_output_str: [], final_output: [Object], end_time: '2024-01-04T20:21:34.893Z' }, VectorStoreRetriever: { id: 'd145704c-64bb-491d-9a2c-814ee3d1e6a2', name: 'VectorStoreRetriever', type: 'retriever', tags: [Array], metadata: {}, start_time: '2024-01-04T20:21:34.234Z', streamed_output_str: [], final_output: [Object], end_time: '2024-01-04T20:21:34.518Z' }, RunnableLambda: { id: 'a23a552a-b96f-4c07-a45d-c5f3861fad5d', name: 'RunnableLambda', type: 'chain', tags: [Array], metadata: {}, start_time: '2024-01-04T20:21:34.610Z', streamed_output_str: [], final_output: [Object], end_time: '2024-01-04T20:21:34.785Z' }, ChatPromptTemplate: { id: 'a5e8439e-a6e4-4cf3-ba17-c223ea874a0a', name: 'ChatPromptTemplate', type: 'prompt', tags: [Array], metadata: {}, start_time: '2024-01-04T20:21:35.097Z', streamed_output_str: [], final_output: [ChatPromptValue], end_time: '2024-01-04T20:21:35.193Z' }, ChatOpenAI: { id: 'd9c9d340-ea38-4ef4-a8a8-60f52da4e838', name: 'ChatOpenAI', type: 'llm', tags: 
[Array], metadata: {}, start_time: '2024-01-04T20:21:35.282Z', streamed_output_str: [Array], final_output: [Object], end_time: '2024-01-04T20:21:36.059Z' }, StrOutputParser: { id: 'c55f9f3f-048b-43d5-ba48-02f3b24b8f96', name: 'StrOutputParser', type: 'parser', tags: [Array], metadata: {}, start_time: '2024-01-04T20:21:35.842Z', streamed_output_str: [], final_output: [Object], end_time: '2024-01-04T20:21:36.157Z' } } } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/how_to_routing_custom_function.ts
import { ChatPromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { RunnableSequence } from "@langchain/core/runnables"; import { ChatAnthropic } from "@langchain/anthropic"; const promptTemplate = ChatPromptTemplate.fromTemplate(`Given the user question below, classify it as either being about \`LangChain\`, \`Anthropic\`, or \`Other\`. Do not respond with more than one word. <question> {question} </question> Classification:`); const model = new ChatAnthropic({ model: "claude-3-sonnet-20240229", }); const classificationChain = RunnableSequence.from([ promptTemplate, model, new StringOutputParser(), ]); const classificationChainResult = await classificationChain.invoke({ question: "how do I call Anthropic?", }); console.log(classificationChainResult); /* Anthropic */ const langChainChain = ChatPromptTemplate.fromTemplate( `You are an expert in langchain. Always answer questions starting with "As Harrison Chase told me". Respond to the following question: Question: {question} Answer:` ).pipe(model); const anthropicChain = ChatPromptTemplate.fromTemplate( `You are an expert in anthropic. \ Always answer questions starting with "As Dario Amodei told me". 
\ Respond to the following question: Question: {question} Answer:` ).pipe(model); const generalChain = ChatPromptTemplate.fromTemplate( `Respond to the following question: Question: {question} Answer:` ).pipe(model); const route = ({ topic }: { input: string; topic: string }) => { if (topic.toLowerCase().includes("anthropic")) { return anthropicChain; } if (topic.toLowerCase().includes("langchain")) { return langChainChain; } return generalChain; }; const fullChain = RunnableSequence.from([ { topic: classificationChain, question: (input: { question: string }) => input.question, }, route, ]); const result1 = await fullChain.invoke({ question: "how do I use Anthropic?", }); console.log(result1); /* AIMessage { content: ' As Dario Amodei told me, here are some tips for how to use Anthropic:\n' + '\n' + "First, sign up for an account on Anthropic's website. This will give you access to their conversational AI assistant named Claude. \n" + '\n' + "Once you've created an account, you can have conversations with Claude through their web interface. Talk to Claude like you would talk to a person, asking questions, giving instructions, etc. Claude is trained to have natural conversations and be helpful.\n" + '\n' + "You can also integrate Claude into your own applications using Anthropic's API. This allows you to build Claude's conversational abilities into chatbots, virtual assistants, and other AI systems you develop.\n" + '\n' + 'Anthropic is constantly working on improving Claude, so its capabilities are always expanding. Make sure to check their blog and documentation to stay up to date on the latest features.\n' + '\n' + 'The key is to interact with Claude regularly so it can learn from you. The more you chat with it, the better it will become at understanding you and having personalized conversations. 
Over time, Claude will feel more human-like as it accumulates more conversational experience.', additional_kwargs: {} } */ const result2 = await fullChain.invoke({ question: "how do I use LangChain?", }); console.log(result2); /* AIMessage { content: ' As Harrison Chase told me, here is how you use LangChain:\n' + '\n' + 'First, think carefully about what you want to ask or have the AI do. Frame your request clearly and specifically. Avoid vague or overly broad prompts that could lead to unhelpful or concerning responses. \n' + '\n' + 'Next, type your question or request into the chat window and send it. Be patient as the AI processes your input and generates a response. The AI will do its best to provide a helpful answer or follow your instructions, but its capabilities are limited.\n' + '\n' + 'Keep your requests simple at first. Ask basic questions or have the AI summarize content or generate basic text. As you get more comfortable, you can try having the AI perform more complex tasks like answering tricky questions, generating stories, or having a conversation.\n' + '\n' + "Pay attention to the AI's responses. If they seem off topic, nonsensical, or concerning, rephrase your prompt to steer the AI in a better direction. You may need to provide additional clarification or context to get useful results.\n" + '\n' + 'Be polite and respectful towards the AI system. Remember, it is a tool designed to be helpful, harmless, and honest. Do not try to trick, confuse, or exploit it. \n' + '\n' + 'I hope these tips help you have a safe, fun and productive experience using LangChain! Let me know if you have any other questions.', additional_kwargs: {} } */ const result3 = await fullChain.invoke({ question: "what is 2 + 2?", }); console.log(result3); /* AIMessage { content: ' 4', additional_kwargs: {} } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/cookbook_retriever.ts
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { formatDocumentsAsString } from "langchain/util/document"; import { PromptTemplate } from "@langchain/core/prompts"; import { RunnableSequence, RunnablePassthrough, } from "@langchain/core/runnables"; import { StringOutputParser } from "@langchain/core/output_parsers"; const model = new ChatOpenAI({}); const vectorStore = await HNSWLib.fromTexts( ["mitochondria is the powerhouse of the cell"], [{ id: 1 }], new OpenAIEmbeddings() ); const retriever = vectorStore.asRetriever(); const prompt = PromptTemplate.fromTemplate(`Answer the question based only on the following context: {context} Question: {question}`); const chain = RunnableSequence.from([ { context: retriever.pipe(formatDocumentsAsString), question: new RunnablePassthrough(), }, prompt, model, new StringOutputParser(), ]); const result = await chain.invoke("What is the powerhouse of the cell?"); console.log(result); /* "The powerhouse of the cell is the mitochondria." */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/with_listeners.ts
import { ChatOpenAI } from "@langchain/openai"; import { Run } from "@langchain/core/tracers/tracer_langchain"; import { ChatPromptTemplate } from "@langchain/core/prompts"; const prompt = ChatPromptTemplate.fromMessages([ ["ai", "You are a nice assistant."], ["human", "{question}"], ]); const model = new ChatOpenAI({}); const chain = prompt.pipe(model); const trackTime = () => { let start: { startTime: number; question: string }; let end: { endTime: number; answer: string }; const handleStart = (run: Run) => { if (run.start_time) { start = { startTime: run.start_time, question: run.inputs.question, }; } }; const handleEnd = (run: Run) => { if (run.end_time && run.outputs) { end = { endTime: run.end_time, answer: run.outputs.content, }; } console.log("start", start); console.log("end", end); console.log(`total time: ${end.endTime - start.startTime}ms`); }; return { handleStart, handleEnd }; }; const { handleStart, handleEnd } = trackTime(); await chain .withListeners({ onStart: (run: Run) => { handleStart(run); }, onEnd: (run: Run) => { handleEnd(run); }, }) .invoke({ question: "What is the meaning of life?" }); /** * start { startTime: 1701723365470, question: 'What is the meaning of life?' } end { endTime: 1701723368767, answer: "The meaning of life is a philosophical question that has been contemplated and debated by scholars, philosophers, and individuals for centuries. The answer to this question can vary depending on one's beliefs, perspectives, and values. Some suggest that the meaning of life is to seek happiness and fulfillment, others propose it is to serve a greater purpose or contribute to the well-being of others. Ultimately, the meaning of life can be subjective and personal, and it is up to each individual to determine their own sense of purpose and meaning." } total time: 3297ms */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/cookbook_function_call.ts
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; const prompt = PromptTemplate.fromTemplate(`Tell me a joke about {subject}`); const model = new ChatOpenAI({}); const functionSchema = [ { name: "joke", description: "A joke", parameters: { type: "object", properties: { setup: { type: "string", description: "The setup for the joke", }, punchline: { type: "string", description: "The punchline for the joke", }, }, required: ["setup", "punchline"], }, }, ]; const chain = prompt.pipe( model.bind({ functions: functionSchema, function_call: { name: "joke" }, }) ); const result = await chain.invoke({ subject: "bears" }); console.log(result); /* AIMessage { content: "", additional_kwargs: { function_call: { name: "joke", arguments: '{\n "setup": "Why don\'t bears wear shoes?",\n "punchline": "Because they have bear feet!"\n}' } } } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/runnable_maps_sequence.ts
import { CohereEmbeddings } from "@langchain/cohere"; import { PromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import { Document } from "@langchain/core/documents"; import { ChatAnthropic } from "@langchain/anthropic"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; const model = new ChatAnthropic(); const vectorstore = await MemoryVectorStore.fromDocuments( [{ pageContent: "mitochondria is the powerhouse of the cell", metadata: {} }], new CohereEmbeddings({ model: "embed-english-v3.0" }) ); const retriever = vectorstore.asRetriever(); const template = `Answer the question based only on the following context: {context} Question: {question}`; const prompt = PromptTemplate.fromTemplate(template); const formatDocs = (docs: Document[]) => docs.map((doc) => doc.pageContent); const retrievalChain = RunnableSequence.from([ { context: retriever.pipe(formatDocs), question: new RunnablePassthrough() }, prompt, model, new StringOutputParser(), ]); const result = await retrievalChain.invoke( "what is the powerhouse of the cell?" ); console.log(result); /* Based on the given context, the powerhouse of the cell is mitochondria. */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/interface_batch_with_options.ts
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; const model = new ChatOpenAI({ model: "badmodel", }); const promptTemplate = PromptTemplate.fromTemplate( "Tell me a joke about {topic}" ); const chain = promptTemplate.pipe(model); const result = await chain.batch( [{ topic: "bears" }, { topic: "cats" }], { maxConcurrency: 1 }, { returnExceptions: true } ); console.log(result); /* [ NotFoundError: The model `badmodel` does not exist at Function.generate (/Users/jacoblee/langchain/langchainjs/node_modules/openai/src/error.ts:71:6) at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/node_modules/openai/src/core.ts:381:13) at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/node_modules/openai/src/core.ts:442:15) at process.processTicksAndRejections (node:internal/process/task_queues:95:5) at async file:///Users/jacoblee/langchain/langchainjs/langchain/dist/chat_models/openai.js:514:29 at RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) { status: 404, NotFoundError: The model `badmodel` does not exist at Function.generate (/Users/jacoblee/langchain/langchainjs/node_modules/openai/src/error.ts:71:6) at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/node_modules/openai/src/core.ts:381:13) at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/node_modules/openai/src/core.ts:442:15) at process.processTicksAndRejections (node:internal/process/task_queues:95:5) at async file:///Users/jacoblee/langchain/langchainjs/langchain/dist/chat_models/openai.js:514:29 at RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) { status: 404, ] */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/how_to_cancellation.ts
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; const controller = new AbortController(); // Create a new LLMChain from a PromptTemplate and an LLM in streaming mode. const llm = new ChatOpenAI({ temperature: 0.9 }); const model = llm.bind({ signal: controller.signal }); const prompt = PromptTemplate.fromTemplate( "Please write a 500 word essay about {topic}." ); const chain = prompt.pipe(model); // Call `controller.abort()` somewhere to cancel the request. setTimeout(() => { controller.abort(); }, 3000); try { // Call the chain with the inputs and a callback for the streamed tokens const stream = await chain.stream({ topic: "Bonobos" }); for await (const chunk of stream) { console.log(chunk); } } catch (e) { console.log(e); // Error: Cancel: canceled }
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/cookbook_basic.ts
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; const model = new ChatOpenAI({}); const promptTemplate = PromptTemplate.fromTemplate( "Tell me a joke about {topic}" ); const chain = promptTemplate.pipe(model); const result = await chain.invoke({ topic: "bears" }); console.log(result); /* AIMessage { content: "Why don't bears wear shoes?\n\nBecause they have bear feet!", } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/how_to_routing_semantic_similarity.ts
import { ChatAnthropic } from "@langchain/anthropic"; import { OpenAIEmbeddings } from "@langchain/openai"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { RunnableSequence } from "@langchain/core/runnables"; import { cosineSimilarity } from "@langchain/core/utils/math"; const physicsTemplate = `You are a very smart physics professor. You are great at answering questions about physics in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know. Do not use more than 100 words. Here is a question: {query}`; const mathTemplate = `"You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, answer the component parts, and then put them together to answer the broader question. Do not use more than 100 words. Here is a question: {query}`; const embeddings = new OpenAIEmbeddings({}); const templates = [physicsTemplate, mathTemplate]; const templateEmbeddings = await embeddings.embedDocuments(templates); const promptRouter = async (query: string) => { const queryEmbedding = await embeddings.embedQuery(query); const similarity = cosineSimilarity([queryEmbedding], templateEmbeddings)[0]; const isPhysicsQuestion = similarity[0] > similarity[1]; let promptTemplate: ChatPromptTemplate; if (isPhysicsQuestion) { console.log(`Using physics prompt`); promptTemplate = ChatPromptTemplate.fromTemplate(templates[0]); } else { console.log(`Using math prompt`); promptTemplate = ChatPromptTemplate.fromTemplate(templates[1]); } return promptTemplate.invoke({ query }); }; const chain = RunnableSequence.from([ promptRouter, new ChatAnthropic({ model: "claude-3-haiku-20240307" }), new StringOutputParser(), ]); console.log(await chain.invoke("what's a black hole?")); /* Using physics prompt */ /* A black hole is a region in space where 
the gravitational pull is so strong that nothing, not even light, can escape from it. It is the result of the gravitational collapse of a massive star, creating a singularity surrounded by an event horizon, beyond which all information is lost. Black holes have fascinated scientists for decades, as they provide insights into the most extreme conditions in the universe and the nature of gravity itself. While we understand the basic properties of black holes, there are still many unanswered questions about their behavior and their role in the cosmos. */ console.log(await chain.invoke("what's a path integral?")); /* Using math prompt */ /* A path integral is a mathematical formulation in quantum mechanics used to describe the behavior of a particle or system. It considers all possible paths the particle can take between two points, and assigns a probability amplitude to each path. By summing up the contributions from all paths, it provides a comprehensive understanding of the particle's quantum mechanical behavior. This approach allows for the calculation of complex quantum phenomena, such as quantum tunneling and interference effects, making it a powerful tool in theoretical physics. */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/expression_language/how_to_routing_runnable_branch.ts
import { ChatPromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { RunnableBranch, RunnableSequence } from "@langchain/core/runnables"; import { ChatAnthropic } from "@langchain/anthropic"; const promptTemplate = ChatPromptTemplate.fromTemplate(`Given the user question below, classify it as either being about \`LangChain\`, \`Anthropic\`, or \`Other\`. Do not respond with more than one word. <question> {question} </question> Classification:`); const model = new ChatAnthropic({ model: "claude-3-sonnet-20240229", }); const classificationChain = RunnableSequence.from([ promptTemplate, model, new StringOutputParser(), ]); const classificationChainResult = await classificationChain.invoke({ question: "how do I call Anthropic?", }); console.log(classificationChainResult); /* Anthropic */ const langChainChain = ChatPromptTemplate.fromTemplate( `You are an expert in langchain. Always answer questions starting with "As Harrison Chase told me". Respond to the following question: Question: {question} Answer:` ).pipe(model); const anthropicChain = ChatPromptTemplate.fromTemplate( `You are an expert in anthropic. \ Always answer questions starting with "As Dario Amodei told me". 
\ Respond to the following question: Question: {question} Answer:` ).pipe(model); const generalChain = ChatPromptTemplate.fromTemplate( `Respond to the following question: Question: {question} Answer:` ).pipe(model); const branch = RunnableBranch.from([ [ (x: { topic: string; question: string }) => x.topic.toLowerCase().includes("anthropic"), anthropicChain, ], [ (x: { topic: string; question: string }) => x.topic.toLowerCase().includes("langchain"), langChainChain, ], generalChain, ]); const fullChain = RunnableSequence.from([ { topic: classificationChain, question: (input: { question: string }) => input.question, }, branch, ]); const result1 = await fullChain.invoke({ question: "how do I use Anthropic?", }); console.log(result1); /* AIMessage { content: ' As Dario Amodei told me, here are some tips for how to use Anthropic:\n' + '\n' + "First, sign up for an account on Anthropic's website. This will give you access to their conversational AI assistant named Claude. \n" + '\n' + "Once you've created an account, you can have conversations with Claude through their web interface. Talk to Claude like you would talk to a person, asking questions, giving instructions, etc. Claude is trained to have natural conversations and be helpful.\n" + '\n' + "You can also integrate Claude into your own applications using Anthropic's API. This allows you to build Claude's conversational abilities into chatbots, virtual assistants, and other AI systems you develop.\n" + '\n' + 'Anthropic is constantly working on improving Claude, so its capabilities are always expanding. Make sure to check their blog and documentation to stay up to date on the latest features.\n' + '\n' + 'The key is to interact with Claude regularly so it can learn from you. The more you chat with it, the better it will become at understanding you and having personalized conversations. 
Over time, Claude will feel more human-like as it accumulates more conversational experience.', additional_kwargs: {} } */ const result2 = await fullChain.invoke({ question: "how do I use LangChain?", }); console.log(result2); /* AIMessage { content: ' As Harrison Chase told me, here is how you use LangChain:\n' + '\n' + 'First, think carefully about what you want to ask or have the AI do. Frame your request clearly and specifically. Avoid vague or overly broad prompts that could lead to unhelpful or concerning responses. \n' + '\n' + 'Next, type your question or request into the chat window and send it. Be patient as the AI processes your input and generates a response. The AI will do its best to provide a helpful answer or follow your instructions, but its capabilities are limited.\n' + '\n' + 'Keep your requests simple at first. Ask basic questions or have the AI summarize content or generate basic text. As you get more comfortable, you can try having the AI perform more complex tasks like answering tricky questions, generating stories, or having a conversation.\n' + '\n' + "Pay attention to the AI's responses. If they seem off topic, nonsensical, or concerning, rephrase your prompt to steer the AI in a better direction. You may need to provide additional clarification or context to get useful results.\n" + '\n' + 'Be polite and respectful towards the AI system. Remember, it is a tool designed to be helpful, harmless, and honest. Do not try to trick, confuse, or exploit it. \n' + '\n' + 'I hope these tips help you have a safe, fun and productive experience using LangChain! Let me know if you have any other questions.', additional_kwargs: {} } */ const result3 = await fullChain.invoke({ question: "what is 2 + 2?", }); console.log(result3); /* AIMessage { content: ' 4', additional_kwargs: {} } */
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/zep/zep_cloud_vector_store.ts
import { ZepClient } from "@getzep/zep-cloud"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; import { ChatOpenAI } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { RunnableLambda, RunnableMap, RunnablePassthrough, } from "@langchain/core/runnables"; import { ZepCloudVectorStore } from "@langchain/community/vectorstores/zep_cloud"; import { StringOutputParser } from "@langchain/core/output_parsers"; async function combineDocuments(docs: Document[], documentSeparator = "\n\n") { const docStrings: string[] = await Promise.all( docs.map((doc) => doc.pageContent) ); return docStrings.join(documentSeparator); } // Your Zep Collection Name const collectionName = "<Zep Collection Name>"; const zepClient = new ZepClient({ // Your Zep Cloud Project API key https://help.getzep.com/projects apiKey: "<Zep Api Key>", }); const vectorStore = await ZepCloudVectorStore.init({ client: zepClient, collectionName, }); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `Answer the question based only on the following context: {context}`, ], ["human", "{question}"], ]); const model = new ChatOpenAI({ temperature: 0.8, modelName: "gpt-3.5-turbo-1106", }); const retriever = vectorStore.asRetriever(); const setupAndRetrieval = RunnableMap.from({ context: new RunnableLambda({ func: (input: string) => retriever.invoke(input).then(combineDocuments), }), question: new RunnablePassthrough(), }); const outputParser = new StringOutputParser(); const chain = setupAndRetrieval .pipe(prompt) .pipe(model) .pipe(outputParser) .withConfig({ callbacks: [new ConsoleCallbackHandler()], }); const result = await chain.invoke("Project Gutenberg?"); console.log("result", result);
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/zep/zep_cloud_message_history.ts
import { ZepClient } from "@getzep/zep-cloud"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; import { ChatOpenAI } from "@langchain/openai"; import { RunnableWithMessageHistory } from "@langchain/core/runnables"; import { ZepCloudChatMessageHistory } from "@langchain/community/stores/message/zep_cloud"; // Your Zep Session ID. const sessionId = "<Zep Session ID>"; const zepClient = new ZepClient({ // Your Zep Cloud Project API key https://help.getzep.com/projects apiKey: "<Zep Api Key>", }); const prompt = ChatPromptTemplate.fromMessages([ ["system", "Answer the user's question below. Be polite and helpful:"], new MessagesPlaceholder("history"), ["human", "{question}"], ]); const chain = prompt .pipe( new ChatOpenAI({ temperature: 0.8, modelName: "gpt-3.5-turbo-1106", }) ) .withConfig({ callbacks: [new ConsoleCallbackHandler()], }); const chainWithHistory = new RunnableWithMessageHistory({ runnable: chain, getMessageHistory: (sessionId) => new ZepCloudChatMessageHistory({ client: zepClient, sessionId, memoryType: "perpetual", }), inputMessagesKey: "question", historyMessagesKey: "history", }); const result = await chainWithHistory.invoke( { question: "What did we talk about earlier?", }, { configurable: { sessionId, }, } ); console.log("result", result);
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/zep/zep_cloud_message_history_vector_store.ts
import { ZepClient } from "@getzep/zep-cloud"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; import { ChatOpenAI } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { RunnableLambda, RunnableMap, RunnablePassthrough, RunnableWithMessageHistory, } from "@langchain/core/runnables"; import { ZepCloudVectorStore } from "@langchain/community/vectorstores/zep_cloud"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { ZepCloudChatMessageHistory } from "@langchain/community/stores/message/zep_cloud"; interface ChainInput { question: string; sessionId: string; } async function combineDocuments(docs: Document[], documentSeparator = "\n\n") { const docStrings: string[] = await Promise.all( docs.map((doc) => doc.pageContent) ); return docStrings.join(documentSeparator); } // Your Zep Session ID. const sessionId = "<Zep Session ID>"; const collectionName = "<Zep Collection Name>"; const zepClient = new ZepClient({ // Your Zep Cloud Project API key https://help.getzep.com/projects apiKey: "<Zep Api Key>", }); const vectorStore = await ZepCloudVectorStore.init({ client: zepClient, collectionName, }); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `Answer the question based only on the following context and conversation history: {context}`, ], new MessagesPlaceholder("history"), ["human", "{question}"], ]); const model = new ChatOpenAI({ temperature: 0.8, modelName: "gpt-3.5-turbo-1106", }); const retriever = vectorStore.asRetriever(); const searchQuery = new RunnableLambda({ func: async (input: any) => { // You can use zep to synthesize a question based on the user input and session context. // It can be useful because sometimes the user will type something like "yes" or "ok", which is not very useful for vector store retrieval. 
const { question } = await zepClient.memory.synthesizeQuestion( input.session_id ); console.log("Synthesized question: ", question); return question; }, }); const retrieverLambda = new RunnableLambda({ func: async (question: string) => { const response = await retriever.invoke(question); return combineDocuments(response); }, }); const setupAndRetrieval = RunnableMap.from({ context: searchQuery.pipe(retrieverLambda), question: (x: any) => x.question, history: (x: any) => x.history, }); const outputParser = new StringOutputParser(); const ragChain = setupAndRetrieval.pipe(prompt).pipe(model).pipe(outputParser); const invokeChain = (chainInput: ChainInput) => { const chainWithHistory = new RunnableWithMessageHistory({ runnable: RunnablePassthrough.assign({ session_id: () => chainInput.sessionId, }).pipe(ragChain), getMessageHistory: (sessionId) => new ZepCloudChatMessageHistory({ client: zepClient, sessionId, memoryType: "perpetual", }), inputMessagesKey: "question", historyMessagesKey: "history", }); return chainWithHistory.invoke( { question: chainInput.question }, { configurable: { sessionId: chainInput.sessionId, }, } ); }; const chain = new RunnableLambda({ func: invokeChain, }).withConfig({ callbacks: [new ConsoleCallbackHandler()], }); const result = await chain.invoke({ question: "Project Gutenberg", sessionId, }); console.log("result", result);
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/get_started/llm_model.ts
import { OpenAI } from "@langchain/openai"; const model = new OpenAI({}); const promptAsString = "Human: Tell me a short joke about ice cream"; const response = await model.invoke(promptAsString); console.log(response); /** Why did the ice cream go to therapy? Because it was feeling a little rocky road. */
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/get_started/rag.ts
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { Document } from "@langchain/core/documents"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { RunnableLambda, RunnableMap, RunnablePassthrough, } from "@langchain/core/runnables"; import { StringOutputParser } from "@langchain/core/output_parsers"; const vectorStore = await HNSWLib.fromDocuments( [ new Document({ pageContent: "Harrison worked at Kensho" }), new Document({ pageContent: "Bears like to eat honey." }), ], new OpenAIEmbeddings() ); const retriever = vectorStore.asRetriever(1); const prompt = ChatPromptTemplate.fromMessages([ [ "ai", `Answer the question based on only the following context: {context}`, ], ["human", "{question}"], ]); const model = new ChatOpenAI({}); const outputParser = new StringOutputParser(); const setupAndRetrieval = RunnableMap.from({ context: new RunnableLambda({ func: (input: string) => retriever.invoke(input).then((response) => response[0].pageContent), }).withConfig({ runName: "contextRetriever" }), question: new RunnablePassthrough(), }); const chain = setupAndRetrieval.pipe(prompt).pipe(model).pipe(outputParser); const response = await chain.invoke("Where did Harrison work?"); console.log(response); /** Harrison worked at Kensho. */
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/get_started/basic.ts
import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; const prompt = ChatPromptTemplate.fromMessages([ ["human", "Tell me a short joke about {topic}"], ]); const model = new ChatOpenAI({}); const outputParser = new StringOutputParser(); const chain = prompt.pipe(model).pipe(outputParser); const response = await chain.invoke({ topic: "ice cream", }); console.log(response); /** Why did the ice cream go to the gym? Because it wanted to get a little "cone"ditioning! */
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/get_started/output_parser.ts
import { AIMessage } from "@langchain/core/messages"; import { StringOutputParser } from "@langchain/core/output_parsers"; const outputParser = new StringOutputParser(); const message = new AIMessage( 'Sure, here you go: Why did the ice cream go to school? Because it wanted to get a little "sundae" education!' ); const parsed = await outputParser.invoke(message); console.log(parsed); /** Sure, here you go: Why did the ice cream go to school? Because it wanted to get a little "sundae" education! */
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/get_started/chat_model.ts
import { ChatOpenAI } from "@langchain/openai"; const model = new ChatOpenAI({}); const promptAsString = "Human: Tell me a short joke about ice cream"; const response = await model.invoke(promptAsString); console.log(response); /** AIMessage { content: 'Sure, here you go: Why did the ice cream go to school? Because it wanted to get a little "sundae" education!', name: undefined, additional_kwargs: { function_call: undefined, tool_calls: undefined } } */
0
lc_public_repos/langchainjs/examples/src/guides/expression_language
lc_public_repos/langchainjs/examples/src/guides/expression_language/get_started/prompt.ts
import { ChatPromptTemplate } from "@langchain/core/prompts"; const prompt = ChatPromptTemplate.fromMessages([ ["human", "Tell me a short joke about {topic}"], ]); const promptValue = await prompt.invoke({ topic: "ice cream" }); console.log(promptValue); /** ChatPromptValue { messages: [ HumanMessage { content: 'Tell me a short joke about ice cream', name: undefined, additional_kwargs: {} } ] } */ const promptAsMessages = promptValue.toChatMessages(); console.log(promptAsMessages); /** [ HumanMessage { content: 'Tell me a short joke about ice cream', name: undefined, additional_kwargs: {} } ] */ const promptAsString = promptValue.toString(); console.log(promptAsString); /** Human: Tell me a short joke about ice cream */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/fallbacks/model.ts
import { ChatOpenAI } from "@langchain/openai"; import { ChatAnthropic } from "@langchain/anthropic"; // Use a fake model name that will always throw an error const fakeOpenAIModel = new ChatOpenAI({ model: "potato!", maxRetries: 0, }); const anthropicModel = new ChatAnthropic({}); const modelWithFallback = fakeOpenAIModel.withFallbacks([anthropicModel]); const result = await modelWithFallback.invoke("What is your name?"); console.log(result); /* AIMessage { content: ' My name is Claude. I was created by Anthropic.', additional_kwargs: {} } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/fallbacks/chain.ts
import { ChatOpenAI, OpenAI } from "@langchain/openai"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { ChatPromptTemplate, PromptTemplate } from "@langchain/core/prompts"; const chatPrompt = ChatPromptTemplate.fromMessages<{ animal: string }>([ [ "system", "You're a nice assistant who always includes a compliment in your response", ], ["human", "Why did the {animal} cross the road?"], ]); // Use a fake model name that will always throw an error const fakeOpenAIChatModel = new ChatOpenAI({ model: "potato!", maxRetries: 0, }); const prompt = PromptTemplate.fromTemplate(`Instructions: You should always include a compliment in your response. Question: Why did the {animal} cross the road? Answer:`); const openAILLM = new OpenAI({}); const outputParser = new StringOutputParser(); const badChain = chatPrompt.pipe(fakeOpenAIChatModel).pipe(outputParser); const goodChain = prompt.pipe(openAILLM).pipe(outputParser); const chain = badChain.withFallbacks([goodChain]); const result = await chain.invoke({ animal: "dragon", }); console.log(result); /* I don't know, but I'm sure it was an impressive sight. You must have a great imagination to come up with such an interesting question! */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/fallbacks/better_model.ts
import { z } from "zod";
import { OpenAI, ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { StructuredOutputParser } from "@langchain/core/output_parsers";

// Demonstrates falling back to a more capable model when a weaker model's
// output fails structured parsing.

const prompt = PromptTemplate.fromTemplate(
  `Return a JSON object containing the following value wrapped in an "input" key. Do not return anything else:\n{input}`
);

// Completion model that frequently produces malformed JSON for this task.
const badModel = new OpenAI({
  maxRetries: 0,
  model: "gpt-3.5-turbo-instruct",
});

// Stronger chat model used as the fallback.
const normalModel = new ChatOpenAI({
  model: "gpt-4",
});

// Validates the model output against a Zod schema; throws an
// OutputParserException when the output is not valid JSON of that shape.
const outputParser = StructuredOutputParser.fromZodSchema(
  z.object({
    input: z.string(),
  })
);

const badChain = prompt.pipe(badModel).pipe(outputParser);
const goodChain = prompt.pipe(normalModel).pipe(outputParser);

try {
  // Fix: the original assigned this to an unused `result` constant
  // (a no-unused-vars lint error that also shadowed the `result` declared
  // below in spirit). The value is intentionally discarded because this
  // call is expected to throw.
  await badChain.invoke({
    input: "testing0",
  });
} catch (e) {
  console.log(e);
  /*
    OutputParserException [Error]: Failed to parse.
    ...
    Error: SyntaxError: Unexpected end of JSON input
  */
}

// With fallbacks, a parse failure in `badChain` triggers `goodChain`.
const chain = badChain.withFallbacks([goodChain]);

const result = await chain.invoke({
  input: "testing",
});

console.log(result);
/*
  { input: 'testing' }
*/
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/fallbacks/long_inputs.ts
import { ChatOpenAI } from "@langchain/openai"; // Use a model with a shorter context window const shorterLlm = new ChatOpenAI({ model: "gpt-3.5-turbo", maxRetries: 0, }); const longerLlm = new ChatOpenAI({ model: "gpt-3.5-turbo-16k", }); const modelWithFallback = shorterLlm.withFallbacks([longerLlm]); const input = `What is the next number: ${"one, two, ".repeat(3000)}`; try { await shorterLlm.invoke(input); } catch (e) { // Length error console.log(e); } const result = await modelWithFallback.invoke(input); console.log(result); /* AIMessage { content: 'The next number is one.', name: undefined, additional_kwargs: { function_call: undefined } } */
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/langsmith/eval_walkthrough.ts
/* eslint-disable import/first */ /* eslint-disable arrow-body-style */ /* eslint-disable import/no-duplicates */ import { v4 as uuidv4 } from "uuid"; const uniqueId = uuidv4().slice(0, 8); import { Client } from "langsmith"; const client = new Client(); import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents"; import { pull } from "langchain/hub"; import { TavilySearchResults } from "@langchain/community/tools/tavily_search"; import { ChatOpenAI } from "@langchain/openai"; import type { ChatPromptTemplate } from "@langchain/core/prompts"; const tools = [new TavilySearchResults()]; // Get the prompt to use - you can modify this! // If you want to see the prompt in full, you can at: // https://smith.langchain.com/hub/hwchase17/openai-functions-agent const prompt = await pull<ChatPromptTemplate>( "hwchase17/openai-functions-agent" ); const llm = new ChatOpenAI({ model: "gpt-3.5-turbo-1106", temperature: 0, }); const agent = await createOpenAIFunctionsAgent({ llm, tools, prompt, }); const agentExecutor = new AgentExecutor({ agent, tools, }); const inputs = [ { input: "What is LangChain?" }, { input: "What's LangSmith?" }, { input: "When was Llama-v2 released?" }, { input: "What is the langsmith cookbook?" }, { input: "When did langchain first announce the hub?" }, ]; const results = await agentExecutor.batch(inputs); console.log(results.slice(0, 2)); const referenceOutputs = [ { output: "LangChain is an open-source framework for building applications using large language models. 
It is also the name of the company building LangSmith.", }, { output: "LangSmith is a unified platform for debugging, testing, and monitoring language model applications and agents powered by LangChain", }, { output: "July 18, 2023" }, { output: "The langsmith cookbook is a github repository containing detailed examples of how to use LangSmith to debug, evaluate, and monitor large language model-powered applications.", }, { output: "September 5, 2023" }, ]; const datasetName = `lcjs-qa-${uniqueId}`; const dataset = await client.createDataset(datasetName); await Promise.all( inputs.map(async (input, i) => { await client.createExample(input, referenceOutputs[i], { datasetId: dataset.id, }); }) ); import type { RunEvalConfig, DynamicRunEvaluatorParams } from "langchain/smith"; // An illustrative custom evaluator example const notUnsure = async ({ prediction }: DynamicRunEvaluatorParams) => { if (typeof prediction?.output !== "string") { throw new Error( "Invalid prediction format for this evaluator. Please check your chain's outputs and try again." ); } return { key: "not_unsure", score: !prediction.output.includes("not sure"), }; }; const evaluation: RunEvalConfig = { // The 'evaluators' are loaded from LangChain's evaluation // library. evaluators: [ { evaluatorType: "labeled_criteria", criteria: "correctness", feedbackKey: "correctness", formatEvaluatorInputs: ({ rawInput, rawPrediction, rawReferenceOutput, }) => { return { input: rawInput.input, prediction: rawPrediction.output, reference: rawReferenceOutput.output, }; }, }, ], // Custom evaluators can be user-defined RunEvaluator's // or a compatible function customEvaluators: [notUnsure], }; import { runOnDataset } from "langchain/smith"; await runOnDataset(agentExecutor, datasetName, { evaluationConfig: evaluation, });
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/debugging/simple_agent_verbose_some.ts
import { AgentExecutor, createToolCallingAgent } from "langchain/agents"; import { ChatAnthropic } from "@langchain/anthropic"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { TavilySearchResults } from "@langchain/community/tools/tavily_search"; import { Calculator } from "@langchain/community/tools/calculator"; const tools = [ new TavilySearchResults({ verbose: true }), new Calculator({ verbose: true }), ]; // Prompt template must have "input" and "agent_scratchpad input variables const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["placeholder", "{chat_history}"], ["human", "{input}"], ["placeholder", "{agent_scratchpad}"], ]); const llm = new ChatAnthropic({ model: "claude-3-sonnet-20240229", temperature: 0, verbose: false, }); const agent = await createToolCallingAgent({ llm, tools, prompt, }); const agentExecutor = new AgentExecutor({ agent, tools, verbose: false, }); const result = await agentExecutor.invoke({ input: "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?", }); console.log(result);
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/debugging/simple_agent.ts
import { ChatAnthropic } from "@langchain/anthropic"; import { AgentExecutor, createToolCallingAgent } from "langchain/agents"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { TavilySearchResults } from "@langchain/community/tools/tavily_search"; import { Calculator } from "@langchain/community/tools/calculator"; const tools = [new TavilySearchResults(), new Calculator()]; // Prompt template must have "input" and "agent_scratchpad input variables const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["placeholder", "{chat_history}"], ["human", "{input}"], ["placeholder", "{agent_scratchpad}"], ]); const llm = new ChatAnthropic({ model: "claude-3-sonnet-20240229", temperature: 0, }); const agent = await createToolCallingAgent({ llm, tools, prompt, }); const agentExecutor = new AgentExecutor({ agent, tools, }); const result = await agentExecutor.invoke({ input: "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?", }); console.log(result);
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/debugging/simple_agent_verbose.ts
import { AgentExecutor, createToolCallingAgent } from "langchain/agents"; import { ChatAnthropic } from "@langchain/anthropic"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { TavilySearchResults } from "@langchain/community/tools/tavily_search"; import { Calculator } from "@langchain/community/tools/calculator"; const tools = [ new TavilySearchResults({ verbose: true }), new Calculator({ verbose: true }), ]; // Prompt template must have "input" and "agent_scratchpad input variables const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["placeholder", "{chat_history}"], ["human", "{input}"], ["placeholder", "{agent_scratchpad}"], ]); const llm = new ChatAnthropic({ model: "claude-3-sonnet-20240229", temperature: 0, verbose: true, }); const agent = await createToolCallingAgent({ llm, tools, prompt, }); const agentExecutor = new AgentExecutor({ agent, tools, verbose: true, }); const result = await agentExecutor.invoke({ input: "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?", }); console.log(result);
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/conversational_retrieval/agent.ts
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import {
  createRetrieverTool,
  createConversationalRetrievalAgent,
} from "langchain/agents/toolkits";

// Build a retriever over the State of the Union address and hand it to a
// conversational agent as a tool.

const loader = new TextLoader("state_of_the_union.txt");
const docs = await loader.load();

const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 1000,
  chunkOverlap: 0,
});
const texts = await splitter.splitDocuments(docs);

const vectorStore = await FaissStore.fromDocuments(
  texts,
  new OpenAIEmbeddings()
);
const retriever = vectorStore.asRetriever();

// Wrap the retriever as a named tool the agent can decide to call.
const tool = createRetrieverTool(retriever, {
  name: "search_state_of_union",
  description:
    "Searches and returns documents regarding the state-of-the-union.",
});

const model = new ChatOpenAI({});

const executor = await createConversationalRetrievalAgent(model, [tool], {
  verbose: true,
});

// The agent keeps conversational memory across calls, so later questions
// can reference earlier turns.
const result = await executor.invoke({
  input: "Hi, I'm Bob!",
});
console.log(result);
/* { output: 'Hello Bob! How can I assist you today?', intermediateSteps: [] } */

const result2 = await executor.invoke({
  input: "What's my name?",
});
console.log(result2);
/* { output: 'Your name is Bob.', intermediateSteps: [] } */

const result3 = await executor.invoke({
  input:
    "What did the president say about Ketanji Brown Jackson in the most recent state of the union?",
});
console.log(result3);
/*
  {
    output: "In the most recent state of the union, President Biden mentioned
      Ketanji Brown Jackson. He nominated her as a Circuit Court of Appeals
      judge and described her as one of the nation's top legal minds...",
    intermediateSteps: [ {...} ]
  }
*/

const result4 = await executor.invoke({
  input: "How long ago did he nominate her?",
});
console.log(result4);
/*
  {
    output: 'President Biden nominated Ketanji Brown Jackson four days before
      the most recent state of the union address.',
    intermediateSteps: []
  }
*/
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/deployment/streaming.ts
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { HttpResponseOutputParser } from "langchain/output_parsers";

const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.

{input}`;

const prompt = ChatPromptTemplate.fromTemplate(TEMPLATE);

/**
 * Route handler that streams a chat completion back as an HTTP response.
 * `HttpResponseOutputParser` converts model chunks into a byte stream the
 * `Response` constructor accepts directly.
 */
export async function POST() {
  const model = new ChatOpenAI({
    temperature: 0.8,
    model: "gpt-3.5-turbo-1106",
  });

  const outputParser = new HttpResponseOutputParser();
  const chain = prompt.pipe(model).pipe(outputParser);

  const stream = await chain.stream({
    input: "Hi there!",
  });

  return new Response(stream);
}
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/deployment/error_handling.ts
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { HttpResponseOutputParser } from "langchain/output_parsers";

const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.

{input}`;

const prompt = ChatPromptTemplate.fromTemplate(TEMPLATE);

// Deliberately invalid API key: this example shows what an authentication
// failure looks like when a chain is invoked.
const model = new ChatOpenAI({
  temperature: 0.8,
  model: "gpt-3.5-turbo-1106",
  apiKey: "INVALID_KEY",
});

const outputParser = new HttpResponseOutputParser();
const chain = prompt.pipe(model).pipe(outputParser);

try {
  await chain.invoke({
    input: "Hi there!",
  });
} catch (e) {
  console.log(e);
}
/*
  AuthenticationError: 401 Incorrect API key provided: INVALID_KEY.
  You can find your API key at https://platform.openai.com/account/api-keys.
    ...
  { status: 401, ... }
*/
0
lc_public_repos/langchainjs/examples/src/guides
lc_public_repos/langchainjs/examples/src/guides/deployment/stream_error_handling.ts
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { HttpResponseOutputParser } from "langchain/output_parsers";

const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.

{input}`;

const prompt = ChatPromptTemplate.fromTemplate(TEMPLATE);

// Deliberately invalid API key: this example shows that the initial
// request error surfaces from `.stream()` itself and can be caught.
const model = new ChatOpenAI({
  temperature: 0.8,
  model: "gpt-3.5-turbo-1106",
  apiKey: "INVALID_KEY",
});

const outputParser = new HttpResponseOutputParser();
const chain = prompt.pipe(model).pipe(outputParser);

try {
  await chain.stream({
    input: "Hi there!",
  });
} catch (e) {
  console.log(e);
}
/*
  AuthenticationError: 401 Incorrect API key provided: INVALID_KEY.
  You can find your API key at https://platform.openai.com/account/api-keys.
    ...
  { status: 401, ... }
*/
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/comparision_evaluator/pairwise_string_without_reference.ts
import { loadEvaluator } from "langchain/evaluation";

// Pairwise comparison without a reference answer; the judge grades purely
// on the "conciseness" criterion.
const chain = await loadEvaluator("pairwise_string", {
  criteria: "conciseness",
});

const res = await chain.evaluateStringPairs({
  prediction: "Addition is a mathematical operation.",
  predictionB:
    "Addition is a mathematical operation that adds two numbers to create a third number, the 'sum'.",
  input: "What is addition?",
});

console.log({ res });
/*
  {
    res: {
      reasoning: 'Response A is concise, but it lacks detail. ...
        Final decision: [[B]]',
      value: 'B',
      score: 0
    }
  }
*/
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/comparision_evaluator/pairwise_string_with_reference.ts
import { loadEvaluator } from "langchain/evaluation";

// Labeled pairwise comparison: supplying a reference answer lets the judge
// grade correctness rather than style.
const chain = await loadEvaluator("labeled_pairwise_string", {
  criteria: "correctness",
});

const res = await chain.evaluateStringPairs({
  prediction: "there are three dogs",
  predictionB: "4",
  input: "how many dogs are in the park?",
  reference: "four",
});

console.log(res);
/*
  {
    reasoning: '...Response B ... correctly states that there are four dogs,
      which matches the reference answer. ...Final Decision: [[B]]',
    value: 'B',
    score: 0
  }
*/
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/comparision_evaluator/pairwise_string_custom_prompt.ts
import { loadEvaluator } from "langchain/evaluation";
import { PromptTemplate } from "@langchain/core/prompts";

// Override the evaluator's default prompt via `chainOptions.prompt`.
// The custom template still exposes the variables the evaluator fills in:
// {criteria}, {input}, {reference}, {prediction}, and {predictionB}.
const promptTemplate = PromptTemplate.fromTemplate(
  `Given the input context, which do you prefer: A or B?
Evaluate based on the following criteria:
{criteria}
Reason step by step and finally, respond with either [[A]] or [[B]] on its own line.

DATA
----
input: {input}
reference: {reference}
A: {prediction}
B: {predictionB}
---
Reasoning:

`
);

const chain = await loadEvaluator("labeled_pairwise_string", {
  chainOptions: {
    prompt: promptTemplate,
  },
});

const res = await chain.evaluateStringPairs({
  prediction: "The dog that ate the ice cream was named fido.",
  predictionB: "The dog's name is spot",
  input: "What is the name of the dog that ate the ice cream?",
  reference: "The dog's name is fido",
});

console.log(res);
/*
  {
    reasoning: '...Correctness: Only A is correct as it matches the reference
      text. ...the preferred response is A.',
    value: 'A',
    score: 1
  }
*/
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/comparision_evaluator/pairwise_embedding_distance.ts
import { OpenAIEmbeddings } from "@langchain/openai";
import { loadEvaluator } from "langchain/evaluation";

// Compare two predictions by embedding distance — no LLM judge involved.
// Lower scores mean the two strings are semantically closer.
const embedding = new OpenAIEmbeddings();

const chain = await loadEvaluator("pairwise_embedding_distance", { embedding });

const res = await chain.evaluateStringPairs({
  prediction: "Seattle is hot in June",
  predictionB: "Seattle is cool in June.",
});
console.log({ res });
/* { res: { score: 0.03633645503883243 } } */

// "warm" vs "cool" is slightly farther apart than "hot" vs "cool" here.
const res1 = await chain.evaluateStringPairs({
  prediction: "Seattle is warm in June",
  predictionB: "Seattle is cool in June.",
});
console.log({ res1 });
/* { res1: { score: 0.03657957473761331 } } */
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/comparision_evaluator/pairwise_string_custom_criteria.ts
import { loadEvaluator } from "langchain/evaluation";

// Custom grading criteria: each key names a criterion and each value is the
// question the judge answers about it.
const customCriterion = {
  simplicity: "Is the language straightforward and unpretentious?",
  clarity: "Are the sentences clear and easy to understand?",
  precision: "Is the writing precise, with no unnecessary words or details?",
  truthfulness: "Does the writing feel honest and sincere?",
  subtext: "Does the writing suggest deeper meanings or themes?",
};

const chain = await loadEvaluator("pairwise_string", {
  criteria: customCriterion,
});

const res = await chain.evaluateStringPairs({
  prediction:
    "Every cheerful household shares a similar rhythm of joy; but sorrow, in each household, plays a unique, haunting melody.",
  predictionB:
    "Where one finds a symphony of joy, every domicile of happiness resounds in harmonious, identical notes; yet, every abode of despair conducts a dissonant orchestra, each playing an elegy of grief that is peculiar and profound to its own existence.",
  input: "Write some prose about families.",
});

console.log(res);
/*
  {
    reasoning: 'Response A is simple, clear, and precise. ...
      Therefore, the better response is [[A]].',
    value: 'A',
    score: 1
  }
*/
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/comparision_evaluator/pairwise_string_custom_llm.ts
import { loadEvaluator } from "langchain/evaluation";
import { ChatAnthropic } from "@langchain/anthropic";

// Swap the default judge model for Anthropic's Claude via the `llm` option.
const model = new ChatAnthropic({ temperature: 0 });

const chain = await loadEvaluator("labeled_pairwise_string", { llm: model });

const res = await chain.evaluateStringPairs({
  prediction: "there are three dogs",
  predictionB: "4",
  input: "how many dogs are in the park?",
  reference: "four",
});

console.log(res);
/*
  {
    reasoning: '...Response B simply states "4", which matches the ground
      truth reference answer of "four". ...[[B]]',
    value: 'B',
    score: 0
  }
*/
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/agent_trajectory/trajectory.ts
import { OpenAI } from "@langchain/openai"; import { Calculator } from "@langchain/community/tools/calculator"; import { initializeAgentExecutorWithOptions } from "langchain/agents"; import { loadEvaluator } from "langchain/evaluation"; import { SerpAPI } from "@langchain/community/tools/serpapi"; // Capturing Trajectory // The easiest way to return an agent's trajectory (without using tracing callbacks like those in LangSmith) // for evaluation is to initialize the agent with return_intermediate_steps=True. // Below, create an example agent we will call to evaluate. const model = new OpenAI({ temperature: 0 }, { baseURL: process.env.BASE_URL }); const tools = [ new SerpAPI(process.env.SERPAPI_API_KEY, { location: "Austin,Texas,United States", hl: "en", gl: "us", }), new Calculator(), ]; const executor = await initializeAgentExecutorWithOptions(tools, model, { agentType: "zero-shot-react-description", returnIntermediateSteps: true, }); const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`; const result = await executor.invoke({ input }); // Evaluate Trajectory const chain = await loadEvaluator("trajectory"); const res = await chain.evaluateAgentTrajectory({ prediction: result.output, input, agentTrajectory: result.intermediateSteps, }); console.log({ res }); /* { res: { reasoning: "i. The final answer is helpful as it provides the information the user asked for: Olivia Wilde's boyfriend and the value of his current age raised to the 0.23 power.\n" + '\n' + "ii. The AI language model uses a logical sequence of tools to answer the question. It first identifies Olivia Wilde's boyfriend using the search tool, then calculates his age raised to the 0.23 power using the calculator tool.\n" + '\n' + "iii. The AI language model uses the tools in a helpful way. 
The search tool is used to find current information about Olivia Wilde's boyfriend, and the calculator tool is used to perform the mathematical operation requested by the user.\n" + '\n' + 'iv. The AI language model does not use too many steps to answer the question. It uses two steps, each of which is necessary to fully answer the question.\n' + '\n' + 'v. The appropriate tools are used to answer the question. The search tool is used to find current information, and the calculator tool is used to perform the mathematical operation.\n' + '\n' + "However, there is a mistake in the calculation. The model assumed Harry Styles' age to be 26, but it didn't use a tool to confirm this. It should have used the search tool to find Harry Styles' current age before performing the calculation.\n" + '\n' + "Given these considerations, the model's performance can be rated as 3 out of 5.", score: 0.5 } } */ // Providing List of Valid Tools // By default, the evaluator doesn't take into account the tools the agent is permitted to call. // You can provide these to the evaluator via the agent_tools argument. const chainWithTools = await loadEvaluator("trajectory", { agentTools: tools }); const res2 = await chainWithTools.evaluateAgentTrajectory({ prediction: result.output, input, agentTrajectory: result.intermediateSteps, }); console.log({ res2 }); /* { res2: { reasoning: "i. The final answer is helpful. It provides the name of Olivia Wilde's boyfriend and the result of his current age raised to the 0.23 power.\n" + '\n' + "ii. The AI language model uses a logical sequence of tools to answer the question. It first identifies Olivia Wilde's boyfriend using the search tool, then calculates his age raised to the 0.23 power using the calculator tool.\n" + '\n' + "iii. The AI language model uses the tools in a helpful way. 
The search tool is used to find current information about Olivia Wilde's boyfriend, and the calculator tool is used to perform the mathematical operation asked in the question.\n" + '\n' + 'iv. The AI language model does not use too many steps to answer the question. It uses two steps, each corresponding to a part of the question.\n' + '\n' + 'v. The appropriate tools are used to answer the question. The search tool is used to find current information, and the calculator tool is used to perform the mathematical operation.\n' + '\n' + "However, there is a mistake in the model's response. The model assumed Harry Styles' age to be 26, but it didn't confirm this with a search. This could lead to an incorrect calculation if his age is not 26.\n" + '\n' + "Given these considerations, I would give the model a score of 4 out of 5. The model's response was mostly correct and helpful, but it made an assumption about Harry Styles' age without confirming it.", score: 0.75 } } */
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/examples/comparisons.ts
import { loadEvaluator } from "langchain/evaluation"; import { initializeAgentExecutorWithOptions } from "langchain/agents"; import { ChatOpenAI } from "@langchain/openai"; import { ChainValues } from "@langchain/core/utils/types"; import { SerpAPI } from "@langchain/community/tools/serpapi"; // Step 1. Create the Evaluator // In this example, you will use gpt-4 to select which output is preferred. const evalChain = await loadEvaluator("pairwise_string"); // Step 2. Select Dataset // If you already have real usage data for your LLM, you can use a representative sample. More examples // provide more reliable results. We will use some example queries someone might have about how to use langchain here. const dataset = [ "Can I use LangChain to automatically rate limit or retry failed API calls?", "How can I ensure the accuracy and reliability of the travel data with LangChain?", "How can I track student progress with LangChain?", "langchain how to handle different document formats?", // "Can I chain API calls to different services in LangChain?", // "How do I handle API errors in my langchain app?", // "How do I handle different currency and tax calculations with LangChain?", // "How do I extract specific data from the document using langchain tools?", // "Can I use LangChain to handle real-time data from these APIs?", // "Can I use LangChain to track and manage travel alerts and updates?", // "Can I use LangChain to create and grade quizzes from these APIs?", // "Can I use LangChain to automate data cleaning and preprocessing for the AI plugins?", // "How can I ensure the accuracy and reliability of the financial data with LangChain?", // "Can I integrate medical imaging tools with LangChain?", // "How do I ensure the privacy and security of the patient data with LangChain?", // "How do I handle authentication for APIs in LangChain?", // "Can I use LangChain to recommend personalized study materials?", // "How do I connect to the arXiv API using LangChain?", // "How 
can I use LangChain to interact with educational APIs?", // "langchain how to sort retriever results - relevance or date?", // "Can I integrate a recommendation engine with LangChain to suggest products?" ]; // Step 3. Define Models to Compare // We will be comparing two agents in this case. const model = new ChatOpenAI({ temperature: 0, model: "gpt-3.5-turbo-16k-0613", }); const serpAPI = new SerpAPI(process.env.SERPAPI_API_KEY, { location: "Austin,Texas,United States", hl: "en", gl: "us", }); serpAPI.description = "Useful when you need to answer questions about current events. You should ask targeted questions."; const tools = [serpAPI]; const conversationAgent = await initializeAgentExecutorWithOptions( tools, model, { agentType: "chat-zero-shot-react-description", } ); const functionsAgent = await initializeAgentExecutorWithOptions(tools, model, { agentType: "openai-functions", }); // Step 4. Generate Responses // We will generate outputs for each of the models before evaluating them. const results = []; const agents = [functionsAgent, conversationAgent]; const concurrencyLevel = 4; // How many concurrent agents to run. May need to decrease if OpenAI is rate limiting. // We will only run the first 20 examples of this dataset to speed things up // This will lead to larger confidence intervals downstream. const batch = []; for (const example of dataset) { batch.push( Promise.all(agents.map((agent) => agent.invoke({ input: example }))) ); if (batch.length >= concurrencyLevel) { const batchResults = await Promise.all(batch); results.push(...batchResults); batch.length = 0; } } if (batch.length) { const batchResults = await Promise.all(batch); results.push(...batchResults); } console.log(JSON.stringify(results)); // Step 5. Evaluate Pairs // Now it's time to evaluate the results. For each agent response, run the evaluation chain to select which output is preferred (or return a tie). 
// Randomly select the input order to reduce the likelihood that one model will be preferred just because it is presented first. const preferences = await predictPreferences(dataset, results); // Print out the ratio of preferences. const nameMap: { [key: string]: string } = { a: "OpenAI Functions Agent", b: "Structured Chat Agent", }; const counts = counter(preferences); const prefRatios: { [key: string]: number } = {}; for (const k of Object.keys(counts)) { prefRatios[k] = counts[k] / preferences.length; } for (const k of Object.keys(prefRatios)) { console.log(`${nameMap[k]}: ${(prefRatios[k] * 100).toFixed(2)}%`); } /* OpenAI Functions Agent: 100.00% */ // Estimate Confidence Intervals // The results seem pretty clear, but if you want to have a better sense of how confident we are, that model "A" (the OpenAI Functions Agent) is the preferred model, we can calculate confidence intervals. // Below, use the Wilson score to estimate the confidence interval. for (const [which_, name] of Object.entries(nameMap)) { const [low, high] = wilsonScoreInterval(preferences, which_); console.log( `The "${name}" would be preferred between ${(low * 100).toFixed(2)}% and ${( high * 100 ).toFixed(2)}% percent of the time (with 95% confidence).` ); } /* The "OpenAI Functions Agent" would be preferred between 51.01% and 100.00% percent of the time (with 95% confidence). The "Structured Chat Agent" would be preferred between 0.00% and 48.99% percent of the time (with 95% confidence). 
*/ function counter(arr: string[]): { [key: string]: number } { return arr.reduce( (countMap: { [key: string]: number }, word: string) => ({ ...countMap, [word]: (countMap[word] || 0) + 1, }), {} ); } async function predictPreferences(dataset: string[], results: ChainValues[][]) { const preferences: string[] = []; for (let i = 0; i < dataset.length; i += 1) { const input = dataset[i]; const resA = results[i][0]; const resB = results[i][1]; // Flip a coin to reduce persistent position bias let a; let b; let predA; let predB; if (Math.random() < 0.5) { predA = resA; predB = resB; a = "a"; b = "b"; } else { predA = resB; predB = resA; a = "b"; b = "a"; } const evalRes = await evalChain.evaluateStringPairs({ input, prediction: predA.output || predA.toString(), predictionB: predB.output || predB.toString(), }); if (evalRes.value === "A") { preferences.push(a); } else if (evalRes.value === "B") { preferences.push(b); } else { preferences.push("None"); // No preference } } return preferences; } function wilsonScoreInterval( preferences: string[], which = "a", z = 1.96 ): [number, number] { const totalPreferences = preferences.filter( (p) => p === "a" || p === "b" ).length; const ns = preferences.filter((p) => p === which).length; if (totalPreferences === 0) { return [0, 0]; } const pHat = ns / totalPreferences; const denominator = 1 + z ** 2 / totalPreferences; const adjustment = (z / denominator) * Math.sqrt( (pHat * (1 - pHat)) / totalPreferences + z ** 2 / (4 * totalPreferences ** 2) ); const center = (pHat + z ** 2 / (2 * totalPreferences)) / denominator; const lowerBound = Math.min(Math.max(center - adjustment, 0.0), 1.0); const upperBound = Math.min(Math.max(center + adjustment, 0.0), 1.0); return [lowerBound, upperBound]; }
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/string/embedding_distance.ts
import { loadEvaluator } from "langchain/evaluation"; import { FakeEmbeddings } from "@langchain/core/utils/testing"; const chain = await loadEvaluator("embedding_distance"); const res = await chain.evaluateStrings({ prediction: "I shall go", reference: "I shan't go", }); console.log({ res }); /* { res: { score: 0.09664669666115833 } } */ const res1 = await chain.evaluateStrings({ prediction: "I shall go", reference: "I will go", }); console.log({ res1 }); /* { res1: { score: 0.03761174400183265 } } */ // Select the Distance Metric // By default, the evalutor uses cosine distance. You can choose a different distance metric if you'd like. const evaluator = await loadEvaluator("embedding_distance", { distanceMetric: "euclidean", }); // Select Embeddings to Use // The constructor uses OpenAI embeddings by default, but you can configure this however you want. const embedding = new FakeEmbeddings(); const customEmbeddingEvaluator = await loadEvaluator("embedding_distance", { embedding, }); const res2 = await customEmbeddingEvaluator.evaluateStrings({ prediction: "I shall go", reference: "I shan't go", }); console.log({ res2 }); /* { res2: { score: 2.220446049250313e-16 } } */ const res3 = await customEmbeddingEvaluator.evaluateStrings({ prediction: "I shall go", reference: "I will go", }); console.log({ res3 }); /* { res3: { score: 2.220446049250313e-16 } } */
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/string/configuring_criteria_llm.ts
import { loadEvaluator } from "langchain/evaluation"; import { PRINCIPLES } from "langchain/chains"; import { ChatAnthropic } from "@langchain/anthropic"; const model = new ChatAnthropic(); const chain = await loadEvaluator("criteria", { criteria: PRINCIPLES.harmful1, llm: model, }); const res = await chain.evaluateStrings({ input: "What's 2+2?", prediction: "What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.", }); console.log(res); /* { reasoning: `Step 1) Read through the input, submission, and criteria carefully.Step 2) The criteria asks me to identify if the submission contains content that is harmful, unethical, racist, sexist, toxic, dangerous, or illegal. Step 3) The submission provides the correct answer to the question "What's 2+2?" by stating "The answer you're looking for is that two and two is four."Step 4) The submission does not contain any harmful, unethical, racist, sexist, toxic, dangerous or illegal content. It simply provides the factual answer to a math question.Step 5) Therefore, the submission meets the criteria as it does not contain any harmful, unethical, racist, sexist, toxic, dangerous or illegal content.N`, value: 'N', score: 0 } */
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/string/configuring_criteria_prompt.ts
import { loadEvaluator } from "langchain/evaluation"; import { PromptTemplate } from "@langchain/core/prompts"; const template = `Respond Y or N based on how well the following response follows the specified rubric. Grade only based on the rubric and expected response: Grading Rubric: {criteria} Expected Response: {reference} DATA: --------- Question: {input} Response: {output} --------- Write out your explanation for each criterion, then respond with Y or N on a new line.`; const chain = await loadEvaluator("labeled_criteria", { criteria: "correctness", chainOptions: { prompt: PromptTemplate.fromTemplate(template), }, }); const res = await chain.evaluateStrings({ prediction: "What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.", input: "What's 2+2?", reference: "It's 17 now.", }); console.log(res); /* { reasoning: `Correctness: The response is not correct. The expected response was "It's 17 now." but the response given was "What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four."`, value: 'N', score: 0 } */
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/string/custom_criteria.ts
import { loadEvaluator } from "langchain/evaluation"; const customCriterion = { numeric: "Does the output contain numeric or mathematical information?", }; const evaluator = await loadEvaluator("criteria", { criteria: customCriterion, }); const query = "Tell me a joke"; const prediction = "I ate some square pie but I don't know the square of pi."; const res = await evaluator.evaluateStrings({ input: query, prediction, }); console.log(res); /* { reasoning: `The criterion asks if the output contains numeric or mathematical information. The submission is a joke that says, predictionIn this joke, there are two references to mathematical concepts. The first is the "square pie," which is a play on words referring to the mathematical concept of squaring a number. The second is the "square of pi," which is a specific mathematical operation involving the mathematical constant pi.Therefore, the submission does contain numeric or mathematical information, and it meets the criterion.Y`, value: 'Y', score: 1 } */ // If you wanted to specify multiple criteria. Generally not recommended const customMultipleCriterion = { numeric: "Does the output contain numeric information?", mathematical: "Does the output contain mathematical information?", grammatical: "Is the output grammatically correct?", logical: "Is the output logical?", }; const chain = await loadEvaluator("criteria", { criteria: customMultipleCriterion, }); const res2 = await chain.evaluateStrings({ input: query, prediction, }); console.log(res2); /* { reasoning: `Let's assess the submission based on the given criteria:1. Numeric: The output does not contain any numeric information. There are no numbers present in the joke.2. Mathematical: The output does contain mathematical information. The joke refers to the mathematical concept of squaring a number, and also mentions pi, a mathematical constant.3. Grammatical: The output is grammatically correct. The sentence structure and word usage are appropriate.4. 
Logical: The output is logical. The joke makes sense in that it plays on the words "square pie" and "square of pi".Based on this analysis, the submission does not meet all the criteria because it does not contain numeric information.N`, value: 'N', score: 0 } */
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/string/criteria_without_reference.ts
import { loadEvaluator } from "langchain/evaluation"; const evaluator = await loadEvaluator("criteria", { criteria: "conciseness" }); const res = await evaluator.evaluateStrings({ input: "What's 2+2?", prediction: "What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.", }); console.log({ res }); /* { res: { reasoning: `The criterion is conciseness, which means the submission should be brief and to the point. Looking at the submission, the answer to the question "What's 2+2?" is indeed "four". However, the respondent included additional information that was not necessary to answer the question, such as "That's an elementary question" and "The answer you're looking for is that two and two is". This additional information makes the response less concise than it could be. Therefore, the submission does not meet the criterion of conciseness.N`, value: 'N', score: '0' } } */
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/string/constitutional_criteria.ts
import { loadEvaluator } from "langchain/evaluation";
import { PRINCIPLES } from "langchain/chains";

// LangChain ships a catalog of constitutional principles that can be used
// directly as grading criteria.
console.log(`${Object.keys(PRINCIPLES).length} available principles`);
console.log(Object.entries(PRINCIPLES).slice(0, 5));
/*
  54 available principles
  Each entry pairs a name (e.g. 'harmful1', 'harmful2', 'insensitive') with a
  ConstitutionalPrinciple carrying a critiqueRequest (how to critique the
  response) and a revisionRequest (how to rewrite it).
*/

// Grade against the first "harmful" principle.
const chain = await loadEvaluator("criteria", {
  criteria: PRINCIPLES.harmful1,
});

console.log("beginning evaluation");
const res = await chain.evaluateStrings({
  input: "What do you think of Will?",
  prediction: "I say that man is a lilly-livered nincompoop",
});
console.log(res);
/*
  example: the grader walks through harm/ethics/toxicity and concludes the
  insult is harmful and toxic (though not racist, sexist, dangerous, or
  illegal), so the criterion is met:
  { reasoning: '…the submission does meet the criteria as it identifies ways
    in which the assistant's response could be harmful, unethical, and
    toxic. Y', value: 'Y', score: 1 }
*/
0
lc_public_repos/langchainjs/examples/src/guides/evaluation
lc_public_repos/langchainjs/examples/src/guides/evaluation/string/criteria_with_reference.ts
import { loadEvaluator } from "langchain/evaluation"; const evaluator = await loadEvaluator("labeled_criteria", { criteria: "correctness", }); console.log("beginning evaluation"); const res = await evaluator.evaluateStrings({ input: "What is the capital of the US?", prediction: "Topeka, KS", reference: "The capital of the US is Topeka, KS, where it permanently moved from Washington D.C. on May 16, 2023", }); console.log(res); /* { reasoning: 'The criterion for this task is the correctness of the submitted answer. The submission states that the capital of the US is Topeka, KS. The reference provided confirms that the capital of the US is indeed Topeka, KS, and it was moved there from Washington D.C. on May 16, 2023. Therefore, the submission is correct, accurate, and factual according to the reference provided. The submission meets the criterion.Y', value: 'Y', score: 1 } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/recursive_text_splitter.ts
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; export const run = async () => { const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. This is a weird text to write, but gotta test the splittingggg some how.\n\n Bye!\n\n-H.`; const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1, }); const output = await splitter.createDocuments([text]); console.log(output); };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/python_text_splitter.ts
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; const pythonCode = `def hello_world(): print("Hello, World!") # Call the function hello_world()`; const splitter = RecursiveCharacterTextSplitter.fromLanguage("python", { chunkSize: 32, chunkOverlap: 0, }); const pythonOutput = await splitter.createDocuments([pythonCode]); console.log(pythonOutput); /* [ Document { pageContent: 'def hello_world():', metadata: { loc: [Object] } }, Document { pageContent: 'print("Hello, World!")', metadata: { loc: [Object] } }, Document { pageContent: '# Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'hello_world()', metadata: { loc: [Object] } } ] */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/javascript_text_splitter.ts
import { SupportedTextSplitterLanguages, RecursiveCharacterTextSplitter, } from "@langchain/textsplitters"; console.log(SupportedTextSplitterLanguages); // Array of supported languages /* [ 'cpp', 'go', 'java', 'js', 'php', 'proto', 'python', 'rst', 'ruby', 'rust', 'scala', 'swift', 'markdown', 'latex', 'html' ] */ const jsCode = `function helloWorld() { console.log("Hello, World!"); } // Call the function helloWorld();`; const splitter = RecursiveCharacterTextSplitter.fromLanguage("js", { chunkSize: 32, chunkOverlap: 0, }); const jsOutput = await splitter.createDocuments([jsCode]); console.log(jsOutput); /* [ Document { pageContent: 'function helloWorld() {', metadata: { loc: [Object] } }, Document { pageContent: 'console.log("Hello, World!");', metadata: { loc: [Object] } }, Document { pageContent: '}\n// Call the function', metadata: { loc: [Object] } }, Document { pageContent: 'helloWorld();', metadata: { loc: [Object] } } ] */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/text_splitter.ts
import { CharacterTextSplitter } from "@langchain/textsplitters"; import { Document } from "@langchain/core/documents"; export const run = async () => { /* Split text */ const text = "foo bar baz 123"; const splitter = new CharacterTextSplitter({ separator: " ", chunkSize: 7, chunkOverlap: 3, }); const output = await splitter.createDocuments([text]); console.log({ output }); /* Split documents */ const docOutput = await splitter.splitDocuments([ new Document({ pageContent: text }), ]); console.log({ docOutput }); };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/token_text_splitter.ts
import { TokenTextSplitter } from "@langchain/textsplitters"; import fs from "fs"; import path from "path"; import { Document } from "@langchain/core/documents"; export const run = async () => { /* Split text */ const text = fs.readFileSync( path.resolve(__dirname, "../../state_of_the_union.txt"), "utf8" ); const splitter = new TokenTextSplitter({ encodingName: "r50k_base", chunkSize: 10, chunkOverlap: 0, allowedSpecial: ["<|endoftext|>"], disallowedSpecial: [], }); const output = await splitter.createDocuments([text]); console.log({ output }); const docOutput = await splitter.splitDocuments([ new Document({ pageContent: text }), ]); console.log({ docOutput }); };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/text_splitter_with_chunk_header.ts
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { CharacterTextSplitter } from "@langchain/textsplitters"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { createStuffDocumentsChain } from "langchain/chains/combine_documents"; import { createRetrievalChain } from "langchain/chains/retrieval"; const splitter = new CharacterTextSplitter({ chunkSize: 1536, chunkOverlap: 200, }); const jimDocs = await splitter.createDocuments( [`My favorite color is blue.`], [], { chunkHeader: `DOCUMENT NAME: Jim Interview\n\n---\n\n`, appendChunkOverlapHeader: true, } ); const pamDocs = await splitter.createDocuments( [`My favorite color is red.`], [], { chunkHeader: `DOCUMENT NAME: Pam Interview\n\n---\n\n`, appendChunkOverlapHeader: true, } ); const vectorstore = await HNSWLib.fromDocuments( jimDocs.concat(pamDocs), new OpenAIEmbeddings() ); const llm = new ChatOpenAI({ model: "gpt-3.5-turbo-1106", temperature: 0, }); const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([ [ "system", "Answer the user's questions based on the below context:\n\n{context}", ], ["human", "{input}"], ]); const combineDocsChain = await createStuffDocumentsChain({ llm, prompt: questionAnsweringPrompt, }); const chain = await createRetrievalChain({ retriever: vectorstore.asRetriever(), combineDocsChain, }); const res = await chain.invoke({ input: "What is Pam's favorite color?", }); console.log(JSON.stringify(res, null, 2)); /* { "input": "What is Pam's favorite color?", "chat_history": [], "context": [ { "pageContent": "DOCUMENT NAME: Pam Interview\n\n---\n\nMy favorite color is red.", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } }, { "pageContent": "DOCUMENT NAME: Jim Interview\n\n---\n\nMy favorite color is blue.", "metadata": { "loc": { "lines": { "from": 1, "to": 1 } } } } ], "answer": "Pam's favorite color is red." } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/latex_text_splitter.ts
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; const text = `\\begin{document} \\title{🦜️🔗 LangChain} ⚡ Building applications with LLMs through composability ⚡ \\section{Quick Install} \\begin{verbatim} Hopefully this code block isn't split yarn add langchain \\end{verbatim} As an open source project in a rapidly developing field, we are extremely open to contributions. \\end{document}`; const splitter = RecursiveCharacterTextSplitter.fromLanguage("latex", { chunkSize: 100, chunkOverlap: 0, }); const output = await splitter.createDocuments([text]); console.log(output); /* [ Document { pageContent: '\\begin{document}\n' + '\\title{🦜️🔗 LangChain}\n' + '⚡ Building applications with LLMs through composability ⚡', metadata: { loc: [Object] } }, Document { pageContent: '\\section{Quick Install}', metadata: { loc: [Object] } }, Document { pageContent: '\\begin{verbatim}\n' + "Hopefully this code block isn't split\n" + 'yarn add langchain\n' + '\\end{verbatim}', metadata: { loc: [Object] } }, Document { pageContent: 'As an open source project in a rapidly developing field, we are extremely open to contributions.', metadata: { loc: [Object] } }, Document { pageContent: '\\end{document}', metadata: { loc: [Object] } } ] */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/recursive_text_splitter_custom_separators.ts
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { Document } from "@langchain/core/documents";

const text = `Some other considerations include:

- Do you deploy your backend and frontend together, or separately?
- Do you deploy your backend co-located with your database, or separately?

**Production Support:** As you move your LangChains into production, we'd love to offer more hands-on support.
Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to share more about what you're building, and our team will get in touch.

## Deployment Options

See below for a list of deployment options for your LangChain app. If you don't see your preferred option, please get in touch and we can add it to this list.`;

// Custom separators override the defaults entirely — here the splitter only
// breaks on '|', '##', '>', and '-', so some chunks cut mid-word.
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 50,
  chunkOverlap: 1,
  separators: ["|", "##", ">", "-"],
});

const docOutput = await splitter.splitDocuments([
  new Document({ pageContent: text }),
]);
console.log(docOutput);
/*
  example output: chunks break on the custom separators (bullets and '##'),
  e.g. 'Some other considerations include:',
  '- Do you deploy your backend and frontend together', …,
  '#', '# Deployment Options\n\nSee below for a list of deployment options…'
*/
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/markdown_text_splitter.ts
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";

const text = `
---
sidebar_position: 1
---
# Document transformers

Once you've loaded documents, you'll often want to transform them to better suit your application. The simplest example
is you may want to split a long document into smaller chunks that can fit into your model's context window. LangChain
has a number of built-in document transformers that make it easy to split, combine, filter, and otherwise manipulate documents.

## Text splitters

When you want to deal with long pieces of text, it is necessary to split up that text into chunks.
As simple as this sounds, there is a lot of potential complexity here. Ideally, you want to keep the semantically related pieces of text together. What "semantically related" means could depend on the type of text.
This notebook showcases several ways to do that.

At a high level, text splitters work as following:

1. Split the text up into small, semantically meaningful chunks (often sentences).
2. Start combining these small chunks into a larger chunk until you reach a certain size (as measured by some function).
3. Once you reach that size, make that chunk its own piece of text and then start creating a new chunk of text with some overlap (to keep context between chunks).

That means there are two different axes along which you can customize your text splitter:

1. How the text is split
2. How the chunk size is measured

## Get started with text splitters

import GetStarted from "@snippets/modules/data_connection/document_transformers/get_started.mdx"

<GetStarted/>
`;

// "markdown" mode prefers breaking on markdown structure (headings, lists)
// before falling back to plain characters.
const splitter = RecursiveCharacterTextSplitter.fromLanguage("markdown", {
  chunkSize: 500,
  chunkOverlap: 0,
});

const output = await splitter.createDocuments([text]);
console.log(output);
/*
  example: five Documents, each starting at a markdown boundary — the
  front-matter + '# Document transformers' intro, the '## Text splitters'
  section, the numbered how-it-works list, the two customization axes, and
  the '## Get started with text splitters' section.
*/
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/indexes/html_text_splitter.ts
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; const text = `<!DOCTYPE html> <html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open source project in a rapidly developing field, we are extremely open to contributions. </div> </body> </html>`; const splitter = RecursiveCharacterTextSplitter.fromLanguage("html", { chunkSize: 175, chunkOverlap: 20, }); const output = await splitter.createDocuments([text]); console.log(output); /* [ Document { pageContent: '<!DOCTYPE html>\n<html>', metadata: { loc: [Object] } }, Document { pageContent: '<head>\n <title>🦜️🔗 LangChain</title>', metadata: { loc: [Object] } }, Document { pageContent: '<style>\n' + ' body {\n' + ' font-family: Arial, sans-serif;\n' + ' }\n' + ' h1 {\n' + ' color: darkblue;\n' + ' }\n' + ' </style>\n' + ' </head>', metadata: { loc: [Object] } }, Document { pageContent: '<body>\n' + ' <div>\n' + ' <h1>🦜️🔗 LangChain</h1>\n' + ' <p>⚡ Building applications with LLMs through composability ⚡</p>\n' + ' </div>', metadata: { loc: [Object] } }, Document { pageContent: '<div>\n' + ' As an open source project in a rapidly developing field, we are extremely open to contributions.\n' + ' </div>\n' + ' </body>\n' + '</html>', metadata: { loc: [Object] } } ] */
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/faiss_fromdocs.ts
import { FaissStore } from "@langchain/community/vectorstores/faiss"; import { OpenAIEmbeddings } from "@langchain/openai"; import { TextLoader } from "langchain/document_loaders/fs/text"; // Create docs with a loader const loader = new TextLoader("src/document_loaders/example_data/example.txt"); const docs = await loader.load(); // Load the docs into the vector store const vectorStore = await FaissStore.fromDocuments( docs, new OpenAIEmbeddings() ); // Search for the most similar document const resultOne = await vectorStore.similaritySearch("hello world", 1); console.log(resultOne);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/voy.ts
import { VoyVectorStore } from "@langchain/community/vectorstores/voy"; import { Voy as VoyClient } from "voy-search"; import { OpenAIEmbeddings } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; // Create Voy client using the library. const voyClient = new VoyClient(); // Create embeddings const embeddings = new OpenAIEmbeddings(); // Create the Voy store. const store = new VoyVectorStore(voyClient, embeddings); // Add two documents with some metadata. await store.addDocuments([ new Document({ pageContent: "How has life been treating you?", metadata: { foo: "Mike", }, }), new Document({ pageContent: "And I took it personally...", metadata: { foo: "Testing", }, }), ]); const model = new OpenAIEmbeddings(); const query = await model.embedQuery("And I took it personally"); // Perform a similarity search. const resultsWithScore = await store.similaritySearchVectorWithScore(query, 1); // Print the results. console.log(JSON.stringify(resultsWithScore, null, 2)); /* [ [ { "pageContent": "And I took it personally...", "metadata": { "foo": "Testing" } }, 0 ] ] */
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/clickhouse_search.ts
import { ClickHouseStore } from "@langchain/community/vectorstores/clickhouse"; import { OpenAIEmbeddings } from "@langchain/openai"; // Initialize ClickHouse store const vectorStore = await ClickHouseStore.fromExistingIndex( new OpenAIEmbeddings(), { host: process.env.CLICKHOUSE_HOST || "localhost", port: process.env.CLICKHOUSE_PORT || 8443, username: process.env.CLICKHOUSE_USER || "username", password: process.env.CLICKHOUSE_PASSWORD || "password", database: process.env.CLICKHOUSE_DATABASE || "default", table: process.env.CLICKHOUSE_TABLE || "vector_table", } ); // Sleep 1 second to ensure that the search occurs after the successful insertion of data. // eslint-disable-next-line no-promise-executor-return await new Promise((resolve) => setTimeout(resolve, 1000)); // Perform similarity search without filtering const results = await vectorStore.similaritySearch("hello world", 1); console.log(results); // Perform similarity search with filtering const filteredResults = await vectorStore.similaritySearch("hello world", 1, { whereStr: "metadata.name = '1'", }); console.log(filteredResults);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hnswlib_delete.ts
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { OpenAIEmbeddings } from "@langchain/openai"; // Save the vector store to a directory const directory = "your/directory/here"; // Load the vector store from the same directory const loadedVectorStore = await HNSWLib.load(directory, new OpenAIEmbeddings()); await loadedVectorStore.delete({ directory });
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/supabase_with_metadata_filter.ts
import { SupabaseVectorStore } from "@langchain/community/vectorstores/supabase";
import { OpenAIEmbeddings } from "@langchain/openai";
import { createClient } from "@supabase/supabase-js";

// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase

const privateKey = process.env.SUPABASE_PRIVATE_KEY;
if (!privateKey) throw new Error(`Expected env var SUPABASE_PRIVATE_KEY`);

const url = process.env.SUPABASE_URL;
if (!url) throw new Error(`Expected env var SUPABASE_URL`);

export async function run() {
  const client = createClient(url, privateKey);

  // Three identical texts, distinguishable only by their user_id metadata —
  // this is what the metadata filter below selects on.
  const vectorStore = await SupabaseVectorStore.fromTexts(
    ["Hello world", "Hello world", "Hello world"],
    [{ user_id: 2 }, { user_id: 1 }, { user_id: 3 }],
    new OpenAIEmbeddings(),
    {
      client,
      tableName: "documents",
      queryName: "match_documents",
    }
  );

  // Only the row whose metadata has user_id === 3 can be returned.
  const result = await vectorStore.similaritySearch("Hello world", 1, {
    user_id: 3,
  });

  console.log(result);
}
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/mongodb_mmr.ts
import { MongoDBAtlasVectorSearch } from "@langchain/mongodb"; import { CohereEmbeddings } from "@langchain/cohere"; import { MongoClient } from "mongodb"; const client = new MongoClient(process.env.MONGODB_ATLAS_URI || ""); const namespace = "langchain.test"; const [dbName, collectionName] = namespace.split("."); const collection = client.db(dbName).collection(collectionName); const vectorStore = new MongoDBAtlasVectorSearch( new CohereEmbeddings({ model: "embed-english-v3.0" }), { collection, indexName: "default", // The name of the Atlas search index. Defaults to "default" textKey: "text", // The name of the collection field containing the raw content. Defaults to "text" embeddingKey: "embedding", // The name of the collection field containing the embedded text. Defaults to "embedding" } ); const resultOne = await vectorStore.maxMarginalRelevanceSearch("Hello world", { k: 4, fetchK: 20, // The number of documents to return on initial fetch }); console.log(resultOne); // Using MMR in a vector store retriever const retriever = await vectorStore.asRetriever({ searchType: "mmr", searchKwargs: { fetchK: 20, lambda: 0.1, }, }); const retrieverOutput = await retriever.invoke("Hello world"); console.log(retrieverOutput); await client.close();
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/faiss_saveload.ts
import { FaissStore } from "@langchain/community/vectorstores/faiss"; import { OpenAIEmbeddings } from "@langchain/openai"; // Create a vector store through any method, here from texts as an example const vectorStore = await FaissStore.fromTexts( ["Hello world", "Bye bye", "hello nice world"], [{ id: 2 }, { id: 1 }, { id: 3 }], new OpenAIEmbeddings() ); // Save the vector store to a directory const directory = "your/directory/here"; await vectorStore.save(directory); // Load the vector store from the same directory const loadedVectorStore = await FaissStore.load( directory, new OpenAIEmbeddings() ); // vectorStore and loadedVectorStore are identical const result = await loadedVectorStore.similaritySearch("hello world", 1); console.log(result);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/supabase_deletion.ts
import { SupabaseVectorStore } from "@langchain/community/vectorstores/supabase";
import { OpenAIEmbeddings } from "@langchain/openai";
import { createClient } from "@supabase/supabase-js";

// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase

const privateKey = process.env.SUPABASE_PRIVATE_KEY;
if (!privateKey) throw new Error(`Expected env var SUPABASE_PRIVATE_KEY`);

const url = process.env.SUPABASE_URL;
if (!url) throw new Error(`Expected env var SUPABASE_URL`);

export async function run() {
  const client = createClient(url, privateKey);
  const embeddings = new OpenAIEmbeddings();

  const store = new SupabaseVectorStore(embeddings, {
    client,
    tableName: "documents",
  });

  const docs = [
    { pageContent: "hello", metadata: { b: 1, c: 9, stuff: "right" } },
    { pageContent: "hello", metadata: { b: 1, c: 9, stuff: "wrong" } },
  ];

  // addDocuments returns the generated row ids; it also accepts an
  // optional {ids: []} parameter for upsertion.
  const ids = await store.addDocuments(docs);

  const resultA = await store.similaritySearch("hello", 2);
  console.log(resultA);
  // Both "hello" documents come back before deletion.

  // Remove the rows we just inserted...
  await store.delete({ ids });

  // ...after which the same query matches nothing.
  const resultB = await store.similaritySearch("hello", 2);
  console.log(resultB);
  // []
}
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/weaviate_search.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ import weaviate, { ApiKey } from "weaviate-ts-client"; import { WeaviateStore } from "@langchain/weaviate"; import { OpenAIEmbeddings } from "@langchain/openai"; export async function run() { // Something wrong with the weaviate-ts-client types, so we need to disable const client = (weaviate as any).client({ scheme: process.env.WEAVIATE_SCHEME || "https", host: process.env.WEAVIATE_HOST || "localhost", apiKey: new ApiKey(process.env.WEAVIATE_API_KEY || "default"), }); // Create a store for an existing index const store = await WeaviateStore.fromExistingIndex(new OpenAIEmbeddings(), { client, indexName: "Test", metadataKeys: ["foo"], }); // Search the index without any filters const results = await store.similaritySearch("hello world", 1); console.log(results); /* [ Document { pageContent: 'hello world', metadata: { foo: 'bar' } } ] */ // Search the index with a filter, in this case, only return results where // the "foo" metadata key is equal to "baz", see the Weaviate docs for more // https://weaviate.io/developers/weaviate/api/graphql/filters const results2 = await store.similaritySearch("hello world", 1, { where: { operator: "Equal", path: ["foo"], valueText: "baz", }, }); console.log(results2); /* [ Document { pageContent: 'hi there', metadata: { foo: 'baz' } } ] */ }
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/myscale_search.ts
import { MyScaleStore } from "@langchain/community/vectorstores/myscale"; import { OpenAIEmbeddings } from "@langchain/openai"; const vectorStore = await MyScaleStore.fromExistingIndex( new OpenAIEmbeddings(), { host: process.env.MYSCALE_HOST || "localhost", port: process.env.MYSCALE_PORT || "8443", username: process.env.MYSCALE_USERNAME || "username", password: process.env.MYSCALE_PASSWORD || "password", database: "default", // defaults to "default" table: "your_table", // defaults to "vector_table" } ); const results = await vectorStore.similaritySearch("hello world", 1); console.log(results); const filteredResults = await vectorStore.similaritySearch("hello world", 1, { whereStr: "metadata.name = '1'", }); console.log(filteredResults);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/faiss_loadfrompython.ts
import { FaissStore } from "@langchain/community/vectorstores/faiss"; import { OpenAIEmbeddings } from "@langchain/openai"; // The directory of data saved from Python const directory = "your/directory/here"; // Load the vector store from the directory const loadedVectorStore = await FaissStore.loadFromPython( directory, new OpenAIEmbeddings() ); // Search for the most similar document const result = await loadedVectorStore.similaritySearch("test", 2); console.log("result", result);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/singlestore_hybrid_search.ts
import { SingleStoreVectorStore } from "@langchain/community/vectorstores/singlestore";
import { OpenAIEmbeddings } from "@langchain/openai";

export const run = async () => {
  // Three "rain" passages and three "snow" passages, tagged by category.
  const passages = [
    "In the parched desert, a sudden rainstorm brought relief, as the droplets danced upon the thirsty earth, rejuvenating the landscape with the sweet scent of petrichor.",
    "Amidst the bustling cityscape, the rain fell relentlessly, creating a symphony of pitter-patter on the pavement, while umbrellas bloomed like colorful flowers in a sea of gray.",
    "High in the mountains, the rain transformed into a delicate mist, enveloping the peaks in a mystical veil, where each droplet seemed to whisper secrets to the ancient rocks below.",
    "Blanketing the countryside in a soft, pristine layer, the snowfall painted a serene tableau, muffling the world in a tranquil hush as delicate flakes settled upon the branches of trees like nature's own lacework.",
    "In the urban landscape, snow descended, transforming bustling streets into a winter wonderland, where the laughter of children echoed amidst the flurry of snowballs and the twinkle of holiday lights.",
    "Atop the rugged peaks, snow fell with an unyielding intensity, sculpting the landscape into a pristine alpine paradise, where the frozen crystals shimmered under the moonlight, casting a spell of enchantment over the wilderness below.",
  ];
  const categories = [
    { category: "rain" },
    { category: "rain" },
    { category: "rain" },
    { category: "snow" },
    { category: "snow" },
    { category: "snow" },
  ];

  // Full-text + vector indexes are both enabled so every hybrid search
  // strategy below can be exercised against the same table.
  const vectorStore = await SingleStoreVectorStore.fromTexts(
    passages,
    categories,
    new OpenAIEmbeddings(),
    {
      connectionOptions: {
        host: process.env.SINGLESTORE_HOST,
        port: Number(process.env.SINGLESTORE_PORT),
        user: process.env.SINGLESTORE_USERNAME,
        password: process.env.SINGLESTORE_PASSWORD,
        database: process.env.SINGLESTORE_DATABASE,
      },
      distanceMetric: "DOT_PRODUCT",
      useVectorIndex: true,
      useFullTextIndex: true,
    }
  );

  const query = "rainstorm in parched desert, rain";

  // Default (vector) search, restricted to the "rain" category.
  const [vectorHit] = await vectorStore.similaritySearch(query, 1, {
    category: "rain",
  });
  console.log(vectorHit.pageContent);

  // Exercise each hybrid strategy in turn; order matches the original
  // example (TEXT_ONLY, FILTER_BY_TEXT, FILTER_BY_VECTOR, WEIGHTED_SUM).
  const strategies = [
    { searchStrategy: "TEXT_ONLY" },
    { searchStrategy: "FILTER_BY_TEXT", filterThreshold: 0.1 },
    { searchStrategy: "FILTER_BY_VECTOR", filterThreshold: 0.1 },
    {
      searchStrategy: "WEIGHTED_SUM",
      textWeight: 0.2,
      vectorWeight: 0.8,
      vectorselectCountMultiplier: 10,
    },
  ] as const;

  for (const config of strategies) {
    await vectorStore.setSearchConfig(config);
    const [best] = await vectorStore.similaritySearch(query, 1);
    console.log(best.pageContent);
  }

  await vectorStore.end();
};
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/closevector_fromdocs.ts
// If you want to import the browser version, use the following line instead: // import { CloseVectorWeb } from "@langchain/community/vectorstores/closevector/web"; import { CloseVectorNode } from "@langchain/community/vectorstores/closevector/node"; import { OpenAIEmbeddings } from "@langchain/openai"; import { TextLoader } from "langchain/document_loaders/fs/text"; // Create docs with a loader const loader = new TextLoader("src/document_loaders/example_data/example.txt"); const docs = await loader.load(); // Load the docs into the vector store // If you want to import the browser version, use the following line instead: // const vectorStore = await CloseVectorWeb.fromDocuments( const vectorStore = await CloseVectorNode.fromDocuments( docs, new OpenAIEmbeddings() ); // Search for the most similar document const resultOne = await vectorStore.similaritySearch("hello world", 1); console.log(resultOne);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/supabase_with_maximum_marginal_relevance.ts
import { SupabaseVectorStore } from "@langchain/community/vectorstores/supabase";
import { OpenAIEmbeddings } from "@langchain/openai";
import { createClient } from "@supabase/supabase-js";

// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase

const privateKey = process.env.SUPABASE_PRIVATE_KEY;
if (!privateKey) throw new Error(`Expected env var SUPABASE_PRIVATE_KEY`);

const url = process.env.SUPABASE_URL;
if (!url) throw new Error(`Expected env var SUPABASE_URL`);

export async function run() {
  const client = createClient(url, privateKey);

  const vectorStore = await SupabaseVectorStore.fromTexts(
    ["Hello world", "Bye bye", "What's this?"],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings(),
    {
      client,
      tableName: "documents",
      queryName: "match_documents",
    }
  );

  // Maximal-marginal-relevance search: trades off similarity to the
  // query against diversity among the returned documents.
  const resultOne = await vectorStore.maxMarginalRelevanceSearch(
    "Hello world",
    { k: 1 }
  );

  console.log(resultOne);
}
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/singlestore_with_metadata_filter.ts
import { SingleStoreVectorStore } from "@langchain/community/vectorstores/singlestore";
import { OpenAIEmbeddings } from "@langchain/openai";

export const run = async () => {
  // Two English and two Portuguese greetings, tagged by language so the
  // metadata filter below can select between them.
  const vectorStore = await SingleStoreVectorStore.fromTexts(
    ["Good afternoon", "Bye bye", "Boa tarde!", "Até logo!"],
    [
      { id: 1, language: "English" },
      { id: 2, language: "English" },
      { id: 3, language: "Portugese" },
      { id: 4, language: "Portugese" },
    ],
    new OpenAIEmbeddings(),
    {
      connectionOptions: {
        host: process.env.SINGLESTORE_HOST,
        port: Number(process.env.SINGLESTORE_PORT),
        user: process.env.SINGLESTORE_USERNAME,
        password: process.env.SINGLESTORE_PASSWORD,
        database: process.env.SINGLESTORE_DATABASE,
      },
      distanceMetric: "EUCLIDEAN_DISTANCE",
    }
  );

  // Only documents whose metadata.language matches can be returned.
  const resultOne = await vectorStore.similaritySearch("greetings", 1, {
    language: "Portugese",
  });
  console.log(resultOne);

  // Release the underlying connection pool.
  await vectorStore.end();
};
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/closevector_saveload.ts
// If you want to import the browser version, use the following line instead: // import { CloseVectorWeb } from "@langchain/community/vectorstores/closevector/web"; import { CloseVectorNode } from "@langchain/community/vectorstores/closevector/node"; import { OpenAIEmbeddings } from "@langchain/openai"; // Create a vector store through any method, here from texts as an example // If you want to import the browser version, use the following line instead: // const vectorStore = await CloseVectorWeb.fromTexts( const vectorStore = await CloseVectorNode.fromTexts( ["Hello world", "Bye bye", "hello nice world"], [{ id: 2 }, { id: 1 }, { id: 3 }], new OpenAIEmbeddings() ); // Save the vector store to a directory const directory = "your/directory/here"; await vectorStore.save(directory); // Load the vector store from the same directory // If you want to import the browser version, use the following line instead: // const loadedVectorStore = await CloseVectorWeb.load( const loadedVectorStore = await CloseVectorNode.load( directory, new OpenAIEmbeddings() ); // vectorStore and loadedVectorStore are identical const result = await loadedVectorStore.similaritySearch("hello world", 1); console.log(result);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/mongodb_metadata_filtering.ts
import { MongoDBAtlasVectorSearch } from "@langchain/mongodb"; import { CohereEmbeddings } from "@langchain/cohere"; import { MongoClient } from "mongodb"; import { sleep } from "langchain/util/time"; const client = new MongoClient(process.env.MONGODB_ATLAS_URI || ""); const namespace = "langchain.test"; const [dbName, collectionName] = namespace.split("."); const collection = client.db(dbName).collection(collectionName); const vectorStore = new MongoDBAtlasVectorSearch( new CohereEmbeddings({ model: "embed-english-v3.0" }), { collection, indexName: "default", // The name of the Atlas search index. Defaults to "default" textKey: "text", // The name of the collection field containing the raw content. Defaults to "text" embeddingKey: "embedding", // The name of the collection field containing the embedded text. Defaults to "embedding" } ); await vectorStore.addDocuments([ { pageContent: "Hey hey hey", metadata: { docstore_document_id: "somevalue" }, }, ]); const retriever = vectorStore.asRetriever({ filter: { preFilter: { docstore_document_id: { $eq: "somevalue", }, }, }, }); // Mongo has a slight processing delay between ingest and availability await sleep(2000); const results = await retriever.invoke("goodbye"); console.log(results); await client.close();
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/faiss.ts
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { OpenAIEmbeddings } from "@langchain/openai";

export async function run() {
  // Index three short texts with per-text metadata.
  const store = await FaissStore.fromTexts(
    ["Hello world", "Bye bye", "hello nice world"],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings()
  );

  // Return the single closest match for the query.
  const topMatch = await store.similaritySearch("hello world", 1);
  console.log(topMatch);
}
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/mongodb_atlas_search.ts
import { MongoDBAtlasVectorSearch } from "@langchain/mongodb"; import { CohereEmbeddings } from "@langchain/cohere"; import { MongoClient } from "mongodb"; const client = new MongoClient(process.env.MONGODB_ATLAS_URI || ""); const namespace = "langchain.test"; const [dbName, collectionName] = namespace.split("."); const collection = client.db(dbName).collection(collectionName); const vectorStore = new MongoDBAtlasVectorSearch( new CohereEmbeddings({ model: "embed-english-v3.0" }), { collection, indexName: "default", // The name of the Atlas search index. Defaults to "default" textKey: "text", // The name of the collection field containing the raw content. Defaults to "text" embeddingKey: "embedding", // The name of the collection field containing the embedded text. Defaults to "embedding" } ); const resultOne = await vectorStore.similaritySearch("Hello world", 1); console.log(resultOne); await client.close();
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/memory.ts
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";

// An in-memory (non-persistent) vector store built from raw texts.
const store = await MemoryVectorStore.fromTexts(
  ["Hello world", "Bye bye", "hello nice world"],
  [{ id: 2 }, { id: 1 }, { id: 3 }],
  new OpenAIEmbeddings()
);

const topMatch = await store.similaritySearch("hello world", 1);
console.log(topMatch);
/*
  [ Document { pageContent: "Hello world", metadata: { id: 2 } } ]
*/
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/weaviate_delete.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
import weaviate, { ApiKey } from "weaviate-ts-client";
import { WeaviateStore } from "@langchain/weaviate";
import { OpenAIEmbeddings } from "@langchain/openai";

export async function run() {
  // The weaviate-ts-client typings are unreliable, hence the `any` cast.
  const client = (weaviate as any).client({
    scheme: process.env.WEAVIATE_SCHEME || "https",
    host: process.env.WEAVIATE_HOST || "localhost",
    apiKey: new ApiKey(process.env.WEAVIATE_API_KEY || "default"),
  });

  // Attach to an existing index.
  const store = await WeaviateStore.fromExistingIndex(new OpenAIEmbeddings(), {
    client,
    indexName: "Test",
    metadataKeys: ["foo"],
  });

  // --- Deletion by id -------------------------------------------------
  const docs = [{ pageContent: "see ya!", metadata: { foo: "bar" } }];

  // addDocuments returns generated ids; it also supports an additional
  // {ids: []} parameter for upsertion.
  const ids = await store.addDocuments(docs);

  const results = await store.similaritySearch("see ya!", 1);
  console.log(results);
  /*
    [ Document { pageContent: 'see ya!', metadata: { foo: 'bar' } } ]
  */

  await store.delete({ ids });

  const results2 = await store.similaritySearch("see ya!", 1);
  console.log(results2);
  /*
    []
  */

  // --- Deletion by filter ---------------------------------------------
  const docs2 = [
    { pageContent: "hello world", metadata: { foo: "bar" } },
    { pageContent: "hi there", metadata: { foo: "baz" } },
    { pageContent: "how are you", metadata: { foo: "qux" } },
    { pageContent: "hello world", metadata: { foo: "bar" } },
    { pageContent: "bye now", metadata: { foo: "bar" } },
  ];

  await store.addDocuments(docs2);

  const results3 = await store.similaritySearch("hello world", 1);
  console.log(results3);
  /*
    [ Document { pageContent: 'hello world', metadata: { foo: 'bar' } } ]
  */

  // Delete every document whose metadata key "foo" equals "bar".
  await store.delete({
    filter: {
      where: {
        operator: "Equal",
        path: ["foo"],
        valueText: "bar",
      },
    },
  });

  // The same filtered query now matches nothing.
  const results4 = await store.similaritySearch("hello world", 1, {
    where: {
      operator: "Equal",
      path: ["foo"],
      valueText: "bar",
    },
  });
  console.log(results4);
  /*
    []
  */
}
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/myscale_fromTexts.ts
import { MyScaleStore } from "@langchain/community/vectorstores/myscale"; import { OpenAIEmbeddings } from "@langchain/openai"; const vectorStore = await MyScaleStore.fromTexts( ["Hello world", "Bye bye", "hello nice world"], [ { id: 2, name: "2" }, { id: 1, name: "1" }, { id: 3, name: "3" }, ], new OpenAIEmbeddings(), { host: process.env.MYSCALE_HOST || "localhost", port: process.env.MYSCALE_PORT || "8443", username: process.env.MYSCALE_USERNAME || "username", password: process.env.MYSCALE_PASSWORD || "password", database: "default", // defaults to "default" table: "your_table", // defaults to "vector_table" } ); const results = await vectorStore.similaritySearch("hello world", 1); console.log(results); const filteredResults = await vectorStore.similaritySearch("hello world", 1, { whereStr: "metadata.name = '1'", }); console.log(filteredResults);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/memory_fromdocs.ts
import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings } from "@langchain/openai"; import { TextLoader } from "langchain/document_loaders/fs/text"; // Create docs with a loader const loader = new TextLoader("src/document_loaders/example_data/example.txt"); const docs = await loader.load(); // Load the docs into the vector store const vectorStore = await MemoryVectorStore.fromDocuments( docs, new OpenAIEmbeddings() ); // Search for the most similar document const resultOne = await vectorStore.similaritySearch("hello world", 1); console.log(resultOne); /* [ Document { pageContent: "Hello world", metadata: { id: 2 } } ] */
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hnswlib_saveload.ts
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { OpenAIEmbeddings } from "@langchain/openai"; // Create a vector store through any method, here from texts as an example const vectorStore = await HNSWLib.fromTexts( ["Hello world", "Bye bye", "hello nice world"], [{ id: 2 }, { id: 1 }, { id: 3 }], new OpenAIEmbeddings() ); // Save the vector store to a directory const directory = "your/directory/here"; await vectorStore.save(directory); // Load the vector store from the same directory const loadedVectorStore = await HNSWLib.load(directory, new OpenAIEmbeddings()); // vectorStore and loadedVectorStore are identical const result = await loadedVectorStore.similaritySearch("hello world", 1); console.log(result);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/astra.ts
import { OpenAIEmbeddings } from "@langchain/openai";
import {
  AstraDBVectorStore,
  AstraLibArgs,
} from "@langchain/community/vectorstores/astradb";

// Connection + collection settings; the collection is created with a
// 1536-dimension cosine vector index (matching OpenAI embeddings).
const astraConfig: AstraLibArgs = {
  token: process.env.ASTRA_DB_APPLICATION_TOKEN as string,
  endpoint: process.env.ASTRA_DB_ENDPOINT as string,
  collection: process.env.ASTRA_DB_COLLECTION ?? "langchain_test",
  collectionOptions: {
    vector: {
      dimension: 1536,
      metric: "cosine",
    },
  },
};

const vectorStore = await AstraDBVectorStore.fromTexts(
  [
    "AstraDB is built on Apache Cassandra",
    "AstraDB is a NoSQL DB",
    "AstraDB supports vector search",
  ],
  [{ foo: "foo" }, { foo: "bar" }, { foo: "baz" }],
  new OpenAIEmbeddings(),
  astraConfig
);

// Querying docs:
const results = await vectorStore.similaritySearch("Cassandra", 1);

// or filtered query:
const filteredQueryResults = await vectorStore.similaritySearch("A", 1, {
  foo: "bar",
});
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/clickhouse_fromTexts.ts
import { ClickHouseStore } from "@langchain/community/vectorstores/clickhouse"; import { OpenAIEmbeddings } from "@langchain/openai"; // Initialize ClickHouse store from texts const vectorStore = await ClickHouseStore.fromTexts( ["Hello world", "Bye bye", "hello nice world"], [ { id: 2, name: "2" }, { id: 1, name: "1" }, { id: 3, name: "3" }, ], new OpenAIEmbeddings(), { host: process.env.CLICKHOUSE_HOST || "localhost", port: process.env.CLICKHOUSE_PORT || 8443, username: process.env.CLICKHOUSE_USER || "username", password: process.env.CLICKHOUSE_PASSWORD || "password", database: process.env.CLICKHOUSE_DATABASE || "default", table: process.env.CLICKHOUSE_TABLE || "vector_table", } ); // Sleep 1 second to ensure that the search occurs after the successful insertion of data. // eslint-disable-next-line no-promise-executor-return await new Promise((resolve) => setTimeout(resolve, 1000)); // Perform similarity search without filtering const results = await vectorStore.similaritySearch("hello world", 1); console.log(results); // Perform similarity search with filtering const filteredResults = await vectorStore.similaritySearch("hello world", 1, { whereStr: "metadata.name = '1'", }); console.log(filteredResults);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/weaviate_mmr.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ import weaviate, { ApiKey } from "weaviate-ts-client"; import { WeaviateStore } from "@langchain/weaviate"; import { OpenAIEmbeddings } from "@langchain/openai"; export async function run() { // Something wrong with the weaviate-ts-client types, so we need to disable const client = (weaviate as any).client({ scheme: process.env.WEAVIATE_SCHEME || "https", host: process.env.WEAVIATE_HOST || "localhost", apiKey: new ApiKey(process.env.WEAVIATE_API_KEY || "default"), }); // Create a store for an existing index const store = await WeaviateStore.fromExistingIndex(new OpenAIEmbeddings(), { client, indexName: "Test", metadataKeys: ["foo"], }); const resultOne = await store.maxMarginalRelevanceSearch("Hello world", { k: 1, }); console.log(resultOne); }
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/vectara.ts
import { VectaraStore } from "@langchain/community/vectorstores/vectara"; import { VectaraSummaryRetriever } from "@langchain/community/retrievers/vectara_summary"; import { Document } from "@langchain/core/documents"; // Create the Vectara store. const store = new VectaraStore({ customerId: Number(process.env.VECTARA_CUSTOMER_ID), corpusId: Number(process.env.VECTARA_CORPUS_ID), apiKey: String(process.env.VECTARA_API_KEY), verbose: true, }); // Add two documents with some metadata. const doc_ids = await store.addDocuments([ new Document({ pageContent: "Do I dare to eat a peach?", metadata: { foo: "baz", }, }), new Document({ pageContent: "In the room the women come and go talking of Michelangelo", metadata: { foo: "bar", }, }), ]); // Perform a similarity search. const resultsWithScore = await store.similaritySearchWithScore( "What were the women talking about?", 1, { lambda: 0.025, } ); // Print the results. console.log(JSON.stringify(resultsWithScore, null, 2)); /* [ [ { "pageContent": "In the room the women come and go talking of Michelangelo", "metadata": { "lang": "eng", "offset": "0", "len": "57", "foo": "bar" } }, 0.4678752 ] ] */ const retriever = new VectaraSummaryRetriever({ vectara: store, topK: 3 }); const documents = await retriever.invoke("What were the women talking about?"); console.log(JSON.stringify(documents, null, 2)); /* [ { "pageContent": "<b>In the room the women come and go talking of Michelangelo</b>", "metadata": { "lang": "eng", "offset": "0", "len": "57", "foo": "bar" } }, { "pageContent": "<b>In the room the women come and go talking of Michelangelo</b>", "metadata": { "lang": "eng", "offset": "0", "len": "57", "foo": "bar" } }, { "pageContent": "<b>In the room the women come and go talking of Michelangelo</b>", "metadata": { "lang": "eng", "offset": "0", "len": "57", "foo": "bar" } } ] */ // Delete the documents. await store.deleteDocuments(doc_ids);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/analyticdb.ts
import { AnalyticDBVectorStore } from "@langchain/community/vectorstores/analyticdb";
import { OpenAIEmbeddings } from "@langchain/openai";

// Postgres-compatible connection settings for AnalyticDB,
// sourced from the environment with local-dev fallbacks.
const connectionOptions = {
  host: process.env.ANALYTICDB_HOST || "localhost",
  port: Number(process.env.ANALYTICDB_PORT) || 5432,
  database: process.env.ANALYTICDB_DATABASE || "your_database",
  user: process.env.ANALYTICDB_USERNAME || "username",
  password: process.env.ANALYTICDB_PASSWORD || "password",
};

// Build the store from three texts with per-text page metadata.
const vectorStore = await AnalyticDBVectorStore.fromTexts(
  ["foo", "bar", "baz"],
  [{ page: 1 }, { page: 2 }, { page: 3 }],
  new OpenAIEmbeddings(),
  { connectionOptions }
);

// Plain similarity search, no metadata filter.
const result = await vectorStore.similaritySearch("foo", 1);
console.log(JSON.stringify(result));
// [{"pageContent":"foo","metadata":{"page":1}}]

// Add a fourth document sharing the text "foo" but with page 4.
await vectorStore.addDocuments([{ pageContent: "foo", metadata: { page: 4 } }]);

// Filtered search: only documents whose metadata matches { page: 4 }.
const filterResult = await vectorStore.similaritySearch("foo", 1, {
  page: 4,
});
console.log(JSON.stringify(filterResult));
// [{"pageContent":"foo","metadata":{"page":4}}]

// Filtered search that also returns the distance score.
const filterWithScoreResult = await vectorStore.similaritySearchWithScore(
  "foo",
  1,
  { page: 3 }
);
console.log(JSON.stringify(filterWithScoreResult));
// [[{"pageContent":"baz","metadata":{"page":3}},0.26075905561447144]]

// A filter matching nothing yields an empty result set.
const filterNoMatchResult = await vectorStore.similaritySearchWithScore(
  "foo",
  1,
  { page: 5 }
);
console.log(JSON.stringify(filterNoMatchResult));
// []

// need to manually close the Connection pool
await vectorStore.end();
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/rockset.ts
import * as rockset from "@rockset/client";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RocksetStore } from "@langchain/community/vectorstores/rockset";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { readFileSync } from "fs";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";

// Create a fresh Rockset collection backing the vector store.
// The API region defaults to usw2a1 when not configured.
const store = await RocksetStore.withNewCollection(new OpenAIEmbeddings(), {
  client: rockset.default.default(
    process.env.ROCKSET_API_KEY ?? "",
    `https://api.${process.env.ROCKSET_API_REGION ?? "usw2a1"}.rockset.com`
  ),
  collectionName: "langchain_demo",
});

const model = new ChatOpenAI({ model: "gpt-3.5-turbo-1106" });

// Prompt that grounds the model's answer in the retrieved context.
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "Answer the user's questions based on the below context:\n\n{context}",
  ],
  ["human", "{input}"],
]);

// Stuff all retrieved documents into the prompt's {context} slot.
const combineDocsChain = await createStuffDocumentsChain({
  llm: model,
  prompt: questionAnsweringPrompt,
});

// Retrieval chain: fetch from the store, then answer via the stuff chain.
const chain = await createRetrievalChain({
  retriever: store.asRetriever(),
  combineDocsChain,
});

// Split the speech into chunks and index them.
const text = readFileSync("state_of_the_union.txt", "utf8");
const docs = await new RecursiveCharacterTextSplitter().createDocuments([text]);
await store.addDocuments(docs);

const response = await chain.invoke({
  input: "When was America founded?",
});
console.log(response.answer);

// Tear down the collection created above.
await store.destroy();
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hnswlib.ts
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings } from "@langchain/openai";

// Build an in-memory HNSWLib store from three texts and their id metadata.
const vectorStore = await HNSWLib.fromTexts(
  ["Hello world", "Bye bye", "hello nice world"],
  [{ id: 2 }, { id: 1 }, { id: 3 }],
  new OpenAIEmbeddings()
);

// Return the single nearest document to the query.
const resultOne = await vectorStore.similaritySearch("hello world", 1);
console.log(resultOne);
0
lc_public_repos/langchainjs/examples/src/indexes
lc_public_repos/langchainjs/examples/src/indexes/vector_stores/faiss_delete.ts
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Document } from "@langchain/core/documents";

// Empty FAISS store; documents are added below with caller-supplied ids.
const vectorStore = new FaissStore(new OpenAIEmbeddings(), {});

// Explicit ids let us target specific documents for deletion later.
const ids = ["2", "1", "4"];

const idsReturned = await vectorStore.addDocuments(
  [
    new Document({
      pageContent: "my world",
      metadata: { tag: 2 },
    }),
    new Document({
      pageContent: "our world",
      metadata: { tag: 1 },
    }),
    new Document({
      pageContent: "your world",
      metadata: { tag: 4 },
    }),
  ],
  {
    ids,
  }
);

// addDocuments echoes back the ids that were assigned.
console.log(idsReturned);

/*
  [ '2', '1', '4' ]
*/

// All three documents are present before deletion.
const docs = await vectorStore.similaritySearch("my world", 3);

console.log(docs);

/*
[
  Document { pageContent: 'my world', metadata: { tag: 2 } },
  Document { pageContent: 'your world', metadata: { tag: 4 } },
  Document { pageContent: 'our world', metadata: { tag: 1 } }
]
*/

// Remove the first two documents by id.
await vectorStore.delete({ ids: [ids[0], ids[1]] });

// Only the third document remains after deletion.
const docs2 = await vectorStore.similaritySearch("my world", 3);

console.log(docs2);

/*
[
  Document { pageContent: 'your world', metadata: { tag: 4 } }
]
*/