index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/stores/upstash_redis_storage.ts
import { Redis } from "@upstash/redis"; import { UpstashRedisStore } from "@langchain/community/storage/upstash_redis"; import { AIMessage, HumanMessage } from "@langchain/core/messages"; // Pro tip: define a helper function for getting your client // along with handling the case where your environment variables // are not set. const getClient = () => { if ( !process.env.UPSTASH_REDIS_REST_URL || !process.env.UPSTASH_REDIS_REST_TOKEN ) { throw new Error( "UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN must be set in the environment" ); } const client = new Redis({ url: process.env.UPSTASH_REDIS_REST_URL, token: process.env.UPSTASH_REDIS_REST_TOKEN, }); return client; }; // Define the client and store const client = getClient(); const store = new UpstashRedisStore({ client, }); // Define our encoder/decoder for converting between strings and Uint8Arrays const encoder = new TextEncoder(); const decoder = new TextDecoder(); /** * Here you would define your LLM and chat chain, call * the LLM and eventually get a list of messages. * For this example, we'll assume we already have a list. */ const messages = Array.from({ length: 5 }).map((_, index) => { if (index % 2 === 0) { return new AIMessage("ai stuff..."); } return new HumanMessage("human stuff..."); }); // Set your messages in the store // The key will be prefixed with `message:id:` and end // with the index. await store.mset( messages.map((message, index) => [ `message:id:${index}`, encoder.encode(JSON.stringify(message)), ]) ); // Now you can get your messages from the store const retrievedMessages = await store.mget(["message:id:0", "message:id:1"]); // Make sure to decode the values console.log(retrievedMessages.map((v) => decoder.decode(v))); /** [ '{"id":["langchain","AIMessage"],"kwargs":{"content":"ai stuff..."}}', '{"id":["langchain","HumanMessage"],"kwargs":{"content":"human stuff..."}}' ] */ // Or, if you want to get back all the keys you can call // the `yieldKeys` method. 
// Optionally, you can pass a key prefix to only get back // keys which match that prefix. const yieldedKeys = []; for await (const key of store.yieldKeys("message:id")) { yieldedKeys.push(key); } // The keys are not encoded, so no decoding is necessary console.log(yieldedKeys); /** [ 'message:id:2', 'message:id:1', 'message:id:3', 'message:id:0', 'message:id:4' ] */ // Finally, let's delete the keys from the store await store.mdelete(yieldedKeys);
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/stores/file_system_storage.ts
import fs from "fs"; import { LocalFileStore } from "langchain/storage/file_system"; import { AIMessage, HumanMessage } from "@langchain/core/messages"; // Instantiate the store using the `fromPath` method. const store = await LocalFileStore.fromPath("./messages"); // Define our encoder/decoder for converting between strings and Uint8Arrays const encoder = new TextEncoder(); const decoder = new TextDecoder(); /** * Here you would define your LLM and chat chain, call * the LLM and eventually get a list of messages. * For this example, we'll assume we already have a list. */ const messages = Array.from({ length: 5 }).map((_, index) => { if (index % 2 === 0) { return new AIMessage("ai stuff..."); } return new HumanMessage("human stuff..."); }); // Set your messages in the store // The key will be prefixed with `message:id:` and end // with the index. await store.mset( messages.map((message, index) => [ `message:id:${index}`, encoder.encode(JSON.stringify(message)), ]) ); // Now you can get your messages from the store const retrievedMessages = await store.mget(["message:id:0", "message:id:1"]); // Make sure to decode the values console.log(retrievedMessages.map((v) => decoder.decode(v))); /** [ '{"id":["langchain","AIMessage"],"kwargs":{"content":"ai stuff..."}}', '{"id":["langchain","HumanMessage"],"kwargs":{"content":"human stuff..."}}' ] */ // Or, if you want to get back all the keys you can call // the `yieldKeys` method. // Optionally, you can pass a key prefix to only get back // keys which match that prefix. const yieldedKeys = []; for await (const key of store.yieldKeys("message:id:")) { yieldedKeys.push(key); } // The keys are not encoded, so no decoding is necessary console.log(yieldedKeys); /** [ 'message:id:2', 'message:id:1', 'message:id:3', 'message:id:0', 'message:id:4' ] */ // Finally, let's delete the keys from the store // and delete the file. await store.mdelete(yieldedKeys); await fs.promises.rm("./messages", { recursive: true, force: true });
0
lc_public_repos/langchainjs/examples/src/caches
lc_public_repos/langchainjs/examples/src/caches/azure_cosmosdb_nosql/azure_cosmosdb_nosql.ts
import { AzureCosmosDBNoSQLConfig, AzureCosmosDBNoSQLSemanticCache, } from "@langchain/azure-cosmosdb"; import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; const embeddings = new OpenAIEmbeddings(); const config: AzureCosmosDBNoSQLConfig = { databaseName: "<DATABASE_NAME>", containerName: "<CONTAINER_NAME>", // use endpoint to initiate client with managed identity connectionString: "<CONNECTION_STRING>", }; /** * Sets the threshold similarity score for returning cached results based on vector distance. * Cached output is returned only if the similarity score meets or exceeds this threshold; * otherwise, a new result is generated. Default is 0.6, adjustable via the constructor * to suit various distance functions and use cases. * (see: https://learn.microsoft.com/azure/cosmos-db/nosql/query/vectordistance). */ const similarityScoreThreshold = 0.5; const cache = new AzureCosmosDBNoSQLSemanticCache( embeddings, config, similarityScoreThreshold ); const model = new ChatOpenAI({ cache }); // Invoke the model to perform an action const response1 = await model.invoke("Do something random!"); console.log(response1); /* AIMessage { content: "Sure! I'll generate a random number for you: 37", additional_kwargs: {} } */ const response2 = await model.invoke("Do something random!"); console.log(response2); /* AIMessage { content: "Sure! I'll generate a random number for you: 37", additional_kwargs: {} } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/structured_parser.ts
import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; // With a `StructuredOutputParser` we can define a schema for the output. const parser = StructuredOutputParser.fromNamesAndDescriptions({ answer: "answer to the user's question", source: "source used to answer the user's question, should be a website.", }); const formatInstructions = parser.getFormatInstructions(); const prompt = new PromptTemplate({ template: "Answer the users question as best as possible.\n{format_instructions}\n{question}", inputVariables: ["question"], partialVariables: { format_instructions: formatInstructions }, }); const model = new OpenAI({ temperature: 0 }); const input = await prompt.format({ question: "What is the capital of France?", }); const response = await model.invoke(input); console.log(input); /* Answer the users question as best as possible. You must format your output as a JSON value that adheres to a given "JSON Schema" instance. "JSON Schema" is a declarative language that allows you to annotate and validate JSON documents. For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}} would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings. Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas! 
Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock: ```json {"type":"object","properties":{"answer":{"type":"string","description":"answer to the user's question"},"source":{"type":"string","description":"source used to answer the user's question, should be a website."}},"required":["answer","source"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"} ``` What is the capital of France? */ console.log(response); /* {"answer": "Paris", "source": "https://en.wikipedia.org/wiki/Paris"} */ console.log(await parser.parse(response)); // { answer: 'Paris', source: 'https://en.wikipedia.org/wiki/Paris' }
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/bytes_output_parser_sequence.ts
import { ChatOpenAI } from "@langchain/openai"; import { BytesOutputParser } from "@langchain/core/output_parsers"; import { RunnableSequence } from "@langchain/core/runnables"; const chain = RunnableSequence.from([ new ChatOpenAI({ temperature: 0 }), new BytesOutputParser(), ]); const stream = await chain.stream("Hello there!"); const decoder = new TextDecoder(); for await (const chunk of stream) { if (chunk) { console.log(decoder.decode(chunk)); } }
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/regex_parser.ts
import { OpenAI } from "@langchain/openai"; import { RegexParser } from "langchain/output_parsers"; import { PromptTemplate } from "@langchain/core/prompts"; export const run = async () => { const parser = new RegexParser( /Humor: ([0-9]+), Sophistication: (A|B|C|D|E)/, ["mark", "grade"], "noConfidence" ); const formatInstructions = parser.getFormatInstructions(); const prompt = new PromptTemplate({ template: "Grade the joke.\n\n{format_instructions}\n\nJoke: {joke}", inputVariables: ["joke"], partialVariables: { format_instructions: formatInstructions }, }); const model = new OpenAI({ temperature: 0 }); const input = await prompt.format({ joke: "What time is the appointment? Tooth hurt-y.", }); console.log(input); /* Grade the joke. Your response should match the following regex: /Humor: ([0-9]+), Sophistication: (A|B|C|D|E)/ Joke: What time is the appointment? Tooth hurt-y. */ const response = await model.invoke(input); console.log(response); /* Humor: 8, Sophistication: D */ };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/fix_parser.ts
import { z } from "zod"; import { ChatOpenAI } from "@langchain/openai"; import { OutputFixingParser } from "langchain/output_parsers"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; export const run = async () => { const parser = StructuredOutputParser.fromZodSchema( z.object({ answer: z.string().describe("answer to the user's question"), sources: z .array(z.string()) .describe("sources used to answer the question, should be websites."), }) ); /** This is a bad output because sources is a string, not a list */ const badOutput = `\`\`\`json { "answer": "foo", "sources": "foo.com" } \`\`\``; try { await parser.parse(badOutput); } catch (e) { console.log("Failed to parse bad output: ", e); /* Failed to parse bad output: OutputParserException [Error]: Failed to parse. Text: ```json { "answer": "foo", "sources": "foo.com" } ```. Error: [ { "code": "invalid_type", "expected": "array", "received": "string", "path": [ "sources" ], "message": "Expected array, received string" } ] at StructuredOutputParser.parse (/Users/ankushgola/Code/langchainjs/langchain/src/output_parsers/structured.ts:71:13) at run (/Users/ankushgola/Code/langchainjs/examples/src/prompts/fix_parser.ts:25:18) at <anonymous> (/Users/ankushgola/Code/langchainjs/examples/src/index.ts:33:22) */ } const fixParser = OutputFixingParser.fromLLM( new ChatOpenAI({ temperature: 0 }), parser ); const output = await fixParser.parse(badOutput); console.log("Fixed output: ", output); // Fixed output: { answer: 'foo', sources: [ 'foo.com' ] } };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/string_output_parser.ts
import { ChatOpenAI } from "@langchain/openai"; import { StringOutputParser } from "@langchain/core/output_parsers"; const parser = new StringOutputParser(); const model = new ChatOpenAI({ temperature: 0 }); const stream = await model.pipe(parser).stream("Hello there!"); for await (const chunk of stream) { console.log(chunk); } /* Hello ! How can I assist you today ? */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/custom_list_parser_sequence.ts
import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { CustomListOutputParser } from "@langchain/core/output_parsers"; import { RunnableSequence } from "@langchain/core/runnables"; // With a `CustomListOutputParser`, we can parse a list with a specific length and separator. const parser = new CustomListOutputParser({ length: 3, separator: "\n" }); const chain = RunnableSequence.from([ PromptTemplate.fromTemplate( "Provide a list of {subject}.\n{format_instructions}" ), new OpenAI({ temperature: 0 }), parser, ]); /* Provide a list of great fiction books (book, author). Your response should be a list of 3 items separated by "\n" (eg: `foo\n bar\n baz`) */ const response = await chain.invoke({ subject: "great fiction books (book, author)", format_instructions: parser.getFormatInstructions(), }); console.log(response); /* [ 'The Catcher in the Rye, J.D. Salinger', 'To Kill a Mockingbird, Harper Lee', 'The Great Gatsby, F. Scott Fitzgerald' ] */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/xml_output_parser.ts
import { XMLOutputParser } from "@langchain/core/output_parsers"; const XML_EXAMPLE = `<?xml version="1.0" encoding="UTF-8"?> <userProfile> <userID>12345</userID> <name>John Doe</name> <email>john.doe@example.com</email> <roles> <role>Admin</role> <role>User</role> </roles> <preferences> <theme>Dark</theme> <notifications> <email>true</email> <sms>false</sms> </notifications> </preferences> </userProfile>`; const parser = new XMLOutputParser(); const result = await parser.invoke(XML_EXAMPLE); console.log(JSON.stringify(result, null, 2)); /* { "userProfile": [ { "userID": "12345" }, { "name": "John Doe" }, { "email": "john.doe@example.com" }, { "roles": [ { "role": "Admin" }, { "role": "User" } ] }, { "preferences": [ { "theme": "Dark" }, { "notifications": [ { "email": "true" }, { "sms": "false" } ] } ] } ] } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/semantic_similarity_example_selector_metadata_filtering.ts
// Ephemeral, in-memory vector store for demo purposes import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { PromptTemplate, FewShotPromptTemplate } from "@langchain/core/prompts"; import { Document } from "@langchain/core/documents"; import { SemanticSimilarityExampleSelector } from "@langchain/core/example_selectors"; const embeddings = new OpenAIEmbeddings(); const memoryVectorStore = new MemoryVectorStore(embeddings); const examples = [ { query: "healthy food", output: `lettuce`, food_type: "vegetable", }, { query: "healthy food", output: `schnitzel`, food_type: "veal", }, { query: "foo", output: `bar`, food_type: "baz", }, ]; const exampleSelector = new SemanticSimilarityExampleSelector({ vectorStore: memoryVectorStore, k: 2, // Only embed the "query" key of each example inputKeys: ["query"], // Filter type will depend on your specific vector store. // See the section of the docs for the specific vector store you are using. filter: (doc: Document) => doc.metadata.food_type === "vegetable", }); for (const example of examples) { // Format and add an example to the underlying vector store await exampleSelector.addExample(example); } // Create a prompt template that will be used to format the examples. const examplePrompt = PromptTemplate.fromTemplate(`<example> <user_input> {query} </user_input> <output> {output} </output> </example>`); // Create a FewShotPromptTemplate that will use the example selector. const dynamicPrompt = new FewShotPromptTemplate({ // We provide an ExampleSelector instead of examples. 
exampleSelector, examplePrompt, prefix: `Answer the user's question, using the below examples as reference:`, suffix: "User question:\n{query}", inputVariables: ["query"], }); const model = new ChatOpenAI({}); const chain = dynamicPrompt.pipe(model); const result = await chain.invoke({ query: "What is exactly one type of healthy food?", }); console.log(result); /* AIMessage { content: 'One type of healthy food is lettuce.', additional_kwargs: { function_call: undefined } } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/json_structured_output_parser.ts
import { ChatOpenAI } from "@langchain/openai"; import { HumanMessage } from "@langchain/core/messages"; import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions"; // Instantiate the parser const parser = new JsonOutputFunctionsParser(); // Define the function schema const extractionFunctionSchema = { name: "extractor", description: "Extracts fields from the input.", parameters: { type: "object", properties: { tone: { type: "string", enum: ["positive", "negative"], description: "The overall tone of the input", }, word_count: { type: "number", description: "The number of words in the input", }, chat_response: { type: "string", description: "A response to the human's input", }, }, required: ["tone", "word_count", "chat_response"], }, }; // Instantiate the ChatOpenAI class const model = new ChatOpenAI({ model: "gpt-4" }); // Create a new runnable, bind the function to the model, and pipe the output through the parser const runnable = model .bind({ functions: [extractionFunctionSchema], function_call: { name: "extractor" }, }) .pipe(parser); // Invoke the runnable with an input const result = await runnable.invoke([ new HumanMessage("What a beautiful day!"), ]); console.log({ result }); /** { result: { tone: 'positive', word_count: 4, chat_response: "Indeed, it's a lovely day!" } } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/custom_list_parser.ts
import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { CustomListOutputParser } from "@langchain/core/output_parsers"; // With a `CustomListOutputParser`, we can parse a list with a specific length and separator. const parser = new CustomListOutputParser({ length: 3, separator: "\n" }); const formatInstructions = parser.getFormatInstructions(); const prompt = new PromptTemplate({ template: "Provide a list of {subject}.\n{format_instructions}", inputVariables: ["subject"], partialVariables: { format_instructions: formatInstructions }, }); const model = new OpenAI({ temperature: 0 }); const input = await prompt.format({ subject: "great fiction books (book, author)", }); const response = await model.invoke(input); console.log(input); /* Provide a list of great fiction books (book, author). Your response should be a list of 3 items separated by "\n" (eg: `foo\n bar\n baz`) */ console.log(response); /* The Catcher in the Rye, J.D. Salinger To Kill a Mockingbird, Harper Lee The Great Gatsby, F. Scott Fitzgerald */ console.log(await parser.parse(response)); /* [ 'The Catcher in the Rye, J.D. Salinger', 'To Kill a Mockingbird, Harper Lee', 'The Great Gatsby, F. Scott Fitzgerald' ] */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/pipeline_prompt.ts
import { PromptTemplate, PipelinePromptTemplate, } from "@langchain/core/prompts"; const fullPrompt = PromptTemplate.fromTemplate(`{introduction} {example} {start}`); const introductionPrompt = PromptTemplate.fromTemplate( `You are impersonating {person}.` ); const examplePrompt = PromptTemplate.fromTemplate(`Here's an example of an interaction: Q: {example_q} A: {example_a}`); const startPrompt = PromptTemplate.fromTemplate(`Now, do this for real! Q: {input} A:`); const composedPrompt = new PipelinePromptTemplate({ pipelinePrompts: [ { name: "introduction", prompt: introductionPrompt, }, { name: "example", prompt: examplePrompt, }, { name: "start", prompt: startPrompt, }, ], finalPrompt: fullPrompt, }); const formattedPrompt = await composedPrompt.format({ person: "Elon Musk", example_q: `What's your favorite car?`, example_a: "Telsa", input: `What's your favorite social media site?`, }); console.log(formattedPrompt); /* You are impersonating Elon Musk. Here's an example of an interaction: Q: What's your favorite car? A: Telsa Now, do this for real! Q: What's your favorite social media site? A: */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/http_response_output_parser.ts
import { ChatOpenAI } from "@langchain/openai"; import { HttpResponseOutputParser } from "langchain/output_parsers"; const handler = async () => { const parser = new HttpResponseOutputParser(); const model = new ChatOpenAI({ temperature: 0 }); const stream = await model.pipe(parser).stream("Hello there!"); const httpResponse = new Response(stream, { headers: { "Content-Type": "text/plain; charset=utf-8", }, }); return httpResponse; }; await handler();
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/comma_list_parser.ts
import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { CommaSeparatedListOutputParser } from "@langchain/core/output_parsers"; export const run = async () => { // With a `CommaSeparatedListOutputParser`, we can parse a comma separated list. const parser = new CommaSeparatedListOutputParser(); const formatInstructions = parser.getFormatInstructions(); const prompt = new PromptTemplate({ template: "List five {subject}.\n{format_instructions}", inputVariables: ["subject"], partialVariables: { format_instructions: formatInstructions }, }); const model = new OpenAI({ temperature: 0 }); const input = await prompt.format({ subject: "ice cream flavors" }); const response = await model.invoke(input); console.log(input); /* List five ice cream flavors. Your response should be a list of comma separated values, eg: `foo, bar, baz` */ console.log(response); // Vanilla, Chocolate, Strawberry, Mint Chocolate Chip, Cookies and Cream console.log(await parser.parse(response)); /* [ 'Vanilla', 'Chocolate', 'Strawberry', 'Mint Chocolate Chip', 'Cookies and Cream' ] */ };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/use_with_llm_chain.ts
import { z } from "zod"; import { ChatOpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { OutputFixingParser } from "langchain/output_parsers"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; const outputParser = StructuredOutputParser.fromZodSchema( z .array( z.object({ fields: z.object({ Name: z.string().describe("The name of the country"), Capital: z.string().describe("The country's capital"), }), }) ) .describe("An array of Airtable records, each representing a country") ); const chatModel = new ChatOpenAI({ model: "gpt-4", // Or gpt-3.5-turbo temperature: 0, // For best results with the output fixing parser }); const outputFixingParser = OutputFixingParser.fromLLM(chatModel, outputParser); // Don't forget to include formatting instructions in the prompt! const prompt = new PromptTemplate({ template: `Answer the user's question as best you can:\n{format_instructions}\n{query}`, inputVariables: ["query"], partialVariables: { format_instructions: outputFixingParser.getFormatInstructions(), }, }); const answerFormattingChain = new LLMChain({ llm: chatModel, prompt, outputKey: "records", // For readability - otherwise the chain output will default to a property named "text" outputParser: outputFixingParser, }); const result = await answerFormattingChain.invoke({ query: "List 5 countries.", }); console.log(JSON.stringify(result.records, null, 2)); /* [ { "fields": { "Name": "United States", "Capital": "Washington, D.C." } }, { "fields": { "Name": "Canada", "Capital": "Ottawa" } }, { "fields": { "Name": "Germany", "Capital": "Berlin" } }, { "fields": { "Name": "Japan", "Capital": "Tokyo" } }, { "fields": { "Name": "Australia", "Capital": "Canberra" } } ] */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/string_output_parser_sequence.ts
import { ChatOpenAI } from "@langchain/openai"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { RunnableSequence } from "@langchain/core/runnables"; const chain = RunnableSequence.from([ new ChatOpenAI({ temperature: 0 }), new StringOutputParser(), ]); const stream = await chain.stream("Hello there!"); for await (const chunk of stream) { console.log(chunk); }
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/semantic_similarity_example_selector.ts
import { OpenAIEmbeddings } from "@langchain/openai"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { PromptTemplate, FewShotPromptTemplate } from "@langchain/core/prompts"; import { SemanticSimilarityExampleSelector } from "@langchain/core/example_selectors"; // Create a prompt template that will be used to format the examples. const examplePrompt = PromptTemplate.fromTemplate( "Input: {input}\nOutput: {output}" ); // Create a SemanticSimilarityExampleSelector that will be used to select the examples. const exampleSelector = await SemanticSimilarityExampleSelector.fromExamples( [ { input: "happy", output: "sad" }, { input: "tall", output: "short" }, { input: "energetic", output: "lethargic" }, { input: "sunny", output: "gloomy" }, { input: "windy", output: "calm" }, ], new OpenAIEmbeddings(), HNSWLib, { k: 1 } ); // Create a FewShotPromptTemplate that will use the example selector. const dynamicPrompt = new FewShotPromptTemplate({ // We provide an ExampleSelector instead of examples. exampleSelector, examplePrompt, prefix: "Give the antonym of every input", suffix: "Input: {adjective}\nOutput:", inputVariables: ["adjective"], }); // Input is about the weather, so should select eg. the sunny/gloomy example console.log(await dynamicPrompt.format({ adjective: "rainy" })); /* Give the antonym of every input Input: sunny Output: gloomy Input: rainy Output: */ // Input is a measurement, so should select the tall/short example console.log(await dynamicPrompt.format({ adjective: "large" })); /* Give the antonym of every input Input: tall Output: short Input: large Output: */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/http_response_output_parser_event_stream.ts
import { ChatOpenAI } from "@langchain/openai"; import { HttpResponseOutputParser } from "langchain/output_parsers"; const handler = async () => { const parser = new HttpResponseOutputParser({ contentType: "text/event-stream", }); const model = new ChatOpenAI({ temperature: 0 }); // Values are stringified to avoid dealing with newlines and should // be parsed with `JSON.parse()` when consuming. const stream = await model.pipe(parser).stream("Hello there!"); const httpResponse = new Response(stream, { headers: { "Content-Type": "text/event-stream", }, }); return httpResponse; }; await handler();
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/combining_parser_sequence.ts
import { OpenAI } from "@langchain/openai"; import { RegexParser, CombiningOutputParser } from "langchain/output_parsers"; import { PromptTemplate } from "@langchain/core/prompts"; import { RunnableSequence } from "@langchain/core/runnables"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; const answerParser = StructuredOutputParser.fromNamesAndDescriptions({ answer: "answer to the user's question", source: "source used to answer the user's question, should be a website.", }); const confidenceParser = new RegexParser( /Confidence: (A|B|C), Explanation: (.*)/, ["confidence", "explanation"], "noConfidence" ); const parser = new CombiningOutputParser(answerParser, confidenceParser); const chain = RunnableSequence.from([ PromptTemplate.fromTemplate( "Answer the users question as best as possible.\n{format_instructions}\n{question}" ), new OpenAI({ temperature: 0 }), parser, ]); /* Answer the users question as best as possible. Return the following outputs, each formatted as described below: Output 1: The output should be formatted as a JSON instance that conforms to the JSON schema below. As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}} the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. Here is the output schema: ``` {"type":"object","properties":{"answer":{"type":"string","description":"answer to the user's question"},"source":{"type":"string","description":"source used to answer the user's question, should be a website."}},"required":["answer","source"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"} ``` Output 2: Your response should match the following regex: /Confidence: (A|B|C), Explanation: (.*)/ What is the capital of France? 
*/ const response = await chain.invoke({ question: "What is the capital of France?", format_instructions: parser.getFormatInstructions(), }); console.log(response); /* { answer: 'Paris', source: 'https://www.worldatlas.com/articles/what-is-the-capital-of-france.html', confidence: 'A', explanation: 'The capital of France is Paris.' } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/xml_output_parser_streaming.ts
import { XMLOutputParser } from "@langchain/core/output_parsers"; import { FakeStreamingLLM } from "@langchain/core/utils/testing"; const XML_EXAMPLE = `<?xml version="1.0" encoding="UTF-8"?> <userProfile> <userID>12345</userID> <roles> <role>Admin</role> <role>User</role> </roles> </userProfile>`; const parser = new XMLOutputParser(); // Define your LLM, in this example we'll use demo streaming LLM const streamingLLM = new FakeStreamingLLM({ responses: [XML_EXAMPLE], }).pipe(parser); // Pipe the parser to the LLM const stream = await streamingLLM.stream(XML_EXAMPLE); for await (const chunk of stream) { console.log(JSON.stringify(chunk, null, 2)); } /* {} { "userProfile": "" } { "userProfile": "\n" } { "userProfile": [ { "userID": "" } ] } { "userProfile": [ { "userID": "123" } ] } { "userProfile": [ { "userID": "12345" }, { "roles": "" } ] } { "userProfile": [ { "userID": "12345" }, { "roles": [ { "role": "A" } ] } ] } { "userProfile": [ { "userID": "12345" }, { "roles": [ { "role": "Admi" } ] } ] } { "userProfile": [ { "userID": "12345" }, { "roles": [ { "role": "Admin" }, { "role": "U" } ] } ] } { "userProfile": [ { "userID": "12345" }, { "roles": [ { "role": "Admin" }, { "role": "User" } ] } ] } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/structured_parser_sequence.ts
import { OpenAI } from "@langchain/openai";
import { RunnableSequence } from "@langchain/core/runnables";
import { PromptTemplate } from "@langchain/core/prompts";
import { StructuredOutputParser } from "@langchain/core/output_parsers";

// Build a structured parser from plain name/description pairs:
// an answer string plus the single source website it came from.
const outputParser = StructuredOutputParser.fromNamesAndDescriptions({
  answer: "answer to the user's question",
  source: "source used to answer the user's question, should be a website.",
});

const chain = RunnableSequence.from([
  PromptTemplate.fromTemplate(
    "Answer the users question as best as possible.\n{format_instructions}\n{question}"
  ),
  new OpenAI({ temperature: 0 }),
  outputParser,
]);

// The instructions embed a JSON Schema requiring string properties
// "answer" and "source"; the model's reply must conform to it.
console.log(outputParser.getFormatInstructions());

const response = await chain.invoke({
  question: "What is the capital of France?",
  format_instructions: outputParser.getFormatInstructions(),
});

console.log(response);
// { answer: 'Paris', source: 'https://en.wikipedia.org/wiki/Paris' }
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/structured_parser_zod_sequence.ts
import { z } from "zod"; import { OpenAI } from "@langchain/openai"; import { RunnableSequence } from "@langchain/core/runnables"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; // We can use zod to define a schema for the output using the `fromZodSchema` method of `StructuredOutputParser`. const parser = StructuredOutputParser.fromZodSchema( z.object({ answer: z.string().describe("answer to the user's question"), sources: z .array(z.string()) .describe("sources used to answer the question, should be websites."), }) ); const chain = RunnableSequence.from([ PromptTemplate.fromTemplate( "Answer the users question as best as possible.\n{format_instructions}\n{question}" ), new OpenAI({ temperature: 0 }), parser, ]); console.log(parser.getFormatInstructions()); /* Answer the users question as best as possible. You must format your output as a JSON value that adheres to a given "JSON Schema" instance. "JSON Schema" is a declarative language that allows you to annotate and validate JSON documents. For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}} would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings. Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas! Here is the JSON Schema instance your output must adhere to. 
Include the enclosing markdown codeblock: ``` {"type":"object","properties":{"answer":{"type":"string","description":"answer to the user's question"},"sources":{"type":"array","items":{"type":"string"},"description":"sources used to answer the question, should be websites."}},"required":["answer","sources"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"} ``` What is the capital of France? */ const response = await chain.invoke({ question: "What is the capital of France?", format_instructions: parser.getFormatInstructions(), }); console.log(response); /* { answer: 'Paris', sources: [ 'https://en.wikipedia.org/wiki/Paris' ] } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/multi_modal_inline.ts
// Build a multi-modal chat prompt with image content blocks written inline.
import { ChatPromptTemplate } from "@langchain/core/prompts";
import fs from "node:fs/promises";

// Load a local image; assumes "hotdog.jpg" exists in the working directory.
const hotdogImage = await fs.readFile("hotdog.jpg");

// Convert the image to base64 so it can be inlined as a data URL.
const base64Image = hotdogImage.toString("base64");

const imageURL = "https://avatars.githubusercontent.com/u/126733545?s=200&v=4";

// "{imageURL}" and "{base64Image}" are prompt template variables,
// substituted when the prompt is invoked below.
const multiModalPrompt = ChatPromptTemplate.fromMessages([
  ["system", "You have 20:20 vision! Describe the user's image."],
  [
    "human",
    [
      {
        type: "image_url",
        image_url: {
          url: "{imageURL}",
          detail: "high",
        },
      },
      {
        type: "image_url",
        image_url: "data:image/jpeg;base64,{base64Image}",
      },
    ],
  ],
]);

// Fill in both template variables to produce the final prompt value.
const formattedPrompt = await multiModalPrompt.invoke({
  imageURL,
  base64Image,
});

console.log(JSON.stringify(formattedPrompt, null, 2));
/**
 {
  "kwargs": {
    "messages": [
      {
        "kwargs": {
          "content": "You have 20:20 vision! Describe the user's image.",
        }
      },
      {
        "kwargs": {
          "content": [
            {
              "type": "image_url",
              "image_url": {
                "url": "https://avatars.githubusercontent.com/u/126733545?s=200&v=4",
                "detail": "high"
              }
            },
            {
              "type": "image_url",
              "image_url": {
                "url": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEBLAEsAAD/4QBWRX...",
              }
            }
          ],
        }
      }
    ]
  }
}
 */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/comma_list_parser_sequence.ts
import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { CommaSeparatedListOutputParser } from "@langchain/core/output_parsers"; import { RunnableSequence } from "@langchain/core/runnables"; export const run = async () => { // With a `CommaSeparatedListOutputParser`, we can parse a comma separated list. const parser = new CommaSeparatedListOutputParser(); const chain = RunnableSequence.from([ PromptTemplate.fromTemplate("List five {subject}.\n{format_instructions}"), new OpenAI({ temperature: 0 }), parser, ]); /* List five ice cream flavors. Your response should be a list of comma separated values, eg: `foo, bar, baz` */ const response = await chain.invoke({ subject: "ice cream flavors", format_instructions: parser.getFormatInstructions(), }); console.log(response); /* [ 'Vanilla', 'Chocolate', 'Strawberry', 'Mint Chocolate Chip', 'Cookies and Cream' ] */ };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/prompt_value.ts
// Demonstrates `formatPromptValue`: a PromptValue can be rendered either
// as a plain string or as a list of chat messages.
import {
  ChatPromptTemplate,
  HumanMessagePromptTemplate,
  PromptTemplate,
  SystemMessagePromptTemplate,
} from "@langchain/core/prompts";

export const run = async () => {
  const template = "What is a good name for a company that makes {product}?";
  const promptA = new PromptTemplate({ template, inputVariables: ["product"] });

  // The `formatPromptValue` method returns a `PromptValue` object that can be used to format the prompt as a string or a list of `ChatMessage` objects.
  const responseA = await promptA.formatPromptValue({
    product: "colorful socks",
  });
  // Render the prompt value as a plain string.
  const responseAString = responseA.toString();
  console.log({ responseAString });
  /*
  { responseAString: 'What is a good name for a company that makes colorful socks?' }
  */

  // The same prompt value rendered as chat messages (a single HumanMessage).
  const responseAMessages = responseA.toChatMessages();
  console.log({ responseAMessages });
  /*
  {
    responseAMessages: [
      HumanMessage {
        text: 'What is a good name for a company that makes colorful socks?'
      }
    ]
  }
  */

  const chatPrompt = ChatPromptTemplate.fromMessages([
    SystemMessagePromptTemplate.fromTemplate(
      "You are a helpful assistant that translates {input_language} to {output_language}."
    ),
    HumanMessagePromptTemplate.fromTemplate("{text}"),
  ]);

  // `formatPromptValue` also works with `ChatPromptTemplate`.
  const responseB = await chatPrompt.formatPromptValue({
    input_language: "English",
    output_language: "French",
    text: "I love programming.",
  });
  // String rendering of a chat prompt serializes the message list.
  const responseBString = responseB.toString();
  console.log({ responseBString });
  /*
  {
    responseBString: '[{"text":"You are a helpful assistant that translates English to French."},{"text":"I love programming."}]'
  }
  */

  // Message rendering keeps the system/human roles distinct.
  const responseBMessages = responseB.toChatMessages();
  console.log({ responseBMessages });
  /*
  {
    responseBMessages: [
      SystemMessage {
        text: 'You are a helpful assistant that translates English to French.'
      },
      HumanMessage { text: 'I love programming.' }
    ]
  }
  */
};
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/combining_parser.ts
import { OpenAI } from "@langchain/openai";
import { RegexParser, CombiningOutputParser } from "langchain/output_parsers";
import { PromptTemplate } from "@langchain/core/prompts";
import { StructuredOutputParser } from "@langchain/core/output_parsers";

// JSON parser for the answer and the website it came from.
const structuredParser = StructuredOutputParser.fromNamesAndDescriptions({
  answer: "answer to the user's question",
  source: "source used to answer the user's question, should be a website.",
});

// Regex parser for a letter-graded confidence plus an explanation.
const regexParser = new RegexParser(
  /Confidence: (A|B|C), Explanation: (.*)/,
  ["confidence", "explanation"],
  "noConfidence"
);

// Ask the model for both outputs in one completion and merge the
// parsed results into a single object.
const combinedParser = new CombiningOutputParser(structuredParser, regexParser);

const prompt = new PromptTemplate({
  template:
    "Answer the users question as best as possible.\n{format_instructions}\n{question}",
  inputVariables: ["question"],
  // Bake the combined format instructions into the prompt up front.
  partialVariables: {
    format_instructions: combinedParser.getFormatInstructions(),
  },
});

const llm = new OpenAI({ temperature: 0 });

const input = await prompt.format({
  question: "What is the capital of France?",
});
const response = await llm.invoke(input);

console.log(input);
/*
  The rendered prompt: both sets of format instructions (the JSON schema
  for Output 1, the confidence regex for Output 2) followed by the question.
*/

console.log(response);
/*
  Output 1:
  {"answer":"Paris","source":"https://www.worldatlas.com/articles/what-is-the-capital-of-france.html"}

  Output 2:
  Confidence: A, Explanation: The capital of France is Paris.
*/

console.log(await combinedParser.parse(response));
/*
  {
    answer: 'Paris',
    source: 'https://www.worldatlas.com/articles/what-is-the-capital-of-france.html',
    confidence: 'A',
    explanation: 'The capital of France is Paris.'
  }
*/
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/datetime_parser.ts
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { DatetimeOutputParser } from "langchain/output_parsers";

// Parses the model's completion directly into a JavaScript Date.
const dateParser = new DatetimeOutputParser();

const basePrompt = ChatPromptTemplate.fromTemplate(`Answer the users question:

{question}

{format_instructions}`);

// Bake the parser's format instructions into the prompt ahead of time.
const promptWithInstructions = await basePrompt.partial({
  format_instructions: dateParser.getFormatInstructions(),
});

const chain = promptWithInstructions
  .pipe(new ChatOpenAI({ temperature: 0 }))
  .pipe(dateParser);

const incorporationDate = await chain.invoke({
  question: "When was Chicago incorporated?",
});

// The parsed result is a real Date instance, not a string.
console.log(incorporationDate, incorporationDate instanceof Date);
/*
  1837-03-04T00:00:00.000Z, true
*/
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/json_structured_output_parser_streaming.ts
import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions"; const schema = z.object({ setup: z.string().describe("The setup for the joke"), punchline: z.string().describe("The punchline to the joke"), }); const modelParams = { functions: [ { name: "joke", description: "A joke", parameters: zodToJsonSchema(schema), }, ], function_call: { name: "joke" }, }; const prompt = ChatPromptTemplate.fromTemplate( `tell me a long joke about {foo}` ); const model = new ChatOpenAI({ temperature: 0, }).bind(modelParams); const chain = prompt .pipe(model) .pipe(new JsonOutputFunctionsParser({ diff: true })); const stream = await chain.stream({ foo: "bears", }); // Stream a diff as JSON patch operations for await (const chunk of stream) { console.log(chunk); } /* [] [ { op: 'add', path: '/setup', value: '' } ] [ { op: 'replace', path: '/setup', value: 'Why' } ] [ { op: 'replace', path: '/setup', value: 'Why don' } ] [ { op: 'replace', path: '/setup', value: "Why don't" } ] [ { op: 'replace', path: '/setup', value: "Why don't bears" } ] [ { op: 'replace', path: '/setup', value: "Why don't bears wear" } ] [ { op: 'replace', path: '/setup', value: "Why don't bears wear shoes" } ] [ { op: 'replace', path: '/setup', value: "Why don't bears wear shoes?" }, { op: 'add', path: '/punchline', value: '' } ] [ { op: 'replace', path: '/punchline', value: 'Because' } ] [ { op: 'replace', path: '/punchline', value: 'Because they' } ] [ { op: 'replace', path: '/punchline', value: 'Because they have' } ] [ { op: 'replace', path: '/punchline', value: 'Because they have bear' } ] [ { op: 'replace', path: '/punchline', value: 'Because they have bear feet' } ] [ { op: 'replace', path: '/punchline', value: 'Because they have bear feet!' 
} ] */ const chain2 = prompt.pipe(model).pipe(new JsonOutputFunctionsParser()); const stream2 = await chain2.stream({ foo: "beets", }); // Stream the entire aggregated JSON object for await (const chunk of stream2) { console.log(chunk); } /* {} { setup: '' } { setup: 'Why' } { setup: 'Why did' } { setup: 'Why did the' } { setup: 'Why did the beet' } { setup: 'Why did the beet go' } { setup: 'Why did the beet go to' } { setup: 'Why did the beet go to therapy' } { setup: 'Why did the beet go to therapy?', punchline: '' } { setup: 'Why did the beet go to therapy?', punchline: 'Because' } { setup: 'Why did the beet go to therapy?', punchline: 'Because it' } { setup: 'Why did the beet go to therapy?', punchline: 'Because it had' } { setup: 'Why did the beet go to therapy?', punchline: 'Because it had a' } { setup: 'Why did the beet go to therapy?', punchline: 'Because it had a lot' } { setup: 'Why did the beet go to therapy?', punchline: 'Because it had a lot of' } { setup: 'Why did the beet go to therapy?', punchline: 'Because it had a lot of unresolved' } { setup: 'Why did the beet go to therapy?', punchline: 'Because it had a lot of unresolved issues' } { setup: 'Why did the beet go to therapy?', punchline: 'Because it had a lot of unresolved issues!' } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/http_response_output_parser_custom.ts
import { ChatOpenAI } from "@langchain/openai"; import { HttpResponseOutputParser } from "langchain/output_parsers"; import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions"; const handler = async () => { const parser = new HttpResponseOutputParser({ contentType: "text/event-stream", outputParser: new JsonOutputFunctionsParser({ diff: true }), }); const model = new ChatOpenAI({ temperature: 0 }).bind({ functions: [ { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, ], // You can set the `function_call` arg to force the model to use a function function_call: { name: "get_current_weather", }, }); const stream = await model.pipe(parser).stream("Hello there!"); const httpResponse = new Response(stream, { headers: { "Content-Type": "text/event-stream", }, }); return httpResponse; }; await handler();
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/length_based_example_selector.ts
import { PromptTemplate, FewShotPromptTemplate } from "@langchain/core/prompts"; import { LengthBasedExampleSelector } from "@langchain/core/example_selectors"; export async function run() { // Create a prompt template that will be used to format the examples. const examplePrompt = new PromptTemplate({ inputVariables: ["input", "output"], template: "Input: {input}\nOutput: {output}", }); // Create a LengthBasedExampleSelector that will be used to select the examples. const exampleSelector = await LengthBasedExampleSelector.fromExamples( [ { input: "happy", output: "sad" }, { input: "tall", output: "short" }, { input: "energetic", output: "lethargic" }, { input: "sunny", output: "gloomy" }, { input: "windy", output: "calm" }, ], { examplePrompt, maxLength: 25, } ); // Create a FewShotPromptTemplate that will use the example selector. const dynamicPrompt = new FewShotPromptTemplate({ // We provide an ExampleSelector instead of examples. exampleSelector, examplePrompt, prefix: "Give the antonym of every input", suffix: "Input: {adjective}\nOutput:", inputVariables: ["adjective"], }); // An example with small input, so it selects all examples. console.log(await dynamicPrompt.format({ adjective: "big" })); /* Give the antonym of every input Input: happy Output: sad Input: tall Output: short Input: energetic Output: lethargic Input: sunny Output: gloomy Input: windy Output: calm Input: big Output: */ // An example with long input, so it selects only one example. const longString = "big and huge and massive and large and gigantic and tall and much much much much much bigger than everything else"; console.log(await dynamicPrompt.format({ adjective: longString })); /* Give the antonym of every input Input: happy Output: sad Input: big and huge and massive and large and gigantic and tall and much much much much much bigger than everything else Output: */ }
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/semantic_similarity_example_selector_custom_retriever.ts
/* eslint-disable @typescript-eslint/no-non-null-assertion */ // Requires a vectorstore that supports maximal marginal relevance search import { Pinecone } from "@pinecone-database/pinecone"; import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { PineconeStore } from "@langchain/pinecone"; import { PromptTemplate, FewShotPromptTemplate } from "@langchain/core/prompts"; import { SemanticSimilarityExampleSelector } from "@langchain/core/example_selectors"; const pinecone = new Pinecone(); const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!); /** * Pinecone allows you to partition the records in an index into namespaces. * Queries and other operations are then limited to one namespace, * so different requests can search different subsets of your index. * Read more about namespaces here: https://docs.pinecone.io/guides/indexes/use-namespaces * * NOTE: If you have namespace enabled in your Pinecone index, you must provide the namespace when creating the PineconeStore. */ const namespace = "pinecone"; const pineconeVectorstore = await PineconeStore.fromExistingIndex( new OpenAIEmbeddings(), { pineconeIndex, namespace } ); const pineconeMmrRetriever = pineconeVectorstore.asRetriever({ searchType: "mmr", k: 2, }); const examples = [ { query: "healthy food", output: `lettuce`, food_type: "vegetable", }, { query: "healthy food", output: `schnitzel`, food_type: "veal", }, { query: "foo", output: `bar`, food_type: "baz", }, ]; const exampleSelector = new SemanticSimilarityExampleSelector({ vectorStoreRetriever: pineconeMmrRetriever, // Only embed the "query" key of each example inputKeys: ["query"], }); for (const example of examples) { // Format and add an example to the underlying vector store await exampleSelector.addExample(example); } // Create a prompt template that will be used to format the examples. 
const examplePrompt = PromptTemplate.fromTemplate(`<example> <user_input> {query} </user_input> <output> {output} </output> </example>`); // Create a FewShotPromptTemplate that will use the example selector. const dynamicPrompt = new FewShotPromptTemplate({ // We provide an ExampleSelector instead of examples. exampleSelector, examplePrompt, prefix: `Answer the user's question, using the below examples as reference:`, suffix: "User question:\n{query}", inputVariables: ["query"], }); const model = new ChatOpenAI({}); const chain = dynamicPrompt.pipe(model); const result = await chain.invoke({ query: "What is exactly one type of healthy food?", }); console.log(result); /* AIMessage { content: 'lettuce.', additional_kwargs: { function_call: undefined } } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/few_shot.ts
// Build a few-shot prompt from a fixed list of examples, a per-example
// formatting template, and surrounding prefix/suffix text.
import { FewShotPromptTemplate, PromptTemplate } from "@langchain/core/prompts";

export const run = async () => {
  // First, create a list of few-shot examples.
  const examples = [
    { word: "happy", antonym: "sad" },
    { word: "tall", antonym: "short" },
  ];

  // Next, we specify the template to format the examples we have provided.
  const exampleFormatterTemplate = "Word: {word}\nAntonym: {antonym}\n";
  const examplePrompt = new PromptTemplate({
    inputVariables: ["word", "antonym"],
    template: exampleFormatterTemplate,
  });

  // Finally, we create the `FewShotPromptTemplate`
  const fewShotPrompt = new FewShotPromptTemplate({
    /* These are the examples we want to insert into the prompt. */
    examples,
    /* This is how we want to format the examples when we insert them into the prompt. */
    examplePrompt,
    /* The prefix is some text that goes before the examples in the prompt. Usually, this consists of instructions. */
    prefix: "Give the antonym of every input",
    /* The suffix is some text that goes after the examples in the prompt. Usually, this is where the user input will go */
    suffix: "Word: {input}\nAntonym:",
    /* The input variables are the variables that the overall prompt expects. */
    inputVariables: ["input"],
    /* The example_separator is the string we will use to join the prefix, examples, and suffix together with. */
    exampleSeparator: "\n\n",
    /* The template format is the formatting method to use for the template. Should usually be f-string. */
    templateFormat: "f-string",
  });

  // We can now generate a prompt using the `format` method.
  console.log(await fewShotPrompt.format({ input: "big" }));
  /*
  Give the antonym of every input

  Word: happy
  Antonym: sad

  Word: tall
  Antonym: short

  Word: big
  Antonym:
  */
};
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/multi_modal.ts
// Build a multi-modal chat prompt by composing HumanMessage objects directly.
import { HumanMessage } from "@langchain/core/messages";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import fs from "node:fs/promises";

// Load a local image; assumes "hotdog.jpg" exists in the working directory.
const hotdogImage = await fs.readFile("hotdog.jpg");
// Inline the image bytes as base64 so no file hosting is needed.
const base64Image = hotdogImage.toString("base64");

const imageURL = "https://avatars.githubusercontent.com/u/126733545?s=200&v=4";

// "{imageURL}" is a prompt template variable, resolved at invoke time.
const langchainLogoMessage = new HumanMessage({
  content: [
    {
      type: "image_url",
      image_url: {
        url: "{imageURL}",
        detail: "high",
      },
    },
  ],
});

// The base64 payload can also be supplied through a template variable.
const base64ImageMessage = new HumanMessage({
  content: [
    {
      type: "image_url",
      image_url: "data:image/jpeg;base64,{base64Image}",
    },
  ],
});

const multiModalPrompt = ChatPromptTemplate.fromMessages([
  ["system", "You have 20:20 vision! Describe the user's image."],
  langchainLogoMessage,
  base64ImageMessage,
]);

// Substitute both template variables to produce the final prompt value.
const formattedPrompt = await multiModalPrompt.invoke({
  imageURL,
  base64Image,
});

console.log(JSON.stringify(formattedPrompt, null, 2));
/**
 {
  "kwargs": {
    "messages": [
      {
        "kwargs": {
          "content": "You have 20:20 vision! Describe the user's image.",
        }
      },
      {
        "kwargs": {
          "content": [
            {
              "type": "image_url",
              "image_url": {
                "url": "https://avatars.githubusercontent.com/u/126733545?s=200&v=4",
                "detail": "high"
              }
            }
          ],
        }
      },
      {
        "kwargs": {
          "content": [
            {
              "type": "image_url",
              "image_url": "data:image/png;base64,/9j/4AAQSkZJRgABAQEBLAEsAAD/4Q..."
            }
          ],
        }
      }
    ]
  }
}
 */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/bytes_output_parser.ts
import { ChatOpenAI } from "@langchain/openai";
import { BytesOutputParser } from "@langchain/core/output_parsers";

/**
 * Stream a chat model's output as raw bytes inside an HTTP `Response`.
 */
const handler = async () => {
  const chatModel = new ChatOpenAI({ temperature: 0 });

  // BytesOutputParser converts each streamed chunk into a Uint8Array,
  // which `Response` can consume directly as a body stream.
  const byteStream = await chatModel
    .pipe(new BytesOutputParser())
    .stream("Hello there!");

  return new Response(byteStream, {
    headers: {
      "Content-Type": "text/plain; charset=utf-8",
    },
  });
};

await handler();
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/semantic_similarity_example_selector_from_existing.ts
// Ephemeral, in-memory vector store for demo purposes import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { PromptTemplate, FewShotPromptTemplate } from "@langchain/core/prompts"; import { SemanticSimilarityExampleSelector } from "@langchain/core/example_selectors"; const embeddings = new OpenAIEmbeddings(); const memoryVectorStore = new MemoryVectorStore(embeddings); const examples = [ { query: "healthy food", output: `galbi`, }, { query: "healthy food", output: `schnitzel`, }, { query: "foo", output: `bar`, }, ]; const exampleSelector = new SemanticSimilarityExampleSelector({ vectorStore: memoryVectorStore, k: 2, // Only embed the "query" key of each example inputKeys: ["query"], }); for (const example of examples) { // Format and add an example to the underlying vector store await exampleSelector.addExample(example); } // Create a prompt template that will be used to format the examples. const examplePrompt = PromptTemplate.fromTemplate(`<example> <user_input> {query} </user_input> <output> {output} </output> </example>`); // Create a FewShotPromptTemplate that will use the example selector. const dynamicPrompt = new FewShotPromptTemplate({ // We provide an ExampleSelector instead of examples. exampleSelector, examplePrompt, prefix: `Answer the user's question, using the below examples as reference:`, suffix: "User question: {query}", inputVariables: ["query"], }); const formattedValue = await dynamicPrompt.format({ query: "What is a healthy food?", }); console.log(formattedValue); /* Answer the user's question, using the below examples as reference: <example> <user_input> healthy </user_input> <output> galbi </output> </example> <example> <user_input> healthy </user_input> <output> schnitzel </output> </example> User question: What is a healthy food? 
*/ const model = new ChatOpenAI({}); const chain = dynamicPrompt.pipe(model); const result = await chain.invoke({ query: "What is a healthy food?" }); console.log(result); /* AIMessage { content: 'A healthy food can be galbi or schnitzel.', additional_kwargs: { function_call: undefined } } */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/prompts.ts
// Tour of the basic prompt template APIs: PromptTemplate construction,
// `fromTemplate`, ChatPromptTemplate, and string vs message rendering.
import {
  ChatPromptTemplate,
  HumanMessagePromptTemplate,
  PromptTemplate,
  SystemMessagePromptTemplate,
} from "@langchain/core/prompts";

export const run = async () => {
  // A `PromptTemplate` consists of a template string and a list of input variables.
  const template = "What is a good name for a company that makes {product}?";
  const promptA = new PromptTemplate({ template, inputVariables: ["product"] });

  // We can use the `format` method to format the template with the given input values.
  const responseA = await promptA.format({ product: "colorful socks" });
  console.log({ responseA });
  /*
  {
    responseA: 'What is a good name for a company that makes colorful socks?'
  }
  */

  // We can also use the `fromTemplate` method to create a `PromptTemplate` object.
  // Input variables are inferred from the template, so none need to be listed.
  const promptB = PromptTemplate.fromTemplate(
    "What is a good name for a company that makes {product}?"
  );
  const responseB = await promptB.format({ product: "colorful socks" });
  console.log({ responseB });
  /*
  {
    responseB: 'What is a good name for a company that makes colorful socks?'
  }
  */

  // For chat models, we provide a `ChatPromptTemplate` class that can be used to format chat prompts.
  const chatPrompt = ChatPromptTemplate.fromMessages([
    SystemMessagePromptTemplate.fromTemplate(
      "You are a helpful assistant that translates {input_language} to {output_language}."
    ),
    HumanMessagePromptTemplate.fromTemplate("{text}"),
  ]);

  // The result can be formatted as a string using the `format` method.
  const responseC = await chatPrompt.format({
    input_language: "English",
    output_language: "French",
    text: "I love programming.",
  });
  console.log({ responseC });
  /*
  {
    responseC: '[{"text":"You are a helpful assistant that translates English to French."},{"text":"I love programming."}]'
  }
  */

  // The result can also be formatted as a list of `ChatMessage` objects by returning a `PromptValue` object and calling the `toChatMessages` method.
  // More on this below.
  const responseD = await chatPrompt.formatPromptValue({
    input_language: "English",
    output_language: "French",
    text: "I love programming.",
  });
  const messages = responseD.toChatMessages();
  console.log({ messages });
  /*
  {
    messages: [
        SystemMessage {
          text: 'You are a helpful assistant that translates English to French.'
        },
        HumanMessage { text: 'I love programming.' }
      ]
  }
  */
};
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/structured_parser_zod.ts
import { z } from "zod";
import { OpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { StructuredOutputParser } from "@langchain/core/output_parsers";

// Define the desired output shape with zod via `fromZodSchema`; the parser
// derives both the prompt's format instructions and the runtime validation
// from this single schema.
const parser = StructuredOutputParser.fromZodSchema(
  z.object({
    answer: z.string().describe("answer to the user's question"),
    sources: z
      .array(z.string())
      .describe("sources used to answer the question, should be websites."),
  })
);

// JSON-schema-based instructions injected into the prompt below.
const formatInstructions = parser.getFormatInstructions();

const prompt = new PromptTemplate({
  template:
    "Answer the users question as best as possible.\n{format_instructions}\n{question}",
  inputVariables: ["question"],
  // Bound once here so callers only need to supply `question`.
  partialVariables: { format_instructions: formatInstructions },
});

const model = new OpenAI({ temperature: 0 });

const input = await prompt.format({
  question: "What is the capital of France?",
});
const response = await model.invoke(input);

console.log(input);
/*
Answer the users question as best as possible.
The output should be formatted as a JSON instance that conforms to the JSON
schema below (example and schema text elided here; it instructs the model to
emit an object with `answer` (string) and `sources` (string[])).
What is the capital of France?
*/

console.log(response);
/*
{"answer": "Paris", "sources": ["https://en.wikipedia.org/wiki/Paris"]}
*/

// Validate and parse the raw completion back into the typed object; throws
// if the completion does not satisfy the zod schema.
console.log(await parser.parse(response));
/*
{ answer: 'Paris', sources: [ 'https://en.wikipedia.org/wiki/Paris' ] }
*/
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/prompts/partial.ts
import { PromptTemplate } from "@langchain/core/prompts"; export const run = async () => { // The `partial` method returns a new `PromptTemplate` object that can be used to format the prompt with only some of the input variables. const promptA = new PromptTemplate({ template: "{foo}{bar}", inputVariables: ["foo", "bar"], }); const partialPromptA = await promptA.partial({ foo: "foo" }); console.log(await partialPromptA.format({ bar: "bar" })); // foobar // You can also explicitly specify the partial variables when creating the `PromptTemplate` object. const promptB = new PromptTemplate({ template: "{foo}{bar}", inputVariables: ["foo"], partialVariables: { bar: "bar" }, }); console.log(await promptB.format({ foo: "foo" })); // foobar // You can also use partial formatting with function inputs instead of string inputs. const promptC = new PromptTemplate({ template: "Tell me a {adjective} joke about the day {date}", inputVariables: ["adjective", "date"], }); const partialPromptC = await promptC.partial({ date: () => new Date().toLocaleDateString(), }); console.log(await partialPromptC.format({ adjective: "funny" })); // Tell me a funny joke about the day 3/22/2023 const promptD = new PromptTemplate({ template: "Tell me a {adjective} joke about the day {date}", inputVariables: ["adjective"], partialVariables: { date: () => new Date().toLocaleDateString() }, }); console.log(await promptD.format({ adjective: "funny" })); // Tell me a funny joke about the day 3/22/2023 };
0
lc_public_repos/langchainjs/examples/src/prompts
lc_public_repos/langchainjs/examples/src/prompts/quickstart/basic_fstring.ts
import { PromptTemplate } from "@langchain/core/prompts"; // If a template is passed in, the input variables are inferred automatically from the template. const prompt = PromptTemplate.fromTemplate( `You are a naming consultant for new companies. What is a good name for a company that makes {product}?` ); const formattedPrompt = await prompt.format({ product: "colorful socks", }); /* You are a naming consultant for new companies. What is a good name for a company that makes colorful socks? */
0
lc_public_repos/langchainjs/examples/src/prompts
lc_public_repos/langchainjs/examples/src/prompts/quickstart/basic_mustache.ts
import { PromptTemplate } from "@langchain/core/prompts"; // If a template is passed in, the input variables are inferred automatically from the template. const prompt = PromptTemplate.fromTemplate( `You are a naming consultant for new companies. What is a good name for a company that makes {{product}}?`, { templateFormat: "mustache", } ); const formattedPrompt = await prompt.format({ product: "colorful socks", }); /* You are a naming consultant for new companies. What is a good name for a company that makes colorful socks? */
0
lc_public_repos/langchainjs/examples/src/prompts
lc_public_repos/langchainjs/examples/src/prompts/quickstart/input_vars_mustache.ts
import { PromptTemplate } from "@langchain/core/prompts"; const template = "Tell me a {{adjective}} joke about {{content}}."; const promptTemplate = PromptTemplate.fromTemplate(template, { templateFormat: "mustache", }); console.log(promptTemplate.inputVariables); // ['adjective', 'content'] const formattedPromptTemplate = await promptTemplate.format({ adjective: "funny", content: "chickens", }); console.log(formattedPromptTemplate); // "Tell me a funny joke about chickens."
0
lc_public_repos/langchainjs/examples/src/prompts
lc_public_repos/langchainjs/examples/src/prompts/quickstart/input_vars_fstring.ts
import { PromptTemplate } from "@langchain/core/prompts"; const template = "Tell me a {adjective} joke about {content}."; const promptTemplate = PromptTemplate.fromTemplate(template); console.log(promptTemplate.inputVariables); // ['adjective', 'content'] const formattedPromptTemplate = await promptTemplate.format({ adjective: "funny", content: "chickens", }); console.log(formattedPromptTemplate); // "Tell me a funny joke about chickens."
0
lc_public_repos/langchainjs/examples/src/prompts
lc_public_repos/langchainjs/examples/src/prompts/quickstart/hard_coded_mustache.ts
import { PromptTemplate } from "@langchain/core/prompts"; // An example prompt with no input variables const noInputPrompt = new PromptTemplate({ inputVariables: [], template: "Tell me a joke.", }); const formattedNoInputPrompt = await noInputPrompt.format({}); console.log(formattedNoInputPrompt); // "Tell me a joke." // An example prompt with one input variable const oneInputPrompt = new PromptTemplate({ inputVariables: ["adjective"], template: "Tell me a {{adjective}} joke.", templateFormat: "mustache", }); const formattedOneInputPrompt = await oneInputPrompt.format({ adjective: "funny", }); console.log(formattedOneInputPrompt); // "Tell me a funny joke." // An example prompt with multiple input variables const multipleInputPrompt = new PromptTemplate({ inputVariables: ["adjective", "content"], template: "Tell me a {{adjective}} joke about {{content}}.", templateFormat: "mustache", }); const formattedMultipleInputPrompt = await multipleInputPrompt.format({ adjective: "funny", content: "chickens", }); console.log(formattedMultipleInputPrompt); // "Tell me a funny joke about chickens."
0
lc_public_repos/langchainjs/examples/src/prompts
lc_public_repos/langchainjs/examples/src/prompts/quickstart/hard_coded_fstring.ts
import { PromptTemplate } from "@langchain/core/prompts"; // An example prompt with no input variables const noInputPrompt = new PromptTemplate({ inputVariables: [], template: "Tell me a joke.", }); const formattedNoInputPrompt = await noInputPrompt.format({}); console.log(formattedNoInputPrompt); // "Tell me a joke." // An example prompt with one input variable const oneInputPrompt = new PromptTemplate({ inputVariables: ["adjective"], template: "Tell me a {adjective} joke.", }); const formattedOneInputPrompt = await oneInputPrompt.format({ adjective: "funny", }); console.log(formattedOneInputPrompt); // "Tell me a funny joke." // An example prompt with multiple input variables const multipleInputPrompt = new PromptTemplate({ inputVariables: ["adjective", "content"], template: "Tell me a {adjective} joke about {content}.", }); const formattedMultipleInputPrompt = await multipleInputPrompt.format({ adjective: "funny", content: "chickens", }); console.log(formattedMultipleInputPrompt); // "Tell me a funny joke about chickens."
0
lc_public_repos/langchainjs/examples/src/prompts
lc_public_repos/langchainjs/examples/src/prompts/quickstart/test.ts
import { ChatPromptTemplate } from "@langchain/core/prompts"; const systemTemplate = "You are a helpful assistant that translates {input_language} to {output_language}."; const humanTemplate = "{text}"; const chatPrompt = ChatPromptTemplate.fromMessages([ ["system", systemTemplate], ["human", humanTemplate], ]); // Format the messages const formattedChatPrompt = await chatPrompt.formatMessages({ input_language: "English", output_language: "French", text: "I love programming.", }); console.log(formattedChatPrompt);
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/cache/momento.ts
import { OpenAI } from "@langchain/openai"; import { CacheClient, Configurations, CredentialProvider, } from "@gomomento/sdk"; import { MomentoCache } from "@langchain/community/caches/momento"; // See https://github.com/momentohq/client-sdk-javascript for connection options const client = new CacheClient({ configuration: Configurations.Laptop.v1(), credentialProvider: CredentialProvider.fromEnvironmentVariable({ environmentVariableName: "MOMENTO_API_KEY", }), defaultTtlSeconds: 60 * 60 * 24, }); const cache = await MomentoCache.fromProps({ client, cacheName: "langchain", }); const model = new OpenAI({ cache });
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/cache/upstash_redis_advanced.ts
import { Redis } from "@upstash/redis"; import https from "https"; import { OpenAI } from "@langchain/openai"; import { UpstashRedisCache } from "@langchain/community/caches/upstash_redis"; // const client = new Redis({ // url: process.env.UPSTASH_REDIS_REST_URL!, // token: process.env.UPSTASH_REDIS_REST_TOKEN!, // agent: new https.Agent({ keepAlive: true }), // }); // Or simply call Redis.fromEnv() to automatically load the UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN environment variables. const client = Redis.fromEnv({ agent: new https.Agent({ keepAlive: true }), }); const cache = new UpstashRedisCache({ client }); const model = new OpenAI({ cache });
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/cache/cloudflare_kv.ts
import type { KVNamespace } from "@cloudflare/workers-types"; import { OpenAI } from "@langchain/openai"; import { CloudflareKVCache } from "@langchain/cloudflare"; export interface Env { KV_NAMESPACE: KVNamespace; OPENAI_API_KEY: string; } export default { async fetch(_request: Request, env: Env) { try { const cache = new CloudflareKVCache(env.KV_NAMESPACE); const model = new OpenAI({ cache, model: "gpt-3.5-turbo-instruct", apiKey: env.OPENAI_API_KEY, }); const response = await model.invoke("How are you today?"); return new Response(JSON.stringify(response), { headers: { "content-type": "application/json" }, }); } catch (err: any) { console.log(err.message); return new Response(err.message, { status: 500 }); } }, };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/cache/upstash_redis.ts
import { OpenAI } from "@langchain/openai"; import { UpstashRedisCache } from "@langchain/community/caches/upstash_redis"; // See https://docs.upstash.com/redis/howto/connectwithupstashredis#quick-start for connection options const cache = new UpstashRedisCache({ config: { url: "UPSTASH_REDIS_REST_URL", token: "UPSTASH_REDIS_REST_TOKEN", }, }); const model = new OpenAI({ cache });
0
lc_public_repos/langchainjs/examples/src/cache
lc_public_repos/langchainjs/examples/src/cache/chat_models/momento.ts
import { ChatOpenAI } from "@langchain/openai"; import { CacheClient, Configurations, CredentialProvider, } from "@gomomento/sdk"; import { MomentoCache } from "@langchain/community/caches/momento"; // See https://github.com/momentohq/client-sdk-javascript for connection options const client = new CacheClient({ configuration: Configurations.Laptop.v1(), credentialProvider: CredentialProvider.fromEnvironmentVariable({ environmentVariableName: "MOMENTO_API_KEY", }), defaultTtlSeconds: 60 * 60 * 24, }); const cache = await MomentoCache.fromProps({ client, cacheName: "langchain", }); const model = new ChatOpenAI({ cache });
0
lc_public_repos/langchainjs/examples/src/cache
lc_public_repos/langchainjs/examples/src/cache/chat_models/upstash_redis_advanced.ts
import { Redis } from "@upstash/redis"; import https from "https"; import { ChatOpenAI } from "@langchain/openai"; import { UpstashRedisCache } from "@langchain/community/caches/upstash_redis"; // const client = new Redis({ // url: process.env.UPSTASH_REDIS_REST_URL!, // token: process.env.UPSTASH_REDIS_REST_TOKEN!, // agent: new https.Agent({ keepAlive: true }), // }); // Or simply call Redis.fromEnv() to automatically load the UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN environment variables. const client = Redis.fromEnv({ agent: new https.Agent({ keepAlive: true }), }); const cache = new UpstashRedisCache({ client }); const model = new ChatOpenAI({ cache });
0
lc_public_repos/langchainjs/examples/src/cache
lc_public_repos/langchainjs/examples/src/cache/chat_models/cloudflare_kv.ts
import type { KVNamespace } from "@cloudflare/workers-types"; import { ChatOpenAI } from "@langchain/openai"; import { CloudflareKVCache } from "@langchain/cloudflare"; export interface Env { KV_NAMESPACE: KVNamespace; OPENAI_API_KEY: string; } export default { async fetch(_request: Request, env: Env) { try { const cache = new CloudflareKVCache(env.KV_NAMESPACE); const model = new ChatOpenAI({ cache, model: "gpt-3.5-turbo", apiKey: env.OPENAI_API_KEY, }); const response = await model.invoke("How are you today?"); return new Response(JSON.stringify(response), { headers: { "content-type": "application/json" }, }); } catch (err: any) { console.log(err.message); return new Response(err.message, { status: 500 }); } }, };
0
lc_public_repos/langchainjs/examples/src/cache
lc_public_repos/langchainjs/examples/src/cache/chat_models/upstash_redis.ts
import { ChatOpenAI } from "@langchain/openai"; import { UpstashRedisCache } from "@langchain/community/caches/upstash_redis"; // See https://docs.upstash.com/redis/howto/connectwithupstashredis#quick-start for connection options const cache = new UpstashRedisCache({ config: { url: "UPSTASH_REDIS_REST_URL", token: "UPSTASH_REDIS_REST_TOKEN", }, }); const model = new ChatOpenAI({ cache });
0
lc_public_repos/langchainjs/examples/src/cache
lc_public_repos/langchainjs/examples/src/cache/chat_models/redis.ts
import { ChatOpenAI } from "@langchain/openai"; import { Redis } from "ioredis"; import { RedisCache } from "@langchain/community/caches/ioredis"; const client = new Redis("redis://localhost:6379"); const cache = new RedisCache(client, { ttl: 60, // Optional key expiration value }); const model = new ChatOpenAI({ cache }); const response1 = await model.invoke("Do something random!"); console.log(response1); /* AIMessage { content: "Sure! I'll generate a random number for you: 37", additional_kwargs: {} } */ const response2 = await model.invoke("Do something random!"); console.log(response2); /* AIMessage { content: "Sure! I'll generate a random number for you: 37", additional_kwargs: {} } */ await client.disconnect();
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/local_retrieval_qa/load_documents.ts
import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { HuggingFaceTransformersEmbeddings } from "@langchain/community/embeddings/hf_transformers"; const loader = new CheerioWebBaseLoader( "https://lilianweng.github.io/posts/2023-06-23-agent/" ); const docs = await loader.load(); const splitter = new RecursiveCharacterTextSplitter({ chunkOverlap: 0, chunkSize: 500, }); const splitDocuments = await splitter.splitDocuments(docs); const vectorstore = await HNSWLib.fromDocuments( splitDocuments, new HuggingFaceTransformersEmbeddings() ); const retrievedDocs = await vectorstore.similaritySearch( "What are the approaches to Task Decomposition?" ); console.log(retrievedDocs[0]); /* Document { pageContent: 'Task decomposition can be done (1) by LLM with simple prompting like "Steps for XYZ.\\n1.", "What are the subgoals for achieving XYZ?", (2) by using task-specific instructions; e.g. "Write a story outline." for writing a novel, or (3) with human inputs.', metadata: { source: 'https://lilianweng.github.io/posts/2023-06-23-agent/', loc: { lines: [Object] } } } */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/local_retrieval_qa/chain.ts
import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { Ollama } from "@langchain/community/llms/ollama";
import { HuggingFaceTransformersEmbeddings } from "@langchain/community/embeddings/hf_transformers";
import { formatDocumentsAsString } from "langchain/util/document";
import { PromptTemplate } from "@langchain/core/prompts";
import {
  RunnableSequence,
  RunnablePassthrough,
} from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";

// Load and chunk the source article.
const loader = new CheerioWebBaseLoader(
  "https://lilianweng.github.io/posts/2023-06-23-agent/"
);
const docs = await loader.load();

const splitter = new RecursiveCharacterTextSplitter({
  chunkOverlap: 0,
  chunkSize: 500,
});
const splitDocuments = await splitter.splitDocuments(docs);

// Local embeddings + in-memory HNSW index, exposed as a retriever.
const vectorstore = await HNSWLib.fromDocuments(
  splitDocuments,
  new HuggingFaceTransformersEmbeddings()
);
const retriever = vectorstore.asRetriever();

// Prompt restricting the model to the retrieved context.
const prompt = PromptTemplate.fromTemplate(`Answer the question based only on the following context: {context} Question: {question}`);

// Llama 2 7b wrapped by Ollama (an Ollama server must be running locally).
const model = new Ollama({
  baseUrl: "http://localhost:11434",
  model: "llama2",
});

// Pipeline: map the input question to {context, question} (context comes from
// the retriever, flattened to a string), fill the prompt, run the model, and
// coerce the output to a plain string.
const chain = RunnableSequence.from([
  {
    context: retriever.pipe(formatDocumentsAsString),
    question: new RunnablePassthrough(),
  },
  prompt,
  model,
  new StringOutputParser(),
]);

const result = await chain.invoke(
  "What are the approaches to Task Decomposition?"
);
console.log(result);
/*
  Based on the provided context, there are three approaches to task decomposition:
  1. Using simple prompts like "Steps for XYZ" or "What are the subgoals for achieving XYZ?" to elicit a list of tasks from a language model (LLM).
  2. Providing task-specific instructions, such as "Write a story outline" for writing a novel, to guide the LLM in decomposing the task into smaller subtasks.
  3. Incorporating human inputs to help the LLM learn and improve its decomposition abilities over time.
*/
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/local_retrieval_qa/qa_chain.ts
import { RetrievalQAChain, loadQAStuffChain } from "langchain/chains";
import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { Ollama } from "@langchain/community/llms/ollama";
import { HuggingFaceTransformersEmbeddings } from "@langchain/community/embeddings/hf_transformers";
import { PromptTemplate } from "@langchain/core/prompts";

// Load and chunk the source article.
const loader = new CheerioWebBaseLoader(
  "https://lilianweng.github.io/posts/2023-06-23-agent/"
);
const docs = await loader.load();

const splitter = new RecursiveCharacterTextSplitter({
  chunkOverlap: 0,
  chunkSize: 500,
});
const splitDocuments = await splitter.splitDocuments(docs);

// Local embeddings + in-memory HNSW index, exposed as a retriever.
const vectorstore = await HNSWLib.fromDocuments(
  splitDocuments,
  new HuggingFaceTransformersEmbeddings()
);
const retriever = vectorstore.asRetriever();

// Llama 2 7b wrapped by Ollama (an Ollama server must be running locally).
const model = new Ollama({
  baseUrl: "http://localhost:11434",
  model: "llama2",
});

// Custom QA prompt fed to the "stuff" documents chain below.
const template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum and keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer. {context} Question: {question} Helpful Answer:`;
const QA_CHAIN_PROMPT = new PromptTemplate({
  inputVariables: ["context", "question"],
  template,
});

// Create a retrieval QA chain that uses a Llama 2-powered QA stuff chain with
// the custom prompt, and also returns the retrieved source documents.
const chain = new RetrievalQAChain({
  combineDocumentsChain: loadQAStuffChain(model, { prompt: QA_CHAIN_PROMPT }),
  retriever,
  returnSourceDocuments: true,
  inputKey: "question",
});

const response = await chain.invoke({
  question: "What are the approaches to Task Decomposition?",
});
console.log(response);
/*
  {
    text: 'Thanks for asking! There are several approaches to task decomposition ...
      (model lists LLM prompting, task-specific instructions, and human inputs)',
    sourceDocuments: [
      Document { pageContent: 'Task decomposition can be done (1) by LLM with simple prompting ...', metadata: [Object] },
      Document { pageContent: 'Fig. 1. Overview of a LLM-powered autonomous agent system. ...', metadata: [Object] },
      Document { pageContent: 'Challenges in long-term planning and task decomposition: ...', metadata: [Object] },
      Document { pageContent: 'Tree of Thoughts (Yao et al. 2023) extends CoT ...', metadata: [Object] }
    ]
  }
*/
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/sql/quickstart_chain.ts
import { ChatOpenAI } from "@langchain/openai"; import { createSqlQueryChain } from "langchain/chains/sql_db"; import { SqlDatabase } from "langchain/sql_db"; import { DataSource } from "typeorm"; const datasource = new DataSource({ type: "sqlite", database: "../../../../Chinook.db", }); const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource, }); const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 }); const chain = await createSqlQueryChain({ llm, db, dialect: "sqlite", }); const response = await chain.invoke({ question: "How many employees are there?", }); console.log("response", response); /** response SELECT COUNT(*) FROM "Employee" */ console.log("db run result", await db.run(response)); /** db run result [{"COUNT(*)":8}] */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/sql/large_db.ts
import { ChatPromptTemplate } from "@langchain/core/prompts";
import {
  RunnablePassthrough,
  RunnableSequence,
} from "@langchain/core/runnables";
import { ChatOpenAI } from "@langchain/openai";
import { createSqlQueryChain } from "langchain/chains/sql_db";
import { SqlDatabase } from "langchain/sql_db";
import { DataSource } from "typeorm";
import { z } from "zod";

// Chinook sample DB over SQLite.
const datasource = new DataSource({
  type: "sqlite",
  database: "../../../../Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});
const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 });

// Structured-output schema: the model must return a list of table names.
const Table = z.object({
  names: z.array(z.string()).describe("Names of tables in SQL database"),
});

// First approach: ask the model directly which tables are relevant.
const tableNames = db.allTables.map((t) => t.tableName).join("\n");
const system = `Return the names of ALL the SQL tables that MIGHT be relevant to the user question. The tables are: ${tableNames} Remember to include ALL POTENTIALLY RELEVANT tables, even if you're not sure that they're needed.`;
const prompt = ChatPromptTemplate.fromMessages([
  ["system", system],
  ["human", "{input}"],
]);
const tableChain = prompt.pipe(llm.withStructuredOutput(Table));
console.log(
  await tableChain.invoke({
    input: "What are all the genres of Alanis Morisette songs?",
  })
);
/** { names: [ 'Artist', 'Track', 'Genre' ] } */
// LangSmith trace:
// https://smith.langchain.com/public/5ca0c91e-4a40-44ef-8c45-9a4247dc474c/r

/**
 * This works pretty well — except, as we'll see below, we actually need a few
 * other tables too, which is hard for the model to know from the question
 * alone. So simplify its job: have it pick between the categories "Music" and
 * "Business", then expand each category into its tables in code.
 */
const prompt2 = ChatPromptTemplate.fromMessages([
  [
    "system",
    `Return the names of the SQL tables that are relevant to the user question. The tables are: Music Business`,
  ],
  ["human", "{input}"],
]);
const categoryChain = prompt2.pipe(llm.withStructuredOutput(Table));
console.log(
  await categoryChain.invoke({
    input: "What are all the genres of Alanis Morisette songs?",
  })
);
/** { names: [ 'Music' ] } */
// LangSmith trace:
// https://smith.langchain.com/public/12b62e78-bfbe-42ff-86f2-ad738a476554/r

// Expand the predicted categories into concrete table names.
const getTables = (categories: z.infer<typeof Table>): Array<string> => {
  let tables: Array<string> = [];
  for (const category of categories.names) {
    if (category === "Music") {
      tables = tables.concat([
        "Album",
        "Artist",
        "Genre",
        "MediaType",
        "Playlist",
        "PlaylistTrack",
        "Track",
      ]);
    } else if (category === "Business") {
      tables = tables.concat([
        "Customer",
        "Employee",
        "Invoice",
        "InvoiceLine",
      ]);
    }
  }
  return tables;
};

const tableChain2 = categoryChain.pipe(getTables);
console.log(
  await tableChain2.invoke({
    input: "What are all the genres of Alanis Morisette songs?",
  })
);
/** [ 'Album', 'Artist', 'Genre', 'MediaType', 'Playlist', 'PlaylistTrack', 'Track' ] */
// LangSmith trace:
// https://smith.langchain.com/public/e78c10aa-e923-4a24-b0c8-f7a6f5d316ce/r

// Now combine table selection with createSqlQueryChain, which accepts a
// `tableNamesToUse` list to limit which table schemas go into the prompt.
const queryChain = await createSqlQueryChain({
  llm,
  db,
  dialect: "sqlite",
});
// Adapt {question} to the {input} key expected by tableChain2.
const tableChain3 = RunnableSequence.from([
  {
    input: (i: { question: string }) => i.question,
  },
  tableChain2,
]);
const fullChain = RunnablePassthrough.assign({
  tableNamesToUse: tableChain3,
}).pipe(queryChain);

const query = await fullChain.invoke({
  question: "What are all the genres of Alanis Morisette songs?",
});
console.log(query);
/**
SELECT DISTINCT "Genre"."Name" FROM "Genre"
  JOIN "Track" ON "Genre"."GenreId" = "Track"."GenreId"
  JOIN "Album" ON "Track"."AlbumId" = "Album"."AlbumId"
  JOIN "Artist" ON "Album"."ArtistId" = "Artist"."ArtistId"
  WHERE "Artist"."Name" = 'Alanis Morissette' LIMIT 5;
 */
console.log(await db.run(query));
/** [{"Name":"Rock"}] */
// LangSmith trace:
// https://smith.langchain.com/public/c7d576d0-3462-40db-9edc-5492f10555bf/r

// We might rephrase our question slightly to remove redundancy in the answer.
const query2 = await fullChain.invoke({
  question: "What is the set of all unique genres of Alanis Morisette songs?",
});
console.log(query2);
/**
SELECT DISTINCT Genre.Name FROM Genre
  JOIN Track ON Genre.GenreId = Track.GenreId
  JOIN Album ON Track.AlbumId = Album.AlbumId
  JOIN Artist ON Album.ArtistId = Artist.ArtistId
  WHERE Artist.Name = 'Alanis Morissette'
 */
console.log(await db.run(query2));
/** [{"Name":"Rock"}] */
// LangSmith trace:
// https://smith.langchain.com/public/6e80087d-e930-4f22-9b40-f7edb95a2145/r
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/sql/db_check.ts
import { SqlDatabase } from "langchain/sql_db"; import { DataSource } from "typeorm"; const datasource = new DataSource({ type: "sqlite", database: "../../../../Chinook.db", }); const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource, }); console.log(db.allTables.map((t) => t.tableName)); /** [ 'Album', 'Artist', 'Customer', 'Employee', 'Genre', 'Invoice', 'InvoiceLine', 'MediaType', 'Playlist', 'PlaylistTrack', 'Track' ] */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/sql/quickstart_answer_question.ts
import { ChatOpenAI } from "@langchain/openai"; import { createSqlQueryChain } from "langchain/chains/sql_db"; import { SqlDatabase } from "langchain/sql_db"; import { DataSource } from "typeorm"; import { QuerySqlTool } from "langchain/tools/sql"; import { PromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; const datasource = new DataSource({ type: "sqlite", database: "../../../../Chinook.db", }); const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource, }); const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 }); const executeQuery = new QuerySqlTool(db); const writeQuery = await createSqlQueryChain({ llm, db, dialect: "sqlite", }); const answerPrompt = PromptTemplate.fromTemplate(`Given the following user question, corresponding SQL query, and SQL result, answer the user question. Question: {question} SQL Query: {query} SQL Result: {result} Answer: `); const answerChain = answerPrompt.pipe(llm).pipe(new StringOutputParser()); const chain = RunnableSequence.from([ RunnablePassthrough.assign({ query: writeQuery }).assign({ result: (i: { query: string }) => executeQuery.invoke(i.query), }), answerChain, ]); console.log(await chain.invoke({ question: "How many employees are there" })); /** There are 8 employees. */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/sql/quickstart_execute_sql.ts
import { ChatOpenAI } from "@langchain/openai";
import { createSqlQueryChain } from "langchain/chains/sql_db";
import { SqlDatabase } from "langchain/sql_db";
import { DataSource } from "typeorm";
import { QuerySqlTool } from "langchain/tools/sql";

// Open the local Chinook SQLite sample database through TypeORM.
const sqliteSource = new DataSource({
  type: "sqlite",
  database: "../../../../Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: sqliteSource,
});

const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 });

// A question -> SQL writer, plus a tool that executes the SQL it produces.
const queryWriter = await createSqlQueryChain({
  llm,
  db,
  dialect: "sqlite",
});
const queryRunner = new QuerySqlTool(db);

// Compose the two steps and run an example question end to end.
const chain = queryWriter.pipe(queryRunner);
const answer = await chain.invoke({ question: "How many employees are there" });
console.log(answer);
/**
[{"COUNT(*)":8}]
 */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/sql/query_checking.ts
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatPromptTemplate, PromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { ChatOpenAI } from "@langchain/openai";
import { createSqlQueryChain } from "langchain/chains/sql_db";
import { SqlDatabase } from "langchain/sql_db";
import { DataSource } from "typeorm";

// Connect to the local Chinook SQLite sample database.
const datasource = new DataSource({
  type: "sqlite",
  database: "../../../../Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});
const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 });
// Base chain: natural-language question -> SQL query.
const chain = await createSqlQueryChain({
  llm,
  db,
  dialect: "sqlite",
});

/**
 * And we want to validate its outputs. We can do so by extending the chain with a second prompt and model call:
 */

// Validation prompt: the model reviews a generated query for common SQL
// mistakes and rewrites it if any are found.
const SYSTEM_PROMPT = `Double check the user's {dialect} query for common mistakes, including:
- Using NOT IN with NULL values
- Using UNION when UNION ALL should have been used
- Using BETWEEN for exclusive ranges
- Data type mismatch in predicates
- Properly quoting identifiers
- Using the correct number of arguments for functions
- Casting to the correct data type
- Using the proper columns for joins

If there are any of the above mistakes, rewrite the query. If there are no mistakes, just reproduce the original query.

Output the final SQL query only.`;
const prompt = await ChatPromptTemplate.fromMessages([
  ["system", SYSTEM_PROMPT],
  ["human", "{query}"],
]).partial({ dialect: "sqlite" });
const validationChain = prompt.pipe(llm).pipe(new StringOutputParser());

// Two-call pipeline: first generate the query, then validate/fix it.
const fullChain = RunnableSequence.from([
  {
    query: async (i: { question: string }) => chain.invoke(i),
  },
  validationChain,
]);
const query = await fullChain.invoke({
  question: "What's the average Invoice from an American customer whose Fax is missing since 2003 but before 2010",
});
console.log("query", query);
/**
query SELECT AVG("Total") FROM "Invoice" WHERE "CustomerId" IN (SELECT "CustomerId" FROM "Customer" WHERE "Country" = 'USA' AND "Fax" IS NULL) AND "InvoiceDate" BETWEEN '2003-01-01 00:00:00' AND '2009-12-31 23:59:59'
 */
console.log("db query results", await db.run(query));
/**
db query results [{"AVG(\"Total\")":6.632999999999998}]
 */

// -------------

// You can see a LangSmith trace of the above chain here:
// https://smith.langchain.com/public/d1131395-8477-47cd-8f74-e0c5491ea956/r

// -------------

// The obvious downside of this approach is that we need to make two model calls instead of one to generate our query.
// To get around this we can try to perform the query generation and query check in a single model invocation:

// Combined prompt: the model drafts a query AND self-reviews it in one call,
// emitting both a first draft and a final answer.
const SYSTEM_PROMPT_2 = `You are a {dialect} expert. Given an input question, create a syntactically correct {dialect} query to run.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per {dialect}. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use date('now') function to get the current date, if the question involves "today".
Only use the following tables:
{table_info}

Write an initial draft of the query. Then double check the {dialect} query for common mistakes, including:
- Using NOT IN with NULL values
- Using UNION when UNION ALL should have been used
- Using BETWEEN for exclusive ranges
- Data type mismatch in predicates
- Properly quoting identifiers
- Using the correct number of arguments for functions
- Casting to the correct data type
- Using the proper columns for joins

Use format:

First draft: <<FIRST_DRAFT_QUERY>>
Final answer: <<FINAL_ANSWER_QUERY>>`;
const prompt2 = await PromptTemplate.fromTemplate(
  `System: ${SYSTEM_PROMPT_2}

Human: {input}`
).partial({ dialect: "sqlite" });

// Pull everything after the "Final answer: " marker out of the model output.
// NOTE(review): returns undefined if the model omits the marker — the example
// assumes the model always follows the requested format; confirm acceptable.
const parseFinalAnswer = (output: string): string =>
  output.split("Final answer: ")[1];

const chain2 = (
  await createSqlQueryChain({
    llm,
    db,
    prompt: prompt2,
    dialect: "sqlite",
  })
).pipe(parseFinalAnswer);

const query2 = await chain2.invoke({
  question: "What's the average Invoice from an American customer whose Fax is missing since 2003 but before 2010",
});
console.log("query2", query2);
/**
query2 SELECT AVG("Total") FROM "Invoice" WHERE "CustomerId" IN (SELECT "CustomerId" FROM "Customer" WHERE "Country" = 'USA' AND "Fax" IS NULL) AND date("InvoiceDate") BETWEEN date('2003-01-01') AND date('2009-12-31') LIMIT 5
 */
console.log("db query results", await db.run(query2));
/**
db query results [{"AVG(\"Total\")":6.632999999999998}]
 */

// -------------

// You can see a LangSmith trace of the above chain here:
// https://smith.langchain.com/public/e21d6146-eca9-4de6-a078-808fd09979ea/r

// -------------
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/sql/db.ts
import { SqlDatabase } from "langchain/sql_db";
import { DataSource } from "typeorm";

/**
 * Shared connection to the local Chinook SQLite sample database, reused by
 * the other examples in this directory.
 */
export const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: new DataSource({
    type: "sqlite",
    database: "../../../../Chinook.db",
  }),
});
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/sql/large_db_high_cardinality.ts
import { DocumentInterface } from "@langchain/core/documents"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { createSqlQueryChain } from "langchain/chains/sql_db"; import { SqlDatabase } from "langchain/sql_db"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { DataSource } from "typeorm"; const datasource = new DataSource({ type: "sqlite", database: "../../../../Chinook.db", }); const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource, }); async function queryAsList(database: any, query: string): Promise<string[]> { const res: Array<{ [key: string]: string }> = JSON.parse( await database.run(query) ) .flat() .filter((el: any) => el != null); const justValues: Array<string> = res.map((item) => Object.values(item)[0] .replace(/\b\d+\b/g, "") .trim() ); return justValues; } let properNouns: string[] = await queryAsList(db, "SELECT Name FROM Artist"); properNouns = properNouns.concat( await queryAsList(db, "SELECT Title FROM Album") ); properNouns = properNouns.concat( await queryAsList(db, "SELECT Name FROM Genre") ); console.log(properNouns.length); /** 647 */ console.log(properNouns.slice(0, 5)); /** [ 'AC/DC', 'Accept', 'Aerosmith', 'Alanis Morissette', 'Alice In Chains' ] */ // Now we can embed and store all of our values in a vector database: const vectorDb = await MemoryVectorStore.fromTexts( properNouns, {}, new OpenAIEmbeddings() ); const retriever = vectorDb.asRetriever(15); // And put together a query construction chain that first retrieves values from the database and inserts them into the prompt: const system = `You are a SQLite expert. Given an input question, create a syntactically correct SQLite query to run. Unless otherwise specified, do not return more than {top_k} rows. 
Here is the relevant table info: {table_info} Here is a non-exhaustive list of possible feature values. If filtering on a feature value make sure to check its spelling against this list first: {proper_nouns}`; const prompt = ChatPromptTemplate.fromMessages([ ["system", system], ["human", "{input}"], ]); const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 }); const queryChain = await createSqlQueryChain({ llm, db, prompt, dialect: "sqlite", }); const retrieverChain = RunnableSequence.from([ (i: { question: string }) => i.question, retriever, (docs: Array<DocumentInterface>) => docs.map((doc) => doc.pageContent).join("\n"), ]); const chain = RunnablePassthrough.assign({ proper_nouns: retrieverChain, }).pipe(queryChain); // To try out our chain, let’s see what happens when we try filtering on “elenis moriset”, a misspelling of Alanis Morissette, without and with retrieval: // Without retrieval const query = await queryChain.invoke({ question: "What are all the genres of Elenis Moriset songs?", proper_nouns: "", }); console.log("query", query); /** query SELECT DISTINCT Genre.Name FROM Genre JOIN Track ON Genre.GenreId = Track.GenreId JOIN Album ON Track.AlbumId = Album.AlbumId JOIN Artist ON Album.ArtistId = Artist.ArtistId WHERE Artist.Name = 'Elenis Moriset' LIMIT 5; */ console.log("db query results", await db.run(query)); /** db query results [] */ // ------------- // You can see a LangSmith trace of the above chain here: // https://smith.langchain.com/public/b153cb9b-6fbb-43a8-b2ba-4c86715183b9/r // ------------- // With retrieval: const query2 = await chain.invoke({ question: "What are all the genres of Elenis Moriset songs?", }); console.log("query2", query2); /** query2 SELECT DISTINCT Genre.Name FROM Genre JOIN Track ON Genre.GenreId = Track.GenreId JOIN Album ON Track.AlbumId = Album.AlbumId JOIN Artist ON Album.ArtistId = Artist.ArtistId WHERE Artist.Name = 'Alanis Morissette'; */ console.log("db query results", await db.run(query2)); /** db query 
results [{"Name":"Rock"}] */ // ------------- // You can see a LangSmith trace of the above chain here: // https://smith.langchain.com/public/2f4f0e37-3b7f-47b5-837c-e2952489cac0/r // -------------
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/agents/high_cardinality_columns.ts
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents";
import { SqlToolkit } from "langchain/agents/toolkits/sql";
import { SqlDatabase } from "langchain/sql_db";
import { Tool } from "@langchain/core/tools";
import { createRetrieverTool } from "langchain/tools/retriever";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { DataSource } from "typeorm";

// Connect to the local Chinook SQLite sample database.
const datasource = new DataSource({
  type: "sqlite",
  database: "../../../../Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});

// Run a single-column SELECT and return the values as trimmed strings;
// bare numbers embedded in a value are stripped out.
async function queryAsList(query: string): Promise<string[]> {
  const res: Array<{ [key: string]: string }> = JSON.parse(await db.run(query))
    .flat()
    .filter((el: any) => el != null);
  const justValues: Array<string> = res.map((item) =>
    Object.values(item)[0]
      .replace(/\b\d+\b/g, "")
      .trim()
  );
  return justValues;
}

const artists = await queryAsList("SELECT Name FROM Artist");
const albums = await queryAsList("SELECT Title FROM Album");
console.log(albums.slice(0, 5));
/**
[
  'For Those About To Rock We Salute You',
  'Balls to the Wall',
  'Restless and Wild',
  'Let There Be Rock',
  'Big Ones'
]
 */

// Now we can proceed with creating the custom retriever tool and the final agent:
// Index the artist names so misspelled filter values can be corrected via
// similarity search.
const vectorDb = await MemoryVectorStore.fromTexts(
  artists,
  {},
  new OpenAIEmbeddings()
);
const retriever = vectorDb.asRetriever(15);

const description = `Use to look up values to filter on.
Input is an approximate spelling of the proper noun, output is valid proper nouns.
Use the noun most similar to the search.`;
// NOTE(review): the double cast bridges a Tool type mismatch between
// packages — confirm it is still required when upgrading LangChain.
const retrieverTool = createRetrieverTool(retriever, {
  description,
  name: "search_proper_nouns",
}) as unknown as Tool;

const system = `You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the given tools. Only use the information returned by the tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.

DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.

If you need to filter on a proper noun, you must ALWAYS first look up the filter value using the "search_proper_nouns" tool!

You have access to the following tables: {table_names}

If the question does not seem related to the database, just return "I don't know" as the answer.`;
const prompt = ChatPromptTemplate.fromMessages([
  ["system", system],
  ["human", "{input}"],
  new MessagesPlaceholder("agent_scratchpad"),
]);
const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 });
const sqlToolKit = new SqlToolkit(db, llm);
// Pre-fill the prompt variables that are constant across invocations.
const newPrompt = await prompt.partial({
  dialect: sqlToolKit.dialect,
  top_k: "10",
  table_names: db.allTables.map((t) => t.tableName).join(", "),
});
// The standard SQL tools plus our proper-noun lookup tool.
const tools = [...sqlToolKit.getTools(), retrieverTool];
const runnableAgent = await createOpenAIToolsAgent({
  llm,
  tools,
  prompt: newPrompt,
});
const agentExecutor = new AgentExecutor({
  agent: runnableAgent,
  tools,
});
console.log(
  await agentExecutor.invoke({
    input: "How many albums does alis in chain have?",
  })
);
/**
{
  input: 'How many albums does alis in chain have?',
  output: 'Alice In Chains has 1 album.'
}
 */
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/agents/example_selector.ts
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { SemanticSimilarityExampleSelector } from "@langchain/core/example_selectors";
import {
  FewShotPromptTemplate,
  PromptTemplate,
  ChatPromptTemplate,
  SystemMessagePromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { SqlToolkit } from "langchain/agents/toolkits/sql";
import { SqlDatabase } from "langchain/sql_db";
import { DataSource } from "typeorm";
import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents";
import { examples } from "./examples.js";

// Embed the example questions and select the 5 most similar to the user input.
const exampleSelector = await SemanticSimilarityExampleSelector.fromExamples(
  examples,
  new OpenAIEmbeddings(),
  HNSWLib,
  {
    k: 5,
    inputKeys: ["input"],
  }
);

// Now we can create our FewShotPromptTemplate, which takes our example selector, an example prompt for formatting each example, and a string prefix and suffix to put before and after our formatted examples:
const SYSTEM_PREFIX = `You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the given tools. Only use the information returned by the tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.

DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.

If the question does not seem related to the database, just return "I don't know" as the answer.

Here are some examples of user inputs and their corresponding SQL queries:`;

const fewShotPrompt = new FewShotPromptTemplate({
  exampleSelector,
  examplePrompt: PromptTemplate.fromTemplate(
    "User input: {input}\nSQL query: {query}"
  ),
  inputVariables: ["input", "dialect", "top_k"],
  prefix: SYSTEM_PREFIX,
  suffix: "",
});

// Since our underlying agent is an [OpenAI tools agent](https://js.langchain.com/docs/modules/agents/agent_types/openai_tools_agent), which uses
// OpenAI function calling, our full prompt should be a chat prompt with a human message template and an agentScratchpad MessagesPlaceholder.
// The few-shot prompt will be used for our system message:
const fullPrompt = ChatPromptTemplate.fromMessages([
  new SystemMessagePromptTemplate(fewShotPrompt),
  ["human", "{input}"],
  new MessagesPlaceholder("agent_scratchpad"),
]);

// And now we can create our agent with our custom prompt:
const llm = new ChatOpenAI({ model: "gpt-4", temperature: 0 });
// Connect to the local Chinook SQLite sample database.
const datasource = new DataSource({
  type: "sqlite",
  database: "../../../../Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});
const sqlToolKit = new SqlToolkit(db, llm);
const tools = sqlToolKit.getTools();
// Pre-fill the prompt variables that stay constant for every run.
const newPrompt = await fullPrompt.partial({
  dialect: sqlToolKit.dialect,
  top_k: "10",
});
const runnableAgent = await createOpenAIToolsAgent({
  llm,
  tools,
  prompt: newPrompt,
});
const agentExecutor = new AgentExecutor({
  agent: runnableAgent,
  tools,
});
console.log(
  await agentExecutor.invoke({ input: "How many artists are there?" })
);
/**
{
  input: 'How many artists are there?',
  output: 'There are 275 artists.'
}
 */
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/agents/index.ts
import {
  ChatPromptTemplate,
  HumanMessagePromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { createOpenAIToolsAgent, AgentExecutor } from "langchain/agents";
import { SqlToolkit } from "langchain/agents/toolkits/sql";
import { AIMessage } from "@langchain/core/messages";
import { SqlDatabase } from "langchain/sql_db";
import { DataSource } from "typeorm";

// Connect to the local Chinook SQLite sample database.
const datasource = new DataSource({
  type: "sqlite",
  database: "../../../../Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});

const llm = new ChatOpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
// Toolkit bundling the SQL tools (list tables, schema, query checker, runner).
const sqlToolKit = new SqlToolkit(db, llm);
const tools = sqlToolKit.getTools();

const SQL_PREFIX = `You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results using the LIMIT clause.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for a the few relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools.
Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.

DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.

If the question does not seem related to the database, just return "I don't know" as the answer.`;
const SQL_SUFFIX = `Begin!

Question: {input}
Thought: I should look at the tables in the database to see what I can query.
{agent_scratchpad}`;

// Build the chat prompt. The suffix becomes a fixed AI message (with the
// scratchpad variable stripped out), and the MessagesPlaceholder receives the
// agent's intermediate tool-call steps at runtime.
const prompt = ChatPromptTemplate.fromMessages([
  ["system", SQL_PREFIX],
  HumanMessagePromptTemplate.fromTemplate("{input}"),
  new AIMessage(SQL_SUFFIX.replace("{agent_scratchpad}", "")),
  new MessagesPlaceholder("agent_scratchpad"),
]);
// Pre-fill the variables that are constant across invocations.
const newPrompt = await prompt.partial({
  dialect: sqlToolKit.dialect,
  top_k: "10",
});
const runnableAgent = await createOpenAIToolsAgent({
  llm,
  tools,
  prompt: newPrompt,
});
const agentExecutor = new AgentExecutor({
  agent: runnableAgent,
  tools,
});

console.log(
  await agentExecutor.invoke({
    input:
      "List the total sales per country. Which country's customers spent the most?",
  })
);
/**
{
  input: "List the total sales per country. Which country's customers spent the most?",
  output: 'The total sales per country are as follows:\n' +
    '\n' +
    '1. USA: $523.06\n' +
    '2. Canada: $303.96\n' +
    '3. France: $195.10\n' +
    '4. Brazil: $190.10\n' +
    '5. Germany: $156.48\n' +
    '6. United Kingdom: $112.86\n' +
    '7. Czech Republic: $90.24\n' +
    '8. Portugal: $77.24\n' +
    '9. India: $75.26\n' +
    '10. Chile: $46.62\n' +
    '\n' +
    "To find out which country's customers spent the most, we can see that the customers from the USA spent the most with a total sales of $523.06."
}
 */

console.log(
  await agentExecutor.invoke({
    input: "Describe the playlisttrack table",
  })
);
/**
{
  input: 'Describe the playlisttrack table',
  output: 'The `PlaylistTrack` table has two columns: `PlaylistId` and `TrackId`. Both columns are of type INTEGER and are not nullable (NOT NULL).\n' +
    '\n' +
    'Here are three sample rows from the `PlaylistTrack` table:\n' +
    '\n' +
    '| PlaylistId | TrackId |\n' +
    '|------------|---------|\n' +
    '| 1          | 3402    |\n' +
    '| 1          | 3389    |\n' +
    '| 1          | 3390    |\n' +
    '\n' +
    'Please let me know if there is anything else I can help you with.'
}
 */
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/agents/examples.ts
/**
 * Example (natural-language question, SQL query) pairs against the Chinook
 * sample database. Used to few-shot prompt the SQL agents and chains in the
 * sibling examples.
 */
export const examples = [
  {
    input: "List all artists.",
    query: "SELECT * FROM Artist;",
  },
  {
    input: "Find all albums for the artist 'AC/DC'.",
    query:
      "SELECT * FROM Album WHERE ArtistId = (SELECT ArtistId FROM Artist WHERE Name = 'AC/DC');",
  },
  {
    input: "List all tracks in the 'Rock' genre.",
    query:
      "SELECT * FROM Track WHERE GenreId = (SELECT GenreId FROM Genre WHERE Name = 'Rock');",
  },
  {
    input: "Find the total duration of all tracks.",
    query: "SELECT SUM(Milliseconds) FROM Track;",
  },
  {
    input: "List all customers from Canada.",
    query: "SELECT * FROM Customer WHERE Country = 'Canada';",
  },
  {
    input: "How many tracks are there in the album with ID 5?",
    query: "SELECT COUNT(*) FROM Track WHERE AlbumId = 5;",
  },
  {
    input: "Find the total number of invoices.",
    query: "SELECT COUNT(*) FROM Invoice;",
  },
  {
    input: "List all tracks that are longer than 5 minutes.",
    query: "SELECT * FROM Track WHERE Milliseconds > 300000;",
  },
  {
    input: "Who are the top 5 customers by total purchase?",
    query:
      "SELECT CustomerId, SUM(Total) AS TotalPurchase FROM Invoice GROUP BY CustomerId ORDER BY TotalPurchase DESC LIMIT 5;",
  },
  {
    input: "Which albums are from the year 2000?",
    query: "SELECT * FROM Album WHERE strftime('%Y', ReleaseDate) = '2000';",
  },
  {
    input: "How many employees are there",
    query: 'SELECT COUNT(*) FROM "Employee"',
  },
];
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/prompting/dynamic_few_shot.ts
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { SemanticSimilarityExampleSelector } from "@langchain/core/example_selectors";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { FewShotPromptTemplate, PromptTemplate } from "@langchain/core/prompts";
import { createSqlQueryChain } from "langchain/chains/sql_db";
import { examples } from "./examples.js";
import { db } from "../db.js";

// Embed the examples and select the 5 whose "input" field is most similar to
// the user's question at runtime.
const exampleSelector = await SemanticSimilarityExampleSelector.fromExamples<
  typeof MemoryVectorStore
>(examples, new OpenAIEmbeddings(), MemoryVectorStore, {
  k: 5,
  inputKeys: ["input"],
});

console.log(
  await exampleSelector.selectExamples({ input: "how many artists are there?" })
);
/**
[
  { input: 'List all artists.', query: 'SELECT * FROM Artist;' },
  {
    input: 'How many employees are there',
    query: 'SELECT COUNT(*) FROM "Employee"'
  },
  {
    input: 'How many tracks are there in the album with ID 5?',
    query: 'SELECT COUNT(*) FROM Track WHERE AlbumId = 5;'
  },
  {
    input: 'Which albums are from the year 2000?',
    query: "SELECT * FROM Album WHERE strftime('%Y', ReleaseDate) = '2000';"
  },
  {
    input: "List all tracks in the 'Rock' genre.",
    query: "SELECT * FROM Track WHERE GenreId = (SELECT GenreId FROM Genre WHERE Name = 'Rock');"
  }
]
 */

// To use it, we can pass the ExampleSelector directly in to our FewShotPromptTemplate:
const examplePrompt = PromptTemplate.fromTemplate(
  `User input: {input}\nSQL Query: {query}`
);
const prompt = new FewShotPromptTemplate({
  exampleSelector,
  examplePrompt,
  prefix: `You are a SQLite expert. Given an input question, create a syntactically correct SQLite query to run.
Unless otherwise specified, do not return more than {top_k} rows.

Here is the relevant table info: {table_info}

Below are a number of examples of questions and their corresponding SQL queries.`,
  suffix: "User input: {input}\nSQL query: ",
  inputVariables: ["input", "top_k", "table_info"],
});

console.log(
  await prompt.format({
    input: "How many artists are there?",
    top_k: "3",
    table_info: "foo",
  })
);
/**
You are a SQLite expert. Given an input question, create a syntactically correct SQLite query to run.
Unless otherwise specified, do not return more than 3 rows.

Here is the relevant table info: foo

Below are a number of examples of questions and their corresponding SQL queries.

User input: List all artists.
SQL Query: SELECT * FROM Artist;

User input: How many employees are there
SQL Query: SELECT COUNT(*) FROM "Employee"

User input: How many tracks are there in the album with ID 5?
SQL Query: SELECT COUNT(*) FROM Track WHERE AlbumId = 5;

User input: Which albums are from the year 2000?
SQL Query: SELECT * FROM Album WHERE strftime('%Y', ReleaseDate) = '2000';

User input: List all tracks in the 'Rock' genre.
SQL Query: SELECT * FROM Track WHERE GenreId = (SELECT GenreId FROM Genre WHERE Name = 'Rock');

User input: How many artists are there?
SQL query:
 */

// Now we can use it in a chain:
const llm = new ChatOpenAI({
  temperature: 0,
});
const chain = await createSqlQueryChain({
  db,
  llm,
  prompt,
  dialect: "sqlite",
});
console.log(await chain.invoke({ question: "how many artists are there?" }));
/**
SELECT COUNT(*) FROM Artist;
 */
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/prompting/table_definitions.ts
// Print the database's table info (CREATE TABLE statements plus three sample
// rows per table) — the same string that gets interpolated into SQL prompts
// as {table_info}.
import { db } from "../db.js";

const context = await db.getTableInfo();

console.log(context);

/**
CREATE TABLE Album ( AlbumId INTEGER NOT NULL, Title NVARCHAR(160) NOT NULL, ArtistId INTEGER NOT NULL )
SELECT * FROM "Album" LIMIT 3;
AlbumId Title ArtistId
1 For Those About To Rock We Salute You 1
2 Balls to the Wall 2
3 Restless and Wild 2

CREATE TABLE Artist ( ArtistId INTEGER NOT NULL, Name NVARCHAR(120) )
SELECT * FROM "Artist" LIMIT 3;
ArtistId Name
1 AC/DC
2 Accept
3 Aerosmith

CREATE TABLE Customer ( CustomerId INTEGER NOT NULL, FirstName NVARCHAR(40) NOT NULL, LastName NVARCHAR(20) NOT NULL, Company NVARCHAR(80), Address NVARCHAR(70), City NVARCHAR(40), State NVARCHAR(40), Country NVARCHAR(40), PostalCode NVARCHAR(10), Phone NVARCHAR(24), Fax NVARCHAR(24), Email NVARCHAR(60) NOT NULL, SupportRepId INTEGER )
SELECT * FROM "Customer" LIMIT 3;
CustomerId FirstName LastName Company Address City State Country PostalCode Phone Fax Email SupportRepId
1 Luís Gonçalves Embraer - Empresa Brasileira de Aeronáutica S.A. Av. Brigadeiro Faria Lima, 2170 São José dos Campos SP Brazil 12227-000 +55 (12) 3923-5555 +55 (12) 3923-5566 luisg@embraer.com.br 3
2 Leonie Köhler null Theodor-Heuss-Straße 34 Stuttgart null Germany 70174 +49 0711 2842222 null leonekohler@surfeu.de 5
3 François Tremblay null 1498 rue Bélanger Montréal QC Canada H2G 1A7 +1 (514) 721-4711 null ftremblay@gmail.com 3

CREATE TABLE Employee ( EmployeeId INTEGER NOT NULL, LastName NVARCHAR(20) NOT NULL, FirstName NVARCHAR(20) NOT NULL, Title NVARCHAR(30), ReportsTo INTEGER, BirthDate DATETIME, HireDate DATETIME, Address NVARCHAR(70), City NVARCHAR(40), State NVARCHAR(40), Country NVARCHAR(40), PostalCode NVARCHAR(10), Phone NVARCHAR(24), Fax NVARCHAR(24), Email NVARCHAR(60) )
SELECT * FROM "Employee" LIMIT 3;
EmployeeId LastName FirstName Title ReportsTo BirthDate HireDate Address City State Country PostalCode Phone Fax Email
1 Adams Andrew General Manager null 1962-02-18 00:00:00 2002-08-14 00:00:00 11120 Jasper Ave NW Edmonton AB Canada T5K 2N1 +1 (780) 428-9482 +1 (780) 428-3457 andrew@chinookcorp.com
2 Edwards Nancy Sales Manager 1 1958-12-08 00:00:00 2002-05-01 00:00:00 825 8 Ave SW Calgary AB Canada T2P 2T3 +1 (403) 262-3443 +1 (403) 262-3322 nancy@chinookcorp.com
3 Peacock Jane Sales Support Agent 2 1973-08-29 00:00:00 2002-04-01 00:00:00 1111 6 Ave SW Calgary AB Canada T2P 5M5 +1 (403) 262-3443 +1 (403) 262-6712 jane@chinookcorp.com

CREATE TABLE Genre ( GenreId INTEGER NOT NULL, Name NVARCHAR(120) )
SELECT * FROM "Genre" LIMIT 3;
GenreId Name
1 Rock
2 Jazz
3 Metal

CREATE TABLE Invoice ( InvoiceId INTEGER NOT NULL, CustomerId INTEGER NOT NULL, InvoiceDate DATETIME NOT NULL, BillingAddress NVARCHAR(70), BillingCity NVARCHAR(40), BillingState NVARCHAR(40), BillingCountry NVARCHAR(40), BillingPostalCode NVARCHAR(10), Total NUMERIC(10,2) NOT NULL )
SELECT * FROM "Invoice" LIMIT 3;
InvoiceId CustomerId InvoiceDate BillingAddress BillingCity BillingState BillingCountry BillingPostalCode Total
1 2 2009-01-01 00:00:00 Theodor-Heuss-Straße 34 Stuttgart null Germany 70174 1.98
2 4 2009-01-02 00:00:00 Ullevålsveien 14 Oslo null Norway 0171 3.96
3 8 2009-01-03 00:00:00 Grétrystraat 63 Brussels null Belgium 1000 5.94

CREATE TABLE InvoiceLine ( InvoiceLineId INTEGER NOT NULL, InvoiceId INTEGER NOT NULL, TrackId INTEGER NOT NULL, UnitPrice NUMERIC(10,2) NOT NULL, Quantity INTEGER NOT NULL )
SELECT * FROM "InvoiceLine" LIMIT 3;
InvoiceLineId InvoiceId TrackId UnitPrice Quantity
1 1 2 0.99 1
2 1 4 0.99 1
3 2 6 0.99 1

CREATE TABLE MediaType ( MediaTypeId INTEGER NOT NULL, Name NVARCHAR(120) )
SELECT * FROM "MediaType" LIMIT 3;
MediaTypeId Name
1 MPEG audio file
2 Protected AAC audio file
3 Protected MPEG-4 video file

CREATE TABLE Playlist ( PlaylistId INTEGER NOT NULL, Name NVARCHAR(120) )
SELECT * FROM "Playlist" LIMIT 3;
PlaylistId Name
1 Music
2 Movies
3 TV Shows

CREATE TABLE PlaylistTrack ( PlaylistId INTEGER NOT NULL, TrackId INTEGER NOT NULL )
SELECT * FROM "PlaylistTrack" LIMIT 3;
PlaylistId TrackId
1 3402
1 3389
1 3390

CREATE TABLE Track ( TrackId INTEGER NOT NULL, Name NVARCHAR(200) NOT NULL, AlbumId INTEGER, MediaTypeId INTEGER NOT NULL, GenreId INTEGER, Composer NVARCHAR(220), Milliseconds INTEGER NOT NULL, Bytes INTEGER, UnitPrice NUMERIC(10,2) NOT NULL )
SELECT * FROM "Track" LIMIT 3;
TrackId Name AlbumId MediaTypeId GenreId Composer Milliseconds Bytes UnitPrice
1 For Those About To Rock (We Salute You) 1 1 1 Angus Young, Malcolm Young, Brian Johnson 343719 11170334 0.99
2 Balls to the Wall 2 2 1 U. Dirkschneider, W. Hoffmann, H. Frank, P. Baltes, S. Kaufmann, G. Hoffmann 342562 5510424 0.99
3 Fast As a Shark 3 2 1 F. Baltes, S. Kaufman, U. Dirkscneider & W. Hoffman 230619 3990994 0.99
 */
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/prompting/few_shot.ts
// Static few-shot SQL prompting: always inject the same five hand-picked
// example pairs into the prompt (contrast with example_selector.ts, which
// picks examples dynamically by semantic similarity).
import { FewShotPromptTemplate, PromptTemplate } from "@langchain/core/prompts";
import { examples } from "./examples.js";

// Template applied to each individual example pair.
const singleExampleTemplate = PromptTemplate.fromTemplate(
  `User input: {input}\nSQL Query: {query}`
);

// Instructions shown before the examples; {top_k} and {table_info} are filled
// in at format time.
const promptPrefix = `You are a SQLite expert. Given an input question, create a syntactically correct SQLite query to run. Unless otherwise specified, do not return more than {top_k} rows.

Here is the relevant table info: {table_info}

Below are a number of examples of questions and their corresponding SQL queries.`;

const fewShotPrompt = new FewShotPromptTemplate({
  examples: examples.slice(0, 5),
  examplePrompt: singleExampleTemplate,
  prefix: promptPrefix,
  suffix: "User input: {input}\nSQL query: ",
  inputVariables: ["input", "top_k", "table_info"],
});

const formatted = await fewShotPrompt.format({
  input: "How many artists are there?",
  top_k: "3",
  table_info: "foo",
});
console.log(formatted);

/**
You are a SQLite expert. Given an input question, create a syntactically correct SQLite query to run. Unless otherwise specified, do not return more than 3 rows.

Here is the relevant table info: foo

Below are a number of examples of questions and their corresponding SQL queries.

User input: List all artists.
SQL Query: SELECT * FROM Artist;

User input: Find all albums for the artist 'AC/DC'.
SQL Query: SELECT * FROM Album WHERE ArtistId = (SELECT ArtistId FROM Artist WHERE Name = 'AC/DC');

User input: List all tracks in the 'Rock' genre.
SQL Query: SELECT * FROM Track WHERE GenreId = (SELECT GenreId FROM Genre WHERE Name = 'Rock');

User input: Find the total duration of all tracks.
SQL Query: SELECT SUM(Milliseconds) FROM Track;

User input: List all customers from Canada.
SQL Query: SELECT * FROM Customer WHERE Country = 'Canada';

User input: How many artists are there?
SQL query:
 */
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/prompting/examples.ts
// Hand-written (natural-language question, SQL query) pairs over the Chinook
// sample database. These feed the few-shot prompting examples in this
// directory: few_shot.ts takes the first five verbatim, example_selector.ts
// embeds the "input" field for semantic-similarity retrieval.
export const examples = [
  { input: "List all artists.", query: "SELECT * FROM Artist;" },
  {
    input: "Find all albums for the artist 'AC/DC'.",
    query:
      "SELECT * FROM Album WHERE ArtistId = (SELECT ArtistId FROM Artist WHERE Name = 'AC/DC');",
  },
  {
    input: "List all tracks in the 'Rock' genre.",
    query:
      "SELECT * FROM Track WHERE GenreId = (SELECT GenreId FROM Genre WHERE Name = 'Rock');",
  },
  {
    input: "Find the total duration of all tracks.",
    query: "SELECT SUM(Milliseconds) FROM Track;",
  },
  {
    input: "List all customers from Canada.",
    query: "SELECT * FROM Customer WHERE Country = 'Canada';",
  },
  {
    input: "How many tracks are there in the album with ID 5?",
    query: "SELECT COUNT(*) FROM Track WHERE AlbumId = 5;",
  },
  {
    input: "Find the total number of invoices.",
    query: "SELECT COUNT(*) FROM Invoice;",
  },
  {
    input: "List all tracks that are longer than 5 minutes.",
    // 5 minutes expressed in the Track table's Milliseconds column.
    query: "SELECT * FROM Track WHERE Milliseconds > 300000;",
  },
  {
    input: "Who are the top 5 customers by total purchase?",
    query:
      "SELECT CustomerId, SUM(Total) AS TotalPurchase FROM Invoice GROUP BY CustomerId ORDER BY TotalPurchase DESC LIMIT 5;",
  },
  {
    // NOTE(review): the Chinook `Album` table has no `ReleaseDate` column
    // (see table_definitions.ts — Album has only AlbumId, Title, ArtistId),
    // so this example query would fail if actually executed. Confirm whether
    // it is intentionally illustrative or should be replaced.
    input: "Which albums are from the year 2000?",
    query: "SELECT * FROM Album WHERE strftime('%Y', ReleaseDate) = '2000';",
  },
  {
    input: "How many employees are there",
    query: 'SELECT COUNT(*) FROM "Employee"',
  },
];
0
lc_public_repos/langchainjs/examples/src/use_cases/sql
lc_public_repos/langchainjs/examples/src/use_cases/sql/prompting/list_dialects.ts
// List the SQL dialects that ship with a prebuilt query-generation prompt,
// then inspect the SQLite-specific one.
import { SQL_PROMPTS_MAP } from "langchain/chains/sql_db";

console.log({ SQL_PROMPTS_MAP: Object.keys(SQL_PROMPTS_MAP) });
/**
{
  SQL_PROMPTS_MAP: [ 'oracle', 'postgres', 'sqlite', 'mysql', 'mssql', 'sap hana' ]
}
 */

// For example, using our current DB we can see that we’ll get a SQLite-specific prompt:
console.log({
  sqlite: SQL_PROMPTS_MAP.sqlite,
});
/**
{
  sqlite: PromptTemplate {
    inputVariables: [ 'dialect', 'table_info', 'input', 'top_k' ],
    template: 'You are a SQLite expert. Given an input question, first create a syntactically correct SQLite query to run, then look at the results of the query and return the answer to the input question.\n' +
      'Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per SQLite. You can order the results to return the most informative data in the database.\n' +
      'Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.\n' +
      'Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n' +
      '\n' +
      'Use the following format:\n' +
      '\n' +
      'Question: "Question here"\n' +
      'SQLQuery: "SQL Query to run"\n' +
      'SQLResult: "Result of the SQLQuery"\n' +
      'Answer: "Final answer here"\n' +
      '\n' +
      'Only use the following tables:\n' +
      '{table_info}\n' +
      '\n' +
      'Question: {input}',
  }
}
 */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/advanced/conversational_qa.ts
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { BufferMemory } from "langchain/memory"; import * as fs from "fs"; import { LLMChain } from "langchain/chains"; import { formatDocumentsAsString } from "langchain/util/document"; import { RunnableBranch, RunnableSequence } from "@langchain/core/runnables"; import { PromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; export const run = async () => { /* Initialize the LLM to use to answer the question */ const model = new ChatOpenAI({}); /* Load in the file we want to do question answering over */ const text = fs.readFileSync("state_of_the_union.txt", "utf8"); /* Split the text into chunks */ const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 }); const docs = await textSplitter.createDocuments([text]); /* Create the vectorstore */ const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()); const retriever = vectorStore.asRetriever(); const serializeChatHistory = (chatHistory: string | Array<string>) => { if (Array.isArray(chatHistory)) { return chatHistory.join("\n"); } return chatHistory; }; const memory = new BufferMemory({ memoryKey: "chatHistory", }); /** * Create a prompt template for generating an answer based on context and * a question. * * Chat history will be an empty string if it's the first question. * * inputVariables: ["chatHistory", "context", "question"] */ const questionPrompt = PromptTemplate.fromTemplate( `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. 
---------------- CHAT HISTORY: {chatHistory} ---------------- CONTEXT: {context} ---------------- QUESTION: {question} ---------------- Helpful Answer:` ); /** * Creates a prompt template for __generating a question__ to then ask an LLM * based on previous chat history, context and the question. * * inputVariables: ["chatHistory", "question"] */ const questionGeneratorTemplate = PromptTemplate.fromTemplate(`Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. ---------------- CHAT HISTORY: {chatHistory} ---------------- FOLLOWUP QUESTION: {question} ---------------- Standalone question:`); const handleProcessQuery = async (input: { question: string; context: string; chatHistory?: string | Array<string>; }) => { const chain = new LLMChain({ llm: model, prompt: questionPrompt, outputParser: new StringOutputParser(), }); const { text } = await chain.invoke({ ...input, chatHistory: serializeChatHistory(input.chatHistory ?? ""), }); await memory.saveContext( { human: input.question, }, { ai: text, } ); return text; }; const answerQuestionChain = RunnableSequence.from([ { question: (input: { question: string; chatHistory?: string | Array<string>; }) => input.question, }, { question: (previousStepResult: { question: string; chatHistory?: string | Array<string>; }) => previousStepResult.question, chatHistory: (previousStepResult: { question: string; chatHistory?: string | Array<string>; }) => serializeChatHistory(previousStepResult.chatHistory ?? ""), context: async (previousStepResult: { question: string; chatHistory?: string | Array<string>; }) => { // Fetch relevant docs and serialize to a string. 
const relevantDocs = await retriever.invoke( previousStepResult.question ); const serialized = formatDocumentsAsString(relevantDocs); return serialized; }, }, handleProcessQuery, ]); const generateQuestionChain = RunnableSequence.from([ { question: (input: { question: string; chatHistory: string | Array<string>; }) => input.question, chatHistory: async () => { const memoryResult = await memory.loadMemoryVariables({}); return serializeChatHistory(memoryResult.chatHistory ?? ""); }, }, questionGeneratorTemplate, model, // Take the result of the above model call, and pass it through to the // next RunnableSequence chain which will answer the question { question: (previousStepResult: { text: string }) => previousStepResult.text, }, answerQuestionChain, ]); const branch = RunnableBranch.from([ [ async () => { const memoryResult = await memory.loadMemoryVariables({}); const isChatHistoryPresent = !memoryResult.chatHistory.length; return isChatHistoryPresent; }, answerQuestionChain, ], [ async () => { const memoryResult = await memory.loadMemoryVariables({}); const isChatHistoryPresent = !!memoryResult.chatHistory && memoryResult.chatHistory.length; return isChatHistoryPresent; }, generateQuestionChain, ], answerQuestionChain, ]); const fullChain = RunnableSequence.from([ { question: (input: { question: string }) => input.question, }, branch, ]); const resultOne = await fullChain.invoke({ question: "What did the president say about Justice Breyer?", }); console.log({ resultOne }); /** * { * resultOne: 'The president thanked Justice Breyer for his service and described him as an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.' * } */ const resultTwo = await fullChain.invoke({ question: "Was it nice?", }); console.log({ resultTwo }); /** * { * resultTwo: "Yes, the president's description of Justice Breyer was positive." * } */ };
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/advanced/violation_of_expectations_chain.ts
// Run the experimental ViolationOfExpectationsChain over a canned
// conversation to extract behavioral insights about the human participant.
import { ViolationOfExpectationsChain } from "langchain/experimental/chains/violation_of_expectations";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { AIMessage, HumanMessage } from "@langchain/core/messages";

// Short GPT generated conversation between a human and an AI.
const dummyMessages = [
  new HumanMessage(
    "I've been thinking about the importance of time with myself to discover my voice. I feel like 1-2 hours is never enough."
  ),
  new AIMessage(
    "The concept of 'adequate time' varies. Have you tried different formats of introspection, such as morning pages or long-form writing, to see if they make the process more efficient?"
  ),
  new HumanMessage(
    "I have tried journaling but never consistently. Sometimes it feels like writing doesn't capture everything."
  ),
  new AIMessage(
    "Writing has its limits. What about other mediums like digital art, or interactive journal apps with dynamic prompts that dig deeper? Even coding a personal project can be a form of self-discovery."
  ),
  new HumanMessage(
    "That's an interesting idea. I've never thought about coding as a form of self-discovery."
  ),
  new AIMessage(
    "Since you're comfortable with code, consider building a tool to log and analyze your emotional state, thoughts, or personal growth metrics. It merges skill with introspection, makes the data quantifiable."
  ),
  new HumanMessage(
    "The idea of quantifying emotions and personal growth is fascinating. But I wonder how much it can really capture the 'dark zone' within us."
  ),
  new AIMessage(
    "Good point. The 'dark zone' isn't fully quantifiable. But a tool could serve as a scaffold to explore those areas. It gives a structured approach to an unstructured problem."
  ),
  new HumanMessage(
    "You might be onto something. A structured approach could help unearth patterns or triggers I hadn't noticed."
  ),
  new AIMessage(
    "Exactly. It's about creating a framework to understand what can't easily be understood. Then you can allocate those 5+ hours more effectively, targeting areas that your data flags."
  ),
];

// Instantiate with an empty string to start, since we have no data yet.
// (A single-space document because the store cannot be created empty.)
const vectorStore = await HNSWLib.fromTexts(
  [" "],
  [{ id: 1 }],
  new OpenAIEmbeddings()
);
const retriever = vectorStore.asRetriever();

// Instantiate the LLM,
const llm = new ChatOpenAI({
  model: "gpt-4",
});

// And the chain.
const voeChain = ViolationOfExpectationsChain.fromLLM(llm, retriever);

// Requires an input key of "chat_history" with an array of messages.
const result = await voeChain.invoke({
  chat_history: dummyMessages,
});

console.log({
  result,
});

/**
 * Output:
{
  result: [
    'The user has experience with coding and has tried journaling before, but struggles with maintaining consistency and fully expressing their thoughts and feelings through writing.',
    'The user shows a thoughtful approach towards new concepts and is willing to engage with and contemplate novel ideas before making a decision. They also consider time effectiveness as a crucial factor in their decision-making process.',
    'The user is curious and open-minded about new concepts, but also values introspection and self-discovery in understanding emotions and personal growth.',
    'The user is open to new ideas and strategies, specifically those that involve a structured approach to identifying patterns or triggers.',
    'The user may not always respond or engage with prompts, indicating a need for varied and adaptable communication strategies.'
  ]
}
 */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/human_in_the_loop/helpers.ts
// Shared helpers for the human-in-the-loop examples: two demo tools, a model
// bound to them, and a runnable that executes parsed tool invocations.
import { ChatOpenAI } from "@langchain/openai";
import { StructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import {
  Runnable,
  RunnableLambda,
  RunnablePassthrough,
} from "@langchain/core/runnables";

// Demo tool: pretends to count emails (2 per day) over the last N days.
class CountEmails extends StructuredTool {
  schema = z.object({
    lastNDays: z.number(),
  });

  name = "count_emails";

  description = "Count the number of emails sent in the last N days.";

  async _call(input: z.infer<typeof this.schema>): Promise<string> {
    return (input.lastNDays * 2).toString();
  }
}

// Demo tool: pretends to send an email and reports success.
class SendEmail extends StructuredTool {
  schema = z.object({
    message: z.string(),
    recipient: z.string(),
  });

  name = "send_email";

  description = "Send an email.";

  async _call(input: z.infer<typeof this.schema>): Promise<string> {
    return `Successfully sent email to ${input.recipient}`;
  }
}

const tools = [new CountEmails(), new SendEmail()];

// Chat model with both tools bound so it can emit tool calls.
export const model = new ChatOpenAI({
  model: "gpt-3.5-turbo",
  temperature: 0,
}).bind({
  tools,
});

/**
 * Build a runnable that executes one parsed tool invocation
 * (shape: `{ type: <tool name>, args: <tool input> }`) and attaches the
 * result under an `output` key.
 */
const callTool = (toolInvocation: Record<string, any>): Runnable => {
  // Index the tools by name once for O(1) lookup.
  const toolMap: Record<string, StructuredTool> = Object.fromEntries(
    tools.map((tool) => [tool.name, tool])
  );
  const tool = toolMap[toolInvocation.type];
  if (tool === undefined) {
    // Fail fast with a clear message instead of crashing later with an
    // opaque "cannot read properties of undefined" on tool.invoke.
    throw new Error(`Unknown tool requested: ${toolInvocation.type}`);
  }
  return RunnablePassthrough.assign({
    output: (input, config) => tool.invoke(input.args, config),
  });
};

// Maps callTool over a list of parsed tool invocations.
export const callToolList = new RunnableLambda({ func: callTool }).map();
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/human_in_the_loop/accept-feedback.ts
// Human-in-the-loop gate: show proposed tool invocations to the user on the
// terminal and only execute them if the user approves.
import * as readline from "readline";
import { JsonOutputToolsParser } from "@langchain/core/output_parsers/openai_tools";
import { callToolList, model } from "./helpers.js";

// Use readline to ask the user for approval
// Prompts on stdout and resolves with the raw line typed on stdin.
function askQuestion(question: string): Promise<string> {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });

  return new Promise((resolve) => {
    rl.question(question, (answer) => {
      rl.close();
      resolve(answer);
    });
  });
}

// Shows the pending tool invocations and waits for user confirmation.
// Throws (aborting the chain) on anything other than "y"/"yes".
async function humanApproval(toolInvocations: any[]): Promise<any[]> {
  const toolStrs = toolInvocations
    .map((toolCall) => JSON.stringify(toolCall, null, 2))
    .join("\n\n");
  const msg = `Do you approve of the following tool invocations\n\n${toolStrs}
Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n`;

  // Ask the user for approval
  const resp = await askQuestion(msg);
  if (!["yes", "y"].includes(resp.toLowerCase())) {
    throw new Error(`Tool invocations not approved:\n\n${toolStrs}`);
  }
  return toolInvocations;
}

// model -> parse tool calls -> human approval gate -> execute tools.
const chain = model
  .pipe(new JsonOutputToolsParser())
  .pipe(humanApproval)
  .pipe(callToolList);

const response = await chain.invoke(
  "how many emails did i get in the last 5 days?"
);
console.log(response);
/**
Do you approve of the following tool invocations

{
  "type": "count_emails",
  "args": {
    "lastNDays": 5
  }
}

Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
y
[ { type: 'count_emails', args: { lastNDays: 5 }, output: '10' } ]
 */
const response2 = await chain.invoke(
  "Send sally@gmail.com an email saying 'What's up homie'"
);
console.log(response2);
/**
Do you approve of the following tool invocations

{
  "type": "send_email",
  "args": {
    "message": "What's up homie",
    "recipient": "sally@gmail.com"
  }
}

Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
y
[
  {
    type: 'send_email',
    args: { message: "What's up homie", recipient: 'sally@gmail.com' },
    output: 'Successfully sent email to sally@gmail.com'
  }
]
 */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/youtube/chat_with_podcast.ts
// Retrieval QA over a YouTube video transcript: fetch the transcript,
// chunk it, index it in a FAISS store, and answer a question against the
// retrieved chunks.
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { SearchApiLoader } from "@langchain/community/document_loaders/web/searchapi";
import { TokenTextSplitter } from "@langchain/textsplitters";
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { createRetrievalChain } from "langchain/chains/retrieval";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { ChatPromptTemplate } from "@langchain/core/prompts";

// Pull the transcript for a single YouTube video via SearchApi.
const transcriptLoader = new SearchApiLoader({
  engine: "youtube_transcripts",
  video_id: "WTOm65IZneg",
});
const transcriptDocs = await transcriptLoader.load();

// Token-based splitting keeps each chunk within the model's context budget.
const qaSplitter = new TokenTextSplitter({
  chunkSize: 1000,
  chunkOverlap: 200,
});
const qaDocs = await qaSplitter.splitDocuments(transcriptDocs);

const chatModel = new ChatOpenAI({
  model: "gpt-3.5-turbo-1106",
  temperature: 0.2,
});

// Embed and index the chunks for similarity retrieval.
const vectorstore = await FaissStore.fromDocuments(
  qaDocs,
  new OpenAIEmbeddings()
);

const qaPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "Answer the user's questions based on the below context:\n\n{context}",
  ],
  ["human", "{input}"],
]);

// "Stuff" all retrieved chunks into the prompt's {context} slot.
const documentChain = await createStuffDocumentsChain({
  llm: chatModel,
  prompt: qaPrompt,
});

const retrievalChain = await createRetrievalChain({
  retriever: vectorstore.asRetriever(),
  combineDocsChain: documentChain,
});

const result = await retrievalChain.invoke({
  input: "How many people did he want to help?",
});
console.log(result.answer);

/*
  MrBeast wanted to help 1,000 deaf people hear again. He and his team were able to help over 40 people at the time of the video, and they were on their way to reaching their goal of 1,000.
*/
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/youtube/podcast_summary.ts
// Summarize a YouTube podcast transcript with a "refine" summarization chain:
// summarize the first chunk, then iteratively refine the summary with each
// subsequent chunk.
import { loadSummarizationChain } from "langchain/chains";
import { SearchApiLoader } from "@langchain/community/document_loaders/web/searchapi";
import { TokenTextSplitter } from "@langchain/textsplitters";
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatAnthropic } from "@langchain/anthropic";

// Fetch the transcript for the target video via SearchApi.
const loader = new SearchApiLoader({
  engine: "youtube_transcripts",
  video_id: "WTOm65IZneg",
});

const docs = await loader.load();

// Large chunks with modest overlap — the refine chain visits them in order.
const splitter = new TokenTextSplitter({
  chunkSize: 10000,
  chunkOverlap: 250,
});

const docsSummary = await splitter.splitDocuments(docs);

const llmSummary = new ChatAnthropic({
  model: "claude-3-sonnet-20240229",
  temperature: 0.3,
});

// Prompt used for the first chunk only.
const summaryTemplate = `
You are an expert in summarizing YouTube videos.
Your goal is to create a summary of a podcast.
Below you find the transcript of a podcast:
--------
{text}
--------

The transcript of the podcast will also be used as the basis for a question and answer bot.
Provide some examples questions and answers that could be asked about the podcast. Make these questions very specific.

Total output will be a summary of the video and a list of example questions the user could ask of the video.

SUMMARY AND QUESTIONS:
`;

const SUMMARY_PROMPT = PromptTemplate.fromTemplate(summaryTemplate);

// Prompt used for every later chunk: refine {existing_answer} with new {text}.
const summaryRefineTemplate = `
You are an expert in summarizing YouTube videos.
Your goal is to create a summary of a podcast.
We have provided an existing summary up to a certain point: {existing_answer}

Below you find the transcript of a podcast:
--------
{text}
--------

Given the new context, refine the summary and example questions.
The transcript of the podcast will also be used as the basis for a question and answer bot.
Provide some examples questions and answers that could be asked about the podcast. Make these questions very specific.
If the context isn't useful, return the original summary and questions.

Total output will be a summary of the video and a list of example questions the user could ask of the video.

SUMMARY AND QUESTIONS:
`;

const SUMMARY_REFINE_PROMPT = PromptTemplate.fromTemplate(
  summaryRefineTemplate
);

const summarizeChain = loadSummarizationChain(llmSummary, {
  type: "refine",
  verbose: true,
  questionPrompt: SUMMARY_PROMPT,
  refinePrompt: SUMMARY_REFINE_PROMPT,
});

// NOTE(review): `.run()` is a legacy convenience method; newer code uses
// `.invoke()` — confirm before migrating, as the return shape differs.
const summary = await summarizeChain.run(docsSummary);

console.log(summary);

/*
  Here is a summary of the key points from the podcast transcript:

  - Jimmy helps provide hearing aids and cochlear implants to deaf and hard-of-hearing people who can't afford them. He helps over 1,000 people hear again.

  - Jimmy surprises recipients with $10,000 cash gifts in addition to the hearing aids. He also gifts things like jet skis, basketball game tickets, and trips to concerts.

  - Jimmy travels internationally to provide hearing aids, visiting places like Mexico, Guatemala, Brazil, South Africa, Malawi, and Indonesia.

  - Jimmy donates $100,000 to organizations around the world that teach sign language.

  - The recipients are very emotional and grateful to be able to hear their loved ones again.

  Here are some example questions and answers about the podcast:

  Q: How many people did Jimmy help regain their hearing?
  A: Jimmy helped over 1,000 people regain their hearing.

  Q: What types of hearing devices did Jimmy provide to the recipients?
  A: Jimmy provided cutting-edge hearing aids and cochlear implants.

  Q: In addition to the hearing devices, what surprise gifts did Jimmy give some recipients?
  A: In addition to hearing devices, Jimmy surprised some recipients with $10,000 cash gifts, jet skis, basketball game tickets, and concert tickets.

  Q: What countries did Jimmy travel to in order to help people?
  A: Jimmy traveled to places like Mexico, Guatemala, Brazil, South Africa, Malawi, and Indonesia.

  Q: How much money did Jimmy donate to organizations that teach sign language?
  A: Jimmy donated $100,000 to sign language organizations around the world.

  Q: How did the recipients react when they were able to hear again?
  A: The recipients were very emotional and grateful, with many crying tears of joy at being able to hear their loved ones again.
*/
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/media/audio.ts
// Multimodal structured output: feed an MP3 to Gemini 1.5 Pro on Vertex AI
// and get back a typed list of the instruments heard in the recording.
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";
import { ChatVertexAI } from "@langchain/google-vertexai";
import { HumanMessage } from "@langchain/core/messages";
import fs from "fs";
import { z } from "zod";

// Read a file from disk and return its contents base64-encoded, as required
// by the inline-media message format.
const fileToBase64 = (filePath: string): string =>
  fs.readFileSync(filePath, "base64");

const mozartMp3File = "Mozart_Requiem_D_minor.mp3";
const mozartInBase64 = fileToBase64(mozartMp3File);

// Zod schema describing the structured answer we want from the model.
const instrumentsSchema = z.object({
  instruments: z
    .array(z.string())
    .describe("A list of instruments found in the audio."),
});

// Bind the schema so the model responds via the named tool.
const structuredModel = new ChatVertexAI({
  model: "gemini-1.5-pro-preview-0409",
  temperature: 0,
}).withStructuredOutput(instrumentsSchema, {
  name: "instruments_list_tool",
});

// The whole human message (audio + instructions) is injected at invoke time.
const audioPrompt = ChatPromptTemplate.fromMessages([
  new MessagesPlaceholder("audio"),
]);

const audioChain = audioPrompt.pipe(structuredModel);

const structuredResponse = await audioChain.invoke({
  audio: new HumanMessage({
    content: [
      {
        type: "media",
        mimeType: "audio/mp3",
        data: mozartInBase64,
      },
      {
        type: "text",
        text: `The following audio is a song by Mozart. Respond with a list of instruments you hear in the song.

Rules:
Use the "instruments_list_tool" to return a list of tasks.`,
      },
    ],
  }),
});

console.log("response", structuredResponse);
/*
response {
  instruments: [
    'violin',   'viola',
    'cello',    'double bass',
    'flute',    'oboe',
    'clarinet', 'bassoon',
    'horn',     'trumpet',
    'timpani'
  ]
}
 */
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/media/video.ts
import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { ChatVertexAI } from "@langchain/google-vertexai"; import { HumanMessage } from "@langchain/core/messages"; import fs from "fs"; import { z } from "zod"; function fileToBase64(filePath: string): string { return fs.readFileSync(filePath, "base64"); } const lanceLsEvalsVideo = "lance_ls_eval_video.mp4"; const lanceInBase64 = fileToBase64(lanceLsEvalsVideo); const tool = z.object({ tasks: z.array(z.string()).describe("A list of tasks."), }); const model = new ChatVertexAI({ model: "gemini-1.5-pro-preview-0409", temperature: 0, }).withStructuredOutput(tool, { name: "tasks_list_tool", }); const prompt = ChatPromptTemplate.fromMessages([ new MessagesPlaceholder("video"), ]); const chain = prompt.pipe(model); const response = await chain.invoke({ video: new HumanMessage({ content: [ { type: "media", mimeType: "video/mp4", data: lanceInBase64, }, { type: "text", text: `The following video is an overview of how to build datasets in LangSmith. Given the following video, come up with three tasks I should do to further improve my knowledge around using datasets in LangSmith. Only reference features that were outlined or described in the video. Rules: Use the "tasks_list_tool" to return a list of tasks. Your tasks should be tailored for an engineer who is looking to improve their knowledge around using datasets and evaluations, specifically with LangSmith.`, }, ], }), }); console.log("response", response); /* response { tasks: [ 'Explore the LangSmith SDK documentation for in-depth understanding of dataset creation, manipulation, and versioning functionalities.', 'Experiment with different dataset types like Key-Value, Chat, and LLM to understand their structures and use cases.', 'Try uploading a CSV file containing question-answer pairs to LangSmith and create a new dataset from it.' ] } */
0
lc_public_repos/langchainjs/examples/src/use_cases/query_analysis
lc_public_repos/langchainjs/examples/src/use_cases/query_analysis/quickstart/docs.ts
import { DocumentInterface } from "@langchain/core/documents"; import { YoutubeLoader } from "@langchain/community/document_loaders/web/youtube"; import { getYear } from "date-fns"; const urls = [ "https://www.youtube.com/watch?v=HAn9vnJy6S4", // Jan 31, 2024 "https://www.youtube.com/watch?v=dA1cHGACXCo", // Jan 26, 2024 "https://www.youtube.com/watch?v=ZcEMLz27sL4", // Jan 24, 2024 "https://www.youtube.com/watch?v=hvAPnpSfSGo", // Jan 23, 2024 "https://www.youtube.com/watch?v=EhlPDL4QrWY", // Jan 16, 2024 "https://www.youtube.com/watch?v=mmBo8nlu2j0", // Jan 5, 2024 "https://www.youtube.com/watch?v=rQdibOsL1ps", // Jan 2, 2024 "https://www.youtube.com/watch?v=28lC4fqukoc", // Dec 20, 2023 "https://www.youtube.com/watch?v=es-9MgxB-uc", // Dec 19, 2023 "https://www.youtube.com/watch?v=wLRHwKuKvOE", // Nov 27, 2023 "https://www.youtube.com/watch?v=ObIltMaRJvY", // Nov 22, 2023 "https://www.youtube.com/watch?v=DjuXACWYkkU", // Nov 16, 2023 "https://www.youtube.com/watch?v=o7C9ld6Ln-M", // Nov 2, 2023 ]; const dates = [ new Date("Jan 31, 2024"), new Date("Jan 26, 2024"), new Date("Jan 24, 2024"), new Date("Jan 23, 2024"), new Date("Jan 16, 2024"), new Date("Jan 5, 2024"), new Date("Jan 2, 2024"), new Date("Dec 20, 2023"), new Date("Dec 19, 2023"), new Date("Nov 27, 2023"), new Date("Nov 22, 2023"), new Date("Nov 16, 2023"), new Date("Nov 2, 2023"), ]; const getDocs = async () => { const docs: Array<DocumentInterface> = []; for await (const url of urls) { const doc = await YoutubeLoader.createFromUrl(url, { language: "en", addVideoInfo: true, }).load(); docs.push(...doc); } docs.forEach((doc, idx) => { // eslint-disable-next-line no-param-reassign doc.metadata.publish_year = getYear(dates[idx]); // eslint-disable-next-line no-param-reassign doc.metadata.publish_date = dates[idx]; }); return docs; }; export { getDocs };
0
lc_public_repos/langchainjs/examples/src/use_cases/query_analysis
lc_public_repos/langchainjs/examples/src/use_cases/query_analysis/quickstart/load_yt_videos.ts
import { DocumentInterface } from "@langchain/core/documents"; import { YoutubeLoader } from "@langchain/community/document_loaders/web/youtube"; import { getYear } from "date-fns"; const urls = [ "https://www.youtube.com/watch?v=HAn9vnJy6S4", "https://www.youtube.com/watch?v=dA1cHGACXCo", "https://www.youtube.com/watch?v=ZcEMLz27sL4", "https://www.youtube.com/watch?v=hvAPnpSfSGo", "https://www.youtube.com/watch?v=EhlPDL4QrWY", "https://www.youtube.com/watch?v=mmBo8nlu2j0", "https://www.youtube.com/watch?v=rQdibOsL1ps", "https://www.youtube.com/watch?v=28lC4fqukoc", "https://www.youtube.com/watch?v=es-9MgxB-uc", "https://www.youtube.com/watch?v=wLRHwKuKvOE", "https://www.youtube.com/watch?v=ObIltMaRJvY", "https://www.youtube.com/watch?v=DjuXACWYkkU", "https://www.youtube.com/watch?v=o7C9ld6Ln-M", ]; let docs: Array<DocumentInterface> = []; for await (const url of urls) { const doc = await YoutubeLoader.createFromUrl(url, { language: "en", addVideoInfo: true, }).load(); docs = docs.concat(doc); } console.log(docs.length); /* 13 */ // Add some additional metadata: what year the video was published // The JS API does not provide publish date, so we can use a // hardcoded array with the dates instead. const dates = [ new Date("Jan 31, 2024"), new Date("Jan 26, 2024"), new Date("Jan 24, 2024"), new Date("Jan 23, 2024"), new Date("Jan 16, 2024"), new Date("Jan 5, 2024"), new Date("Jan 2, 2024"), new Date("Dec 20, 2023"), new Date("Dec 19, 2023"), new Date("Nov 27, 2023"), new Date("Nov 22, 2023"), new Date("Nov 16, 2023"), new Date("Nov 2, 2023"), ]; docs.forEach((doc, idx) => { // eslint-disable-next-line no-param-reassign doc.metadata.publish_year = getYear(dates[idx]); // eslint-disable-next-line no-param-reassign doc.metadata.publish_date = dates[idx]; }); // Here are the titles of the videos we've loaded: console.log(docs.map((doc) => doc.metadata.title)); /* [ 'OpenGPTs', 'Building a web RAG chatbot: using LangChain, Exa (prev. 
Metaphor), LangSmith, and Hosted Langserve', 'Streaming Events: Introducing a new `stream_events` method', 'LangGraph: Multi-Agent Workflows', 'Build and Deploy a RAG app with Pinecone Serverless', 'Auto-Prompt Builder (with Hosted LangServe)', 'Build a Full Stack RAG App With TypeScript', 'Getting Started with Multi-Modal LLMs', 'SQL Research Assistant', 'Skeleton-of-Thought: Building a New Template from Scratch', 'Benchmarking RAG over LangChain Docs', 'Building a Research Assistant from Scratch', 'LangServe and LangChain Templates Webinar' ] */
0
lc_public_repos/langchainjs/examples/src/use_cases/query_analysis
lc_public_repos/langchainjs/examples/src/use_cases/query_analysis/quickstart/metadata.ts
import { getDocs } from "./docs.js"; const docs = await getDocs(); console.log(docs[0].metadata); /** { source: 'HAn9vnJy6S4', description: 'OpenGPTs is an open-source platform aimed at recreating an experience like the GPT Store - but with any model, any tools, and that you can self-host.\n' + '\n' + 'This video covers both how to use it as well as how to build it.\n' + '\n' + 'GitHub: https://github.com/langchain-ai/opengpts', title: 'OpenGPTs', view_count: 7262, author: 'LangChain' } */ // And here's a sample from a document's contents: console.log(docs[0].pageContent.slice(0, 500)); /* hello today I want to talk about open gpts open gpts is a project that we built here at linkchain uh that replicates the GPT store in a few ways so it creates uh end user-facing friendly interface to create different Bots and these Bots can have access to different tools and they can uh be given files to retrieve things over and basically it's a way to create a variety of bots and expose the configuration of these Bots to end users it's all open source um it can be used with open AI it can be us */
0
lc_public_repos/langchainjs/examples/src/use_cases/query_analysis
lc_public_repos/langchainjs/examples/src/use_cases/query_analysis/quickstart/index_docs.ts
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { OpenAIEmbeddings } from "@langchain/openai"; import { Chroma } from "@langchain/community/vectorstores/chroma"; import { getDocs } from "./docs.js"; const docs = await getDocs(); const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 2000 }); const chunkedDocs = await textSplitter.splitDocuments(docs); const embeddings = new OpenAIEmbeddings({ model: "text-embedding-3-small", }); const vectorStore = await Chroma.fromDocuments(chunkedDocs, embeddings, { collectionName: "yt-videos", });
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/chatbots/retrieval.ts
/* eslint-disable import/first */ /* eslint-disable arrow-body-style */ /* eslint-disable import/no-duplicates */ import { ChatOpenAI } from "@langchain/openai"; const chat = new ChatOpenAI({ model: "gpt-3.5-turbo-1106", temperature: 0.2, }); import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; const loader = new CheerioWebBaseLoader( "https://docs.smith.langchain.com/user_guide" ); const rawDocs = await loader.load(); import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 500, chunkOverlap: 0, }); const allSplits = await textSplitter.splitDocuments(rawDocs); import { OpenAIEmbeddings } from "@langchain/openai"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; const vectorstore = await MemoryVectorStore.fromDocuments( allSplits, new OpenAIEmbeddings() ); const retriever = vectorstore.asRetriever(4); const docs = await retriever.invoke("how can langsmith help with testing?"); console.log(docs); import { createStuffDocumentsChain } from "langchain/chains/combine_documents"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; const SYSTEM_TEMPLATE = `Answer the user's questions based on the below context. 
If the context doesn't contain any relevant information to the question, don't make something up and just say "I don't know": <context> {context} </context> `; const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([ ["system", SYSTEM_TEMPLATE], new MessagesPlaceholder("messages"), ]); const documentChain = await createStuffDocumentsChain({ llm: chat, prompt: questionAnsweringPrompt, }); import { HumanMessage, AIMessage } from "@langchain/core/messages"; console.log( await documentChain.invoke({ messages: [ new HumanMessage("Can LangSmith help test my LLM applications?"), ], context: docs, }) ); console.log( await documentChain.invoke({ messages: [ new HumanMessage("Can LangSmith help test my LLM applications?"), ], context: [], }) ); import type { BaseMessage } from "@langchain/core/messages"; import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; const parseRetrieverInput = (params: { messages: BaseMessage[] }) => { return params.messages[params.messages.length - 1].content; }; const retrievalChain = RunnablePassthrough.assign({ context: RunnableSequence.from([parseRetrieverInput, retriever]), }).assign({ answer: documentChain, }); console.log( await retrievalChain.invoke({ messages: [ new HumanMessage("Can LangSmith help test my LLM applications?"), ], }) ); console.log(await retriever.invoke("Tell me more!")); const queryTransformPrompt = ChatPromptTemplate.fromMessages([ new MessagesPlaceholder("messages"), [ "user", "Given the above conversation, generate a search query to look up in order to get information relevant to the conversation. Only respond with the query, nothing else.", ], ]); const queryTransformationChain = queryTransformPrompt.pipe(chat); console.log( await queryTransformationChain.invoke({ messages: [ new HumanMessage("Can LangSmith help test my LLM applications?"), new AIMessage( "Yes, LangSmith can help test and evaluate your LLM applications. 
It allows you to quickly edit examples and add them to datasets to expand the surface area of your evaluation sets or to fine-tune a model for improved quality or reduced costs. Additionally, LangSmith can be used to monitor your application, log all traces, visualize latency and token usage statistics, and troubleshoot specific issues as they arise." ), new HumanMessage("Tell me more!"), ], }) ); import { RunnableBranch } from "@langchain/core/runnables"; import { StringOutputParser } from "@langchain/core/output_parsers"; const queryTransformingRetrieverChain = RunnableBranch.from([ [ (params: { messages: BaseMessage[] }) => params.messages.length === 1, RunnableSequence.from([parseRetrieverInput, retriever]), ], queryTransformPrompt .pipe(chat) .pipe(new StringOutputParser()) .pipe(retriever), ]).withConfig({ runName: "chat_retriever_chain" }); const conversationalRetrievalChain = RunnablePassthrough.assign({ context: queryTransformingRetrieverChain, }).assign({ answer: documentChain, }); console.log( await conversationalRetrievalChain.invoke({ messages: [ new HumanMessage("Can LangSmith help test my LLM applications?"), ], }) ); console.log( await conversationalRetrievalChain.invoke({ messages: [ new HumanMessage("Can LangSmith help test my LLM applications?"), new AIMessage( "Yes, LangSmith can help test and evaluate your LLM applications. It allows you to quickly edit examples and add them to datasets to expand the surface area of your evaluation sets or to fine-tune a model for improved quality or reduced costs. Additionally, LangSmith can be used to monitor your application, log all traces, visualize latency and token usage statistics, and troubleshoot specific issues as they arise." ), new HumanMessage("Tell me more!"), ], }) ); const stream = await conversationalRetrievalChain.stream({ messages: [ new HumanMessage("Can LangSmith help test my LLM applications?"), new AIMessage( "Yes, LangSmith can help test and evaluate your LLM applications. 
It allows you to quickly edit examples and add them to datasets to expand the surface area of your evaluation sets or to fine-tune a model for improved quality or reduced costs. Additionally, LangSmith can be used to monitor your application, log all traces, visualize latency and token usage statistics, and troubleshoot specific issues as they arise." ), new HumanMessage("Tell me more!"), ], }); for await (const chunk of stream) { console.log(chunk); }
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/chatbots/tool_usage.ts
/* eslint-disable import/first */ /* eslint-disable arrow-body-style */ /* eslint-disable import/no-duplicates */ import { TavilySearchResults } from "@langchain/community/tools/tavily_search"; import { ChatOpenAI } from "@langchain/openai"; const tools = [ new TavilySearchResults({ maxResults: 1, }), ]; const chat = new ChatOpenAI({ model: "gpt-3.5-turbo-1106", temperature: 0, }); import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; // Adapted from https://smith.langchain.com/hub/hwchase17/openai-tools-agent const prompt = ChatPromptTemplate.fromMessages([ [ "system", "You are a helpful assistant. You may not need to use tools for every query - the user may just want to chat!", ], new MessagesPlaceholder("messages"), new MessagesPlaceholder("agent_scratchpad"), ]); import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents"; const agent = await createOpenAIToolsAgent({ llm: chat, tools, prompt, }); const agentExecutor = new AgentExecutor({ agent, tools }); import { HumanMessage } from "@langchain/core/messages"; console.log( await agentExecutor.invoke({ messages: [new HumanMessage("I'm Nemo!")] }) ); console.log( await agentExecutor.invoke({ messages: [ new HumanMessage( "What is the current conservation status of the Great Barrier Reef?" ), ], }) ); import { AIMessage } from "@langchain/core/messages"; console.log( await agentExecutor.invoke({ messages: [ new HumanMessage("I'm Nemo!"), new AIMessage("Hello Nemo! How can I assist you today?"), new HumanMessage("What is my name?"), ], }) ); // Adapted from https://smith.langchain.com/hub/hwchase17/openai-tools-agent const prompt2 = ChatPromptTemplate.fromMessages([ [ "system", "You are a helpful assistant. 
You may not need to use tools for every query - the user may just want to chat!", ], new MessagesPlaceholder("chat_history"), ["human", "{input}"], new MessagesPlaceholder("agent_scratchpad"), ]); const agent2 = await createOpenAIToolsAgent({ llm: chat, tools, prompt: prompt2, }); const agentExecutor2 = new AgentExecutor({ agent: agent2, tools }); import { ChatMessageHistory } from "@langchain/community/stores/message/in_memory"; import { RunnableWithMessageHistory } from "@langchain/core/runnables"; const demoEphemeralChatMessageHistory = new ChatMessageHistory(); const conversationalAgentExecutor = new RunnableWithMessageHistory({ runnable: agentExecutor2, getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistory, inputMessagesKey: "input", outputMessagesKey: "output", historyMessagesKey: "chat_history", }); console.log( await conversationalAgentExecutor.invoke( { input: "I'm Nemo!" }, { configurable: { sessionId: "unused" } } ) ); console.log( await conversationalAgentExecutor.invoke( { input: "What is my name?" }, { configurable: { sessionId: "unused" } } ) );
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/chatbots/memory_management.ts
/* eslint-disable import/first */ /* eslint-disable arrow-body-style */ /* eslint-disable import/no-duplicates */ import { ChatOpenAI } from "@langchain/openai"; const chat = new ChatOpenAI({ model: "gpt-3.5-turbo-1106", }); import { HumanMessage, AIMessage } from "@langchain/core/messages"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; const prompt = ChatPromptTemplate.fromMessages([ [ "system", "You are a helpful assistant. Answer all questions to the best of your ability.", ], new MessagesPlaceholder("messages"), ]); const chain = prompt.pipe(chat); console.log( await chain.invoke({ messages: [ new HumanMessage( "Translate this sentence from English to French: I love programming." ), new AIMessage("J'adore la programmation."), new HumanMessage("What did you just say?"), ], }) ); import { ChatMessageHistory } from "@langchain/community/stores/message/in_memory"; const demoEphemeralChatMessageHistory = new ChatMessageHistory(); await demoEphemeralChatMessageHistory.addMessage(new HumanMessage("hi!")); await demoEphemeralChatMessageHistory.addMessage(new AIMessage("whats up?")); console.log(await demoEphemeralChatMessageHistory.getMessages()); await demoEphemeralChatMessageHistory.clear(); const input1 = "Translate this sentence from English to French: I love programming."; await demoEphemeralChatMessageHistory.addMessage(new HumanMessage(input1)); const response = await chain.invoke({ messages: await demoEphemeralChatMessageHistory.getMessages(), }); await demoEphemeralChatMessageHistory.addMessage(response); const input2 = "What did I just ask you?"; await demoEphemeralChatMessageHistory.addMessage(new HumanMessage(input2)); console.log( await chain.invoke({ messages: await demoEphemeralChatMessageHistory.getMessages(), }) ); const runnableWithMessageHistoryPrompt = ChatPromptTemplate.fromMessages([ [ "system", "You are a helpful assistant. 
Answer all questions to the best of your ability.", ], new MessagesPlaceholder("chat_history"), ["human", "{input}"], ]); const chain2 = runnableWithMessageHistoryPrompt.pipe(chat); import { RunnableWithMessageHistory } from "@langchain/core/runnables"; const demoEphemeralChatMessageHistoryForChain = new ChatMessageHistory(); const chainWithMessageHistory = new RunnableWithMessageHistory({ runnable: chain2, getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistoryForChain, inputMessagesKey: "input", historyMessagesKey: "chat_history", }); console.log( await chainWithMessageHistory.invoke( { input: "Translate this sentence from English to French: I love programming.", }, { configurable: { sessionId: "unused" } } ) ); console.log( await chainWithMessageHistory.invoke( { input: "What did I just ask you?", }, { configurable: { sessionId: "unused" } } ) ); await demoEphemeralChatMessageHistory.clear(); await demoEphemeralChatMessageHistory.addMessage( new HumanMessage("Hey there! I'm Nemo.") ); await demoEphemeralChatMessageHistory.addMessage(new AIMessage("Hello!")); await demoEphemeralChatMessageHistory.addMessage( new HumanMessage("How are you today?") ); await demoEphemeralChatMessageHistory.addMessage(new AIMessage("Fine thanks!")); console.log(await demoEphemeralChatMessageHistory.getMessages()); const chainWithMessageHistory2 = new RunnableWithMessageHistory({ runnable: chain2, getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistory, inputMessagesKey: "input", historyMessagesKey: "chat_history", }); console.log( await chainWithMessageHistory2.invoke( { input: "What's my name?", }, { configurable: { sessionId: "unused" } } ) ); import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; const trimMessages = async (_chainInput: Record<string, any>) => { const storedMessages = await demoEphemeralChatMessageHistory.getMessages(); if (storedMessages.length <= 2) { return false; } await 
demoEphemeralChatMessageHistory.clear(); for (const message of storedMessages.slice(-2)) { demoEphemeralChatMessageHistory.addMessage(message); } return true; }; const chainWithTrimming = RunnableSequence.from([ RunnablePassthrough.assign({ messages_trimmed: trimMessages }), chainWithMessageHistory2, ]); console.log( await chainWithTrimming.invoke( { input: "Where does P. Sherman live?", }, { configurable: { sessionId: "unused" } } ) ); console.log(await demoEphemeralChatMessageHistory.getMessages()); console.log( await chainWithTrimming.invoke( { input: "What is my name?", }, { configurable: { sessionId: "unused" } } ) ); console.log(await demoEphemeralChatMessageHistory.getMessages()); await demoEphemeralChatMessageHistory.clear(); await demoEphemeralChatMessageHistory.addMessage( new HumanMessage("Hey there! I'm Nemo.") ); await demoEphemeralChatMessageHistory.addMessage(new AIMessage("Hello!")); await demoEphemeralChatMessageHistory.addMessage( new HumanMessage("How are you today?") ); await demoEphemeralChatMessageHistory.addMessage(new AIMessage("Fine thanks!")); console.log(await demoEphemeralChatMessageHistory.getMessages()); const runnableWithSummaryMemoryPrompt = ChatPromptTemplate.fromMessages([ [ "system", "You are a helpful assistant. Answer all questions to the best of your ability. 
The provided chat history includes facts about the user you are speaking with.", ], new MessagesPlaceholder("chat_history"), ["human", "{input}"], ]); const summaryMemoryChain = runnableWithSummaryMemoryPrompt.pipe(chat); const chainWithMessageHistory3 = new RunnableWithMessageHistory({ runnable: summaryMemoryChain, getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistory, inputMessagesKey: "input", historyMessagesKey: "chat_history", }); const summarizeMessages = async (_chainInput: Record<string, any>) => { const storedMessages = await demoEphemeralChatMessageHistory.getMessages(); if (storedMessages.length === 0) { return false; } const summarizationPrompt = ChatPromptTemplate.fromMessages([ new MessagesPlaceholder("chat_history"), [ "user", "Distill the above chat messages into a single summary message. Include as many specific details as you can.", ], ]); const summarizationChain = summarizationPrompt.pipe(chat); const summaryMessage = await summarizationChain.invoke({ chat_history: storedMessages, }); await demoEphemeralChatMessageHistory.clear(); demoEphemeralChatMessageHistory.addMessage(summaryMessage); return true; }; const chainWithSummarization = RunnableSequence.from([ RunnablePassthrough.assign({ messages_summarized: summarizeMessages, }), chainWithMessageHistory3, ]); console.log( await chainWithSummarization.invoke( { input: "What did I say my name was?", }, { configurable: { sessionId: "unused" }, } ) );
0
lc_public_repos/langchainjs/examples/src/use_cases
lc_public_repos/langchainjs/examples/src/use_cases/chatbots/quickstart.ts
/* eslint-disable import/first */ /* eslint-disable arrow-body-style */ /* eslint-disable import/no-duplicates */ import { ChatOpenAI } from "@langchain/openai"; const chat = new ChatOpenAI({ model: "gpt-3.5-turbo-1106", temperature: 0.2, }); import { HumanMessage } from "@langchain/core/messages"; await chat.invoke([ new HumanMessage( "Translate this sentence from English to French: I love programming." ), ]); await chat.invoke([new HumanMessage("What did you just say?")]); import { AIMessage } from "@langchain/core/messages"; await chat.invoke([ new HumanMessage( "Translate this sentence from English to French: I love programming." ), new AIMessage("J'adore la programmation."), new HumanMessage("What did you just say?"), ]); import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; const prompt = ChatPromptTemplate.fromMessages([ [ "system", "You are a helpful assistant. Answer all questions to the best of your ability.", ], new MessagesPlaceholder("messages"), ]); const chain = prompt.pipe(chat); await chain.invoke({ messages: [ new HumanMessage( "Translate this sentence from English to French: I love programming." ), new AIMessage("J'adore la programmation."), new HumanMessage("What did you just say?"), ], }); import { ChatMessageHistory } from "@langchain/community/stores/message/in_memory"; const demoEphemeralChatMessageHistory = new ChatMessageHistory(); await demoEphemeralChatMessageHistory.addMessage(new HumanMessage("hi!")); await demoEphemeralChatMessageHistory.addMessage(new AIMessage("whats up?")); console.log(await demoEphemeralChatMessageHistory.getMessages()); await demoEphemeralChatMessageHistory.addMessage( new HumanMessage( "Translate this sentence from English to French: I love programming." 
) ); const responseMessage = await chain.invoke({ messages: await demoEphemeralChatMessageHistory.getMessages(), }); await demoEphemeralChatMessageHistory.addMessage(responseMessage); await demoEphemeralChatMessageHistory.addMessage( new HumanMessage("What did you just say?") ); const responseMessage2 = await chain.invoke({ messages: await demoEphemeralChatMessageHistory.getMessages(), }); console.log(responseMessage2); import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; const loader = new CheerioWebBaseLoader( "https://docs.smith.langchain.com/user_guide" ); const rawDocs = await loader.load(); import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 500, chunkOverlap: 0, }); const allSplits = await textSplitter.splitDocuments(rawDocs); import { OpenAIEmbeddings } from "@langchain/openai"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; const vectorstore = await MemoryVectorStore.fromDocuments( allSplits, new OpenAIEmbeddings() ); const retriever = vectorstore.asRetriever(4); const docs = await retriever.invoke("how can langsmith help with testing?"); console.log(docs); import { createStuffDocumentsChain } from "langchain/chains/combine_documents"; const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([ [ "system", "Answer the user's questions based on the below context:\n\n{context}", ], new MessagesPlaceholder("messages"), ]); const documentChain = await createStuffDocumentsChain({ llm: chat, prompt: questionAnsweringPrompt, }); const demoEphemeralChatMessageHistory2 = new ChatMessageHistory(); await demoEphemeralChatMessageHistory2.addMessage( new HumanMessage("how can langsmith help with testing?") ); console.log( await documentChain.invoke({ messages: await demoEphemeralChatMessageHistory2.getMessages(), context: docs, }) ); import type { BaseMessage } from "@langchain/core/messages"; import { 
RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; const parseRetrieverInput = (params: { messages: BaseMessage[] }) => { return params.messages[params.messages.length - 1].content; }; const retrievalChain = RunnablePassthrough.assign({ context: RunnableSequence.from([parseRetrieverInput, retriever]), }).assign({ answer: documentChain, }); const response3 = await retrievalChain.invoke({ messages: await demoEphemeralChatMessageHistory2.getMessages(), }); console.log(response3); await demoEphemeralChatMessageHistory2.addMessage( new AIMessage(response3.answer) ); await demoEphemeralChatMessageHistory2.addMessage( new HumanMessage("tell me more about that!") ); console.log( await retrievalChain.invoke({ messages: await demoEphemeralChatMessageHistory2.getMessages(), }) ); const retrievalChainWithOnlyAnswer = RunnablePassthrough.assign({ context: RunnableSequence.from([parseRetrieverInput, retriever]), }).pipe(documentChain); console.log( await retrievalChainWithOnlyAnswer.invoke({ messages: await demoEphemeralChatMessageHistory2.getMessages(), }) ); console.log(await retriever.invoke("how can langsmith help with testing?")); console.log(await retriever.invoke("tell me more about that!")); import { RunnableBranch } from "@langchain/core/runnables"; import { StringOutputParser } from "@langchain/core/output_parsers"; const queryTransformPrompt = ChatPromptTemplate.fromMessages([ new MessagesPlaceholder("messages"), [ "user", "Given the above conversation, generate a search query to look up in order to get information relevant to the conversation. 
Only respond with the query, nothing else.", ], ]); const queryTransformingRetrieverChain = RunnableBranch.from([ [ (params: { messages: BaseMessage[] }) => params.messages.length === 1, RunnableSequence.from([parseRetrieverInput, retriever]), ], queryTransformPrompt .pipe(chat) .pipe(new StringOutputParser()) .pipe(retriever), ]).withConfig({ runName: "chat_retriever_chain" }); const conversationalRetrievalChain = RunnablePassthrough.assign({ context: queryTransformingRetrieverChain, }).assign({ answer: documentChain, }); const demoEphemeralChatMessageHistory3 = new ChatMessageHistory(); await demoEphemeralChatMessageHistory3.addMessage( new HumanMessage("how can langsmith help with testing?") ); const response4 = await conversationalRetrievalChain.invoke({ messages: await demoEphemeralChatMessageHistory3.getMessages(), }); await demoEphemeralChatMessageHistory3.addMessage( new AIMessage(response4.answer) ); console.log(response4); await demoEphemeralChatMessageHistory3.addMessage( new HumanMessage("tell me more about that!") ); console.log( await conversationalRetrievalChain.invoke({ messages: await demoEphemeralChatMessageHistory3.getMessages(), }) );
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/llms/replicate.ts
import { Replicate } from "@langchain/community/llms/replicate"; export const run = async () => { const model = new Replicate({ model: "replicate/flan-t5-xl:3ae0799123a1fe11f8c89fd99632f843fc5f7a761630160521c4253149754523", }); const res = await model.invoke( "Question: What would be a good company name a company that makes colorful socks?\nAnswer:" ); console.log({ res }); };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/llms/hf.ts
import { HuggingFaceInference } from "@langchain/community/llms/hf"; export const run = async () => { const model = new HuggingFaceInference({ model: "gpt2", temperature: 0.7, maxTokens: 50, }); const res = await model.invoke( "Question: What would be a good company name a company that makes colorful socks?\nAnswer:" ); console.log({ res }); };
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/llms/gradient_ai-base.ts
import { GradientLLM } from "@langchain/community/llms/gradient_ai"; // Note that inferenceParameters are optional const model = new GradientLLM({ modelSlug: "llama2-7b-chat", inferenceParameters: { maxGeneratedTokenCount: 20, temperature: 0, }, }); const res = await model.invoke( "What would be a good company name for a company that makes colorful socks?" ); console.log({ res });
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/llms/googlevertexai-streaming.ts
import { VertexAI } from "@langchain/google-vertexai"; // Or, if using the web entrypoint: // import { VertexAI } from "@langchain/google-vertexai-web"; const model = new VertexAI({ temperature: 0.7, }); const stream = await model.stream( "What would be a good company name for a company that makes colorful socks?" ); for await (const chunk of stream) { console.log("\n---------\nChunk:\n---------\n", chunk); } /* --------- Chunk: --------- * Kaleidoscope Toes * Huephoria * Soleful Spectrum * --------- Chunk: --------- Colorwave Hosiery * Chromatic Threads * Rainbow Rhapsody * Vibrant Soles * Toe-tally Colorful * Socktacular Hues * --------- Chunk: --------- Threads of Joy --------- Chunk: --------- */
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/llms/bedrock.js
import { Bedrock } from "@langchain/community/llms/bedrock";

/**
 * Invokes a Bedrock-hosted model with a sample prompt and logs the
 * completion. Replace the placeholder model name and region with real
 * values before running.
 */
async function test() {
  const model = new Bedrock({
    model: "bedrock-model-name", // placeholder: e.g. "anthropic.claude-v2"
    region: "aws-region", // placeholder: e.g. "us-east-1"
  });

  const res = await model.invoke(
    "Question: What would be a good company name a company that makes colorful socks?\nAnswer:"
  );

  console.log(res);
}

// Fire-and-forget entry point: attach a rejection handler so a failed AWS
// call surfaces as a logged error instead of an unhandled promise
// rejection, which terminates the process in modern Node.js.
test().catch(console.error);
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/llms/azure_openai-chat.ts
import { AzureChatOpenAI } from "@langchain/openai";

/**
 * Demonstrates an Azure OpenAI chat completion with a pirate-themed
 * system message and a capped response length.
 */
export const run = async () => {
  // NOTE(review): `prefixMessages` is a legacy chat-completion option —
  // confirm it is still supported by AzureChatOpenAI in the installed
  // @langchain/openai version.
  const model = new AzureChatOpenAI({
    prefixMessages: [
      {
        role: "system",
        content: "You are a helpful assistant that answers in pirate language",
      },
    ],
    maxTokens: 50,
  });

  const prompt =
    "What would be a good company name for a company that makes colorful socks?";

  const completion = await model.invoke(prompt);

  // Keep the logged key name stable for readers comparing output.
  console.log({ res: completion });
};
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/llms/layerup_security.ts
import {
  LayerupSecurity,
  LayerupSecurityOptions,
} from "@langchain/community/llms/layerup_security";
import { GuardrailResponse } from "@layerup/layerup-security";
import { OpenAI } from "@langchain/openai";

// Create an instance of your favorite LLM — Layerup Security wraps it and
// screens prompts/responses before and after the underlying call.
const openai = new OpenAI({
  modelName: "gpt-3.5-turbo",
  openAIApiKey: process.env.OPENAI_API_KEY,
});

// Configure Layerup Security
const layerupSecurityOptions: LayerupSecurityOptions = {
  // Specify a LLM that Layerup Security will wrap around
  llm: openai,

  // Layerup API key, from the Layerup dashboard
  layerupApiKey: process.env.LAYERUP_API_KEY,

  // Custom base URL, if self hosting
  layerupApiBaseUrl: "https://api.uselayerup.com/v1",

  // List of guardrails to run on prompts before the LLM is invoked
  promptGuardrails: [],

  // List of guardrails to run on responses from the LLM
  responseGuardrails: ["layerup.hallucination"],

  // Whether or not to mask the prompt for PII & sensitive data before it is sent to the LLM
  mask: false,

  // Metadata for abuse tracking, customer tracking, and scope tracking.
  metadata: { customer: "example@uselayerup.com" },

  // Handler for guardrail violations on the prompt guardrails: returns a
  // canned assistant message instead of sending the offending prompt on.
  handlePromptGuardrailViolation: (violation: GuardrailResponse) => {
    if (violation.offending_guardrail === "layerup.sensitive_data") {
      // Custom logic goes here
    }

    return {
      role: "assistant",
      content: `There was sensitive data! I cannot respond. Here's a dynamic canned response. Current date: ${Date.now()}`,
    };
  },

  // Handler for guardrail violations on the response guardrails
  handleResponseGuardrailViolation: (violation: GuardrailResponse) => ({
    role: "assistant",
    content: `Custom canned response with dynamic data! The violation rule was ${violation.offending_guardrail}.`,
  }),
};

const layerupSecurity = new LayerupSecurity(layerupSecurityOptions);
// Invoke through the wrapper; guardrails run around the underlying LLM call.
// (The result is intentionally unused in this example.)
const response = await layerupSecurity.invoke(
  "Summarize this message: my name is Bob Dylan. My SSN is 123-45-6789."
);
0
lc_public_repos/langchainjs/examples/src
lc_public_repos/langchainjs/examples/src/llms/watsonx_ai.ts
import { WatsonxAI } from "@langchain/community/llms/watsonx_ai"; // Note that modelParameters are optional const model = new WatsonxAI({ modelId: "meta-llama/llama-2-70b-chat", modelParameters: { max_new_tokens: 100, min_new_tokens: 0, stop_sequences: [], repetition_penalty: 1, }, }); const res = await model.invoke( "What would be a good company name for a company that makes colorful socks?" ); console.log({ res });