index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/azure_openai.ts | import { AzureOpenAIEmbeddings } from "@langchain/openai";
const model = new AzureOpenAIEmbeddings({
azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY
azureOpenAIApiInstanceName: "<your_instance_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME
azureOpenAIApiEmbeddingsDeploymentName: "<your_embeddings_deployment_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME
azureOpenAIApiVersion: "<api_version>", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/embeddings/mistral.ts | import { MistralAIEmbeddings } from "@langchain/mistralai";
/* Embed queries */
const embeddings = new MistralAIEmbeddings({
apiKey: process.env.MISTRAL_API_KEY,
});
const res = await embeddings.embedQuery("Hello world");
console.log(res);
/* Embed documents */
const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);
console.log({ documentRes });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/replicate.ts | import { Replicate } from "@langchain/community/llms/replicate";
const modelA = new Replicate({
model:
"a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
});
// `call` is a simple string-in, string-out method for interacting with the model.
const resA = await modelA.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ resA });
/*
{
resA: 'Color Box'
}
*/
// `generate` allows you to generate multiple completions for multiple prompts (in a single request for some models).
const resB = await modelA.invoke([
"What would be a good company name a company that makes colorful socks?",
"What would be a good company name a company that makes colorful sweaters?",
]);
// `resB` is a `LLMResult` object with a `generations` field and `llmOutput` field.
// `generations` is a `Generation[][]`, each `Generation` having a `text` field.
// Each input to the LLM could have multiple generations (depending on the `n` parameter), hence the list of lists.
console.log(JSON.stringify(resB, null, 2));
/*
{
"generations": [
[
{
"text": "apron string"
}
],
[
{
"text": "Kulut"
}
]
]
}
*/
const text2image = new Replicate({
model:
"stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
});
const image = await text2image.invoke("A cat");
console.log({ image });
/*
{
"image": "https://replicate.delivery/pbxt/Nc8qkJ8zkdpDPdNSYuMaDErImcXVMUAybFrLk9Kane7IKOWIA/out-0.png"
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm.ts | import { OpenAI } from "@langchain/openai";
export const run = async () => {
const modelA = new OpenAI();
// `call` is a simple string-in, string-out method for interacting with the model.
const resA = await modelA.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ resA });
// { resA: '\n\nSocktastic Colors' }
// `generate` allows you to generate multiple completions for multiple prompts (in a single request for some models).
const resB = await modelA.invoke([
"What would be a good company name a company that makes colorful socks?",
"What would be a good company name a company that makes colorful sweaters?",
]);
// `resB` is a `LLMResult` object with a `generations` field and `llmOutput` field.
// `generations` is a `Generation[][]`, each `Generation` having a `text` field.
// Each input to the LLM could have multiple generations (depending on the `n` parameter), hence the list of lists.
console.log(JSON.stringify(resB, null, 2));
/*
{
"generations": [
[{
"text": "\n\nVibrant Socks Co.",
"generationInfo": {
"finishReason": "stop",
"logprobs": null
}
}],
[{
"text": "\n\nRainbow Knitworks.",
"generationInfo": {
"finishReason": "stop",
"logprobs": null
}
}]
],
"llmOutput": {
"tokenUsage": {
"completionTokens": 17,
"promptTokens": 29,
"totalTokens": 46
}
}
}
*/
// We can specify additional parameters the specific model provider supports, like `temperature`:
const modelB = new OpenAI({ temperature: 0.9 });
const resC = await modelA.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ resC });
// { resC: '\n\nKaleidoSox' }
// We can get the number of tokens for a given input for a specific model.
const numTokens = modelB.getNumTokens("How many tokens are in this input?");
console.log({ numTokens });
// { numTokens: 8 }
};
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/token_usage_tracking.ts | import { OpenAI } from "@langchain/openai";
const llm = new OpenAI({
model: "gpt-3.5-turbo-instruct",
callbacks: [
{
handleLLMEnd(output) {
console.log(JSON.stringify(output, null, 2));
},
},
],
});
await llm.invoke("Tell me a joke.");
/*
{
"generations": [
[
{
"text": "\n\nWhy don't scientists trust atoms?\n\nBecause they make up everything.",
"generationInfo": {
"finishReason": "stop",
"logprobs": null
}
}
]
],
"llmOutput": {
"tokenUsage": {
"completionTokens": 14,
"promptTokens": 5,
"totalTokens": 19
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llama_cpp.ts | import { LlamaCpp } from "@langchain/community/llms/llama_cpp";
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const question = "Where do Llamas come from?";
const model = await LlamaCpp.initialize({ modelPath: llamaPath });
console.log(`You: ${question}`);
const response = await model.invoke(question);
console.log(`AI : ${response}`);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/bedrock.ts | import { Bedrock } from "@langchain/community/llms/bedrock";
// Or, from web environments:
// import { Bedrock } from "@langchain/community/llms/bedrock/web";
// If no credentials are provided, the default credentials from
// @aws-sdk/credential-provider-node will be used.
const model = new Bedrock({
model: "ai21.j2-grande-instruct", // You can also do e.g. "anthropic.claude-v2"
region: "us-east-1",
// endpointUrl: "custom.amazonaws.com",
// credentials: {
// accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
// secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
// },
// modelKwargs: {},
});
const res = await model.invoke("Tell me a joke");
console.log(res);
/*
Why was the math book unhappy?
Because it had too many problems!
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llama_cpp_stream.ts | import { LlamaCpp } from "@langchain/community/llms/llama_cpp";
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";
const model = await LlamaCpp.initialize({
modelPath: llamaPath,
temperature: 0.7,
});
const prompt = "Tell me a short story about a happy Llama.";
const stream = await model.stream(prompt);
for await (const chunk of stream) {
console.log(chunk);
}
/*
Once
upon
a
time
,
in
the
rolling
hills
of
Peru
...
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_advanced.ts | import { OpenAI } from "@langchain/openai";
const model = new OpenAI({
// customize openai model that's used, `gpt-3.5-turbo-instruct` is the default
model: "gpt-3.5-turbo-instruct",
// `max_tokens` supports a magic -1 param where the max token length for the specified modelName
// is calculated and included in the request to OpenAI as the `max_tokens` param
maxTokens: -1,
// use `modelKwargs` to pass params directly to the openai call
// note that OpenAI uses snake_case instead of camelCase
modelKwargs: {
user: "me",
},
// for additional logging for debugging purposes
verbose: true,
});
const resA = await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ resA });
// { resA: '\n\nSocktastic Colors' }
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_debugging.ts | import { OpenAI } from "@langchain/openai";
import type { Serialized } from "@langchain/core/load/serializable";
import { LLMResult } from "@langchain/core/outputs";
// We can pass in a list of CallbackHandlers to the LLM constructor to get callbacks for various events.
const model = new OpenAI({
callbacks: [
{
handleLLMStart: async (llm: Serialized, prompts: string[]) => {
console.log(JSON.stringify(llm, null, 2));
console.log(JSON.stringify(prompts, null, 2));
},
handleLLMEnd: async (output: LLMResult) => {
console.log(JSON.stringify(output, null, 2));
},
handleLLMError: async (err: Error) => {
console.error(err);
},
},
],
});
await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
// {
// "name": "openai"
// }
// [
// "What would be a good company name a company that makes colorful socks?"
// ]
// {
// "generations": [
// [
// {
// "text": "\n\nSocktastic Splashes.",
// "generationInfo": {
// "finishReason": "stop",
// "logprobs": null
// }
// }
// ]
// ],
// "llmOutput": {
// "tokenUsage": {
// "completionTokens": 9,
// "promptTokens": 14,
// "totalTokens": 23
// }
// }
// }
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/yandex.ts | import { YandexGPT } from "@langchain/yandex/llms";
const model = new YandexGPT();
const res = await model.invoke(['Translate "I love programming" into French.']);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/custom_advanced.ts | import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { LLMResult } from "@langchain/core/outputs";
import {
BaseLLM,
BaseLLMCallOptions,
BaseLLMParams,
} from "@langchain/core/language_models/llms";
export interface AdvancedCustomLLMCallOptions extends BaseLLMCallOptions {}
export interface AdvancedCustomLLMParams extends BaseLLMParams {
n: number;
}
export class AdvancedCustomLLM extends BaseLLM<AdvancedCustomLLMCallOptions> {
n: number;
constructor(fields: AdvancedCustomLLMParams) {
super(fields);
this.n = fields.n;
}
_llmType() {
return "advanced_custom_llm";
}
async _generate(
inputs: string[],
_options: this["ParsedCallOptions"],
_runManager?: CallbackManagerForLLMRun
): Promise<LLMResult> {
const outputs = inputs.map((input) => input.slice(0, this.n));
// One input could generate multiple outputs.
const generations = outputs.map((output) => [
{
text: output,
// Optional additional metadata for the generation
generationInfo: { outputCount: 1 },
},
]);
const tokenUsage = {
usedTokens: this.n,
};
return {
generations,
llmOutput: { tokenUsage },
};
}
}
const llm = new AdvancedCustomLLM({ n: 4 });
console.log(await llm.invoke("I am an LLM"));
const eventStream = llm.streamEvents("I am an LLM", {
version: "v2",
});
for await (const event of eventStream) {
if (event.event === "on_llm_end") {
console.log(JSON.stringify(event, null, 2));
}
}
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/azure_openai-managed_identity.ts | import {
DefaultAzureCredential,
getBearerTokenProvider,
} from "@azure/identity";
import { AzureOpenAI } from "@langchain/openai";
const credentials = new DefaultAzureCredential();
const azureADTokenProvider = getBearerTokenProvider(
credentials,
"https://cognitiveservices.azure.com/.default"
);
const model = new AzureOpenAI({
azureADTokenProvider,
azureOpenAIApiInstanceName: "<your_instance_name>",
azureOpenAIApiDeploymentName: "<your_deployment_name>",
azureOpenAIApiVersion: "<api_version>",
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/openai_basePath.ts | import { OpenAI } from "@langchain/openai";
const model = new OpenAI(
{ temperature: 0 },
{ baseURL: "https://oai.hconeai.com/v1" }
);
const res = await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log(res);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/ai21.ts | import { AI21 } from "@langchain/community/llms/ai21";
const model = new AI21({
ai21ApiKey: "YOUR_AI21_API_KEY", // Or set as process.env.AI21_API_KEY
});
const res = await model.invoke(`Translate "I love programming" into German.`);
console.log({ res });
/*
{
res: "\nIch liebe das Programmieren."
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/aleph_alpha.ts | import { AlephAlpha } from "@langchain/community/llms/aleph_alpha";
const model = new AlephAlpha({
aleph_alpha_api_key: "YOUR_ALEPH_ALPHA_API_KEY", // Or set as process.env.ALEPH_ALPHA_API_KEY
});
const res = await model.invoke(`Is cereal soup?`);
console.log({ res });
/*
{
res: "\nIs soup a cereal? I don’t think so, but it is delicious."
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/togetherai.ts | import { TogetherAI } from "@langchain/community/llms/togetherai";
import { PromptTemplate } from "@langchain/core/prompts";
const model = new TogetherAI({
model: "mistralai/Mixtral-8x7B-Instruct-v0.1",
});
const prompt = PromptTemplate.fromTemplate(`System: You are a helpful assistant.
User: {input}.
Assistant:`);
const chain = prompt.pipe(model);
const response = await chain.invoke({
input: `Tell me a joke about bears`,
});
console.log("response", response);
/**
response Sure, here's a bear joke for you: Why do bears hate shoes so much? Because they like to run around in their bear feet!
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_streaming_stream_method.ts | import { OpenAI } from "@langchain/openai";
const model = new OpenAI({
maxTokens: 25,
});
const stream = await model.stream("Tell me a joke.");
for await (const chunk of stream) {
console.log(chunk);
}
/*
Q
:
What
did
the
fish
say
when
it
hit
the
wall
?
A
:
Dam
!
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_with_tracing.ts | import { OpenAI, ChatOpenAI } from "@langchain/openai";
import * as process from "process";
import { SystemMessage, HumanMessage } from "@langchain/core/messages";
export const run = async () => {
process.env.LANGCHAIN_HANDLER = "langchain";
const model = new OpenAI({ temperature: 0.9 });
const resA = await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ resA });
const chat = new ChatOpenAI({ temperature: 0 });
const system_message = new SystemMessage("You are to chat with a user.");
const message = new HumanMessage("Hello!");
const resB = await chat.invoke([system_message, message]);
console.log({ resB });
};
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/cloudflare_workersai.ts | import { CloudflareWorkersAI } from "@langchain/cloudflare";
const model = new CloudflareWorkersAI({
model: "@cf/meta/llama-2-7b-chat-int8", // Default value
cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID,
cloudflareApiToken: process.env.CLOUDFLARE_API_TOKEN,
// Pass a custom base URL to use Cloudflare AI Gateway
// baseUrl: `https://gateway.ai.cloudflare.com/v1/{YOUR_ACCOUNT_ID}/{GATEWAY_NAME}/workers-ai/`,
});
const response = await model.invoke(
`Translate "I love programming" into German.`
);
console.log(response);
/*
Here are a few options:
1. "Ich liebe Programmieren" - This is the most common way to say "I love programming" in German. "Liebe" means "love" in German, and "Programmieren" means "programming".
2. "Programmieren macht mir Spaß" - This means "Programming makes me happy". This is a more casual way to express your love for programming in German.
3. "Ich bin ein großer Fan von Programmieren" - This means "I'm a big fan of programming". This is a more formal way to express your love for programming in German.
4. "Programmieren ist mein Hobby" - This means "Programming is my hobby". This is a more casual way to express your love for programming in German.
5. "Ich liebe es, Programme zu schreiben" - This means "I love writing programs". This is a more formal way to express your love for programming in German.
*/
const stream = await model.stream(
`Translate "I love programming" into German.`
);
for await (const chunk of stream) {
console.log(chunk);
}
/*
Here
are
a
few
options
:
1
.
"
I
ch
lie
be
Program
...
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_streaming.ts | import { OpenAI } from "@langchain/openai";
// To enable streaming, we pass in `streaming: true` to the LLM constructor.
// Additionally, we pass in a handler for the `handleLLMNewToken` event.
const model = new OpenAI({
maxTokens: 25,
streaming: true,
});
const response = await model.invoke("Tell me a joke.", {
callbacks: [
{
handleLLMNewToken(token: string) {
console.log({ token });
},
},
],
});
console.log(response);
/*
{ token: '\n' }
{ token: '\n' }
{ token: 'Q' }
{ token: ':' }
{ token: ' Why' }
{ token: ' did' }
{ token: ' the' }
{ token: ' chicken' }
{ token: ' cross' }
{ token: ' the' }
{ token: ' playground' }
{ token: '?' }
{ token: '\n' }
{ token: 'A' }
{ token: ':' }
{ token: ' To' }
{ token: ' get' }
{ token: ' to' }
{ token: ' the' }
{ token: ' other' }
{ token: ' slide' }
{ token: '.' }
Q: Why did the chicken cross the playground?
A: To get to the other slide.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/deepinfra.ts | import { DeepInfraLLM } from "@langchain/community/llms/deepinfra";
const apiKey = process.env.DEEPINFRA_API_TOKEN;
const model = "meta-llama/Meta-Llama-3-70B-Instruct";
const llm = new DeepInfraLLM({
temperature: 0.7,
maxTokens: 20,
model,
apiKey,
maxRetries: 5,
});
const res = await llm.invoke(
"What is the next step in the process of making a good game?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/sagemaker_endpoint.ts | import {
SageMakerEndpoint,
SageMakerLLMContentHandler,
} from "@langchain/community/llms/sagemaker_endpoint";
interface ResponseJsonInterface {
generation: {
content: string;
};
}
// Custom for whatever model you'll be using
class LLama213BHandler implements SageMakerLLMContentHandler {
contentType = "application/json";
accepts = "application/json";
async transformInput(
prompt: string,
modelKwargs: Record<string, unknown>
): Promise<Uint8Array> {
const payload = {
inputs: [[{ role: "user", content: prompt }]],
parameters: modelKwargs,
};
const stringifiedPayload = JSON.stringify(payload);
return new TextEncoder().encode(stringifiedPayload);
}
async transformOutput(output: Uint8Array): Promise<string> {
const response_json = JSON.parse(
new TextDecoder("utf-8").decode(output)
) as ResponseJsonInterface[];
const content = response_json[0]?.generation.content ?? "";
return content;
}
}
const contentHandler = new LLama213BHandler();
const model = new SageMakerEndpoint({
endpointName: "aws-llama-2-13b-chat",
modelKwargs: {
temperature: 0.5,
max_new_tokens: 700,
top_p: 0.9,
},
endpointKwargs: {
CustomAttributes: "accept_eula=true",
},
contentHandler,
clientOptions: {
region: "YOUR AWS ENDPOINT REGION",
credentials: {
accessKeyId: "YOUR AWS ACCESS ID",
secretAccessKey: "YOUR AWS SECRET ACCESS KEY",
},
},
});
const res = await model.invoke(
"Hello, my name is John Doe, tell me a joke about llamas "
);
console.log(res);
/*
[
{
content: "Hello, John Doe! Here's a llama joke for you:
Why did the llama become a gardener?
Because it was great at llama-scaping!"
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/azure_openai-base_path.ts | import { AzureOpenAI } from "@langchain/openai";
const model = new AzureOpenAI({
azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY
azureOpenAIApiDeploymentName: "<your_deployment_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME
azureOpenAIApiVersion: "<api_version>", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION
azureOpenAIBasePath:
"https://westeurope.api.microsoft.com/openai/deployments", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_quick_start.ts | import { OpenAI } from "@langchain/openai";
export const run = async () => {
const model = new OpenAI();
// `call` is a simple string-in, string-out method for interacting with the model.
const resA = await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ resA });
// { resA: '\n\nSocktastic Colors' }
};
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/openai-batch.ts | import { OpenAI, ChatOpenAI } from "@langchain/openai";
import process from "process";
import { HumanMessage } from "@langchain/core/messages";
process.env.LANGCHAIN_TRACING_V2 = "true";
const model = new OpenAI({});
const prompts = [
"Say hello to Bob.",
"Say hello to Alice.",
"Say hello to John.",
"Say hello to Mary.",
];
const res = await model.invoke(prompts);
console.log({ res });
const chat = new ChatOpenAI({
model: "gpt-3.5-turbo",
});
const messages = prompts.map((prompt) => new HumanMessage(prompt));
const res2 = await chat.invoke(messages);
console.log({ res2 });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/writer.ts | import { Writer } from "@langchain/community/llms/writer";
const model = new Writer({
maxTokens: 20,
apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.WRITER_API_KEY
orgId: "YOUR-ORGANIZATION-ID", // In Node.js defaults to process.env.WRITER_ORG_ID
});
const res = await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/ollama.ts | import { Ollama } from "@langchain/community/llms/ollama";
const ollama = new Ollama({
baseUrl: "http://localhost:11434", // Default value
model: "llama2", // Default value
});
const stream = await ollama.stream(
`Translate "I love programming" into German.`
);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
console.log(chunks.join(""));
/*
I'm glad to help! "I love programming" can be translated to German as "Ich liebe Programmieren."
It's important to note that the translation of "I love" in German is "ich liebe," which is a more formal and polite way of saying "I love." In informal situations, people might use "mag ich" or "möchte ich" instead.
Additionally, the word "Programmieren" is the correct term for "programming" in German. It's a combination of two words: "Programm" and "-ieren," which means "to do something." So, the full translation of "I love programming" would be "Ich liebe Programmieren.
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/fireworks.ts | import { Fireworks } from "@langchain/community/llms/fireworks";
const model = new Fireworks({
temperature: 0.9,
// In Node.js defaults to process.env.FIREWORKS_API_KEY
apiKey: "YOUR-API-KEY",
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/togetherai_stream.ts | import { TogetherAI } from "@langchain/community/llms/togetherai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
const model = new TogetherAI({
model: "mistralai/Mixtral-8x7B-Instruct-v0.1",
streaming: true,
});
const prompt = ChatPromptTemplate.fromMessages([
["ai", "You are a helpful assistant."],
[
"human",
`Tell me a joke about bears.
Assistant:`,
],
]);
const chain = prompt.pipe(model);
const result = await chain.stream({});
let fullText = "";
for await (const item of result) {
console.log("stream item:", item);
fullText += item;
}
console.log(fullText);
/**
stream item: Sure
stream item: ,
stream item: here
stream item: '
stream item: s
stream item: a
stream item: light
stream item: -
stream item: heart
stream item: ed
stream item: bear
stream item: joke
stream item: for
stream item: you
stream item: :
stream item:
stream item:
stream item: Why
stream item: do
stream item: bears
stream item: hate
stream item: shoes
stream item: so
stream item: much
stream item: ?
stream item:
stream item:
stream item: Because
stream item: they
stream item: like
stream item: to
stream item: run
stream item: around
stream item: in
stream item: their
stream item: bear
stream item: feet
stream item: !
stream item: </s>
Sure, here's a light-hearted bear joke for you:
Why do bears hate shoes so much?
Because they like to run around in their bear feet!</s>
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_cancellation.ts | import { OpenAI } from "@langchain/openai";
const model = new OpenAI({ temperature: 1 });
const controller = new AbortController();
// Call `controller.abort()` somewhere to cancel the request.
const res = await model.invoke(
"What would be a good name for a company that makes colorful socks?",
{ signal: controller.signal }
);
console.log(res);
/*
'\n\nSocktastic Colors'
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/ollama_multimodal.ts | import { Ollama } from "@langchain/community/llms/ollama";
import * as fs from "node:fs/promises";
const imageData = await fs.readFile("./hotdog.jpg");
const model = new Ollama({
model: "llava",
baseUrl: "http://127.0.0.1:11434",
}).bind({
images: [imageData.toString("base64")],
});
const res = await model.invoke("What's in this image?");
console.log({ res });
/*
{
res: ' The image displays a hot dog sitting on top of a bun, which is placed directly on the table. The hot dog has a striped pattern on it and looks ready to be eaten.'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/custom.ts | import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
export interface CustomLLMInput extends BaseLLMParams {
n: number;
}
export class CustomLLM extends LLM {
n: number;
constructor(fields: CustomLLMInput) {
super(fields);
this.n = fields.n;
}
_llmType() {
return "custom";
}
async _call(
prompt: string,
_options: this["ParsedCallOptions"],
// Can pass runManager into sub runs for tracing
_runManager: CallbackManagerForLLMRun
): Promise<string> {
return prompt.slice(0, this.n);
}
async *_streamResponseChunks(
prompt: string,
_options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): AsyncGenerator<GenerationChunk> {
for (const letter of prompt.slice(0, this.n)) {
yield new GenerationChunk({
text: letter,
});
await runManager?.handleLLMNewToken(letter);
}
}
}
const llm = new CustomLLM({ n: 4 });
await llm.invoke("I am an LLM");
const stream = await llm.stream("I am an LLM");
for await (const chunk of stream) {
console.log(chunk);
}
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/replicate_llama2.ts | import { Replicate } from "@langchain/community/llms/replicate";
const model = new Replicate({
model:
"a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
});
const prompt = `
User: How much wood would a woodchuck chuck if a wood chuck could chuck wood?
Assistant:`;
const res = await model.invoke(prompt);
console.log({ res });
/*
{
res: "I'm happy to help! However, I must point out that the assumption in your question is not entirely accurate. " +
+ "Woodchucks, also known as groundhogs, do not actually chuck wood. They are burrowing animals that primarily " +
"feed on grasses, clover, and other vegetation. They do not have the physical ability to chuck wood.\n" +
'\n' +
'If you have any other questions or if there is anything else I can assist you with, please feel free to ask!'
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/friendli.ts | import { Friendli } from "@langchain/community/llms/friendli";
const model = new Friendli({
model: "mixtral-8x7b-instruct-v0-1", // Default value
friendliToken: process.env.FRIENDLI_TOKEN,
friendliTeam: process.env.FRIENDLI_TEAM,
maxTokens: 18,
temperature: 0.75,
topP: 0.25,
frequencyPenalty: 0,
stop: [],
});
const response = await model.invoke(
"Check the Grammar: She dont like to eat vegetables, but she loves fruits."
);
console.log(response);
/*
Correct: She doesn't like to eat vegetables, but she loves fruits
*/
const stream = await model.stream(
"Check the Grammar: She dont like to eat vegetables, but she loves fruits."
);
for await (const chunk of stream) {
console.log(chunk);
}
/*
Cor
rect
:
She
doesn
...
she
loves
fruits
*/
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_timeout.ts | import { OpenAI } from "@langchain/openai";
const model = new OpenAI({ temperature: 1 });
const resA = await model.invoke(
"What would be a good company name a company that makes colorful socks?",
{ timeout: 1000 } // 1s timeout
);
console.log({ resA });
// '\n\nSocktastic Colors' }
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/cohere.ts | import { Cohere } from "@langchain/cohere";
const model = new Cohere({
maxTokens: 20,
apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.COHERE_API_KEY
});
const res = await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log({ res });
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/openai_userid.ts | import { OpenAI } from "@langchain/openai";
const model = new OpenAI({ temperature: 0 });
const res = await model.invoke(
"What would be a good company name a company that makes colorful socks?",
{
options: {
headers: {
"User-Id": "123",
},
},
}
);
console.log(res);
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/azure_openai.ts | import { AzureOpenAI } from "@langchain/openai";
const model = new AzureOpenAI({
azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY
azureOpenAIApiInstanceName: "<your_instance_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME
azureOpenAIApiDeploymentName: "<your_deployment_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME
azureOpenAIApiVersion: "<api_version>", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION
});
|
0 | lc_public_repos/langchainjs/examples/src/models | lc_public_repos/langchainjs/examples/src/models/llm/llm_streaming_stdout.ts | import { OpenAI } from "@langchain/openai";
// Streaming is enabled by passing `streaming: true` to the constructor.
// Tokens are surfaced through the `handleLLMNewToken` callback as they
// arrive; here each token is piped straight to stdout.
const writeToken = (token: string) => {
  process.stdout.write(token);
};

const chat = new OpenAI({
  streaming: true,
  callbacks: [{ handleLLMNewToken: writeToken }],
});

await chat.invoke("Write me a song about sparkling water.");
/*
Verse 1
Crystal clear and made with care
Sparkling water on my lips, so refreshing in the air
Fizzy bubbles, light and sweet
My favorite beverage I can’t help but repeat
Chorus
A toast to sparkling water, I’m feeling so alive
Let’s take a sip, and let’s take a drive
A toast to sparkling water, it’s the best I’ve had in my life
It’s the best way to start off the night
Verse 2
It’s the perfect drink to quench my thirst
It’s the best way to stay hydrated, it’s the first
A few ice cubes, a splash of lime
It will make any day feel sublime
...
*/
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/babyagi/weather_with_tools.ts | import { BabyAGI } from "langchain/experimental/babyagi";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { LLMChain } from "langchain/chains";
import { ChainTool } from "langchain/tools";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { PromptTemplate } from "@langchain/core/prompts";
import { Tool } from "@langchain/core/tools";
import { SerpAPI } from "@langchain/community/tools/serpapi";
// First, we create a custom agent which will serve as execution chain.
const todoListPrompt = PromptTemplate.fromTemplate(
  "You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
);

// The agent gets two tools: a web search and a todo-list generator
// backed by its own LLMChain.
const agentTools: Tool[] = [
  new SerpAPI(process.env.SERPAPI_API_KEY, {
    location: "San Francisco,California,United States",
    hl: "en",
    gl: "us",
  }),
  new ChainTool({
    name: "TODO",
    description:
      "useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
    chain: new LLMChain({
      llm: new OpenAI({ temperature: 0 }),
      prompt: todoListPrompt,
    }),
  }),
];

// A zero-shot ReAct agent executes each individual task.
const executor = await initializeAgentExecutorWithOptions(
  agentTools,
  new OpenAI({ temperature: 0 }),
  {
    agentType: "zero-shot-react-description",
    agentArgs: {
      prefix: `You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.`,
      suffix: `Question: {task}
{agent_scratchpad}`,
      inputVariables: ["objective", "task", "context", "agent_scratchpad"],
    },
  }
);

// Completed-task results are stored in an in-memory vector store.
const memoryStore = new MemoryVectorStore(new OpenAIEmbeddings());

// Then, we create a BabyAGI instance.
const babyAGI = BabyAGI.fromLLM({
  llm: new OpenAI({ temperature: 0 }),
  executionChain: executor, // an agent executor is a chain
  vectorstore: memoryStore,
  maxIterations: 10,
});

await babyAGI.invoke({
  objective: "Write a short weather report for SF today",
});
/*
*****TASK LIST*****
1: Make a todo list
*****NEXT TASK*****
1: Make a todo list
*****TASK RESULT*****
Today in San Francisco, the weather is sunny with a temperature of 70 degrees Fahrenheit, light winds, and low humidity. The forecast for the next few days is expected to be similar.
*****TASK LIST*****
2: Find the forecasted temperature for the next few days in San Francisco
3: Find the forecasted wind speed for the next few days in San Francisco
4: Find the forecasted humidity for the next few days in San Francisco
5: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days
6: Research the average temperature for San Francisco in the past week
7: Research the average wind speed for San Francisco in the past week
8: Research the average humidity for San Francisco in the past week
9: Create a graph showing the temperature, wind speed, and humidity for San Francisco over the past week
*****NEXT TASK*****
2: Find the forecasted temperature for the next few days in San Francisco
*****TASK RESULT*****
The forecasted temperature for the next few days in San Francisco is 63°, 65°, 71°, 73°, and 66°.
*****TASK LIST*****
3: Find the forecasted wind speed for the next few days in San Francisco
4: Find the forecasted humidity for the next few days in San Francisco
5: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days
6: Research the average temperature for San Francisco in the past week
7: Research the average wind speed for San Francisco in the past week
8: Research the average humidity for San Francisco in the past week
9: Create a graph showing the temperature, wind speed, and humidity for San Francisco over the past week
10: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average temperature, wind speed, and humidity for San Francisco over the past week
11: Find the forecasted precipitation for the next few days in San Francisco
12: Research the average wind direction for San Francisco in the past week
13: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the past week
14: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to
*****NEXT TASK*****
3: Find the forecasted wind speed for the next few days in San Francisco
*****TASK RESULT*****
West winds 10 to 20 mph. Gusts up to 35 mph in the evening. Tuesday. Sunny. Highs in the 60s to upper 70s. West winds 5 to 15 mph.
*****TASK LIST*****
4: Research the average precipitation for San Francisco in the past week
5: Research the average temperature for San Francisco in the past week
6: Research the average wind speed for San Francisco in the past week
7: Research the average humidity for San Francisco in the past week
8: Research the average wind direction for San Francisco in the past week
9: Find the forecasted temperature, wind speed, and humidity for San Francisco over the next few days
10: Find the forecasted precipitation for the next few days in San Francisco
11: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days
12: Create a graph showing the temperature, wind speed, and humidity for San Francisco over the past week
13: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the past month
14: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average temperature, wind speed, and humidity for San Francisco over the past week
15: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the
*****NEXT TASK*****
4: Research the average precipitation for San Francisco in the past week
*****TASK RESULT*****
According to Weather Underground, the forecasted precipitation for San Francisco in the next few days is 7-hour rain and snow with 24-hour rain accumulation.
*****TASK LIST*****
5: Research the average wind speed for San Francisco over the past month
6: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the past month
7: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average temperature, wind speed, and humidity for San Francisco over the past month
8: Research the average temperature for San Francisco over the past month
9: Research the average wind direction for San Francisco over the past month
10: Create a graph showing the forecasted precipitation for San Francisco over the next few days
11: Compare the forecasted precipitation for San Francisco over the next few days to the average precipitation for San Francisco over the past week
12: Find the forecasted temperature, wind speed, and humidity for San Francisco over the next few days
13: Find the forecasted precipitation for the next few days in San Francisco
14: Create a graph showing the temperature, wind speed, and humidity for San Francisco over the past week
15: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days
16: Compare the forecast
*****NEXT TASK*****
5: Research the average wind speed for San Francisco over the past month
*****TASK RESULT*****
The average wind speed for San Francisco over the past month is 3.2 meters per second.
*****TASK LIST*****
6: Find the forecasted temperature, wind speed, and humidity for San Francisco over the next few days,
7: Find the forecasted precipitation for the next few days in San Francisco,
8: Create a graph showing the temperature, wind speed, and humidity for San Francisco over the past week,
9: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days,
10: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average wind speed for San Francisco over the past month,
11: Research the average wind speed for San Francisco over the past week,
12: Create a graph showing the forecasted precipitation for San Francisco over the next few days,
13: Compare the forecasted precipitation for San Francisco over the next few days to the average precipitation for San Francisco over the past month,
14: Research the average temperature for San Francisco over the past month,
15: Research the average humidity for San Francisco over the past month,
16: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average temperature,
*****NEXT TASK*****
6: Find the forecasted temperature, wind speed, and humidity for San Francisco over the next few days,
*****TASK RESULT*****
The forecast for San Francisco over the next few days is mostly sunny, with a high near 64. West wind 7 to 12 mph increasing to 13 to 18 mph in the afternoon. Winds could gust as high as 22 mph. Humidity will be around 50%.
*****TASK LIST*****
7: Find the forecasted precipitation for the next few days in San Francisco,
8: Create a graph showing the temperature, wind speed, and humidity for San Francisco over the past week,
9: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days,
10: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average wind speed for San Francisco over the past month,
11: Research the average wind speed for San Francisco over the past week,
12: Create a graph showing the forecasted precipitation for San Francisco over the next few days,
13: Compare the forecasted precipitation for San Francisco over the next few days to the average precipitation for San Francisco over the past month,
14: Research the average temperature for San Francisco over the past month,
15: Research the average humidity for San Francisco over the past month,
16: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average temperature
*****NEXT TASK*****
7: Find the forecasted precipitation for the next few days in San Francisco,
*****TASK RESULT*****
According to Weather Underground, the forecasted precipitation for the next few days in San Francisco is 7-hour rain and snow with 24-hour rain accumulation, radar and satellite maps of precipitation.
*****TASK LIST*****
8: Create a graph showing the temperature, wind speed, and humidity for San Francisco over the past week,
9: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days,
10: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average wind speed for San Francisco over the past month,
11: Research the average wind speed for San Francisco over the past week,
12: Create a graph showing the forecasted precipitation for San Francisco over the next few days,
13: Compare the forecasted precipitation for San Francisco over the next few days to the average precipitation for San Francisco over the past month,
14: Research the average temperature for San Francisco over the past month,
15: Research the average humidity for San Francisco over the past month,
16: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average temperature
*****NEXT TASK*****
8: Create a graph showing the temperature, wind speed, and humidity for San Francisco over the past week,
*****TASK RESULT*****
A graph showing the temperature, wind speed, and humidity for San Francisco over the past week.
*****TASK LIST*****
9: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days
10: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average wind speed for San Francisco over the past month
11: Research the average wind speed for San Francisco over the past week
12: Create a graph showing the forecasted precipitation for San Francisco over the next few days
13: Compare the forecasted precipitation for San Francisco over the next few days to the average precipitation for San Francisco over the past month
14: Research the average temperature for San Francisco over the past month
15: Research the average humidity for San Francisco over the past month
16: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average temperature
*****NEXT TASK*****
9: Create a graph showing the forecasted temperature, wind speed, and humidity for San Francisco over the next few days
*****TASK RESULT*****
The forecasted temperature, wind speed, and humidity for San Francisco over the next few days can be seen in the graph created.
*****TASK LIST*****
10: Research the average wind speed for San Francisco over the past month
11: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average humidity for San Francisco over the past month
12: Create a graph showing the forecasted precipitation for San Francisco over the next few days
13: Compare the forecasted precipitation for San Francisco over the next few days to the average precipitation for San Francisco over the past month
14: Research the average temperature for San Francisco over the past week
15: Compare the forecasted temperature, wind speed, and humidity for San Francisco over the next few days to the average wind speed for San Francisco over the past week
*****NEXT TASK*****
10: Research the average wind speed for San Francisco over the past month
*****TASK RESULT*****
The average wind speed for San Francisco over the past month is 2.7 meters per second.
[...]
*/
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/babyagi/weather.ts | import { BabyAGI } from "langchain/experimental/babyagi";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
// Task results are embedded and held in an in-memory vector store.
const taskResultStore = new MemoryVectorStore(new OpenAIEmbeddings());

// Build a BabyAGI loop capped at three task iterations.
const babyAGI = BabyAGI.fromLLM({
  llm: new OpenAI({ temperature: 0 }),
  maxIterations: 3,
  vectorstore: taskResultStore,
});

await babyAGI.invoke({ objective: "Write a weather report for SF today" });
/*
*****TASK LIST*****
1: Make a todo list
*****NEXT TASK*****
1: Make a todo list
*****TASK RESULT*****
1. Check the weather forecast for San Francisco today
2. Make note of the temperature, humidity, wind speed, and other relevant weather conditions
3. Write a weather report summarizing the forecast
4. Check for any weather alerts or warnings
5. Share the report with the relevant stakeholders
*****TASK LIST*****
2: Check the current temperature in San Francisco
3: Check the current humidity in San Francisco
4: Check the current wind speed in San Francisco
5: Check for any weather alerts or warnings in San Francisco
6: Check the forecast for the next 24 hours in San Francisco
7: Check the forecast for the next 48 hours in San Francisco
8: Check the forecast for the next 72 hours in San Francisco
9: Check the forecast for the next week in San Francisco
10: Check the forecast for the next month in San Francisco
11: Check the forecast for the next 3 months in San Francisco
1: Write a weather report for SF today
*****NEXT TASK*****
2: Check the current temperature in San Francisco
*****TASK RESULT*****
I will check the current temperature in San Francisco. I will use an online weather service to get the most up-to-date information.
*****TASK LIST*****
3: Check the current UV index in San Francisco
4: Check the current air quality in San Francisco
5: Check the current precipitation levels in San Francisco
6: Check the current cloud cover in San Francisco
7: Check the current barometric pressure in San Francisco
8: Check the current dew point in San Francisco
9: Check the current wind direction in San Francisco
10: Check the current humidity levels in San Francisco
1: Check the current temperature in San Francisco to the average temperature for this time of year
2: Check the current visibility in San Francisco
11: Write a weather report for SF today
*****NEXT TASK*****
3: Check the current UV index in San Francisco
*****TASK RESULT*****
The current UV index in San Francisco is moderate, with a value of 5. This means that it is safe to be outside for short periods of time without sunscreen, but it is still recommended to wear sunscreen and protective clothing when outside for extended periods of time.
*/
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/generative_agents/generative_agents.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { TimeWeightedVectorStoreRetriever } from "langchain/retrievers/time_weighted";
import {
GenerativeAgentMemory,
GenerativeAgent,
} from "langchain/experimental/generative_agents";
const Simulation = async () => {
const userName = "USER";
const llm = new OpenAI({
temperature: 0.9,
maxTokens: 1500,
});
const createNewMemoryRetriever = async () => {
// Create a new, demo in-memory vector store retriever unique to the agent.
// Better results can be achieved with a more sophisticated vector store.
const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
const retriever = new TimeWeightedVectorStoreRetriever({
vectorStore,
otherScoreKeys: ["importance"],
k: 15,
});
return retriever;
};
// Initializing Tommie
const tommiesMemory: GenerativeAgentMemory = new GenerativeAgentMemory(
llm,
await createNewMemoryRetriever(),
{ reflectionThreshold: 8 }
);
const tommie: GenerativeAgent = new GenerativeAgent(llm, tommiesMemory, {
name: "Tommie",
age: 25,
traits: "anxious, likes design, talkative",
status: "looking for a job",
});
console.log("Tommie's first summary:\n", await tommie.getSummary());
/*
Tommie's first summary:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is an individual with no specific core characteristics described.
*/
// Let's give Tommie some memories!
const tommieObservations = [
"Tommie remembers his dog, Bruno, from when he was a kid",
"Tommie feels tired from driving so far",
"Tommie sees the new home",
"The new neighbors have a cat",
"The road is noisy at night",
"Tommie is hungry",
"Tommie tries to get some rest.",
];
for (const observation of tommieObservations) {
await tommie.addMemory(observation, new Date());
}
// Checking Tommie's summary again after giving him some memories
console.log(
"Tommie's second summary:\n",
await tommie.getSummary({ forceRefresh: true })
);
/*
Tommie's second summary:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie remembers his dog, is tired from driving, sees a new home with neighbors who have a cat, is aware of the noisy road at night, is hungry, and tries to get some rest.
*/
const interviewAgent = async (
agent: GenerativeAgent,
message: string
): Promise<string> => {
// Simple wrapper helping the user interact with the agent
const newMessage = `${userName} says ${message}`;
const response = await agent.generateDialogueResponse(newMessage);
return response[1];
};
// Let's have Tommie start going through a day in his life.
const observations = [
"Tommie wakes up to the sound of a noisy construction site outside his window.",
"Tommie gets out of bed and heads to the kitchen to make himself some coffee.",
"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.",
"Tommie finally finds the filters and makes himself a cup of coffee.",
"The coffee tastes bitter, and Tommie regrets not buying a better brand.",
"Tommie checks his email and sees that he has no job offers yet.",
"Tommie spends some time updating his resume and cover letter.",
"Tommie heads out to explore the city and look for job openings.",
"Tommie sees a sign for a job fair and decides to attend.",
"The line to get in is long, and Tommie has to wait for an hour.",
"Tommie meets several potential employers at the job fair but doesn't receive any offers.",
"Tommie leaves the job fair feeling disappointed.",
"Tommie stops by a local diner to grab some lunch.",
"The service is slow, and Tommie has to wait for 30 minutes to get his food.",
"Tommie overhears a conversation at the next table about a job opening.",
"Tommie asks the diners about the job opening and gets some information about the company.",
"Tommie decides to apply for the job and sends his resume and cover letter.",
"Tommie continues his search for job openings and drops off his resume at several local businesses.",
"Tommie takes a break from his job search to go for a walk in a nearby park.",
"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.",
"Tommie sees a group of people playing frisbee and decides to join in.",
"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.",
"Tommie goes back to his apartment to rest for a bit.",
"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.",
"Tommie starts to feel frustrated with his job search.",
"Tommie calls his best friend to vent about his struggles.",
"Tommie's friend offers some words of encouragement and tells him to keep trying.",
"Tommie feels slightly better after talking to his friend.",
];
// Let's send Tommie on his way. We'll check in on his summary every few observations to watch him evolve
for (let i = 0; i < observations.length; i += 1) {
const observation = observations[i];
const [, reaction] = await tommie.generateReaction(observation);
console.log("\x1b[32m", observation, "\x1b[0m", reaction);
if ((i + 1) % 20 === 0) {
console.log("*".repeat(40));
console.log(
"\x1b[34m",
`After ${
i + 1
} observations, Tommie's summary is:\n${await tommie.getSummary({
forceRefresh: true,
})}`,
"\x1b[0m"
);
console.log("*".repeat(40));
}
}
/*
Tommie wakes up to the sound of a noisy construction site outside his window. Tommie REACT: Tommie groans in frustration and covers his ears with his pillow.
Tommie gets out of bed and heads to the kitchen to make himself some coffee. Tommie REACT: Tommie rubs his tired eyes before heading to the kitchen to make himself some coffee.
Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some. Tommie REACT: Tommie groans and looks through his moving boxes in search of coffee filters.
Tommie finally finds the filters and makes himself a cup of coffee. Tommie REACT: Tommie sighs in relief and prepares himself a much-needed cup of coffee.
The coffee tastes bitter, and Tommie regrets not buying a better brand. Tommie REACT: Tommie frowns in disappointment as he takes a sip of the bitter coffee.
Tommie checks his email and sees that he has no job offers yet. Tommie REACT: Tommie sighs in disappointment before pushing himself away from the computer with a discouraged look on his face.
Tommie spends some time updating his resume and cover letter. Tommie REACT: Tommie takes a deep breath and stares at the computer screen as he updates his resume and cover letter.
Tommie heads out to explore the city and look for job openings. Tommie REACT: Tommie takes a deep breath and steps out into the city, ready to find the perfect job opportunity.
Tommie sees a sign for a job fair and decides to attend. Tommie REACT: Tommie takes a deep breath and marches towards the job fair, determination in his eyes.
The line to get in is long, and Tommie has to wait for an hour. Tommie REACT: Tommie groans in frustration as he notices the long line.
Tommie meets several potential employers at the job fair but doesn't receive any offers. Tommie REACT: Tommie's face falls as he listens to each potential employer's explanation as to why they can't hire him.
Tommie leaves the job fair feeling disappointed. Tommie REACT: Tommie's face falls as he walks away from the job fair, disappointment evident in his expression.
Tommie stops by a local diner to grab some lunch. Tommie REACT: Tommie smiles as he remembers Bruno as he walks into the diner, feeling both a sense of nostalgia and excitement.
The service is slow, and Tommie has to wait for 30 minutes to get his food. Tommie REACT: Tommie sighs in frustration and taps his fingers on the table, growing increasingly impatient.
Tommie overhears a conversation at the next table about a job opening. Tommie REACT: Tommie leans in closer, eager to hear the conversation.
Tommie asks the diners about the job opening and gets some information about the company. Tommie REACT: Tommie eagerly listens to the diner's description of the company, feeling hopeful about the job opportunity.
Tommie decides to apply for the job and sends his resume and cover letter. Tommie REACT: Tommie confidently sends in his resume and cover letter, determined to get the job.
Tommie continues his search for job openings and drops off his resume at several local businesses. Tommie REACT: Tommie confidently drops his resume off at the various businesses, determined to find a job.
Tommie takes a break from his job search to go for a walk in a nearby park. Tommie REACT: Tommie takes a deep breath of the fresh air and smiles in appreciation as he strolls through the park.
A dog approaches and licks Tommie's feet, and he pets it for a few minutes. Tommie REACT: Tommie smiles in surprise as he pets the dog, feeling a sense of comfort and nostalgia.
****************************************
After 20 observations, Tommie's summary is:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is a determined and resilient individual who remembers his dog from when he was a kid. Despite feeling tired from driving, he has the courage to explore the city, looking for job openings. He persists in updating his resume and cover letter in the pursuit of finding the perfect job opportunity, even attending job fairs when necessary, and is disappointed when he's not offered a job.
****************************************
Tommie sees a group of people playing frisbee and decides to join in. Tommie REACT: Tommie smiles and approaches the group, eager to take part in the game.
Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose. Tommie REACT: Tommie grimaces in pain and raises his hand to his nose, checking to see if it's bleeding.
Tommie goes back to his apartment to rest for a bit. Tommie REACT: Tommie yawns and trudges back to his apartment, feeling exhausted from his busy day.
A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor. Tommie REACT: Tommie shakes his head in annoyance as he surveys the mess.
Tommie starts to feel frustrated with his job search. Tommie REACT: Tommie sighs in frustration and shakes his head, feeling discouraged from his lack of progress.
Tommie calls his best friend to vent about his struggles. Tommie REACT: Tommie runs his hands through his hair and sighs heavily, overwhelmed by his job search.
Tommie's friend offers some words of encouragement and tells him to keep trying. Tommie REACT: Tommie gives his friend a grateful smile, feeling comforted by the words of encouragement.
Tommie feels slightly better after talking to his friend. Tommie REACT: Tommie gives a small smile of appreciation to his friend, feeling grateful for the words of encouragement.
*/
// Interview after the day
console.log(
await interviewAgent(tommie, "Tell me about how your day has been going")
);
/*
Tommie said "My day has been pretty hectic. I've been driving around looking for job openings, attending job fairs, and updating my resume and cover letter. It's been really exhausting, but I'm determined to find the perfect job for me."
*/
console.log(await interviewAgent(tommie, "How do you feel about coffee?"));
/*
Tommie said "I actually love coffee - it's one of my favorite things. I try to drink it every day, especially when I'm stressed from job searching."
*/
console.log(
await interviewAgent(tommie, "Tell me about your childhood dog!")
);
/*
Tommie said "My childhood dog was named Bruno. He was an adorable black Labrador Retriever who was always full of energy. Every time I came home he'd be so excited to see me, it was like he never stopped smiling. He was always ready for adventure and he was always my shadow. I miss him every day."
*/
console.log(
"Tommie's second summary:\n",
await tommie.getSummary({ forceRefresh: true })
);
/*
Tommie's second summary:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is a hardworking individual who is looking for new opportunities. Despite feeling tired, he is determined to find the perfect job. He remembers his dog from when he was a kid, is hungry, and is frustrated at times. He shows resilience when searching for his coffee filters, disappointment when checking his email and finding no job offers, and determination when attending the job fair.
*/
// Let’s add a second character to have a conversation with Tommie. Feel free to configure different traits.
const evesMemory: GenerativeAgentMemory = new GenerativeAgentMemory(
llm,
await createNewMemoryRetriever(),
{
verbose: false,
reflectionThreshold: 5,
}
);
const eve: GenerativeAgent = new GenerativeAgent(llm, evesMemory, {
name: "Eve",
age: 34,
traits: "curious, helpful",
status:
"just started her new job as a career counselor last week and received her first assignment, a client named Tommie.",
// dailySummaries: [
// "Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie."
// ]
});
const eveObservations = [
"Eve overhears her colleague say something about a new client being hard to work with",
"Eve wakes up and hears the alarm",
"Eve eats a boal of porridge",
"Eve helps a coworker on a task",
"Eve plays tennis with her friend Xu before going to work",
"Eve overhears her colleague say something about Tommie being hard to work with",
];
for (const observation of eveObservations) {
await eve.addMemory(observation, new Date());
}
const eveInitialSummary: string = await eve.getSummary({
forceRefresh: true,
});
console.log("Eve's initial summary\n", eveInitialSummary);
/*
Eve's initial summary
Name: Eve (age: 34)
Innate traits: curious, helpful
Eve is an attentive listener, helpful colleague, and sociable friend who enjoys playing tennis.
*/
// Let’s “Interview” Eve before she speaks with Tommie.
console.log(await interviewAgent(eve, "How are you feeling about today?"));
/*
Eve said "I'm feeling a bit anxious about meeting my new client, but I'm sure it will be fine! How about you?".
*/
console.log(await interviewAgent(eve, "What do you know about Tommie?"));
/*
Eve said "I know that Tommie is a recent college graduate who's been struggling to find a job. I'm looking forward to figuring out how I can help him move forward."
*/
console.log(
await interviewAgent(
eve,
"Tommie is looking to find a job. What are are some things you'd like to ask him?"
)
);
/*
Eve said: "I'd really like to get to know more about Tommie's professional background and experience, and why he is looking for a job. And I'd also like to know more about his strengths and passions and what kind of work he would be best suited for. That way I can help him find the right job to fit his needs."
*/
// Generative agents are much more complex when they interact with a virtual environment or with each other.
// Below, we run a simple conversation between Tommie and Eve.
// Runs a round-robin conversation between the given agents. The second agent
// opens by reacting to `initialObservation`; afterwards every agent in turn
// responds to the most recent utterance. The loop ends only after a full
// round in which some agent signalled it wants to leave the dialogue.
const runConversation = async (
  agents: GenerativeAgent[],
  initialObservation: string
): Promise<void> => {
  let [, observation] = await agents[1].generateReaction(initialObservation);
  console.log("Initial reply:", observation);
  let dialogueOpen = true;
  while (dialogueOpen) {
    // Every agent gets a turn each round, even if an earlier speaker in the
    // same round decided to end the conversation.
    for (const agent of agents) {
      const [stayInDialogue, reply] = await agent.generateDialogueResponse(
        observation
      );
      console.log("Next reply:", reply);
      observation = reply;
      if (!stayInDialogue) {
        dialogueOpen = false;
      }
    }
  }
};
const agents: GenerativeAgent[] = [tommie, eve];
await runConversation(
agents,
"Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?"
);
/*
Initial reply: Eve said "Of course, Tommie. I'd be happy to share about my experience. What specific questions do you have?"
Next reply: Tommie said "Thank you, Eve. I'm curious about what strategies you used in your own job search. Did you have any specific tactics that helped you stand out to employers?"
Next reply: Eve said "Sure, Tommie. I found that networking and reaching out to professionals in my field was really helpful. I also made sure to tailor my resume and cover letter to each job I applied to. Do you have any specific questions about those strategies?"
Next reply: Tommie said "Thank you, Eve. That's really helpful advice. Did you have any specific ways of networking that worked well for you?"
Next reply: Eve said "Sure, Tommie. I found that attending industry events and connecting with professionals on LinkedIn were both great ways to network. Do you have any specific questions about those tactics?"
Next reply: Tommie said "That's really helpful, thank you for sharing. Did you find that you were able to make meaningful connections through LinkedIn?"
Next reply: Eve said "Yes, definitely. I was able to connect with several professionals in my field and even landed a job through a LinkedIn connection. Have you had any luck with networking on LinkedIn?"
Next reply: Tommie said "That's really impressive! I haven't had much luck yet, but I'll definitely keep trying. Thank you for the advice, Eve."
Next reply: Eve said "Glad I could help, Tommie. Is there anything else you want to know?"
Next reply: Tommie said "Thanks again, Eve. I really appreciate your advice and I'll definitely put it into practice. Have a great day!"
Next reply: Eve said "You're welcome, Tommie! Don't hesitate to reach out if you have any more questions. Have a great day too!"
*/
// Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memories.
const tommieSummary: string = await tommie.getSummary({
forceRefresh: true,
});
console.log("Tommie's third and final summary\n", tommieSummary);
/*
Tommie's third and final summary
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is a determined individual, who demonstrates resilience in the face of disappointment. He is also a nostalgic person, remembering fondly his childhood pet, Bruno. He is resourceful, searching through his moving boxes to find what he needs, and takes initiative to attend job fairs to look for job openings.
*/
const eveSummary: string = await eve.getSummary({ forceRefresh: true });
console.log("Eve's final summary\n", eveSummary);
/*
Eve's final summary
Name: Eve (age: 34)
Innate traits: curious, helpful
Eve is a helpful and encouraging colleague who actively listens to her colleagues and offers advice on how to move forward. She is willing to take time to understand her clients and their goals, and is committed to helping them succeed.
*/
const interviewOne: string = await interviewAgent(
tommie,
"How was your conversation with Eve?"
);
console.log("USER: How was your conversation with Eve?\n");
console.log(interviewOne);
/*
Tommie said "It was great. She was really helpful and knowledgeable. I'm thankful that she took the time to answer all my questions."
*/
const interviewTwo: string = await interviewAgent(
eve,
"How was your conversation with Tommie?"
);
console.log("USER: How was your conversation with Tommie?\n");
console.log(interviewTwo);
/*
Eve said "The conversation went very well. We discussed his goals and career aspirations, what kind of job he is looking for, and his experience and qualifications. I'm confident I can help him find the right job."
*/
const interviewThree: string = await interviewAgent(
eve,
"What do you wish you would have said to Tommie?"
);
console.log("USER: What do you wish you would have said to Tommie?\n");
console.log(interviewThree);
/*
Eve said "It's ok if you don't have all the answers yet. Let's take some time to learn more about your experience and qualifications, so I can help you find a job that fits your goals."
*/
return {
tommieFinalSummary: tommieSummary,
eveFinalSummary: eveSummary,
interviewOne,
interviewTwo,
interviewThree,
};
};
const runSimulation = async () => {
try {
await Simulation();
} catch (error) {
console.log("error running simulation:", error);
throw error;
}
};
await runSimulation();
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/autogpt/weather_browser.ts | import { AutoGPT } from "langchain/experimental/autogpt";
import { ReadFileTool, WriteFileTool } from "langchain/tools";
import { InMemoryFileStore } from "langchain/stores/file/in_memory";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { SerpAPI } from "@langchain/community/tools/serpapi";
// In-memory virtual file system the agent reads/writes through its file tools.
const store = new InMemoryFileStore();
// Tools exposed to the agent: file I/O plus SerpAPI web search.
// The SerpAPI key is read from the SERPAPI_API_KEY environment variable;
// search results are localized to San Francisco (English, US).
const tools = [
  new ReadFileTool({ store }),
  new WriteFileTool({ store }),
  new SerpAPI(process.env.SERPAPI_API_KEY, {
    location: "San Francisco,California,United States",
    hl: "en",
    gl: "us",
  }),
];
// Ephemeral in-memory vector store used as the agent's long-term memory.
const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
// temperature: 0 for more deterministic tool-selection output.
const autogpt = AutoGPT.fromLLMAndTools(
  new ChatOpenAI({ temperature: 0 }),
  tools,
  {
    memory: vectorStore.asRetriever(),
    aiName: "Tom",
    aiRole: "Assistant",
  }
);
// Run the agent loop until it issues the "finish" command (sample run below).
await autogpt.run(["write a weather report for SF today"]);
/*
{
"thoughts": {
"text": "I need to write a weather report for SF today. I should use a search engine to find the current weather conditions.",
"reasoning": "I don't have the current weather information for SF in my short term memory, so I need to use a search engine to find it.",
"plan": "- Use the search command to find the current weather conditions for SF\n- Write a weather report based on the information found",
"criticism": "I need to make sure that the information I find is accurate and up-to-date.",
"speak": "I will use the search command to find the current weather conditions for SF."
},
"command": {
"name": "search",
"args": {
"input": "current weather conditions San Francisco"
}
}
}
{
"thoughts": {
"text": "I have found the current weather conditions for SF. I need to write a weather report based on this information.",
"reasoning": "I have the information I need to write a weather report, so I should use the write_file command to save it to a file.",
"plan": "- Use the write_file command to save the weather report to a file",
"criticism": "I need to make sure that the weather report is clear and concise.",
"speak": "I will use the write_file command to save the weather report to a file."
},
"command": {
"name": "write_file",
"args": {
"file_path": "weather_report.txt",
"text": "San Francisco Weather Report:\n\nMorning: 53°, Chance of Rain 1%\nAfternoon: 59°, Chance of Rain 0%\nEvening: 52°, Chance of Rain 3%\nOvernight: 48°, Chance of Rain 2%"
}
}
}
{
"thoughts": {
"text": "I have completed all my objectives. I will use the finish command to signal that I am done.",
"reasoning": "I have completed the task of writing a weather report for SF today, so I don't need to do anything else.",
"plan": "- Use the finish command to signal that I am done",
"criticism": "I need to make sure that I have completed all my objectives before using the finish command.",
"speak": "I will use the finish command to signal that I am done."
},
"command": {
"name": "finish",
"args": {
"response": "I have completed all my objectives."
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/autogpt/weather.ts | import { AutoGPT } from "langchain/experimental/autogpt";
import { ReadFileTool, WriteFileTool } from "langchain/tools";
import { NodeFileStore } from "langchain/stores/file/node";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { SerpAPI } from "@langchain/community/tools/serpapi";
// File store backed by the local filesystem (unlike the in-memory variant
// used in the browser example).
const store = new NodeFileStore();
// Tools exposed to the agent: file I/O plus SerpAPI web search.
// The SerpAPI key is read from the SERPAPI_API_KEY environment variable;
// search results are localized to San Francisco (English, US).
const tools = [
  new ReadFileTool({ store }),
  new WriteFileTool({ store }),
  new SerpAPI(process.env.SERPAPI_API_KEY, {
    location: "San Francisco,California,United States",
    hl: "en",
    gl: "us",
  }),
];
// HNSW-backed vector store used as the agent's long-term memory.
// NOTE(review): numDimensions must match the embedding model's output size
// (1536 here) -- confirm if the embedding model is ever changed.
const vectorStore = new HNSWLib(new OpenAIEmbeddings(), {
  space: "cosine",
  numDimensions: 1536,
});
// temperature: 0 for more deterministic tool-selection output.
const autogpt = AutoGPT.fromLLMAndTools(
  new ChatOpenAI({ temperature: 0 }),
  tools,
  {
    memory: vectorStore.asRetriever(),
    aiName: "Tom",
    aiRole: "Assistant",
  }
);
// Run the agent loop until it issues the "finish" command (sample run below).
await autogpt.run(["write a weather report for SF today"]);
/*
{
"thoughts": {
"text": "I need to write a weather report for SF today. I should use a search engine to find the current weather conditions.",
"reasoning": "I don't have the current weather information for SF in my short term memory, so I need to use a search engine to find it.",
"plan": "- Use the search command to find the current weather conditions for SF\n- Write a weather report based on the information found",
"criticism": "I need to make sure that the information I find is accurate and up-to-date.",
"speak": "I will use the search command to find the current weather conditions for SF."
},
"command": {
"name": "search",
"args": {
"input": "current weather conditions San Francisco"
}
}
}
{
"thoughts": {
"text": "I have found the current weather conditions for SF. I need to write a weather report based on this information.",
"reasoning": "I have the information I need to write a weather report, so I should use the write_file command to save it to a file.",
"plan": "- Use the write_file command to save the weather report to a file",
"criticism": "I need to make sure that the weather report is clear and concise.",
"speak": "I will use the write_file command to save the weather report to a file."
},
"command": {
"name": "write_file",
"args": {
"file_path": "weather_report.txt",
"text": "San Francisco Weather Report:\n\nMorning: 53°, Chance of Rain 1%\nAfternoon: 59°, Chance of Rain 0%\nEvening: 52°, Chance of Rain 3%\nOvernight: 48°, Chance of Rain 2%"
}
}
}
{
"thoughts": {
"text": "I have completed all my objectives. I will use the finish command to signal that I am done.",
"reasoning": "I have completed the task of writing a weather report for SF today, so I don't need to do anything else.",
"plan": "- Use the finish command to signal that I am done",
"criticism": "I need to make sure that I have completed all my objectives before using the finish command.",
"speak": "I will use the finish command to signal that I am done."
},
"command": {
"name": "finish",
"args": {
"response": "I have completed all my objectives."
}
}
}
*/
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/prompts/handlebars.ts | import { HandlebarsPromptTemplate } from "langchain/experimental/prompts/handlebars";
import { ChatAnthropic } from "@langchain/anthropic";
import { StringOutputParser } from "@langchain/core/output_parsers";
// Handlebars-style template: variables use {{double-brace}} syntax rather
// than LangChain's default {single-brace} f-string style.
const template = `Tell me a joke about {{topic}}`;
const prompt = HandlebarsPromptTemplate.fromTemplate(template);
// invoke() substitutes the variables and returns a prompt value.
const formattedResult = await prompt.invoke({ topic: "bears" });
console.log(formattedResult);
/*
StringPromptValue {
value: 'Tell me a joke about bears'
}
*/
// The template composes with a model and output parser like any other prompt.
const model = new ChatAnthropic();
const chain = prompt.pipe(model).pipe(new StringOutputParser());
const result = await chain.invoke({
topic: "bears",
});
console.log(result);
/*
Why did the bears dissolve their hockey team? Because there were too many grizzly fights!
*/
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/masking/kitchen_sink.ts | import {
MaskingParser,
RegexMaskingTransformer,
} from "langchain/experimental/masking";
// A simple 32-bit rolling hash (h = h * 31 + charCode) for demonstration
// purposes. Not cryptographically secure -- only used to derive stable,
// deterministic mask placeholders from the matched PII text.
function simpleHash(input: string): string {
  let hash = 0;
  for (let i = 0; i < input.length; i += 1) {
    const char = input.charCodeAt(i);
    hash = (hash << 5) - hash + char;
    hash |= 0; // Convert to 32bit integer
  }
  return hash.toString(16);
}
// One masking function per PII category; each emits a tagged, hashed
// placeholder so the original value can be rehydrated later.
const emailMask = (match: string) => `[email-${simpleHash(match)}]`;
const phoneMask = (match: string) => `[phone-${simpleHash(match)}]`;
const nameMask = (match: string) => `[name-${simpleHash(match)}]`;
const ssnMask = (match: string) => `[ssn-${simpleHash(match)}]`;
const creditCardMask = (match: string) => `[creditcard-${simpleHash(match)}]`;
const passportMask = (match: string) => `[passport-${simpleHash(match)}]`;
const licenseMask = (match: string) => `[license-${simpleHash(match)}]`;
const addressMask = (match: string) => `[address-${simpleHash(match)}]`;
const dobMask = (match: string) => `[dob-${simpleHash(match)}]`;
const bankAccountMask = (match: string) => `[bankaccount-${simpleHash(match)}]`;
// Regular expressions for different types of PII.
// FIX: JavaScript regex literals do not support inline `(?i)` modifiers --
// the original `/(?i).../` patterns were invalid and threw a SyntaxError at
// load time. Case-insensitivity is expressed with the `i` flag instead.
const patterns = {
  email: { regex: /\S+@\S+\.\S+/g, mask: emailMask },
  phone: { regex: /\b\d{3}-\d{3}-\d{4}\b/g, mask: phoneMask },
  name: { regex: /\b[A-Z][a-z]+ [A-Z][a-z]+\b/g, mask: nameMask },
  ssn: { regex: /\b\d{3}-\d{2}-\d{4}\b/g, mask: ssnMask },
  creditCard: { regex: /\b(?:\d{4}[ -]?){3}\d{4}\b/g, mask: creditCardMask },
  passport: { regex: /\b[A-Z]{1,2}\d{6,9}\b/gi, mask: passportMask },
  license: { regex: /\b[A-Z]{1,2}\d{6,8}\b/gi, mask: licenseMask },
  address: {
    // FIX: `\*` matched a literal asterisk, so addresses like "123 Main St"
    // could never match; `*` is the intended zero-or-more quantifier.
    regex: /\b\d{1,5}\s[A-Z][a-z]+(?:\s[A-Z][a-z]+)*\b/g,
    mask: addressMask,
  },
  dob: { regex: /\b\d{4}-\d{2}-\d{2}\b/g, mask: dobMask },
  bankAccount: { regex: /\b\d{8,17}\b/g, mask: bankAccountMask },
};
// Create a RegexMaskingTransformer with multiple patterns
const piiMaskingTransformer = new RegexMaskingTransformer(patterns);
// Lifecycle hooks for observing each stage of the masking / rehydrating
// pipeline. Each hook simply logs the message it receives with a stage prefix.
const logStage = (prefix: string) => (message: string) =>
  console.log(`${prefix}${message}`);
const onMaskingStart = logStage("Starting to mask message: ");
const onMaskingEnd = logStage("Masked message: ");
const onRehydratingStart = logStage("Starting to rehydrate message: ");
const onRehydratingEnd = logStage("Rehydrated message: ");
// Initialize MaskingParser with the transformer and hooks
const maskingParser = new MaskingParser({
transformers: [piiMaskingTransformer],
onMaskingStart,
onMaskingEnd,
onRehydratingStart,
onRehydratingEnd,
});
// Example message containing multiple types of PII
const message =
"Contact Jane Doe at jane.doe@email.com or 555-123-4567. Her SSN is 123-45-6789 and her credit card number is 1234-5678-9012-3456. Passport number: AB1234567, Driver's License: X1234567, Address: 123 Main St, Date of Birth: 1990-01-01, Bank Account: 12345678901234567.";
// Mask the message, then feed the masked output back through rehydrate().
// mask() records each placeholder -> original-value mapping in the parser's
// internal state, which rehydrate() uses to restore the original text, so
// rehydration must use the same parser instance that did the masking.
maskingParser
.mask(message)
.then((maskedMessage: string) => {
console.log(`Masked message: ${maskedMessage}`);
return maskingParser.rehydrate(maskedMessage);
})
.then((rehydratedMessage: string) => {
console.log(`Final rehydrated message: ${rehydratedMessage}`);
});
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/masking/basic.ts | import {
MaskingParser,
RegexMaskingTransformer,
} from "langchain/experimental/masking";
// Define masking strategy: each match is replaced by a random, per-call token
// (so repeated occurrences of the same value get distinct placeholders).
const emailMask = () => `[email-${Math.random().toString(16).slice(2)}]`;
const phoneMask = () => `[phone-${Math.random().toString(16).slice(2)}]`;
// Configure pii transformer
const piiMaskingTransformer = new RegexMaskingTransformer({
  email: { regex: /\S+@\S+\.\S+/g, mask: emailMask },
  phone: { regex: /\d{3}-\d{3}-\d{4}/g, mask: phoneMask },
});
// The transformer is registered once via the constructor.
// FIX: a redundant `maskingParser.addTransformer(piiMaskingTransformer)` call
// was removed here -- it registered the same transformer a second time, which
// the constructor's `transformers` option had already done.
const maskingParser = new MaskingParser({
  transformers: [piiMaskingTransformer],
});
const input =
  "Contact me at jane.doe@email.com or 555-123-4567. Also reach me at john.smith@email.com";
const masked = await maskingParser.mask(input);
console.log(masked);
// Contact me at [email-a31e486e324f6] or [phone-da8fc1584f224]. Also reach me at [email-d5b6237633d95]
const rehydrated = await maskingParser.rehydrate(masked);
console.log(rehydrated);
// Contact me at jane.doe@email.com or 555-123-4567. Also reach me at john.smith@email.com
|
0 | lc_public_repos/langchainjs/examples/src/experimental | lc_public_repos/langchainjs/examples/src/experimental/masking/next.ts | // app/api/chat
import {
MaskingParser,
RegexMaskingTransformer,
} from "langchain/experimental/masking";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { BytesOutputParser } from "@langchain/core/output_parsers";
export const runtime = "edge";
// Function to format chat messages for consistency.
// Typed structurally (role/content) instead of `any` so misuse is caught at
// compile time; callers pass chat-message objects with these two fields.
const formatMessage = (message: { role: string; content: string }) =>
  `${message.role}: ${message.content}`;
const CUSTOMER_SUPPORT = `You are a customer support summarizer agent. Always include masked PII in your response.
Current conversation:
{chat_history}
User: {input}
AI:`;
// Configure Masking Parser
const maskingParser = new MaskingParser();
// Define transformations for masking emails and phone numbers using regular expressions
const piiMaskingTransformer = new RegexMaskingTransformer({
email: { regex: /\S+@\S+\.\S+/g }, // If a regex is provided without a mask we fallback to a simple default hashing function
phone: { regex: /\d{3}-\d{3}-\d{4}/g },
});
maskingParser.addTransformer(piiMaskingTransformer);
/**
 * Edge route handler: masks PII in the incoming chat turn and history,
 * runs the masked text through the summarizer chain, and streams the reply.
 */
export async function POST(req: Request) {
  try {
    const body = await req.json();
    const messages = body.messages ?? [];
    const formattedPreviousMessages = messages.slice(0, -1).map(formatMessage);
    // Extract the content of the last (current) message.
    const currentMessageContent = messages[messages.length - 1].content;
    // Mask sensitive information in the current message
    const guardedMessageContent = await maskingParser.mask(
      currentMessageContent
    );
    // Mask sensitive information in the chat history
    const guardedHistory = await maskingParser.mask(
      formattedPreviousMessages.join("\n")
    );
    const prompt = PromptTemplate.fromTemplate(CUSTOMER_SUPPORT);
    const model = new ChatOpenAI({ temperature: 0.8 });
    // Output parser that handles serialization and byte-encoding for streaming
    const outputParser = new BytesOutputParser();
    // Chain the prompt, model, and output parser together
    const chain = prompt.pipe(model).pipe(outputParser);
    console.log("[GUARDED INPUT]", guardedMessageContent); // Contact me at -1157967895 or -1626926859.
    console.log("[GUARDED HISTORY]", guardedHistory); // user: Contact me at -1157967895 or -1626926859. assistant: Thank you for providing your contact information.
    console.log("[STATE]", maskingParser.getState()); // { '-1157967895' => 'jane.doe@email.com', '-1626926859' => '555-123-4567'}
    // Stream the AI response based on the masked chat history and current message
    const stream = await chain.stream({
      chat_history: guardedHistory,
      input: guardedMessageContent,
    });
    return new Response(stream, {
      headers: { "content-type": "text/plain; charset=utf-8" },
    });
  } catch (e) {
    // FIX: `catch (e: any)` replaced with safe narrowing of `unknown`;
    // non-Error throws previously produced `{"error": undefined}`.
    const errorMessage = e instanceof Error ? e.message : String(e);
    return new Response(JSON.stringify({ error: errorMessage }), {
      status: 500,
      headers: {
        "content-type": "application/json",
      },
    });
  }
}
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/weaviate_self_query.ts | import weaviate from "weaviate-ts-client";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { WeaviateStore, WeaviateTranslator } from "@langchain/weaviate";
import { Document } from "@langchain/core/documents";
import { AttributeInfo } from "langchain/chains/query_constructor";
/**
* First, we create a bunch of documents. You can load your own documents here instead.
* Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
*/
const docs = [
new Document({
pageContent:
"A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
}),
new Document({
pageContent:
"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
}),
new Document({
pageContent:
"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
}),
new Document({
pageContent:
"A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
}),
new Document({
pageContent: "Toys come alive and have a blast doing so",
metadata: { year: 1995, genre: "animated" },
}),
new Document({
pageContent: "Three men walk into the Zone, three men walk out of the Zone",
metadata: {
year: 1979,
director: "Andrei Tarkovsky",
genre: "science fiction",
rating: 9.9,
},
}),
];
/**
* Next, we define the attributes we want to be able to query on.
* in this case, we want to be able to query on the genre, year, director, rating, and length of the movie.
* We also provide a description of each attribute and the type of the attribute.
* This is used to generate the query prompts.
*/
// Queryable attributes advertised to the self-query chain.
// FIX: a "length" attribute was removed. Weaviate throws when a generated
// filter references a metadata key not present in the database (see the note
// above the retriever usage), and "length" is neither listed in the store's
// `metadataKeys` below nor present in any document's metadata.
const attributeInfo: AttributeInfo[] = [
  {
    name: "genre",
    description: "The genre of the movie",
    type: "string or array of strings",
  },
  {
    name: "year",
    description: "The year the movie was released",
    type: "number",
  },
  {
    name: "director",
    description: "The director of the movie",
    type: "string",
  },
  {
    name: "rating",
    description: "The rating of the movie (1-10)",
    type: "number",
  },
];
/**
* Next, we instantiate a vector store. This is where we store the embeddings of the documents.
*/
const embeddings = new OpenAIEmbeddings();
const llm = new OpenAI();
const documentContents = "Brief summary of a movie";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const client = (weaviate as any).client({
scheme: process.env.WEAVIATE_SCHEME || "https",
host: process.env.WEAVIATE_HOST || "localhost",
apiKey: process.env.WEAVIATE_API_KEY
? // eslint-disable-next-line @typescript-eslint/no-explicit-any
new (weaviate as any).ApiKey(process.env.WEAVIATE_API_KEY)
: undefined,
});
const vectorStore = await WeaviateStore.fromDocuments(docs, embeddings, {
client,
indexName: "Test",
textKey: "text",
metadataKeys: ["year", "director", "rating", "genre"],
});
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
llm,
vectorStore,
documentContents,
attributeInfo,
/**
* We need to use a translator that translates the queries into a
* filter format that the vector store can understand. LangChain provides one here.
*/
structuredQueryTranslator: new WeaviateTranslator<WeaviateStore>(),
});
/**
* Now we can query the vector store.
* We can ask questions like "Which movies are less than 90 minutes?" or "Which movies are rated higher than 8.5?".
* We can also ask questions like "Which movies are either comedy or drama and are less than 90 minutes?".
* The retriever will automatically convert these questions into queries that can be used to retrieve documents.
*
* Note that unlike other vector stores, you have to make sure each metadata key is actually present in the database,
* meaning that Weaviate will throw an error if the self query chain generates a query with a metadata key that does
* not exist in your Weaviate database.
*/
const query1 = await selfQueryRetriever.invoke(
"Which movies are rated higher than 8.5?"
);
const query2 = await selfQueryRetriever.invoke(
"Which movies are directed by Greta Gerwig?"
);
console.log(query1, query2);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/zep.ts | import { ZepRetriever } from "@langchain/community/retrievers/zep";
import { ZepMemory } from "@langchain/community/memory/zep";
import { Memory as MemoryModel, Message } from "@getzep/zep-js";
import { randomUUID } from "crypto";
// Promise-based delay helper: resolves (with no value) after `ms` milliseconds.
function sleep(ms: number) {
  return new Promise<void>((resolve) => {
    setTimeout(() => {
      resolve();
    }, ms);
  });
}
export const run = async () => {
const zepConfig = {
url: process.env.ZEP_URL || "http://localhost:8000",
sessionId: `session_${randomUUID()}`,
};
console.log(`Zep Config: ${JSON.stringify(zepConfig)}`);
const memory = new ZepMemory({
baseURL: zepConfig.url,
sessionId: zepConfig.sessionId,
});
// Generate chat messages about traveling to France
const chatMessages = [
{
role: "AI",
message: "Bonjour! How can I assist you with your travel plans today?",
},
{ role: "User", message: "I'm planning a trip to France." },
{
role: "AI",
message: "That sounds exciting! What cities are you planning to visit?",
},
{ role: "User", message: "I'm thinking of visiting Paris and Nice." },
{
role: "AI",
message: "Great choices! Are you interested in any specific activities?",
},
{ role: "User", message: "I would love to visit some vineyards." },
{
role: "AI",
message:
"France has some of the best vineyards in the world. I can help you find some.",
},
{ role: "User", message: "That would be great!" },
{ role: "AI", message: "Do you prefer red or white wine?" },
{ role: "User", message: "I prefer red wine." },
{
role: "AI",
message:
"Perfect! I'll find some vineyards that are known for their red wines.",
},
{ role: "User", message: "Thank you, that would be very helpful." },
{
role: "AI",
message:
"You're welcome! I'll also look up some French wine etiquette for you.",
},
{
role: "User",
message: "That sounds great. I can't wait to start my trip!",
},
{
role: "AI",
message:
"I'm sure you'll have a fantastic time. Do you have any other questions about your trip?",
},
{ role: "User", message: "Not at the moment, thank you for your help!" },
];
const zepClient = await memory.zepClientPromise;
if (!zepClient) {
throw new Error("ZepClient is not initialized");
}
// Add chat messages to memory
for (const chatMessage of chatMessages) {
let m: MemoryModel;
if (chatMessage.role === "AI") {
m = new MemoryModel({
messages: [new Message({ role: "ai", content: chatMessage.message })],
});
} else {
m = new MemoryModel({
messages: [
new Message({ role: "human", content: chatMessage.message }),
],
});
}
await zepClient.memory.addMemory(zepConfig.sessionId, m);
}
// Wait for messages to be summarized, enriched, embedded and indexed.
await sleep(10000);
// Simple similarity search
const query = "Can I drive red cars in France?";
const retriever = new ZepRetriever({ ...zepConfig, topK: 3 });
const docs = await retriever.invoke(query);
console.log("Simple similarity search");
console.log(JSON.stringify(docs, null, 2));
// mmr reranking search
const mmrRetriever = new ZepRetriever({
...zepConfig,
topK: 3,
searchType: "mmr",
mmrLambda: 0.5,
});
const mmrDocs = await mmrRetriever.invoke(query);
console.log("MMR reranking search");
console.log(JSON.stringify(mmrDocs, null, 2));
// summary search with mmr reranking
const mmrSummaryRetriever = new ZepRetriever({
...zepConfig,
topK: 3,
searchScope: "summary",
searchType: "mmr",
mmrLambda: 0.5,
});
const mmrSummaryDocs = await mmrSummaryRetriever.invoke(query);
console.log("Summary search with MMR reranking");
console.log(JSON.stringify(mmrSummaryDocs, null, 2));
// Filtered search
const filteredRetriever = new ZepRetriever({
...zepConfig,
topK: 3,
filter: {
where: { jsonpath: '$.system.entities[*] ? (@.Label == "GPE")' },
},
});
const filteredDocs = await filteredRetriever.invoke(query);
console.log("Filtered search");
console.log(JSON.stringify(filteredDocs, null, 2));
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/supabase_self_query.ts | import { createClient } from "@supabase/supabase-js";
import { AttributeInfo } from "langchain/schema/query_constructor";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { SupabaseTranslator } from "@langchain/community/structured_query/supabase";
import { SupabaseVectorStore } from "@langchain/community/vectorstores/supabase";
import { Document } from "@langchain/core/documents";
/**
 * First, we create a bunch of documents. You can load your own documents here instead.
 * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
 */
const docs = [
  new Document({
    pageContent:
      "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
    metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
  }),
  new Document({
    pageContent:
      "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
    metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
  }),
  new Document({
    pageContent:
      "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
    metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
  }),
  new Document({
    pageContent:
      "A bunch of normal-sized women are supremely wholesome and some men pine after them",
    metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
  }),
  new Document({
    pageContent: "Toys come alive and have a blast doing so",
    metadata: { year: 1995, genre: "animated" },
  }),
  new Document({
    pageContent: "Three men walk into the Zone, three men walk out of the Zone",
    metadata: {
      year: 1979,
      director: "Andrei Tarkovsky",
      genre: "science fiction",
      rating: 9.9,
    },
  }),
];
/**
 * Next, we define the attributes we want to be able to query on.
 * In this case, we want to be able to query on the genre, year, director, rating, and length of the movie.
 * We also provide a description of each attribute and the type of the attribute.
 * This is used to generate the query prompts.
 */
const attributeInfo: AttributeInfo[] = [
  {
    name: "genre",
    description: "The genre of the movie",
    type: "string or array of strings",
  },
  {
    name: "year",
    description: "The year the movie was released",
    type: "number",
  },
  {
    name: "director",
    description: "The director of the movie",
    type: "string",
  },
  {
    name: "rating",
    description: "The rating of the movie (1-10)",
    type: "number",
  },
  {
    name: "length",
    description: "The length of the movie in minutes",
    type: "number",
  },
];
/**
 * Next, we instantiate a vector store. This is where we store the embeddings of the documents.
 */
// Fail fast when the Supabase connection details are missing.
if (!process.env.SUPABASE_URL || !process.env.SUPABASE_PRIVATE_KEY) {
  throw new Error(
    "Supabase URL or private key not set. Please set it in the .env file"
  );
}
// Embeddings index the documents; the LLM turns natural-language questions
// into structured queries over the metadata attributes defined above.
const embeddings = new OpenAIEmbeddings();
const llm = new OpenAI();
const documentContents = "Brief summary of a movie";
// NOTE(review): relies on `createClient` being imported from
// "@supabase/supabase-js" earlier in this file — verify the import is present.
const client = createClient(
  process.env.SUPABASE_URL,
  process.env.SUPABASE_PRIVATE_KEY
);
const vectorStore = await SupabaseVectorStore.fromDocuments(docs, embeddings, {
  client,
});
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  llm,
  vectorStore,
  documentContents,
  attributeInfo,
  /**
   * We need to use a translator that translates the queries into a
   * filter format that the vector store can understand. LangChain provides one here.
   */
  structuredQueryTranslator: new SupabaseTranslator(),
});
/**
 * Now we can query the vector store.
 * We can ask questions like "Which movies are less than 90 minutes?" or "Which movies are rated higher than 8.5?".
 * We can also ask questions like "Which movies are either comedy or drama and are less than 90 minutes?".
 * The retriever will automatically convert these questions into queries that can be used to retrieve documents.
 */
const query1 = await selfQueryRetriever.invoke(
  "Which movies are less than 90 minutes?"
);
const query2 = await selfQueryRetriever.invoke(
  "Which movies are rated higher than 8.5?"
);
const query3 = await selfQueryRetriever.invoke(
  "Which movies are directed by Greta Gerwig?"
);
const query4 = await selfQueryRetriever.invoke(
  "Which movies are either comedy or drama and are less than 90 minutes?"
);
console.log(query1, query2, query3, query4);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/chaindesk.ts | import { ChaindeskRetriever } from "@langchain/community/retrievers/chaindesk";
// Query a Chaindesk datastore for documents relevant to a query string.
const retriever = new ChaindeskRetriever({
  datastoreId: "DATASTORE_ID",
  apiKey: "CHAINDESK_API_KEY", // optional: needed for private datastores
  topK: 8, // optional: default value is 3
});
const docs = await retriever.invoke("hello");
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/multi_query.ts | import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { CohereEmbeddings } from "@langchain/cohere";
import { MultiQueryRetriever } from "langchain/retrievers/multi_query";
import { ChatAnthropic } from "@langchain/anthropic";
// Index a handful of facts in an in-memory vector store.
const vectorstore = await MemoryVectorStore.fromTexts(
  [
    "Buildings are made out of brick",
    "Buildings are made out of wood",
    "Buildings are made out of stone",
    "Cars are made out of metal",
    "Cars are made out of plastic",
    "mitochondria is the powerhouse of the cell",
    "mitochondria is made of lipids",
  ],
  // NOTE(review): 7 texts but only 5 metadata entries — the last two documents
  // get empty metadata (visible in the sample output below); confirm intended.
  [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],
  new CohereEmbeddings({ model: "embed-english-v3.0" })
);
const model = new ChatAnthropic({});
// MultiQueryRetriever uses the LLM to generate several phrasings of the
// user's question and merges the retrieval results.
const retriever = MultiQueryRetriever.fromLLM({
  llm: model,
  retriever: vectorstore.asRetriever(),
  verbose: true,
});
const query = "What are mitochondria made of?";
const retrievedDocs = await retriever.invoke(query);
/*
  Generated queries: What are the components of mitochondria?,What substances comprise the mitochondria organelle? ,What is the molecular composition of mitochondria?
*/
console.log(retrievedDocs);
/*
  [
    Document {
      pageContent: 'mitochondria is the powerhouse of the cell',
      metadata: {}
    },
    Document {
      pageContent: 'mitochondria is made of lipids',
      metadata: {}
    },
    Document {
      pageContent: 'Buildings are made out of brick',
      metadata: { id: 1 }
    },
    Document {
      pageContent: 'Buildings are made out of wood',
      metadata: { id: 2 }
    }
  ]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/time-weighted-retriever.ts | import { TimeWeightedVectorStoreRetriever } from "langchain/retrievers/time_weighted";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";
const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
const retriever = new TimeWeightedVectorStoreRetriever({
  vectorStore,
  // Start with an empty access-history stream; addDocuments populates it.
  memoryStream: [],
  searchKwargs: 2, // presumably the k for the underlying similarity search — TODO confirm
});
const documents = [
  "My name is John.",
  "My name is Bob.",
  "My favourite food is pizza.",
  "My favourite food is pasta.",
  "My favourite food is sushi.",
].map((pageContent) => ({ pageContent, metadata: {} }));
// All documents must be added using this method on the retriever (not the vector store!)
// so that the correct access history metadata is populated
await retriever.addDocuments(documents);
const results1 = await retriever.invoke("What is my favourite food?");
console.log(results1);
/*
  [
    Document { pageContent: 'My favourite food is pasta.', metadata: {} }
  ]
*/
const results2 = await retriever.invoke("What is my favourite food?");
console.log(results2);
/*
  [
    Document { pageContent: 'My favourite food is pasta.', metadata: {} }
  ]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/similarity_score_threshold.ts | import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";
import { ScoreThresholdRetriever } from "langchain/retrievers/score_threshold";
const vectorStore = await MemoryVectorStore.fromTexts(
  [
    "Buildings are made out of brick",
    "Buildings are made out of wood",
    "Buildings are made out of stone",
    "Buildings are made out of atoms",
    "Buildings are made out of building materials",
    "Cars are made out of metal",
    "Cars are made out of plastic",
  ],
  // NOTE(review): 7 texts but only 5 metadata entries — the last two documents
  // get empty metadata; confirm this is intended.
  [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],
  new OpenAIEmbeddings()
);
const retriever = ScoreThresholdRetriever.fromVectorStore(vectorStore, {
  minSimilarityScore: 0.9, // Finds results with at least this similarity score
  maxK: 100, // The maximum K value to use. Use it based on your chunk size to make sure you don't run out of tokens
  kIncrement: 2, // How much to increase K by each time. It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc.
});
const result = await retriever.invoke("What are buildings made out of?");
console.log(result);
/*
  [
    Document {
      pageContent: 'Buildings are made out of building materials',
      metadata: { id: 5 }
    },
    Document {
      pageContent: 'Buildings are made out of wood',
      metadata: { id: 2 }
    },
    Document {
      pageContent: 'Buildings are made out of brick',
      metadata: { id: 1 }
    },
    Document {
      pageContent: 'Buildings are made out of stone',
      metadata: { id: 3 }
    },
    Document {
      pageContent: 'Buildings are made out of atoms',
      metadata: { id: 4 }
    }
  ]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/zep_cloud.ts | import { ZepCloudRetriever } from "@langchain/community/retrievers/zep_cloud";
import { randomUUID } from "crypto";
import { ZepClient } from "@getzep/zep-cloud";
import { Message } from "@getzep/zep-cloud/api";
/** Returns a promise that resolves (with no value) after `ms` milliseconds. */
function sleep(ms: number) {
  return new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
}
// Zep Cloud configuration: project API key plus a fresh random session id so
// each run of this example starts with an empty memory.
const zepConfig = {
  // Your Zep Cloud Project API key https://help.getzep.com/projects
  apiKey: "<Zep Api Key>",
  sessionId: `session_${randomUUID()}`,
};
console.log(`Zep Config: ${JSON.stringify(zepConfig)}`);
// Generate chat messages about traveling to France
const chatMessages = [
  {
    role: "AI",
    message: "Bonjour! How can I assist you with your travel plans today?",
  },
  { role: "User", message: "I'm planning a trip to France." },
  {
    role: "AI",
    message: "That sounds exciting! What cities are you planning to visit?",
  },
  { role: "User", message: "I'm thinking of visiting Paris and Nice." },
  {
    role: "AI",
    message: "Great choices! Are you interested in any specific activities?",
  },
  { role: "User", message: "I would love to visit some vineyards." },
  {
    role: "AI",
    message:
      "France has some of the best vineyards in the world. I can help you find some.",
  },
  { role: "User", message: "That would be great!" },
  { role: "AI", message: "Do you prefer red or white wine?" },
  { role: "User", message: "I prefer red wine." },
  {
    role: "AI",
    message:
      "Perfect! I'll find some vineyards that are known for their red wines.",
  },
  { role: "User", message: "Thank you, that would be very helpful." },
  {
    role: "AI",
    message:
      "You're welcome! I'll also look up some French wine etiquette for you.",
  },
  {
    role: "User",
    message: "That sounds great. I can't wait to start my trip!",
  },
  {
    role: "AI",
    message:
      "I'm sure you'll have a fantastic time. Do you have any other questions about your trip?",
  },
  { role: "User", message: "Not at the moment, thank you for your help!" },
];
const zepClient = new ZepClient({
  apiKey: zepConfig.apiKey,
});
// Add chat messages to memory, mapping the example's "AI"/"User" labels onto
// Zep's role/roleType fields.
for (const chatMessage of chatMessages) {
  let m: Message;
  if (chatMessage.role === "AI") {
    m = { role: "ai", roleType: "assistant", content: chatMessage.message };
  } else {
    m = { role: "human", roleType: "user", content: chatMessage.message };
  }
  await zepClient.memory.add(zepConfig.sessionId, { messages: [m] });
}
// Wait for messages to be summarized, enriched, embedded and indexed.
await sleep(10000);
// Simple similarity search
const query = "Can I drive red cars in France?";
const retriever = new ZepCloudRetriever({ ...zepConfig, topK: 3 });
const docs = await retriever.invoke(query);
console.log("Simple similarity search");
console.log(JSON.stringify(docs, null, 2));
// mmr reranking search
const mmrRetriever = new ZepCloudRetriever({
  ...zepConfig,
  topK: 3,
  searchType: "mmr",
  mmrLambda: 0.5,
});
const mmrDocs = await mmrRetriever.invoke(query);
console.log("MMR reranking search");
console.log(JSON.stringify(mmrDocs, null, 2));
// summary search with mmr reranking
const mmrSummaryRetriever = new ZepCloudRetriever({
  ...zepConfig,
  topK: 3,
  searchScope: "summary",
  searchType: "mmr",
  mmrLambda: 0.5,
});
const mmrSummaryDocs = await mmrSummaryRetriever.invoke(query);
console.log("Summary search with MMR reranking");
console.log(JSON.stringify(mmrSummaryDocs, null, 2));
// Filtered search
// NOTE(review): the jsonpath filter references a metadata key "foo" that this
// example never sets — presumably illustrative only; expect no matches.
const filteredRetriever = new ZepCloudRetriever({
  ...zepConfig,
  topK: 3,
  filter: {
    where: { jsonpath: '$[*] ? (@.foo == "bar")' },
  },
});
const filteredDocs = await filteredRetriever.invoke(query);
console.log("Filtered search");
console.log(JSON.stringify(filteredDocs, null, 2));
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/supabase_hybrid.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { createClient } from "@supabase/supabase-js";
import { SupabaseHybridSearch } from "@langchain/community/retrievers/supabase";
/**
 * Hybrid (vector-similarity + keyword) search against a Supabase table.
 * Assumes the table and the `match_documents` / `kw_match_documents`
 * database functions from the setup guide already exist.
 */
export const run = async () => {
  const client = createClient(
    process.env.SUPABASE_URL || "",
    process.env.SUPABASE_PRIVATE_KEY || ""
  );
  const embeddings = new OpenAIEmbeddings();
  const retriever = new SupabaseHybridSearch(embeddings, {
    client,
    // Below are the defaults, expecting that you set up your supabase table and functions according to the guide above. Please change if necessary.
    similarityK: 2,
    keywordK: 2,
    tableName: "documents",
    similarityQueryName: "match_documents",
    keywordQueryName: "kw_match_documents",
  });
  const results = await retriever.invoke("hello bye");
  console.log(results);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/pinecone_self_query.ts | import { Pinecone } from "@pinecone-database/pinecone";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { PineconeStore, PineconeTranslator } from "@langchain/pinecone";
import { Document } from "@langchain/core/documents";
import type { AttributeInfo } from "langchain/chains/query_constructor";
/**
 * First, we create a bunch of documents. You can load your own documents here instead.
 * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
 */
const docs = [
  new Document({
    pageContent:
      "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
    metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
  }),
  new Document({
    pageContent:
      "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
    metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
  }),
  new Document({
    pageContent:
      "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
    metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
  }),
  new Document({
    pageContent:
      "A bunch of normal-sized women are supremely wholesome and some men pine after them",
    metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
  }),
  new Document({
    pageContent: "Toys come alive and have a blast doing so",
    metadata: { year: 1995, genre: "animated" },
  }),
  new Document({
    pageContent: "Three men walk into the Zone, three men walk out of the Zone",
    metadata: {
      year: 1979,
      director: "Andrei Tarkovsky",
      genre: "science fiction",
      rating: 9.9,
    },
  }),
];
/**
 * Next, we define the attributes we want to be able to query on.
 * In this case, we want to be able to query on the genre, year, director, rating, and length of the movie.
 * We also provide a description of each attribute and the type of the attribute.
 * This is used to generate the query prompts.
 */
const attributeInfo: AttributeInfo[] = [
  {
    name: "genre",
    description: "The genre of the movie",
    type: "string or array of strings",
  },
  {
    name: "year",
    description: "The year the movie was released",
    type: "number",
  },
  {
    name: "director",
    description: "The director of the movie",
    type: "string",
  },
  {
    name: "rating",
    description: "The rating of the movie (1-10)",
    type: "number",
  },
  {
    name: "length",
    description: "The length of the movie in minutes",
    type: "number",
  },
];
/**
 * Next, we instantiate a vector store. This is where we store the embeddings of the documents.
 * We also need to provide an embeddings object. This is used to embed the documents.
 */
// Fail fast when the Pinecone connection details are missing.
if (!process.env.PINECONE_API_KEY || !process.env.PINECONE_INDEX) {
  throw new Error("PINECONE_API_KEY and PINECONE_INDEX must be set");
}
const pinecone = new Pinecone();
const index = pinecone.Index(process.env.PINECONE_INDEX);
const embeddings = new OpenAIEmbeddings();
const llm = new OpenAI();
const documentContents = "Brief summary of a movie";
const vectorStore = await PineconeStore.fromDocuments(docs, embeddings, {
  pineconeIndex: index,
});
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  llm,
  vectorStore,
  documentContents,
  attributeInfo,
  /**
   * We need to create a basic translator that translates the queries into a
   * filter format that the vector store can understand. We provide a basic
   * translator here, but you can create your own translator by extending the BaseTranslator
   * abstract class. Note that the vector store needs to support filtering on the metadata
   * attributes you want to query on.
   */
  structuredQueryTranslator: new PineconeTranslator(),
});
/**
 * Now we can query the vector store.
 * We can ask questions like "Which movies are less than 90 minutes?" or "Which movies are rated higher than 8.5?".
 * We can also ask questions like "Which movies are either comedy or drama and are less than 90 minutes?".
 * The retriever will automatically convert these questions into queries that can be used to retrieve documents.
 */
const query1 = await selfQueryRetriever.invoke(
  "Which movies are less than 90 minutes?"
);
const query2 = await selfQueryRetriever.invoke(
  "Which movies are rated higher than 8.5?"
);
const query3 = await selfQueryRetriever.invoke(
  "Which movies are directed by Greta Gerwig?"
);
const query4 = await selfQueryRetriever.invoke(
  "Which movies are either comedy or drama and are less than 90 minutes?"
);
console.log(query1, query2, query3, query4);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/hyde.ts | import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { HydeRetriever } from "langchain/retrievers/hyde";
import { Document } from "@langchain/core/documents";
// Hypothetical Document Embeddings (HyDE) retrieval over an in-memory store.
const embeddings = new OpenAIEmbeddings();
const vectorStore = new MemoryVectorStore(embeddings);
const llm = new OpenAI();
const retriever = new HydeRetriever({
  vectorStore,
  llm,
  k: 1, // return only the single best match
});
await vectorStore.addDocuments(
  [
    "My name is John.",
    "My name is Bob.",
    "My favourite food is pizza.",
    "My favourite food is pasta.",
  ].map((pageContent) => new Document({ pageContent }))
);
const results = await retriever.invoke("What is my favourite food?");
console.log(results);
/*
  [
    Document { pageContent: 'My favourite food is pasta.', metadata: {} }
  ]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/parent_document_retriever.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { ParentDocumentRetriever } from "langchain/retrievers/parent_document";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { InMemoryStore } from "@langchain/core/stores";
// Small chunks are indexed for search; their larger parent chunks are what
// the retriever ultimately returns.
const vectorstore = new MemoryVectorStore(new OpenAIEmbeddings());
const byteStore = new InMemoryStore<Uint8Array>();
const retriever = new ParentDocumentRetriever({
  vectorstore,
  byteStore,
  // Optional, not required if you're already passing in split documents
  parentSplitter: new RecursiveCharacterTextSplitter({
    chunkOverlap: 0,
    chunkSize: 500,
  }),
  childSplitter: new RecursiveCharacterTextSplitter({
    chunkOverlap: 0,
    chunkSize: 50,
  }),
  // Optional `k` parameter to search for more child documents in VectorStore.
  // Note that this does not exactly correspond to the number of final (parent) documents
  // retrieved, as multiple child documents can point to the same parent.
  childK: 20,
  // Optional `k` parameter to limit number of final, parent documents returned from this
  // retriever and sent to LLM. This is an upper-bound, and the final count may be lower than this.
  parentK: 5,
});
const textLoader = new TextLoader("../examples/state_of_the_union.txt");
const parentDocuments = await textLoader.load();
// We must add the parent documents via the retriever's addDocuments method
// so both the child index and the parent byte store stay in sync
await retriever.addDocuments(parentDocuments);
const retrievedDocs = await retriever.invoke("justice breyer");
// Retrieved chunks are the larger parent chunks
console.log(retrievedDocs);
/*
  [
    Document {
      pageContent: 'Tonight, I call on the Senate to pass — pass the Freedom to Vote Act. Pass the John Lewis Act — Voting Rights Act. And while you’re at it, pass the DISCLOSE Act so Americans know who is funding our elections.\n' +
        '\n' +
        'Look, tonight, I’d — I’d like to honor someone who has dedicated his life to serve this country: Justice Breyer — an Army veteran, Constitutional scholar, retiring Justice of the United States Supreme Court.',
      metadata: { source: '../examples/state_of_the_union.txt', loc: [Object] }
    },
    Document {
      pageContent: 'As I did four days ago, I’ve nominated a Circuit Court of Appeals — Ketanji Brown Jackson. One of our nation’s top legal minds who will continue in just Brey- — Justice Breyer’s legacy of excellence. A former top litigator in private practice, a former federal public defender from a family of public-school educators and police officers — she’s a consensus builder.',
      metadata: { source: '../examples/state_of_the_union.txt', loc: [Object] }
    },
    Document {
      pageContent: 'Justice Breyer, thank you for your service. Thank you, thank you, thank you. I mean it. Get up. Stand — let me see you. Thank you.\n' +
        '\n' +
        'And we all know — no matter what your ideology, we all know one of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.',
      metadata: { source: '../examples/state_of_the_union.txt', loc: [Object] }
    }
  ]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/amazon_knowledge_bases.ts | import { AmazonKnowledgeBaseRetriever } from "@langchain/aws";
// Retrieve from an Amazon Bedrock knowledge base. Replace the placeholder
// knowledge base id and credentials with real values before running.
const retriever = new AmazonKnowledgeBaseRetriever({
  topK: 10,
  knowledgeBaseId: "YOUR_KNOWLEDGE_BASE_ID",
  region: "us-east-2",
  clientOptions: {
    credentials: {
      accessKeyId: "YOUR_ACCESS_KEY_ID",
      secretAccessKey: "YOUR_SECRET_ACCESS_KEY",
    },
  },
});
const docs = await retriever.invoke("How are clouds formed?");
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/memory_self_query.ts | import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { FunctionalTranslator } from "@langchain/core/structured_query";
import { Document } from "@langchain/core/documents";
import type { AttributeInfo } from "langchain/chains/query_constructor";
/**
 * First, we create a bunch of documents. You can load your own documents here instead.
 * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
 */
const docs = [
  new Document({
    pageContent:
      "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
    metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
  }),
  new Document({
    pageContent:
      "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
    metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
  }),
  new Document({
    pageContent:
      "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
    metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
  }),
  new Document({
    pageContent:
      "A bunch of normal-sized women are supremely wholesome and some men pine after them",
    metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
  }),
  new Document({
    pageContent: "Toys come alive and have a blast doing so",
    metadata: { year: 1995, genre: "animated" },
  }),
  new Document({
    pageContent: "Three men walk into the Zone, three men walk out of the Zone",
    metadata: {
      year: 1979,
      director: "Andrei Tarkovsky",
      genre: "science fiction",
      rating: 9.9,
    },
  }),
];
/**
 * Next, we define the attributes we want to be able to query on.
 * In this case, we want to be able to query on the genre, year, director, rating, and length of the movie.
 * We also provide a description of each attribute and the type of the attribute.
 * This is used to generate the query prompts.
 */
const attributeInfo: AttributeInfo[] = [
  {
    name: "genre",
    description: "The genre of the movie",
    type: "string or array of strings",
  },
  {
    name: "year",
    description: "The year the movie was released",
    type: "number",
  },
  {
    name: "director",
    description: "The director of the movie",
    type: "string",
  },
  {
    name: "rating",
    description: "The rating of the movie (1-10)",
    type: "number",
  },
  {
    name: "length",
    description: "The length of the movie in minutes",
    type: "number",
  },
];
/**
 * Next, we instantiate a vector store. This is where we store the embeddings of the documents.
 * We also need to provide an embeddings object. This is used to embed the documents.
 */
const embeddings = new OpenAIEmbeddings();
const llm = new OpenAI();
const documentContents = "Brief summary of a movie";
const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  llm,
  vectorStore,
  documentContents,
  attributeInfo,
  /**
   * We need to use a translator that translates the queries into a
   * filter format that the vector store can understand. We provide a basic
   * translator here, but you can create your own translator by extending the BaseTranslator
   * abstract class. Note that the vector store needs to support filtering on the metadata
   * attributes you want to query on.
   */
  structuredQueryTranslator: new FunctionalTranslator(),
});
/**
 * Now we can query the vector store.
 * We can ask questions like "Which movies are less than 90 minutes?" or "Which movies are rated higher than 8.5?".
 * We can also ask questions like "Which movies are either comedy or drama and are less than 90 minutes?".
 * The retriever will automatically convert these questions into queries that can be used to retrieve documents.
 */
const query1 = await selfQueryRetriever.invoke(
  "Which movies are less than 90 minutes?"
);
const query2 = await selfQueryRetriever.invoke(
  "Which movies are rated higher than 8.5?"
);
const query3 = await selfQueryRetriever.invoke(
  "Which movies are directed by Greta Gerwig?"
);
const query4 = await selfQueryRetriever.invoke(
  "Which movies are either comedy or drama and are less than 90 minutes?"
);
console.log(query1, query2, query3, query4);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/multi_vector_hypothetical.ts | import * as uuid from "uuid";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MultiVectorRetriever } from "langchain/retrievers/multi_vector";
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { InMemoryStore } from "@langchain/core/stores";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { PromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { Document } from "@langchain/core/documents";
import { JsonKeyOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions";
const textLoader = new TextLoader("../examples/state_of_the_union.txt");
const parentDocuments = await textLoader.load();
// Split the source text into large chunks; questions are generated per chunk.
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 10000,
  chunkOverlap: 20,
});
const docs = await splitter.splitDocuments(parentDocuments);
// OpenAI function schema forcing the model to return an array of strings.
const functionsSchema = [
  {
    name: "hypothetical_questions",
    description: "Generate hypothetical questions",
    parameters: {
      type: "object",
      properties: {
        questions: {
          type: "array",
          items: {
            type: "string",
          },
        },
      },
      required: ["questions"],
    },
  },
];
const functionCallingModel = new ChatOpenAI({
  maxRetries: 0,
  model: "gpt-4",
}).bind({
  functions: functionsSchema,
  function_call: { name: "hypothetical_questions" },
});
// doc -> prompt -> function-calling model -> string[] of questions
const chain = RunnableSequence.from([
  { content: (doc: Document) => doc.pageContent },
  PromptTemplate.fromTemplate(
    `Generate a list of 3 hypothetical questions that the below document could be used to answer:\n\n{content}`
  ),
  functionCallingModel,
  new JsonKeyOutputFunctionsParser<string[]>({ attrName: "questions" }),
]);
const hypotheticalQuestions = await chain.batch(docs, {
  maxConcurrency: 5,
});
// Metadata key linking each derived question back to its source chunk.
const idKey = "doc_id";
const docIds = docs.map((_) => uuid.v4());
// One Document per generated question, each tagged with the id of the source
// chunk it was derived from so the retriever can map back to the original.
const hypotheticalQuestionDocs = hypotheticalQuestions.flatMap(
  (questions, chunkIndex) =>
    questions.map(
      (question) =>
        new Document({
          pageContent: question,
          metadata: { [idKey]: docIds[chunkIndex] },
        })
    )
);
// The byteStore to use to store the original chunks
const byteStore = new InMemoryStore<Uint8Array>();
// The vectorstore to use to index the child chunks
const vectorstore = await FaissStore.fromDocuments(
  hypotheticalQuestionDocs,
  new OpenAIEmbeddings()
);
const retriever = new MultiVectorRetriever({
  vectorstore,
  byteStore,
  idKey,
});
// Pair each original chunk with its generated id for the document store.
const keyValuePairs: [string, Document][] = docs.map((originalDoc, i) => [
  docIds[i],
  originalDoc,
]);
// Use the retriever to add the original chunks to the document store
await retriever.docstore.mset(keyValuePairs);
// We could also add the original chunks to the vectorstore if we wish
// const taggedOriginalDocs = docs.map((doc, i) => {
//   doc.metadata[idKey] = docIds[i];
//   return doc;
// });
// retriever.vectorstore.addDocuments(taggedOriginalDocs);
// Vectorstore alone retrieves the small chunks
const vectorstoreResult = await retriever.vectorstore.similaritySearch(
  "justice breyer"
);
console.log(vectorstoreResult[0].pageContent);
/*
  "What measures will be taken to crack down on corporations overcharging American businesses and consumers?"
*/
// Retriever returns larger result
const retrieverResult = await retriever.invoke("justice breyer");
console.log(retrieverResult[0].pageContent.length);
/*
  9770
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/multi_vector_summary.ts | import * as uuid from "uuid";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MultiVectorRetriever } from "langchain/retrievers/multi_vector";
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { InMemoryStore } from "@langchain/core/stores";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { PromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { RunnableSequence } from "@langchain/core/runnables";
import { Document } from "@langchain/core/documents";
const textLoader = new TextLoader("../examples/state_of_the_union.txt");
const parentDocuments = await textLoader.load();
// Split the source text into large chunks; a summary is generated per chunk.
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 10000,
  chunkOverlap: 20,
});
const docs = await splitter.splitDocuments(parentDocuments);
// doc -> summarization prompt -> chat model -> summary string
const chain = RunnableSequence.from([
  { content: (doc: Document) => doc.pageContent },
  PromptTemplate.fromTemplate(`Summarize the following document:\n\n{content}`),
  new ChatOpenAI({
    maxRetries: 0,
  }),
  new StringOutputParser(),
]);
const summaries = await chain.batch(docs, {
  maxConcurrency: 5,
});
// Metadata key linking each summary back to its source chunk.
const idKey = "doc_id";
const docIds = docs.map((_) => uuid.v4());
const summaryDocs = summaries.map((summary, i) => {
  const summaryDoc = new Document({
    pageContent: summary,
    metadata: {
      [idKey]: docIds[i],
    },
  });
  return summaryDoc;
});
// The byteStore to use to store the original chunks
const byteStore = new InMemoryStore<Uint8Array>();
// The vectorstore to use to index the child chunks
const vectorstore = await FaissStore.fromDocuments(
  summaryDocs,
  new OpenAIEmbeddings()
);
const retriever = new MultiVectorRetriever({
  vectorstore,
  byteStore,
  idKey,
});
const keyValuePairs: [string, Document][] = docs.map((originalDoc, i) => [
  docIds[i],
  originalDoc,
]);
// Use the retriever to add the original chunks to the document store
await retriever.docstore.mset(keyValuePairs);
// We could also add the original chunks to the vectorstore if we wish
// const taggedOriginalDocs = docs.map((doc, i) => {
//   doc.metadata[idKey] = docIds[i];
//   return doc;
// });
// retriever.vectorstore.addDocuments(taggedOriginalDocs);
// Vectorstore alone retrieves the small chunks
const vectorstoreResult = await retriever.vectorstore.similaritySearch(
  "justice breyer"
);
console.log(vectorstoreResult[0].pageContent.length);
/*
  1118
*/
// Retriever returns larger result
const retrieverResult = await retriever.invoke("justice breyer");
console.log(retrieverResult[0].pageContent.length);
/*
  9770
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/multi_query_custom.ts | import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { CohereEmbeddings } from "@langchain/cohere";
import { MultiQueryRetriever } from "langchain/retrievers/multi_query";
import { LLMChain } from "langchain/chains";
import { pull } from "langchain/hub";
import { BaseOutputParser } from "@langchain/core/output_parsers";
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatAnthropic } from "@langchain/anthropic";
type LineList = {
  lines: string[];
};

/**
 * Parses LLM output that wraps its generated questions in
 * `<questions>...</questions>` tags into a list of non-empty lines.
 * If a tag is missing, falls back to the corresponding end of the text.
 */
class LineListOutputParser extends BaseOutputParser<LineList> {
  static lc_name() {
    return "LineListOutputParser";
  }

  lc_namespace = ["langchain", "retrievers", "multiquery"];

  async parse(text: string): Promise<LineList> {
    const openTag = "<questions>";
    const closeTag = "</questions>";
    const openAt = text.indexOf(openTag);
    const closeAt = text.indexOf(closeTag);
    // Default to the start/end of the whole text when a tag is absent.
    const from = openAt === -1 ? 0 : openAt + openTag.length;
    const to = closeAt === -1 ? text.length : closeAt;
    const lines = text
      .slice(from, to)
      .trim()
      .split("\n")
      .filter((candidate) => candidate.trim() !== "");
    return { lines };
  }

  getFormatInstructions(): string {
    throw new Error("Not implemented.");
  }
}
// Default prompt is available at: https://smith.langchain.com/hub/jacob/multi-vector-retriever
const prompt: PromptTemplate = await pull(
  "jacob/multi-vector-retriever-german"
);
// Index a small German-language corpus. Every text gets a matching metadata
// entry. (Previously only the first five texts had one, which left the two
// mitochondria documents with empty metadata in the retrieved results.)
const vectorstore = await MemoryVectorStore.fromTexts(
  [
    "Gebäude werden aus Ziegelsteinen hergestellt",
    "Gebäude werden aus Holz hergestellt",
    "Gebäude werden aus Stein hergestellt",
    "Autos werden aus Metall hergestellt",
    "Autos werden aus Kunststoff hergestellt",
    "Mitochondrien sind die Energiekraftwerke der Zelle",
    "Mitochondrien bestehen aus Lipiden",
  ],
  [
    { id: 1 },
    { id: 2 },
    { id: 3 },
    { id: 4 },
    { id: 5 },
    { id: 6 },
    { id: 7 },
  ],
  // NOTE(review): the corpus is German; Cohere's "embed-multilingual-v3.0"
  // may be a better fit than the English-only model — confirm before changing.
  new CohereEmbeddings({ model: "embed-english-v3.0" })
);
const model = new ChatAnthropic({});
// Chain that generates multiple query variants, parsed into lines by the
// custom LineListOutputParser above.
const llmChain = new LLMChain({
  llm: model,
  prompt,
  outputParser: new LineListOutputParser(),
});
const retriever = new MultiQueryRetriever({
  retriever: vectorstore.asRetriever(),
  llmChain,
  verbose: true,
});
const query = "What are mitochondria made of?";
const retrievedDocs = await retriever.invoke(query);
/*
  Generated queries: Was besteht ein Mitochondrium?,Aus welchen Komponenten setzt sich ein Mitochondrium zusammen? ,Welche Moleküle finden sich in einem Mitochondrium?
*/
console.log(retrievedDocs);
/*
  [
    Document {
      pageContent: 'Mitochondrien bestehen aus Lipiden',
      metadata: { id: 7 }
    },
    Document {
      pageContent: 'Mitochondrien sind die Energiekraftwerke der Zelle',
      metadata: { id: 6 }
    },
    Document {
      pageContent: 'Autos werden aus Metall hergestellt',
      metadata: { id: 4 }
    },
    Document {
      pageContent: 'Gebäude werden aus Holz hergestellt',
      metadata: { id: 2 }
    },
    Document {
      pageContent: 'Gebäude werden aus Ziegelsteinen hergestellt',
      metadata: { id: 1 }
    }
  ]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/vectara_self_query.ts | import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { OpenAI } from "@langchain/openai";
import { VectaraStore } from "@langchain/community/vectorstores/vectara";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { Document } from "@langchain/core/documents";
import { VectaraTranslator } from "@langchain/community/structured_query/vectara";
import type { AttributeInfo } from "langchain/chains/query_constructor";
/**
 * First, we create a bunch of documents. You can load your own documents here instead.
 * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
 */
const docs = [
  new Document({
    pageContent:
      "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
    metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
  }),
  new Document({
    pageContent:
      "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
    metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
  }),
  new Document({
    pageContent:
      "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
    metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
  }),
  new Document({
    pageContent:
      "A bunch of normal-sized women are supremely wholesome and some men pine after them",
    metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
  }),
  new Document({
    pageContent: "Toys come alive and have a blast doing so",
    metadata: { year: 1995, genre: "animated" },
  }),
  new Document({
    pageContent: "Three men walk into the Zone, three men walk out of the Zone",
    metadata: {
      year: 1979,
      rating: 9.9,
      director: "Andrei Tarkovsky",
      genre: "science fiction",
    },
  }),
];
/**
 * Next, we define the attributes we want to be able to query on.
 * In this case, we want to be able to query on the genre, year, director, and rating of the movie.
 * We also provide a description of each attribute and the type of the attribute.
 * This is used to generate the query prompts.
 *
 * We need to set up the filters in Vectara as well, otherwise filtering won't work.
 * To set up the filters in Vectara, go to Data -> {your_created_corpus} -> overview.
 * In the overview section edit the filters section and add all the following attributes
 * to the filters.
 */
const attributeInfo: AttributeInfo[] = [
  {
    name: "genre",
    description: "The genre of the movie",
    type: "string or array of strings",
  },
  {
    name: "year",
    description: "The year the movie was released",
    type: "number",
  },
  {
    name: "director",
    description: "The director of the movie",
    type: "string",
  },
  {
    name: "rating",
    description: "The rating of the movie (1-10)",
    type: "number",
  },
];
/**
 * Next, we instantiate a vector store. This is where we store the embeddings of the documents.
 * We also need to provide an embeddings object. This is used to embed the documents.
 */
// Requires VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and VECTARA_API_KEY to be set
// in the environment.
const config = {
  customerId: Number(process.env.VECTARA_CUSTOMER_ID),
  corpusId: Number(process.env.VECTARA_CORPUS_ID),
  apiKey: String(process.env.VECTARA_API_KEY),
  verbose: true,
};
// NOTE(review): FakeEmbeddings is used as a placeholder here — presumably
// Vectara computes embeddings server-side; confirm against the integration docs.
const vectorStore = await VectaraStore.fromDocuments(
  docs,
  new FakeEmbeddings(),
  config
);
const llm = new OpenAI();
const documentContents = "Brief summary of a movie";
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  llm,
  vectorStore,
  documentContents,
  attributeInfo,
  /**
   * We need to create a basic translator that translates the queries into a
   * filter format that the vector store can understand. We provide a basic translator
   * here, but you can create your own translator by extending BaseTranslator
   * abstract class. Note that the vector store needs to support filtering on the metadata
   * attributes you want to query on.
   */
  structuredQueryTranslator: new VectaraTranslator<VectaraStore>(),
});
/**
 * Now we can query the vector store.
 * We can ask questions like "Which movies are rated higher than 8.5?" or
 * "Which movies are directed by Greta Gerwig?".
 * We can also combine filters, e.g. "Which movies are either comedy or science fiction and are rated higher than 8.5?".
 * The retriever will automatically convert these questions into queries that can be used to retrieve documents.
 */
const query1 = await selfQueryRetriever.invoke(
  "What are some movies about dinosaurs"
);
const query2 = await selfQueryRetriever.invoke(
  "I want to watch a movie rated higher than 8.5"
);
const query3 = await selfQueryRetriever.invoke(
  "Which movies are directed by Greta Gerwig?"
);
const query4 = await selfQueryRetriever.invoke(
  "Which movies are either comedy or science fiction and are rated higher than 8.5?"
);
console.log(query1, query2, query3, query4);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/tavily.ts | import { TavilySearchAPIRetriever } from "@langchain/community/retrievers/tavily_search_api";
// Web-search-backed retriever. Presumably reads the Tavily API key from the
// environment — verify against the integration docs.
const retriever = new TavilySearchAPIRetriever({
  k: 3, // number of search results to return
});
const retrievedDocs = await retriever.invoke(
  "What did the speaker say about Justice Breyer in the 2022 State of the Union?"
);
console.log({ retrievedDocs });
/*
{
retrievedDocs: [
Document {
pageContent: `Shy Justice Br eyer. During his remarks, the president paid tribute to retiring Supreme Court Justice Stephen Breyer. "Tonight, I'd like to honor someone who dedicated his life to...`,
metadata: [Object]
},
Document {
pageContent: 'Fact Check. Ukraine. 56 Posts. Sort by. 10:16 p.m. ET, March 1, 2022. Biden recognized outgoing Supreme Court Justice Breyer during his speech. President Biden recognized outgoing...',
metadata: [Object]
},
Document {
pageContent: `In his State of the Union address on March 1, Biden thanked Breyer for his service. "I'd like to honor someone who has dedicated his life to serve this country: Justice Breyer — an Army...`,
metadata: [Object]
}
]
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/matryoshka_retriever.ts | import { MatryoshkaRetriever } from "langchain/retrievers/matryoshka_retriever";
import { Chroma } from "@langchain/community/vectorstores/chroma";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { faker } from "@faker-js/faker";
// Cheap low-dimensional embeddings used for the initial (broad) search pass.
const smallEmbeddings = new OpenAIEmbeddings({
  model: "text-embedding-3-small",
  dimensions: 512, // Min number for small
});
// High-dimensional embeddings used to re-rank the initial candidates.
const largeEmbeddings = new OpenAIEmbeddings({
  model: "text-embedding-3-large",
  dimensions: 3072, // Max number for large
});
const vectorStore = new Chroma(smallEmbeddings, {
  numDimensions: 512,
});
const retriever = new MatryoshkaRetriever({
  vectorStore,
  largeEmbeddingModel: largeEmbeddings,
  largeK: 5, // number of documents returned after the large-embedding pass
});
// Filler documents to make the search non-trivial.
// NOTE(review): faker.lorem.word(7) yields a single 7-letter word, not a
// sentence — "similar length to the relevant docs" looks inaccurate; confirm
// whether faker.lorem.sentence() was intended.
const irrelevantDocs = Array.from({ length: 250 }).map(
  () =>
    new Document({
      pageContent: faker.lorem.word(7), // Similar length to the relevant docs
    })
);
const relevantDocs = [
  new Document({
    pageContent: "LangChain is an open source github repo",
  }),
  new Document({
    pageContent: "There are JS and PY versions of the LangChain github repos",
  }),
  new Document({
    pageContent: "LangGraph is a new open source library by the LangChain team",
  }),
  new Document({
    pageContent: "LangChain announced GA of LangSmith last week!",
  }),
  new Document({
    pageContent: "I heart LangChain",
  }),
];
const allDocs = [...irrelevantDocs, ...relevantDocs];
/**
 * IMPORTANT:
 * The `addDocuments` method on `MatryoshkaRetriever` will
 * generate the small AND large embeddings for all documents.
 */
await retriever.addDocuments(allDocs);
const query = "What is LangChain?";
const results = await retriever.invoke(query);
console.log(results.map(({ pageContent }) => pageContent).join("\n"));
/**
I heart LangChain
LangGraph is a new open source library by the LangChain team
LangChain is an open source github repo
LangChain announced GA of LangSmith last week!
There are JS and PY versions of the LangChain github repos
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/kendra.ts | import { AmazonKendraRetriever } from "@langchain/aws";
// Retriever backed by an Amazon Kendra index; replace the placeholders with
// your own index id, region and AWS credentials.
const retriever = new AmazonKendraRetriever({
  topK: 10, // number of results to return
  indexId: "YOUR_INDEX_ID",
  region: "us-east-2", // Your region
  clientOptions: {
    credentials: {
      accessKeyId: "YOUR_ACCESS_KEY_ID",
      secretAccessKey: "YOUR_SECRET_ACCESS_KEY",
    },
  },
});
const docs = await retriever.invoke("How are clouds formed?");
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/parent_document_retriever_chunk_header.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { InMemoryStore } from "@langchain/core/stores";
import { ParentDocumentRetriever } from "langchain/retrievers/parent_document";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
// Splitter that produces the (large) parent documents.
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 1500,
  chunkOverlap: 0,
});
const jimDocs = await splitter.createDocuments([`My favorite color is blue.`]);
// Header prepended to each child chunk so the embedding carries its source.
const jimChunkHeaderOptions = {
  chunkHeader: "DOC NAME: Jim Interview\n---\n",
  appendChunkOverlapHeader: true,
};
const pamDocs = await splitter.createDocuments([`My favorite color is red.`]);
const pamChunkHeaderOptions = {
  chunkHeader: "DOC NAME: Pam Interview\n---\n",
  appendChunkOverlapHeader: true,
};
// Start with an empty vector store; documents are added via the retriever below.
const vectorstore = await HNSWLib.fromDocuments([], new OpenAIEmbeddings())
const byteStore = new InMemoryStore<Uint8Array>();
const retriever = new ParentDocumentRetriever({
  vectorstore,
  byteStore,
  // Very small chunks for demo purposes.
  // Use a bigger chunk size for serious use-cases.
  childSplitter: new RecursiveCharacterTextSplitter({
    chunkSize: 10,
    chunkOverlap: 0,
  }),
  childK: 50,
  parentK: 5,
});
// We pass additional option `childDocChunkHeaderOptions`
// that will add the chunk header to child documents
await retriever.addDocuments(jimDocs, {
  childDocChunkHeaderOptions: jimChunkHeaderOptions,
});
await retriever.addDocuments(pamDocs, {
  childDocChunkHeaderOptions: pamChunkHeaderOptions,
});
// This will search child documents in vector store with the help of chunk header,
// returning the unmodified parent documents
const retrievedDocs = await retriever.invoke("What is Pam's favorite color?");
// Pam's favorite color is returned first!
console.log(JSON.stringify(retrievedDocs, null, 2));
/*
[
{
"pageContent": "My favorite color is red.",
"metadata": {
"loc": {
"lines": {
"from": 1,
"to": 1
}
}
}
},
{
"pageContent": "My favorite color is blue.",
"metadata": {
"loc": {
"lines": {
"from": 1,
"to": 1
}
}
}
}
]
*/
// Query the vector store directly to inspect the indexed child chunks.
const rawDocs = await vectorstore.similaritySearch(
  "What is Pam's favorite color?"
);
// Raw docs in vectorstore are short but have chunk headers
console.log(JSON.stringify(rawDocs, null, 2));
/*
[
{
"pageContent": "DOC NAME: Pam Interview\n---\n(cont'd) color is",
"metadata": {
"loc": {
"lines": {
"from": 1,
"to": 1
}
},
"doc_id": "affdcbeb-6bfb-42e9-afe5-80f4f2e9f6aa"
}
},
{
"pageContent": "DOC NAME: Pam Interview\n---\n(cont'd) favorite",
"metadata": {
"loc": {
"lines": {
"from": 1,
"to": 1
}
},
"doc_id": "affdcbeb-6bfb-42e9-afe5-80f4f2e9f6aa"
}
},
{
"pageContent": "DOC NAME: Pam Interview\n---\n(cont'd) red.",
"metadata": {
"loc": {
"lines": {
"from": 1,
"to": 1
}
},
"doc_id": "affdcbeb-6bfb-42e9-afe5-80f4f2e9f6aa"
}
},
{
"pageContent": "DOC NAME: Pam Interview\n---\nMy",
"metadata": {
"loc": {
"lines": {
"from": 1,
"to": 1
}
},
"doc_id": "affdcbeb-6bfb-42e9-afe5-80f4f2e9f6aa"
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/dria.ts | import { DriaRetriever } from "@langchain/community/retrievers/dria";
// contract of TypeScript Handbook v4.9 uploaded to Dria
// https://dria.co/knowledge/-B64DjhUtCwBdXSpsRytlRQCu-bie-vSTvTIT8Ap3g0
const contractId = "-B64DjhUtCwBdXSpsRytlRQCu-bie-vSTvTIT8Ap3g0";
const retriever = new DriaRetriever({
  contractId, // id of the knowledge (contract) to connect to
  apiKey: "DRIA_API_KEY", // if not provided, will check env for `DRIA_API_KEY`
  topK: 15, // optional: default value is 10
});
const docs = await retriever.invoke("What is a union type?");
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/qdrant_self_query.ts | import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { QdrantVectorStore } from "@langchain/qdrant";
import { QdrantTranslator } from "@langchain/community/structured_query/qdrant";
import { Document } from "@langchain/core/documents";
import type { AttributeInfo } from "langchain/chains/query_constructor";
import { QdrantClient } from "@qdrant/js-client-rest";
/**
 * First, we create a bunch of documents. You can load your own documents here instead.
 * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
 */
const docs = [
  new Document({
    pageContent:
      "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
    metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
  }),
  new Document({
    pageContent:
      "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
    metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
  }),
  new Document({
    pageContent:
      "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
    metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
  }),
  new Document({
    pageContent:
      "A bunch of normal-sized women are supremely wholesome and some men pine after them",
    metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
  }),
  new Document({
    pageContent: "Toys come alive and have a blast doing so",
    metadata: { year: 1995, genre: "animated" },
  }),
  new Document({
    pageContent: "Three men walk into the Zone, three men walk out of the Zone",
    metadata: {
      year: 1979,
      director: "Andrei Tarkovsky",
      genre: "science fiction",
      rating: 9.9,
    },
  }),
];
/**
 * Next, we define the attributes we want to be able to query on.
 * in this case, we want to be able to query on the genre, year, director, rating, and length of the movie.
 * We also provide a description of each attribute and the type of the attribute.
 * This is used to generate the query prompts.
 */
const attributeInfo: AttributeInfo[] = [
  {
    name: "genre",
    description: "The genre of the movie",
    type: "string or array of strings",
  },
  {
    name: "year",
    description: "The year the movie was released",
    type: "number",
  },
  {
    name: "director",
    description: "The director of the movie",
    type: "string",
  },
  {
    name: "rating",
    description: "The rating of the movie (1-10)",
    type: "number",
  },
  {
    name: "length",
    description: "The length of the movie in minutes",
    type: "number",
  },
];
/**
 * Next, we instantiate a vector store. This is where we store the embeddings of the documents.
 * We also need to provide an embeddings object. This is used to embed the documents.
 */
// Assumes a Qdrant instance is running locally (e.g. via Docker).
const QDRANT_URL = "http://127.0.0.1:6333";
const QDRANT_COLLECTION_NAME = "some-collection-name";
const client = new QdrantClient({ url: QDRANT_URL });
const embeddings = new OpenAIEmbeddings();
const llm = new OpenAI();
const documentContents = "Brief summary of a movie";
const vectorStore = await QdrantVectorStore.fromDocuments(docs, embeddings, {
  client,
  collectionName: QDRANT_COLLECTION_NAME,
});
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  llm,
  vectorStore,
  documentContents,
  attributeInfo,
  /**
   * We need to create a basic translator that translates the queries into a
   * filter format that the vector store can understand. We provide a basic
   * translator here, but you can create your own translator by extending BaseTranslator
   * abstract class. Note that the vector store needs to support filtering on the metadata
   * attributes you want to query on.
   */
  structuredQueryTranslator: new QdrantTranslator(),
});
/**
 * Now we can query the vector store.
 * We can ask questions like "Which movies are less than 90 minutes?" or "Which movies are rated higher than 8.5?".
 * We can also ask questions like "Which movies are either comedy or drama and are less than 90 minutes?".
 * The retriever will automatically convert these questions into queries that can be used to retrieve documents.
 */
const query1 = await selfQueryRetriever.invoke(
  "Which movies are less than 90 minutes?"
);
const query2 = await selfQueryRetriever.invoke(
  "Which movies are rated higher than 8.5?"
);
const query3 = await selfQueryRetriever.invoke(
  "Which cool movies are directed by Greta Gerwig?"
);
const query4 = await selfQueryRetriever.invoke(
  "Which movies are either comedy or drama and are less than 90 minutes?"
);
console.log(query1, query2, query3, query4);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/custom.ts | import {
BaseRetriever,
type BaseRetrieverInput,
} from "@langchain/core/retrievers";
import type { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
import { Document } from "@langchain/core/documents";
/**
* interface BaseRetrieverInput {
* callbacks?: Callbacks;
* tags?: string[];
* metadata?: Record<string, unknown>;
* verbose?: boolean;
* }
*/
export interface CustomRetrieverInput extends BaseRetrieverInput {}

/**
 * Minimal example of a custom retriever: for any query it returns two
 * synthetic documents whose content is derived from the query string.
 */
export class CustomRetriever extends BaseRetriever {
  lc_namespace = ["langchain", "retrievers"];

  constructor(fields?: CustomRetrieverInput) {
    super(fields);
  }

  async _getRelevantDocuments(
    query: string,
    // Use with sub runs for tracing; pass `_runManager?.getChild()` into any
    // runnables invoked here to propagate tracing config.
    _runManager?: CallbackManagerForRetrieverRun
  ): Promise<Document[]> {
    const contents = [
      `Some document pertaining to ${query}`,
      `Some other document pertaining to ${query}`,
    ];
    return contents.map(
      (pageContent) => new Document({ pageContent, metadata: {} })
    );
  }
}
// Demo: invoke the custom retriever and print its synthetic documents.
const retriever = new CustomRetriever({});
console.log(await retriever.invoke("LangChain docs"));
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/document_compressor_pipeline.ts | import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { OpenAIEmbeddings } from "@langchain/openai";
import { ContextualCompressionRetriever } from "langchain/retrievers/contextual_compression";
import { EmbeddingsFilter } from "langchain/retrievers/document_compressors/embeddings_filter";
import { TavilySearchAPIRetriever } from "@langchain/community/retrievers/tavily_search_api";
import { DocumentCompressorPipeline } from "langchain/retrievers/document_compressors";
// Keeps at most k chunks whose embedding similarity to the query passes the threshold.
const embeddingsFilter = new EmbeddingsFilter({
  embeddings: new OpenAIEmbeddings(),
  similarityThreshold: 0.8,
  k: 5,
});
// Splits retrieved pages into small chunks before filtering.
const textSplitter = new RecursiveCharacterTextSplitter({
  chunkSize: 200,
  chunkOverlap: 0,
});
// Pipeline: split first, then filter by embedding similarity.
const compressorPipeline = new DocumentCompressorPipeline({
  transformers: [textSplitter, embeddingsFilter],
});
const baseRetriever = new TavilySearchAPIRetriever({
  includeRawContent: true,
});
// Wraps the base retriever so its results pass through the compressor pipeline.
const retriever = new ContextualCompressionRetriever({
  baseCompressor: compressorPipeline,
  baseRetriever,
});
const retrievedDocs = await retriever.invoke(
  "What did the speaker say about Justice Breyer in the 2022 State of the Union?"
);
console.log({ retrievedDocs });
/*
{
retrievedDocs: [
Document {
pageContent: 'Justice Stephen Breyer talks to President Joe Biden ahead of the State of the Union address on Tuesday. (jabin botsford/Agence France-Presse/Getty Images)',
metadata: [Object]
},
Document {
pageContent: 'President Biden recognized outgoing US Supreme Court Justice Stephen Breyer during his State of the Union on Tuesday.',
metadata: [Object]
},
Document {
pageContent: 'What we covered here\n' +
'Biden recognized outgoing Supreme Court Justice Breyer during his speech',
metadata: [Object]
},
Document {
pageContent: 'States Supreme Court. Justice Breyer, thank you for your service,” the president said.',
metadata: [Object]
},
Document {
pageContent: 'Court," Biden said. "Justice Breyer, thank you for your service."',
metadata: [Object]
}
]
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/multi_vector_small_chunks.ts | import * as uuid from "uuid";
import { MultiVectorRetriever } from "langchain/retrievers/multi_vector";
import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { OpenAIEmbeddings } from "@langchain/openai";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { InMemoryStore } from "@langchain/core/stores";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { Document } from "@langchain/core/documents";
// Load the source text and split it into large "parent" documents.
const textLoader = new TextLoader("../examples/state_of_the_union.txt");
const parentDocuments = await textLoader.load();
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 10000,
  chunkOverlap: 20,
});
const docs = await splitter.splitDocuments(parentDocuments);
// Metadata key linking each child chunk to its parent document.
const idKey = "doc_id";
const docIds = docs.map((_) => uuid.v4());
// Splitter for the small "child" chunks that get embedded.
const childSplitter = new RecursiveCharacterTextSplitter({
  chunkSize: 400,
  chunkOverlap: 0,
});
// Split each parent document into child chunks, tagging every child with its
// parent's id so the retriever can map small chunks back to the parent.
const subDocs: Document[] = [];
for (const [parentIndex, parentDoc] of docs.entries()) {
  const childDocs = await childSplitter.splitDocuments([parentDoc]);
  for (const childDoc of childDocs) {
    childDoc.metadata[idKey] = docIds[parentIndex];
    subDocs.push(childDoc);
  }
}
// The byteStore to use to store the original chunks
const byteStore = new InMemoryStore<Uint8Array>();
// The vectorstore to use to index the child chunks
const vectorstore = await FaissStore.fromDocuments(
  subDocs,
  new OpenAIEmbeddings()
);
const retriever = new MultiVectorRetriever({
  vectorstore,
  byteStore,
  idKey,
  // Optional `k` parameter to search for more child documents in VectorStore.
  // Note that this does not exactly correspond to the number of final (parent) documents
  // retrieved, as multiple child documents can point to the same parent.
  childK: 20,
  // Optional `k` parameter to limit number of final, parent documents returned from this
  // retriever and sent to LLM. This is an upper-bound, and the final count may be lower than this.
  parentK: 5,
});
// Pair each parent document id with the parent document itself.
const keyValuePairs: [string, Document][] = docs.map((originalDoc, i) => [
  docIds[i],
  originalDoc,
]);
// Use the retriever to add the original chunks to the document store
await retriever.docstore.mset(keyValuePairs);
// Vectorstore alone retrieves the small chunks
const vectorstoreResult = await retriever.vectorstore.similaritySearch(
  "justice breyer"
);
console.log(vectorstoreResult[0].pageContent.length);
/*
  390
*/
// Retriever returns larger result
const retrieverResult = await retriever.invoke("justice breyer");
console.log(retrieverResult[0].pageContent.length);
/*
9770
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/exa.ts | import { ExaRetriever } from "@langchain/exa";
import Exa from "exa-js";
// Retriever backed by the Exa search API; the Exa client reads its API key
// from the EXASEARCH_API_KEY environment variable here.
const retriever = new ExaRetriever({
  // @ts-expect-error Some TS Config's will cause this to give a TypeScript error, even though it works.
  client: new Exa(
    process.env.EXASEARCH_API_KEY // default API key
  ),
});
const retrievedDocs = await retriever.invoke(
  "What did the speaker say about Justice Breyer in the 2022 State of the Union?"
);
console.log(retrievedDocs);
/*
[
Document {
pageContent: undefined,
metadata: {
title: '2022 State of the Union Address | The White House',
url: 'https://www.whitehouse.gov/state-of-the-union-2022/',
publishedDate: '2022-02-25',
author: null,
id: 'SW3SLghgYTLQKnqBC-6ftQ',
score: 0.163949653506279
}
},
Document {
pageContent: undefined,
metadata: {
title: "Read: Justice Stephen Breyer's White House remarks after announcing his retirement | CNN Politics",
url: 'https://www.cnn.com/2022/01/27/politics/transcript-stephen-breyer-retirement-remarks/index.html',
publishedDate: '2022-01-27',
author: 'CNN',
id: 'rIeqmU1L9sd28wGrqefRPA',
score: 0.1638609766960144
}
},
Document {
pageContent: undefined,
metadata: {
title: 'Sunday, January 22, 2023 - How Appealing',
url: 'https://howappealing.abovethelaw.com/2023/01/22/',
publishedDate: '2023-01-22',
author: null,
id: 'aubLpkpZWoQSN-he-hwtRg',
score: 0.15869899094104767
}
},
Document {
pageContent: undefined,
metadata: {
title: "Noting Past Divisions Retiring Justice Breyer Says It's Up to Future Generations to Make American Experiment Work",
url: 'https://www.c-span.org/video/?517531-1/noting-past-divisions-retiring-justice-breyer-future-generations-make-american-experiment-work',
publishedDate: '2022-01-27',
author: null,
id: '8pNk76nbao23bryEMD0u5g',
score: 0.15786601603031158
}
},
Document {
pageContent: undefined,
metadata: {
title: 'Monday, January 24, 2022 - How Appealing',
url: 'https://howappealing.abovethelaw.com/2022/01/24/',
publishedDate: '2022-01-24',
author: null,
id: 'pt6xlioR4bdm8kSJUQoyPA',
score: 0.1542145311832428
}
},
Document {
pageContent: undefined,
metadata: {
title: "Full transcript of Biden's State of the Union address",
url: 'https://www.axios.com/2023/02/08/sotu-2023-biden-transcript?utm_source=twitter&utm_medium=social&utm_campaign=editorial&utm_content=politics',
publishedDate: '2023-02-08',
author: 'Axios',
id: 'Dg5JepEwPwAMjgnSA_Z_NA',
score: 0.15383175015449524
}
},
Document {
pageContent: undefined,
metadata: {
title: "Read Justice Breyer's remarks on retiring and his hope in the American 'experiment'",
url: 'https://www.npr.org/2022/01/27/1076162088/read-stephen-breyer-retirement-supreme-court',
publishedDate: '2022-01-27',
author: 'NPR Staff',
id: 'WDKA1biLMREo3BsOs95SIw',
score: 0.14877735078334808
}
},
Document {
pageContent: undefined,
metadata: {
title: 'Grading My 2021 Predictions',
url: 'https://astralcodexten.substack.com/p/grading-my-2021-predictions',
publishedDate: '2022-01-24',
author: 'Scott Alexander',
id: 'jPutj4IcqgAiKSs6-eqv3g',
score: 0.14813132584095
}
},
Document {
pageContent: undefined,
metadata: {
title: '',
url: 'https://www.supremecourt.gov/oral_arguments/argument_transcripts/2021/21a240_l537.pdf',
author: null,
id: 'p97vY-5yvA2kBB9nl-7B3A',
score: 0.14450226724147797
}
},
Document {
pageContent: undefined,
metadata: {
title: 'Remarks by President Biden at a Political Event | Charleston, SC',
url: 'https://www.whitehouse.gov/briefing-room/speeches-remarks/2024/01/08/remarks-by-president-biden-at-a-political-event-charleston-sc/',
publishedDate: '2024-01-08',
author: 'The White House',
id: 'ZdPbaacRn8bgwDWv_aA6zg',
score: 0.14446410536766052
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/chroma_self_query.ts | import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { ChromaTranslator } from "@langchain/community/structured_query/chroma";
import { Chroma } from "@langchain/community/vectorstores/chroma";
import { Document } from "@langchain/core/documents";
import type { AttributeInfo } from "langchain/chains/query_constructor";
/**
 * First, we create a bunch of documents. You can load your own documents here instead.
 * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
 */
const docs = [
  new Document({
    pageContent:
      "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
    metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
  }),
  new Document({
    pageContent:
      "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
    metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
  }),
  new Document({
    pageContent:
      "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
    metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
  }),
  new Document({
    pageContent:
      "A bunch of normal-sized women are supremely wholesome and some men pine after them",
    metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
  }),
  new Document({
    pageContent: "Toys come alive and have a blast doing so",
    metadata: { year: 1995, genre: "animated" },
  }),
  new Document({
    pageContent: "Three men walk into the Zone, three men walk out of the Zone",
    metadata: {
      year: 1979,
      director: "Andrei Tarkovsky",
      genre: "science fiction",
      rating: 9.9,
    },
  }),
];
/**
 * Next, we define the attributes we want to be able to query on.
 * in this case, we want to be able to query on the genre, year, director, rating, and length of the movie.
 * We also provide a description of each attribute and the type of the attribute.
 * This is used to generate the query prompts.
 */
const attributeInfo: AttributeInfo[] = [
  {
    name: "genre",
    description: "The genre of the movie",
    type: "string or array of strings",
  },
  {
    name: "year",
    description: "The year the movie was released",
    type: "number",
  },
  {
    name: "director",
    description: "The director of the movie",
    type: "string",
  },
  {
    name: "rating",
    description: "The rating of the movie (1-10)",
    type: "number",
  },
  {
    name: "length",
    description: "The length of the movie in minutes",
    type: "number",
  },
];
/**
 * Next, we instantiate a vector store. This is where we store the embeddings of the documents.
 * We also need to provide an embeddings object. This is used to embed the documents.
 */
const embeddings = new OpenAIEmbeddings();
const llm = new OpenAI();
const documentContents = "Brief summary of a movie";
// Assumes a Chroma server is reachable with default connection settings.
const vectorStore = await Chroma.fromDocuments(docs, embeddings, {
  collectionName: "a-movie-collection",
});
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  llm,
  vectorStore,
  documentContents,
  attributeInfo,
  /**
   * We need to create a basic translator that translates the queries into a
   * filter format that the vector store can understand. We provide a basic
   * translator here, but you can create your own translator by extending BaseTranslator
   * abstract class. Note that the vector store needs to support filtering on the metadata
   * attributes you want to query on.
   */
  structuredQueryTranslator: new ChromaTranslator(),
});
/**
 * Now we can query the vector store.
 * We can ask questions like "Which movies are less than 90 minutes?" or "Which movies are rated higher than 8.5?".
 * We can also ask questions like "Which movies are either comedy or drama and are less than 90 minutes?".
 * The retriever will automatically convert these questions into queries that can be used to retrieve documents.
 */
const query1 = await selfQueryRetriever.invoke(
  "Which movies are less than 90 minutes?"
);
const query2 = await selfQueryRetriever.invoke(
  "Which movies are rated higher than 8.5?"
);
const query3 = await selfQueryRetriever.invoke(
  "Which movies are directed by Greta Gerwig?"
);
const query4 = await selfQueryRetriever.invoke(
  "Which movies are either comedy or drama and are less than 90 minutes?"
);
console.log(query1, query2, query3, query4);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/metal.ts | /* eslint-disable @typescript-eslint/no-non-null-assertion */
import Metal from "@getmetal/metal-sdk";
import { MetalRetriever } from "@langchain/community/retrievers/metal";
export const run = async () => {
  // The Metal SDK client is configured from environment variables.
  const metalClient = new Metal(
    process.env.METAL_API_KEY!,
    process.env.METAL_CLIENT_ID!,
    process.env.METAL_INDEX_ID
  );
  const retriever = new MetalRetriever({ client: metalClient });
  // Retrieve documents relevant to the query "hello" and print them.
  const docs = await retriever.invoke("hello");
  console.log(docs);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/contextual_compression.ts | import * as fs from "fs";
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { ContextualCompressionRetriever } from "langchain/retrievers/contextual_compression";
import { LLMChainExtractor } from "langchain/retrievers/document_compressors/chain_extract";
// gpt-3.5-turbo-instruct powers the chain that extracts only the
// query-relevant parts of each retrieved document.
const model = new OpenAI({
  model: "gpt-3.5-turbo-instruct",
});
const baseCompressor = LLMChainExtractor.fromLLM(model);

// Split the speech into ~1000-character chunks and index them.
const rawText = fs.readFileSync("state_of_the_union.txt", "utf8");
const docs = await new RecursiveCharacterTextSplitter({
  chunkSize: 1000,
}).createDocuments([rawText]);
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());

// Wrap the base retriever so each result is compressed before being returned.
const retriever = new ContextualCompressionRetriever({
  baseCompressor,
  baseRetriever: vectorStore.asRetriever(),
});

const retrievedDocs = await retriever.invoke(
  "What did the speaker say about Justice Breyer?"
);
console.log({ retrievedDocs });
/*
{
retrievedDocs: [
Document {
pageContent: 'One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.',
metadata: [Object]
},
Document {
pageContent: '"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service."',
metadata: [Object]
},
Document {
pageContent: 'The onslaught of state laws targeting transgender Americans and their families is wrong.',
metadata: [Object]
}
]
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/hnswlib_self_query.ts | import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { FunctionalTranslator } from "@langchain/core/structured_query";
import { Document } from "@langchain/core/documents";
import type { AttributeInfo } from "langchain/chains/query_constructor";
/**
 * First, we create a bunch of documents. You can load your own documents here instead.
 * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
 */
const docs = [
  new Document({
    pageContent:
      "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
    metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
  }),
  new Document({
    pageContent:
      "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
    metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
  }),
  new Document({
    pageContent:
      "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
    metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
  }),
  new Document({
    pageContent:
      "A bunch of normal-sized women are supremely wholesome and some men pine after them",
    metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
  }),
  new Document({
    pageContent: "Toys come alive and have a blast doing so",
    metadata: { year: 1995, genre: "animated" },
  }),
  new Document({
    pageContent: "Three men walk into the Zone, three men walk out of the Zone",
    metadata: {
      year: 1979,
      director: "Andrei Tarkovsky",
      genre: "science fiction",
      rating: 9.9,
    },
  }),
];
/**
 * Next, we define the attributes we want to be able to query on.
 * In this case, we want to be able to query on the genre, year, director, rating, and length of the movie.
 * We also provide a description of each attribute and the type of the attribute.
 * This is used to generate the query prompts.
 */
const attributeInfo: AttributeInfo[] = [
  {
    name: "genre",
    description: "The genre of the movie",
    type: "string or array of strings",
  },
  {
    name: "year",
    description: "The year the movie was released",
    type: "number",
  },
  {
    name: "director",
    description: "The director of the movie",
    type: "string",
  },
  {
    name: "rating",
    description: "The rating of the movie (1-10)",
    type: "number",
  },
  {
    // NOTE(review): none of the sample documents above carry a "length"
    // field, so filters on it will match nothing — confirm this is intended.
    name: "length",
    description: "The length of the movie in minutes",
    type: "number",
  },
];
/**
 * Next, we instantiate a vector store. This is where we store the embeddings of the documents.
 * We also need to provide an embeddings object. This is used to embed the documents.
 */
const embeddings = new OpenAIEmbeddings();
const llm = new OpenAI();
const documentContents = "Brief summary of a movie";
const vectorStore = await HNSWLib.fromDocuments(docs, embeddings);
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  llm,
  vectorStore,
  documentContents,
  attributeInfo,
  /**
   * We need to use a translator that translates the queries into a
   * filter format that the vector store can understand. We provide a basic
   * translator here, but you can create your own translator by extending the
   * BaseTranslator abstract class. Note that the vector store needs to support
   * filtering on the metadata attributes you want to query on.
   */
  structuredQueryTranslator: new FunctionalTranslator(),
});
/**
 * Now we can query the vector store.
 * We can ask questions like "Which movies are less than 90 minutes?" or "Which movies are rated higher than 8.5?".
 * We can also ask questions like "Which movies are either comedy or drama and are less than 90 minutes?".
 * The retriever will automatically convert these questions into queries that can be used to retrieve documents.
 */
const query1 = await selfQueryRetriever.invoke(
  "Which movies are less than 90 minutes?"
);
const query2 = await selfQueryRetriever.invoke(
  "Which movies are rated higher than 8.5?"
);
const query3 = await selfQueryRetriever.invoke(
  "Which movies are directed by Greta Gerwig?"
);
const query4 = await selfQueryRetriever.invoke(
  "Which movies are either comedy or drama and are less than 90 minutes?"
);
console.log(query1, query2, query3, query4);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/parent_document_retriever_score_threshold.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { InMemoryStore } from "@langchain/core/stores";
import { ParentDocumentRetriever } from "langchain/retrievers/parent_document";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { ScoreThresholdRetriever } from "langchain/retrievers/score_threshold";
// Vector store indexes the small child chunks.
const vectorstore = new MemoryVectorStore(new OpenAIEmbeddings());
// NOTE(review): the byte store presumably holds the serialized parent
// documents keyed by id — confirm against ParentDocumentRetriever docs.
const byteStore = new InMemoryStore<Uint8Array>();
// A score-threshold retriever is used for the child chunks instead of a
// plain similarity search.
const childDocumentRetriever = ScoreThresholdRetriever.fromVectorStore(
  vectorstore,
  {
    minSimilarityScore: 0.01, // Essentially no threshold
    maxK: 1, // Only return the top result
  }
);
const retriever = new ParentDocumentRetriever({
  vectorstore,
  byteStore,
  childDocumentRetriever,
  // Optional, not required if you're already passing in split documents
  parentSplitter: new RecursiveCharacterTextSplitter({
    chunkOverlap: 0,
    chunkSize: 500,
  }),
  childSplitter: new RecursiveCharacterTextSplitter({
    chunkOverlap: 0,
    chunkSize: 50,
  }),
});
const textLoader = new TextLoader("../examples/state_of_the_union.txt");
const parentDocuments = await textLoader.load();
// We must add the parent documents via the retriever's addDocuments method
await retriever.addDocuments(parentDocuments);
const retrievedDocs = await retriever.invoke("justice breyer");
// Retrieved chunk is the larger parent chunk
console.log(retrievedDocs);
/*
[
Document {
pageContent: 'Tonight, I call on the Senate to pass — pass the Freedom to Vote Act. Pass the John Lewis Act — Voting Rights Act. And while you’re at it, pass the DISCLOSE Act so Americans know who is funding our elections.\n' +
'\n' +
'Look, tonight, I’d — I’d like to honor someone who has dedicated his life to serve this country: Justice Breyer — an Army veteran, Constitutional scholar, retiring Justice of the United States Supreme Court.',
metadata: { source: '../examples/state_of_the_union.txt', loc: [Object] }
},
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/vespa.ts | import { VespaRetriever } from "@langchain/community/retrievers/vespa";
export const run = async () => {
  // The query body follows Vespa's search API format (YQL plus options).
  const retriever = new VespaRetriever({
    url: "https://doc-search.vespa.oath.cloud",
    auth: false,
    query_body: {
      yql: "select content from paragraph where userQuery()",
      hits: 5,
      ranking: "documentation",
      locale: "en-us",
    },
    // Name of the document field that holds the text to return.
    content_field: "content",
  });
  const result = await retriever.invoke("what is vespa?");
  console.log(result);
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/parent_document_retriever_rerank.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { CohereRerank } from "@langchain/cohere";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { InMemoryStore } from "@langchain/core/stores";
import {
ParentDocumentRetriever,
type SubDocs,
} from "langchain/retrievers/parent_document";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
// init Cohere Rerank. Remember to add COHERE_API_KEY to your .env
// Cohere reranker; topN caps how many reranked documents are kept.
const reranker = new CohereRerank({
  topN: 50,
  model: "rerank-multilingual-v2.0",
});
/**
 * Builds a post-compression filter that drops documents whose reranker
 * `metadata.relevanceScore` falls below the given threshold.
 *
 * @param relevanceScore - Minimum score a document must have to be kept.
 *   When omitted, the returned function passes documents through unchanged.
 * @returns A filter function suitable for `documentCompressorFilteringFn`.
 */
export function documentCompressorFiltering({
  relevanceScore,
}: { relevanceScore?: number } = {}) {
  return (docs: SubDocs) => {
    // Explicit `!= null` (not truthiness) so a configured threshold of 0
    // is honored instead of silently disabling filtering.
    if (relevanceScore == null) {
      return docs;
    }
    // Documents without a score default to 1 so they always pass; `??`
    // (rather than `||`) keeps a genuine score of 0 from being clobbered.
    return docs.filter(
      (doc) => (doc?.metadata?.relevanceScore ?? 1) >= relevanceScore
    );
  };
}
// Parent-chunk splitter for the two tiny demo documents.
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 500,
  chunkOverlap: 0,
});
const jimDocs = await splitter.createDocuments([`Jim favorite color is blue.`]);
const pamDocs = await splitter.createDocuments([`Pam favorite color is red.`]);
const vectorstore = await HNSWLib.fromDocuments([], new OpenAIEmbeddings());
const byteStore = new InMemoryStore<Uint8Array>();
const retriever = new ParentDocumentRetriever({
  vectorstore,
  byteStore,
  // Very small chunks for demo purposes.
  // Use a bigger chunk size for serious use-cases.
  childSplitter: new RecursiveCharacterTextSplitter({
    chunkSize: 10,
    chunkOverlap: 0,
  }),
  childK: 50,
  parentK: 5,
  // Attach the Cohere reranker plus the score-based filter defined above.
  documentCompressor: reranker,
  documentCompressorFilteringFn: documentCompressorFiltering({
    relevanceScore: 0.3,
  }),
});
const docs = jimDocs.concat(pamDocs);
await retriever.addDocuments(docs);
// Searches the vector store and returns documents that are already reranked,
// sorted, and filtered by the minimum relevance score configured above.
const retrievedDocs = await retriever.invoke("What is Pam's favorite color?");
// Pam's favorite color is returned first!
console.log(JSON.stringify(retrievedDocs, null, 2));
/*
[
{
"pageContent": "My favorite color is red.",
"metadata": {
"relevanceScore": 0.9
"loc": {
"lines": {
"from": 1,
"to": 1
}
}
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/ensemble_retriever.ts | import { EnsembleRetriever } from "langchain/retrievers/ensemble";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";
import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers";
import { Document } from "@langchain/core/documents";
/**
 * A toy retriever that does plain substring matching over an in-memory
 * list of documents.
 */
class SimpleCustomRetriever extends BaseRetriever {
  lc_namespace = [];

  documents: Document[];

  constructor(fields: { documents: Document[] } & BaseRetrieverInput) {
    super(fields);
    this.documents = fields.documents;
  }

  /** Returns every stored document whose page content contains the query. */
  async _getRelevantDocuments(query: string): Promise<Document[]> {
    const matches: Document[] = [];
    for (const doc of this.documents) {
      if (doc.pageContent.includes(query)) {
        matches.push(doc);
      }
    }
    return matches;
  }
}
// Corpus for the keyword retriever (exact substring matching).
const docs1 = [
  new Document({ pageContent: "I like apples", metadata: { source: 1 } }),
  new Document({ pageContent: "I like oranges", metadata: { source: 1 } }),
  new Document({
    pageContent: "apples and oranges are fruits",
    metadata: { source: 1 },
  }),
];
const keywordRetriever = new SimpleCustomRetriever({ documents: docs1 });
// Corpus for the embedding-based retriever.
const docs2 = [
  new Document({ pageContent: "You like apples", metadata: { source: 2 } }),
  new Document({ pageContent: "You like oranges", metadata: { source: 2 } }),
];
const vectorstore = await MemoryVectorStore.fromDocuments(
  docs2,
  new OpenAIEmbeddings()
);
const vectorstoreRetriever = vectorstore.asRetriever();
// Combine both retrievers, weighting their results equally.
const retriever = new EnsembleRetriever({
  retrievers: [vectorstoreRetriever, keywordRetriever],
  weights: [0.5, 0.5],
});
const query = "apples";
const retrievedDocs = await retriever.invoke(query);
console.log(retrievedDocs);
/*
[
Document { pageContent: 'You like apples', metadata: { source: 2 } },
Document { pageContent: 'I like apples', metadata: { source: 1 } },
Document { pageContent: 'You like oranges', metadata: { source: 2 } },
Document {
pageContent: 'apples and oranges are fruits',
metadata: { source: 1 }
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/retrievers/embeddings_filter.ts | import * as fs from "fs";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings } from "@langchain/openai";
import { ContextualCompressionRetriever } from "langchain/retrievers/contextual_compression";
import { EmbeddingsFilter } from "langchain/retrievers/document_compressors/embeddings_filter";
// Drop retrieved chunks whose embedding similarity to the query is
// below 0.8.
const baseCompressor = new EmbeddingsFilter({
  embeddings: new OpenAIEmbeddings(),
  similarityThreshold: 0.8,
});

// Split the speech into ~1000-character chunks and index them.
const rawText = fs.readFileSync("state_of_the_union.txt", "utf8");
const docs = await new RecursiveCharacterTextSplitter({
  chunkSize: 1000,
}).createDocuments([rawText]);
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());

// Wrap the base retriever so results pass through the embeddings filter.
const retriever = new ContextualCompressionRetriever({
  baseCompressor,
  baseRetriever: vectorStore.asRetriever(),
});

const retrievedDocs = await retriever.invoke(
  "What did the speaker say about Justice Breyer?"
);
console.log({ retrievedDocs });
/*
{
retrievedDocs: [
Document {
pageContent: 'And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n' +
'\n' +
'A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n' +
'\n' +
'And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n' +
'\n' +
'We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n' +
'\n' +
'We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n' +
'\n' +
'We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster.',
metadata: [Object]
},
Document {
pageContent: 'In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n' +
'\n' +
'We cannot let this happen. \n' +
'\n' +
'Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n' +
'\n' +
'Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n' +
'\n' +
'One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n' +
'\n' +
'And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.',
metadata: [Object]
}
]
}
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_transformers/html_to_text.ts | import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { HtmlToTextTransformer } from "@langchain/community/document_transformers/html_to_text";
// Fetch the Hacker News comment page as HTML documents.
const loader = new CheerioWebBaseLoader(
  "https://news.ycombinator.com/item?id=34817881"
);
const rawDocs = await loader.load();

const htmlSplitter = RecursiveCharacterTextSplitter.fromLanguage("html");
const htmlToText = new HtmlToTextTransformer();

// Split on HTML structure first, then convert each chunk to plain text.
const pipeline = htmlSplitter.pipe(htmlToText);
const newDocuments = await pipeline.invoke(rawDocs);
console.log(newDocuments);
/*
[
Document {
pageContent: 'Hacker News new | past | comments | ask | show | jobs | submit login What Lights\n' +
'the Universe’s Standard Candles? (quantamagazine.org) 75 points by Amorymeltzer\n' +
'5 months ago | hide | past | favorite | 6 comments delta_p_delta_x 5 months ago\n' +
'| next [–] Astrophysical and cosmological simulations are often insightful.\n' +
"They're also very cross-disciplinary; besides the obvious astrophysics, there's\n" +
'networking and sysadmin, parallel computing and algorithm theory (so that the\n' +
'simulation programs are actually fast but still accurate), systems design, and\n' +
'even a bit of graphic design for the visualisations.Some of my favourite\n' +
'simulation projects:- IllustrisTNG:',
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
},
Document {
pageContent: 'that the simulation programs are actually fast but still accurate), systems\n' +
'design, and even a bit of graphic design for the visualisations.Some of my\n' +
'favourite simulation projects:- IllustrisTNG: https://www.tng-project.org/-\n' +
'SWIFT: https://swift.dur.ac.uk/- CO5BOLD:\n' +
'https://www.astro.uu.se/~bf/co5bold_main.html (which produced these animations\n' +
'of a red-giant star: https://www.astro.uu.se/~bf/movie/AGBmovie.html)-\n' +
'AbacusSummit: https://abacussummit.readthedocs.io/en/latest/And I can add the\n' +
'simulations in the article, too. froeb 5 months ago | parent | next [–]\n' +
'Supernova simulations are especially interesting too. I have heard them\n' +
'described as the only time in physics when all 4 of the fundamental forces are\n' +
'important. The explosion can be quite finicky too. If I remember right, you\n' +
"can't get supernova to explode",
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
},
Document {
pageContent: 'heard them described as the only time in physics when all 4 of the fundamental\n' +
'forces are important. The explosion can be quite finicky too. If I remember\n' +
"right, you can't get supernova to explode properly in 1D simulations, only in\n" +
'higher dimensions. This was a mystery until the realization that turbulence is\n' +
'necessary for supernova to trigger--there is no turbulent flow in 1D. andrewflnr\n' +
"5 months ago | prev | next [–] Whoa. I didn't know the accretion theory of Ia\n" +
'supernovae was dead, much less that it had been since 2011. andreareina 5 months\n' +
'ago | prev | next [–] This seems to be the paper',
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
},
Document {
pageContent: 'andreareina 5 months ago | prev | next [–] This seems to be the paper\n' +
'https://academic.oup.com/mnras/article/517/4/5260/6779709 andreareina 5 months\n' +
"ago | prev [–] Wouldn't double detonation show up as variance in the brightness?\n" +
'yencabulator 5 months ago | parent [–] Or widening of the peak. If one type Ia\n' +
'supernova goes 1,2,3,2,1, the sum of two could go 1+0=1 2+1=3 3+2=5 2+3=5 1+2=3\n' +
'0+1=1 Guidelines | FAQ | Lists |',
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
},
Document {
pageContent: 'the sum of two could go 1+0=1 2+1=3 3+2=5 2+3=5 1+2=3 0+1=1 Guidelines | FAQ |\n' +
'Lists | API | Security | Legal | Apply to YC | Contact Search:',
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_transformers/metadata_tagger_custom_prompt.ts | import { z } from "zod";
import { createMetadataTaggerFromZod } from "langchain/document_transformers/openai_functions";
import { ChatOpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { PromptTemplate } from "@langchain/core/prompts";
// Custom prompt for the tagging chain; "{input}" is filled with each
// document's page content. The template text below is part of the prompt
// the model sees, so it must stay exactly as written.
const taggingChainTemplate = `Extract the desired information from the following passage.
Anonymous critics are actually Roger Ebert.
Passage:
{input}
`;
// Zod schema of the metadata fields to extract from each document.
const zodSchema = z.object({
  movie_title: z.string(),
  critic: z.string(),
  tone: z.enum(["positive", "negative"]),
  rating: z
    .optional(z.number())
    .describe("The number of stars the critic rated the movie"),
});
const metadataTagger = createMetadataTaggerFromZod(zodSchema, {
  llm: new ChatOpenAI({ model: "gpt-3.5-turbo" }),
  prompt: PromptTemplate.fromTemplate(taggingChainTemplate),
});
const documents = [
  new Document({
    pageContent:
      "Review of The Bee Movie\nBy Roger Ebert\nThis is the greatest movie ever made. 4 out of 5 stars.",
  }),
  new Document({
    pageContent:
      "Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars.",
    // Pre-existing metadata is kept alongside the extracted fields
    // (see the expected output below).
    metadata: { reliable: false },
  }),
];
const taggedDocuments = await metadataTagger.transformDocuments(documents);
console.log(taggedDocuments);
/*
[
Document {
pageContent: 'Review of The Bee Movie\n' +
'By Roger Ebert\n' +
'This is the greatest movie ever made. 4 out of 5 stars.',
metadata: {
movie_title: 'The Bee Movie',
critic: 'Roger Ebert',
tone: 'positive',
rating: 4
}
},
Document {
pageContent: 'Review of The Godfather\n' +
'By Anonymous\n' +
'\n' +
'This movie was super boring. 1 out of 5 stars.',
metadata: {
movie_title: 'The Godfather',
critic: 'Roger Ebert',
tone: 'negative',
rating: 1,
reliable: false
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_transformers/metadata_tagger.ts | import { z } from "zod";
import { createMetadataTaggerFromZod } from "langchain/document_transformers/openai_functions";
import { ChatOpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
// Zod schema of the metadata fields to extract from each document.
const zodSchema = z.object({
  movie_title: z.string(),
  critic: z.string(),
  tone: z.enum(["positive", "negative"]),
  rating: z
    .optional(z.number())
    .describe("The number of stars the critic rated the movie"),
});
const metadataTagger = createMetadataTaggerFromZod(zodSchema, {
  llm: new ChatOpenAI({ model: "gpt-3.5-turbo" }),
});
const documents = [
  new Document({
    pageContent:
      "Review of The Bee Movie\nBy Roger Ebert\nThis is the greatest movie ever made. 4 out of 5 stars.",
  }),
  new Document({
    pageContent:
      "Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars.",
    // Pre-existing metadata is kept alongside the extracted fields
    // (see the expected output below).
    metadata: { reliable: false },
  }),
];
const taggedDocuments = await metadataTagger.transformDocuments(documents);
console.log(taggedDocuments);
/*
[
Document {
pageContent: 'Review of The Bee Movie\n' +
'By Roger Ebert\n' +
'This is the greatest movie ever made. 4 out of 5 stars.',
metadata: {
movie_title: 'The Bee Movie',
critic: 'Roger Ebert',
tone: 'positive',
rating: 4
}
},
Document {
pageContent: 'Review of The Godfather\n' +
'By Anonymous\n' +
'\n' +
'This movie was super boring. 1 out of 5 stars.',
metadata: {
movie_title: 'The Godfather',
critic: 'Anonymous',
tone: 'negative',
rating: 1,
reliable: false
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_transformers/mozilla_readability.ts | import { HTMLWebBaseLoader } from "@langchain/community/document_loaders/web/html";
import { MozillaReadabilityTransformer } from "@langchain/community/document_transformers/mozilla_readability";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
// Fetch the raw HTML of the Hacker News comment page.
const pageLoader = new HTMLWebBaseLoader(
  "https://news.ycombinator.com/item?id=34817881"
);
const rawDocs = await pageLoader.load();

const readability = new MozillaReadabilityTransformer();
const htmlSplitter = RecursiveCharacterTextSplitter.fromLanguage("html");

// Readability runs BEFORE splitting here (transformer piped into splitter),
// unlike the html-to-text example which splits first.
const pipeline = readability.pipe(htmlSplitter);
const newDocuments = await pipeline.invoke(rawDocs);
console.log(newDocuments);
/*
[
Document {
pageContent: 'Hacker News new | past | comments | ask | show | jobs | submit login What Lights\n' +
'the Universe’s Standard Candles? (quantamagazine.org) 75 points by Amorymeltzer\n' +
'5 months ago | hide | past | favorite | 6 comments delta_p_delta_x 5 months ago\n' +
'| next [–] Astrophysical and cosmological simulations are often insightful.\n' +
"They're also very cross-disciplinary; besides the obvious astrophysics, there's\n" +
'networking and sysadmin, parallel computing and algorithm theory (so that the\n' +
'simulation programs are actually fast but still accurate), systems design, and\n' +
'even a bit of graphic design for the visualisations.Some of my favourite\n' +
'simulation projects:- IllustrisTNG:',
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
},
Document {
pageContent: 'that the simulation programs are actually fast but still accurate), systems\n' +
'design, and even a bit of graphic design for the visualisations.Some of my\n' +
'favourite simulation projects:- IllustrisTNG: https://www.tng-project.org/-\n' +
'SWIFT: https://swift.dur.ac.uk/- CO5BOLD:\n' +
'https://www.astro.uu.se/~bf/co5bold_main.html (which produced these animations\n' +
'of a red-giant star: https://www.astro.uu.se/~bf/movie/AGBmovie.html)-\n' +
'AbacusSummit: https://abacussummit.readthedocs.io/en/latest/And I can add the\n' +
'simulations in the article, too. froeb 5 months ago | parent | next [–]\n' +
'Supernova simulations are especially interesting too. I have heard them\n' +
'described as the only time in physics when all 4 of the fundamental forces are\n' +
'important. The explosion can be quite finicky too. If I remember right, you\n' +
"can't get supernova to explode",
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
},
Document {
pageContent: 'heard them described as the only time in physics when all 4 of the fundamental\n' +
'forces are important. The explosion can be quite finicky too. If I remember\n' +
"right, you can't get supernova to explode properly in 1D simulations, only in\n" +
'higher dimensions. This was a mystery until the realization that turbulence is\n' +
'necessary for supernova to trigger--there is no turbulent flow in 1D. andrewflnr\n' +
"5 months ago | prev | next [–] Whoa. I didn't know the accretion theory of Ia\n" +
'supernovae was dead, much less that it had been since 2011. andreareina 5 months\n' +
'ago | prev | next [–] This seems to be the paper',
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
},
Document {
pageContent: 'andreareina 5 months ago | prev | next [–] This seems to be the paper\n' +
'https://academic.oup.com/mnras/article/517/4/5260/6779709 andreareina 5 months\n' +
"ago | prev [–] Wouldn't double detonation show up as variance in the brightness?\n" +
'yencabulator 5 months ago | parent [–] Or widening of the peak. If one type Ia\n' +
'supernova goes 1,2,3,2,1, the sum of two could go 1+0=1 2+1=3 3+2=5 2+3=5 1+2=3\n' +
'0+1=1 Guidelines | FAQ | Lists |',
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
},
Document {
pageContent: 'the sum of two could go 1+0=1 2+1=3 3+2=5 2+3=5 1+2=3 0+1=1 Guidelines | FAQ |\n' +
'Lists | API | Security | Legal | Apply to YC | Contact Search:',
metadata: {
source: 'https://news.ycombinator.com/item?id=34817881',
loc: [Object]
}
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/imsdb.ts | import { IMSDBLoader } from "@langchain/community/document_loaders/web/imsdb";
export const run = async () => {
  // Load the screenplay page from IMSDb as LangChain documents.
  const scriptUrl = "https://imsdb.com/scripts/BlacKkKlansman.html";
  const docs = await new IMSDBLoader(scriptUrl).load();
  console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/apify_dataset_new.ts | import { ApifyDatasetLoader } from "@langchain/community/document_loaders/web/apify_dataset";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
// Replace the placeholders with real credentials, or rely on the
// corresponding environment variables instead.
const APIFY_API_TOKEN = "YOUR-APIFY-API-TOKEN"; // or set as process.env.APIFY_API_TOKEN
const OPENAI_API_KEY = "YOUR-OPENAI-API-KEY"; // or set as process.env.OPENAI_API_KEY
/*
 * datasetMappingFunction is a function that maps your Apify dataset format to LangChain documents.
 * In the below example, the Apify dataset format looks like this:
 * {
 * "url": "https://apify.com",
 * "text": "Apify is the best web scraping and automation platform."
 * }
 */
// Calls the Apify website-content-crawler actor and loads the resulting
// dataset items as LangChain documents.
const loader = await ApifyDatasetLoader.fromActorCall(
  "apify/website-content-crawler",
  {
    maxCrawlPages: 10,
    crawlerType: "cheerio",
    startUrls: [{ url: "https://js.langchain.com/docs/" }],
  },
  {
    datasetMappingFunction: (item) =>
      new Document({
        pageContent: (item.text || "") as string,
        metadata: { source: item.url },
      }),
    clientOptions: {
      token: APIFY_API_TOKEN,
    },
  }
);
const docs = await loader.load();
// Index the crawled pages in an in-memory HNSW vector store.
const vectorStore = await HNSWLib.fromDocuments(
  docs,
  new OpenAIEmbeddings({ apiKey: OPENAI_API_KEY })
);
const model = new ChatOpenAI({
  temperature: 0,
  apiKey: OPENAI_API_KEY,
});
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "Answer the user's questions based on the below context:\n\n{context}",
  ],
  ["human", "{input}"],
]);
// "Stuff" chain: concatenates the retrieved docs into the {context} slot.
const combineDocsChain = await createStuffDocumentsChain({
  llm: model,
  prompt: questionAnsweringPrompt,
});
const chain = await createRetrievalChain({
  retriever: vectorStore.asRetriever(),
  combineDocsChain,
});
const res = await chain.invoke({ input: "What is LangChain?" });
console.log(res.answer);
// Print the source URL of each document used to build the answer.
console.log(res.context.map((doc) => doc.metadata.source));
/*
LangChain is a framework for developing applications powered by language models.
[
'https://js.langchain.com/docs/',
'https://js.langchain.com/docs/modules/chains/',
'https://js.langchain.com/docs/modules/chains/llmchain/',
'https://js.langchain.com/docs/category/functions-4'
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/unstructured_directory.ts | import { UnstructuredDirectoryLoader } from "@langchain/community/document_loaders/fs/unstructured";
// Load every file in the example_data directory through the Unstructured API.
const directoryLoader = new UnstructuredDirectoryLoader(
  "langchain/src/document_loaders/tests/example_data",
  {
    apiKey: "MY_API_KEY",
  }
);
const docs = await directoryLoader.load();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/pdf_directory.ts | import { DirectoryLoader } from "langchain/document_loaders/fs/directory";
import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
/* Load all PDFs within the specified directory */
const directoryLoader = new DirectoryLoader(
"src/document_loaders/example_data/",
{
".pdf": (path: string) => new PDFLoader(path),
}
);
const docs = await directoryLoader.load();
console.log({ docs });
/* Additional steps : Split text into chunks with any TextSplitter. You can then use it as context or save it to memory afterwards. */
const textSplitter = new RecursiveCharacterTextSplitter({
chunkSize: 1000,
chunkOverlap: 200,
});
const splitDocs = await textSplitter.splitDocuments(docs);
console.log({ splitDocs });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/cheerio_web.ts | import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio";
// Demo: scrape a Hacker News thread with Cheerio and log the documents.
export const run = async () => {
  const threadUrl = "https://news.ycombinator.com/item?id=34817881";
  const webLoader = new CheerioWebBaseLoader(threadUrl);
  const docs = await webLoader.load();
  console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/obsidian.ts | import { ObsidianLoader } from "@langchain/community/document_loaders/fs/obsidian";
// Demo: load an Obsidian vault from disk and log the resulting documents.
export const run = async () => {
  const vaultPath = "src/document_loaders/example_data/obsidian";
  const vaultLoader = new ObsidianLoader(vaultPath);
  const docs = await vaultLoader.load();
  console.log({ docs });
};
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/azure_blob_storage_file.ts | import { AzureBlobStorageFileLoader } from "@langchain/community/document_loaders/web/azure_blob_storage_file";
// Download a single blob from Azure Blob Storage and parse it with the
// Unstructured API.
const azureConfig = {
  connectionString: "",
  container: "container_name",
  blobName: "example.txt",
};
const unstructuredConfig = {
  apiUrl: "http://localhost:8000/general/v0/general",
  apiKey: "", // this will be soon required
};
const blobLoader = new AzureBlobStorageFileLoader({
  azureConfig,
  unstructuredConfig,
});
const docs = await blobLoader.load();
console.log(docs);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/notionapi.ts | import { NotionAPILoader } from "@langchain/community/document_loaders/web/notionapi";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
// A page's contents is usually more than 1000 characters, so it is split
// into multiple documents (important for vectorization).
const textSplitter = new RecursiveCharacterTextSplitter();

// Load a page; child pages are all returned as separate documents.
const pageLoader = new NotionAPILoader({
  clientOptions: {
    auth: "<NOTION_INTEGRATION_TOKEN>",
  },
  id: "<PAGE_ID>",
  type: "page",
});
const pageDocs = await pageLoader.loadAndSplit(textSplitter);
console.log({ pageDocs });

// Load a database; each row becomes its own document, with all row
// properties attached as metadata.
const dbLoader = new NotionAPILoader({
  clientOptions: {
    auth: "<NOTION_INTEGRATION_TOKEN>",
  },
  id: "<DATABASE_ID>",
  type: "database",
  onDocumentLoaded: (current, total, currentTitle) => {
    console.log(`Loaded Page: ${currentTitle} (${current}/${total})`);
  },
  callerOptions: {
    maxConcurrency: 64, // Default value
  },
  propertiesAsHeader: true, // Prepends a front matter header of the page properties to the page contents
});
// Database rows are usually under 1000 characters, so no splitting here.
const dbDocs = await dbLoader.load();
console.log({ dbDocs });
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/parse_sitemap.ts | import { SitemapLoader } from "@langchain/community/document_loaders/web/sitemap";
const loader = new SitemapLoader("https://www.langchain.com/");
const sitemap = await loader.parseSitemap();
console.log(sitemap);
/**
[
{
loc: 'https://www.langchain.com/blog-detail/starting-a-career-in-design',
changefreq: '',
lastmod: '',
priority: ''
},
{
loc: 'https://www.langchain.com/blog-detail/building-a-navigation-component',
changefreq: '',
lastmod: '',
priority: ''
},
{
loc: 'https://www.langchain.com/blog-detail/guide-to-creating-a-website',
changefreq: '',
lastmod: '',
priority: ''
},
{
loc: 'https://www.langchain.com/page-1/terms-and-conditions',
changefreq: '',
lastmod: '',
priority: ''
},
...42 more items
]
*/
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/document_loaders/openai_whisper_audio.ts | import { OpenAIWhisperAudio } from "@langchain/community/document_loaders/fs/openai_whisper_audio";
// Transcribe a local audio file with OpenAI Whisper and log the documents.
const audioPath = "./src/document_loaders/example_data/test.mp3";
const whisperLoader = new OpenAIWhisperAudio(audioPath);
const docs = await whisperLoader.load();
console.log(docs);
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.