index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/supabase.ts | import { SupabaseVectorStore } from "@langchain/community/vectorstores/supabase";
import { OpenAIEmbeddings } from "@langchain/openai";
import { createClient } from "@supabase/supabase-js";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
// Required Supabase connection settings; fail fast when either is missing.
const privateKey = process.env.SUPABASE_PRIVATE_KEY;
if (!privateKey) throw new Error(`Expected env var SUPABASE_PRIVATE_KEY`);
const url = process.env.SUPABASE_URL;
if (!url) throw new Error(`Expected env var SUPABASE_URL`);
/**
 * Seeds a Supabase vector store with three sample texts and runs a
 * single similarity search against it.
 */
export const run = async () => {
  const supabase = createClient(url, privateKey);
  const texts = ["Hello world", "Bye bye", "What's this?"];
  const metadatas = [{ id: 2 }, { id: 1 }, { id: 3 }];
  const vectorStore = await SupabaseVectorStore.fromTexts(
    texts,
    metadatas,
    new OpenAIEmbeddings(),
    {
      client: supabase,
      tableName: "documents",
      queryName: "match_documents",
    }
  );
  // Fetch the single closest match and print it.
  console.log(await vectorStore.similaritySearch("Hello world", 1));
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/turbopuffer.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { TurbopufferVectorStore } from "@langchain/community/vectorstores/turbopuffer";
// Connect to Turbopuffer; the API key is read from the environment.
const embeddings = new OpenAIEmbeddings();
const store = new TurbopufferVectorStore(embeddings, {
  apiKey: process.env.TURBOPUFFER_API_KEY,
  namespace: "my-namespace",
});
const baseTimestamp = new Date().getTime();
// Turbopuffer currently only supports string metadata values, so
// timestamps are stored stringified.
const stampAt = (offset: number) => (baseTimestamp + offset).toString();
// Insert the initial batch and remember the assigned ids for the upsert below.
const ids = await store.addDocuments([
  { pageContent: "some content", metadata: { created_at: stampAt(0) } },
  { pageContent: "hi", metadata: { created_at: stampAt(1) } },
  { pageContent: "bye", metadata: { created_at: stampAt(2) } },
  { pageContent: "what's this", metadata: { created_at: stampAt(3) } },
]);
// Plain similarity search.
const results = await store.similaritySearch("hello", 1);
console.log(results);
/*
  [
    Document {
      pageContent: 'hi',
      metadata: { created_at: '1705519164987' }
    }
  ]
*/
// Metadata-filtered search; see
// https://turbopuffer.com/docs/reference/query#filter-parameters for
// the supported filter operators.
const results2 = await store.similaritySearch("hello", 1, {
  created_at: [["Eq", stampAt(3)]],
});
console.log(results2);
/*
  [
    Document {
      pageContent: "what's this",
      metadata: { created_at: '1705519164989' }
    }
  ]
*/
// Re-adding documents with explicit ids performs an upsert.
await store.addDocuments(
  [
    { pageContent: "changed", metadata: { created_at: stampAt(0) } },
    { pageContent: "hi changed", metadata: { created_at: stampAt(1) } },
    { pageContent: "bye changed", metadata: { created_at: stampAt(2) } },
    {
      pageContent: "what's this changed",
      metadata: { created_at: stampAt(3) },
    },
  ],
  { ids }
);
// The filtered search now returns the overwritten content.
const results3 = await store.similaritySearch("hello", 10, {
  created_at: [["Eq", stampAt(3)]],
});
console.log(results3);
/*
  [
    Document {
      pageContent: "what's this changed",
      metadata: { created_at: '1705519164989' }
    }
  ]
*/
// Drop every vector in the namespace.
await store.delete({
  deleteIndex: true,
});
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/googlevertexai.ts | /* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { SyntheticEmbeddings } from "@langchain/core/utils/testing";
import { GoogleCloudStorageDocstore } from "@langchain/community/stores/doc/gcs";
import {
MatchingEngineArgs,
MatchingEngine,
IdDocument,
Restriction,
} from "@langchain/community/vectorstores/googlevertexai";
import { Document } from "@langchain/core/documents";
/**
 * Demonstrates the Vertex AI Matching Engine vector store: a simple
 * add/search/delete round trip, then metadata-restricted searches.
 */
export const run = async () => {
  // All three pieces of infrastructure must be configured up front.
  const requiredEnv = [
    process.env.GOOGLE_VERTEXAI_MATCHINGENGINE_INDEX,
    process.env.GOOGLE_VERTEXAI_MATCHINGENGINE_INDEXENDPOINT,
    process.env.GOOGLE_CLOUD_STORAGE_BUCKET,
  ];
  if (requiredEnv.some((value) => !value)) {
    throw new Error(
      "GOOGLE_VERTEXAI_MATCHINGENGINE_INDEX, GOOGLE_VERTEXAI_MATCHINGENGINE_INDEXENDPOINT, and GOOGLE_CLOUD_STORAGE_BUCKET must be set."
    );
  }
  // Synthetic embeddings avoid calling a real embedding model.
  const embeddings = new SyntheticEmbeddings({
    vectorSize: Number.parseInt(
      process.env.SYNTHETIC_EMBEDDINGS_VECTOR_SIZE ?? "768",
      10
    ),
  });
  // Document bodies live in Cloud Storage; the index only holds vectors.
  const docstore = new GoogleCloudStorageDocstore({
    bucket: process.env.GOOGLE_CLOUD_STORAGE_BUCKET!,
  });
  const engineConfig: MatchingEngineArgs = {
    index: process.env.GOOGLE_VERTEXAI_MATCHINGENGINE_INDEX!,
    indexEndpoint: process.env.GOOGLE_VERTEXAI_MATCHINGENGINE_INDEXENDPOINT!,
    apiVersion: "v1beta1",
    docstore,
  };
  const engine = new MatchingEngine(embeddings, engineConfig);
  // Add a single document, search it back (results carry an id field),
  // then delete it again by id.
  await engine.addDocuments([new Document({ pageContent: "this" })]);
  const oldResults: IdDocument[] = await engine.similaritySearch("this");
  console.log("simple results", oldResults);
  /*
    [
      Document {
        pageContent: 'this',
        metadata: {},
        id: 'c05d4249-9ddc-4ed9-8b0c-adf344500c2b'
      }
    ]
  */
  await engine.delete({ ids: oldResults.map((result) => result.id!) });
  // Re-populate with documents that carry filterable metadata.
  const documents = [
    ["this apple", "red", "edible"],
    ["this blueberry", "blue", "edible"],
    ["this firetruck", "red", "machine"],
  ].map(
    ([pageContent, color, category]) =>
      new Document({ pageContent, metadata: { color, category } })
  );
  await engine.addDocuments(documents);
  // Restriction: color == red
  const redFilter: Restriction[] = [
    {
      namespace: "color",
      allowList: ["red"],
    },
  ];
  const redResults = await engine.similaritySearch("this", 4, redFilter);
  console.log("red results", redResults);
  /*
    [
      Document {
        pageContent: 'this apple',
        metadata: { color: 'red', category: 'edible' },
        id: '724ff599-31ea-4094-8d60-158faf3c3f32'
      },
      Document {
        pageContent: 'this firetruck',
        metadata: { color: 'red', category: 'machine' },
        id: 'a3c039f3-4ca1-43b3-97d8-c33dfe75bd31'
      }
    ]
  */
  // Restriction: color == red AND category != edible
  const redNotEdibleFilter: Restriction[] = [
    {
      namespace: "color",
      allowList: ["red"],
    },
    {
      namespace: "category",
      denyList: ["edible"],
    },
  ];
  const redNotEdibleResults = await engine.similaritySearch(
    "this",
    4,
    redNotEdibleFilter
  );
  console.log("red not edible results", redNotEdibleResults);
  /*
    [
      Document {
        pageContent: 'this apple',
        metadata: { color: 'red', category: 'edible' },
        id: '724ff599-31ea-4094-8d60-158faf3c3f32'
      }
    ]
  */
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/closevector.ts | // If you want to import the browser version, use the following line instead:
// import { CloseVectorWeb } from "@langchain/community/vectorstores/closevector/web";
import { CloseVectorNode } from "@langchain/community/vectorstores/closevector/node";
import { OpenAIEmbeddings } from "@langchain/openai";
/**
 * Builds an in-memory CloseVector index from three short texts and
 * prints the single closest match for a query.
 */
export const run = async () => {
  const texts = ["Hello world", "Bye bye", "hello nice world"];
  const metadatas = [{ id: 2 }, { id: 1 }, { id: 3 }];
  // For the browser, use CloseVectorWeb.fromTexts instead.
  const vectorStore = await CloseVectorNode.fromTexts(
    texts,
    metadatas,
    new OpenAIEmbeddings()
  );
  console.log(await vectorStore.similaritySearch("hello world", 1));
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/faiss_mergefrom.ts | import { FaissStore } from "@langchain/community/vectorstores/faiss";
import { OpenAIEmbeddings } from "@langchain/openai";
export const run = async () => {
// Create an initial vector store
const vectorStore = await FaissStore.fromTexts(
["Hello world", "Bye bye", "hello nice world"],
[{ id: 2 }, { id: 1 }, { id: 3 }],
new OpenAIEmbeddings()
);
// Create another vector store from texts
const vectorStore2 = await FaissStore.fromTexts(
["Some text"],
[{ id: 1 }],
new OpenAIEmbeddings()
);
// merge the first vector store into vectorStore2
await vectorStore2.mergeFrom(vectorStore);
const resultOne = await vectorStore2.similaritySearch("hello world", 1);
console.log(resultOne);
// You can also create a new vector store from another FaissStore index
const vectorStore3 = await FaissStore.fromIndex(
vectorStore2,
new OpenAIEmbeddings()
);
const resultTwo = await vectorStore3.similaritySearch("Bye bye", 1);
console.log(resultTwo);
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/mongodb_atlas_fromTexts.ts | import { MongoDBAtlasVectorSearch } from "@langchain/mongodb";
import { CohereEmbeddings } from "@langchain/cohere";
import { MongoClient } from "mongodb";
// Connect to Atlas and resolve the "<db>.<collection>" namespace.
const client = new MongoClient(process.env.MONGODB_ATLAS_URI || "");
const namespace = "langchain.test";
const [dbName, collectionName] = namespace.split(".");
const collection = client.db(dbName).collection(collectionName);
// Seed the collection with a few embedded texts.
const vectorstore = await MongoDBAtlasVectorSearch.fromTexts(
  ["Hello world", "Bye bye", "What's this?"],
  [{ id: 2 }, { id: 1 }, { id: 3 }],
  new CohereEmbeddings({ model: "embed-english-v3.0" }),
  {
    collection,
    // Atlas search index name; "default" is the default.
    indexName: "default",
    // Collection field holding the raw content; "text" is the default.
    textKey: "text",
    // Collection field holding the embedding; "embedding" is the default.
    embeddingKey: "embedding",
  }
);
// addDocuments returns the ids it assigned; passing those ids back in
// overwrites (upserts) the same records.
const assignedIds = await vectorstore.addDocuments([
  { pageContent: "upsertable", metadata: {} },
]);
await vectorstore.addDocuments([{ pageContent: "overwritten", metadata: {} }], {
  ids: assignedIds,
});
await client.close();
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/xata.ts | import { XataVectorSearch } from "@langchain/community/vectorstores/xata";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { BaseClient } from "@xata.io/client";
import { VectorDBQAChain } from "langchain/chains";
import { Document } from "@langchain/core/documents";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/xata
// if you use the generated client, you don't need this function.
// Just import getXataClient from the generated xata.ts instead.
// If you use the generated client you can import getXataClient from the
// generated xata.ts instead of building one by hand here.
const getXataClient = () => {
  const { XATA_API_KEY, XATA_DB_URL, XATA_BRANCH } = process.env;
  if (!XATA_API_KEY) {
    throw new Error("XATA_API_KEY not set");
  }
  if (!XATA_DB_URL) {
    throw new Error("XATA_DB_URL not set");
  }
  return new BaseClient({
    databaseURL: XATA_DB_URL,
    apiKey: XATA_API_KEY,
    branch: XATA_BRANCH || "main",
  });
};
/**
 * Indexes three documents in Xata, answers a question over them with a
 * VectorDBQAChain, then deletes the rows again.
 */
export async function run() {
  const store = new XataVectorSearch(new OpenAIEmbeddings(), {
    client: getXataClient(),
    table: "vectors",
  });
  // Add documents
  const docs = [
    "Xata is a Serverless Data platform based on PostgreSQL",
    "Xata offers a built-in vector type that can be used to store and query vectors",
    "Xata includes similarity search",
  ].map((pageContent) => new Document({ pageContent }));
  const ids = await store.addDocuments(docs);
  // Give Xata a moment to index the new rows before querying.
  // eslint-disable-next-line no-promise-executor-return
  await new Promise((r) => setTimeout(r, 2000));
  const chain = VectorDBQAChain.fromLLM(new OpenAI(), store, {
    k: 1,
    returnSourceDocuments: true,
  });
  const response = await chain.invoke({ query: "What is Xata?" });
  console.log(JSON.stringify(response, null, 2));
  // Clean up the rows we created.
  await store.delete({ ids });
}
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/milvus.ts | import { Milvus } from "@langchain/community/vectorstores/milvus";
import { OpenAIEmbeddings } from "@langchain/openai";
/**
 * Minimal Milvus example: embed three texts and run one similarity search.
 */
export const run = async () => {
  const store = await Milvus.fromTexts(
    ["Hello world", "Bye bye", "hello nice world"],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings()
  );
  const matches = await store.similaritySearch("hello world", 1);
  console.log(matches);
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/singlestore.ts | import { SingleStoreVectorStore } from "@langchain/community/vectorstores/singlestore";
import { OpenAIEmbeddings } from "@langchain/openai";
/**
 * SingleStore example: embed three texts, search once, and close the
 * connection when done.
 */
export const run = async () => {
  // Connection settings come entirely from the environment.
  const connectionOptions = {
    host: process.env.SINGLESTORE_HOST,
    port: Number(process.env.SINGLESTORE_PORT),
    user: process.env.SINGLESTORE_USERNAME,
    password: process.env.SINGLESTORE_PASSWORD,
    database: process.env.SINGLESTORE_DATABASE,
  };
  const store = await SingleStoreVectorStore.fromTexts(
    ["Hello world", "Bye bye", "hello nice world"],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings(),
    { connectionOptions }
  );
  console.log(await store.similaritySearch("hello world", 1));
  // Release the underlying connection so the process can exit.
  await store.end();
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/xata_metadata.ts | import { XataVectorSearch } from "@langchain/community/vectorstores/xata";
import { OpenAIEmbeddings } from "@langchain/openai";
import { BaseClient } from "@xata.io/client";
import { Document } from "@langchain/core/documents";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/xata
// Also, add a column named "author" to the "vectors" table.
// if you use the generated client, you don't need this function.
// Just import getXataClient from the generated xata.ts instead.
// If you use the generated client you can import getXataClient from the
// generated xata.ts instead of building one by hand here.
const getXataClient = () => {
  const { XATA_API_KEY, XATA_DB_URL, XATA_BRANCH } = process.env;
  if (!XATA_API_KEY) {
    throw new Error("XATA_API_KEY not set");
  }
  if (!XATA_DB_URL) {
    throw new Error("XATA_DB_URL not set");
  }
  return new BaseClient({
    databaseURL: XATA_DB_URL,
    apiKey: XATA_API_KEY,
    branch: XATA_BRANCH || "main",
  });
};
/**
 * Adds documents with an "author" metadata column and shows that the
 * metadata acts as a pre-filter on similarity search.
 * Requires an "author" column on the "vectors" table.
 */
export async function run() {
  const store = new XataVectorSearch(new OpenAIEmbeddings(), {
    client: getXataClient(),
    table: "vectors",
  });
  // Documents tagged with their author.
  const docs = [
    { pageContent: "Xata works great with Langchain.js", author: "Xata" },
    { pageContent: "Xata works great with Langchain", author: "Langchain" },
    { pageContent: "Xata includes similarity search", author: "Xata" },
  ].map(
    ({ pageContent, author }) =>
      new Document({ pageContent, metadata: { author } })
  );
  const ids = await store.addDocuments(docs);
  // Give Xata a moment to index the new rows before querying.
  // eslint-disable-next-line no-promise-executor-return
  await new Promise((r) => setTimeout(r, 2000));
  // "author" is applied as a pre-filter to the similarity search.
  const results = await store.similaritySearchWithScore("xata works great", 6, {
    author: "Langchain",
  });
  console.log(JSON.stringify(results, null, 2));
  // Clean up the rows we created.
  await store.delete({ ids });
}
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/closevector_saveload_fromcloud.ts | // If you want to import the browser version, use the following line instead:
// import { CloseVectorWeb } from "@langchain/community/vectorstores/closevector/web";
import { CloseVectorNode } from "@langchain/community/vectorstores/closevector/node";
import { CloseVectorWeb } from "@langchain/community/vectorstores/closevector/web";
import { OpenAIEmbeddings } from "@langchain/openai";
// Create a vector store through any method, here from texts as an example
// If you want to import the browser version, use the following line instead:
// const vectorStore = await CloseVectorWeb.fromTexts(
// CloseVector cloud credentials, shared by every call below.
const credentials = {
  key: "your access key",
  secret: "your secret",
};
// Build a store from texts. For the browser, use CloseVectorWeb.fromTexts.
const vectorStore = await CloseVectorNode.fromTexts(
  ["Hello world", "Bye bye", "hello nice world"],
  [{ id: 2 }, { id: 1 }, { id: 3 }],
  new OpenAIEmbeddings(),
  undefined,
  credentials
);
// Persist the index to CloseVector cloud and keep its uuid for reloading.
await vectorStore.saveToCloud({
  description: "example",
  public: true,
});
const { uuid } = vectorStore.instance;
// Reload the saved index in Node...
const loadedVectorStore = await CloseVectorNode.loadFromCloud({
  uuid,
  embeddings: new OpenAIEmbeddings(),
  credentials,
});
// ...and the very same index in a browser environment.
const loadedVectorStoreOnBrowser = await CloseVectorWeb.loadFromCloud({
  uuid,
  embeddings: new OpenAIEmbeddings(),
  credentials,
});
// Both loaded stores answer queries identically to the original store.
const result = await loadedVectorStore.similaritySearch("hello world", 1);
console.log(result);
const resultOnBrowser = await loadedVectorStoreOnBrowser.similaritySearch(
  "hello world",
  1
);
console.log(resultOnBrowser);
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/typesense.ts | import {
Typesense,
TypesenseConfig,
} from "@langchain/community/vectorstores/typesense";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Client } from "typesense";
import { Document } from "@langchain/core/documents";
// Typesense client used both by the vector store and by the custom
// import function below.
const vectorTypesenseClient = new Client({
  nodes: [
    {
      // Ideally should come from your .env file
      host: "...",
      port: 123,
      protocol: "https",
    },
  ],
  // Ideally should come from your .env file
  apiKey: "...",
  numRetries: 3,
  connectionTimeoutSeconds: 60,
});
const typesenseVectorStoreConfig = {
  // Typesense client
  typesenseClient: vectorTypesenseClient,
  // Name of the collection to store the vectors in
  schemaName: "your_schema_name",
  // Optional column names to be used in Typesense
  columnNames: {
    // "vec" is the default name for the vector column in Typesense but you can change it to whatever you want
    vector: "vec",
    // "text" is the default name for the text column in Typesense but you can change it to whatever you want
    pageContent: "text",
    // Names of the columns that you will save in your typesense schema and need to be retrieved as metadata when searching
    metadataColumnNames: ["foo", "bar", "baz"],
  },
  // Optional search parameters to be passed to Typesense when searching
  searchParams: {
    q: "*",
    filter_by: "foo:[fooo]",
    query_by: "",
  },
  // You can override the default Typesense import function if you want to do something more complex
  // Default import function:
  // async importToTypesense<
  //   T extends Record<string, unknown> = Record<string, unknown>
  // >(data: T[], collectionName: string) {
  //   const chunkSize = 2000;
  //   for (let i = 0; i < data.length; i += chunkSize) {
  //     const chunk = data.slice(i, i + chunkSize);
  //     await this.caller.call(async () => {
  //       await this.client
  //         .collections<T>(collectionName)
  //         .documents()
  //         .import(chunk, { action: "emplace", dirty_values: "drop" });
  //     });
  //   }
  // }
  import: async (data, collectionName) => {
    await vectorTypesenseClient
      .collections(collectionName)
      .documents()
      .import(data, { action: "emplace", dirty_values: "drop" });
  },
} satisfies TypesenseConfig;
/**
 * Creates a Typesense vector store from a list of documents.
 * Will update documents if there is a document with the same id, at least with the default import function.
 * @param documents list of documents to create the vector store from
 * @returns Typesense vector store
 */
const createVectorStoreWithTypesense = async (documents: Document[] = []) =>
  Typesense.fromDocuments(
    documents,
    new OpenAIEmbeddings(),
    typesenseVectorStoreConfig
  );
/**
 * Returns a Typesense vector store from an existing index.
 * @returns Typesense vector store
 */
const getVectorStoreWithTypesense = async () =>
  new Typesense(new OpenAIEmbeddings(), typesenseVectorStoreConfig);
// Do a similarity search
const vectorStore = await getVectorStoreWithTypesense();
const documents = await vectorStore.similaritySearch("hello world");
// Add filters based on metadata with the search parameters of Typesense
// will exclude documents with author:JK Rowling, so if Joe Rowling & JK Rowling exists, only Joe Rowling will be returned
// Fix: this call previously returned a floating promise; without `await`,
// a request failure surfaced as an unhandled rejection and the call could
// still be in flight when the script ended.
await vectorStore.similaritySearch("Rowling", undefined, {
  filter_by: "author:!=JK Rowling",
});
// Delete a document (also previously un-awaited — same floating-promise fix).
await vectorStore.deleteDocuments(["document_id_1", "document_id_2"]);
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/usearch.ts | import { USearch } from "@langchain/community/vectorstores/usearch";
import { OpenAIEmbeddings } from "@langchain/openai";
// Build an in-memory USearch index from three texts and query it once.
const vectorStore = await USearch.fromTexts(
  ["Hello world", "Bye bye", "hello nice world"],
  [{ id: 2 }, { id: 1 }, { id: 3 }],
  new OpenAIEmbeddings()
);
// Print the single closest match for the query.
const closestMatch = await vectorStore.similaritySearch("hello world", 1);
console.log(closestMatch);
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hnswlib_fromdocs.ts | import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";
// Create docs with a loader
// Load example documents from disk with a TextLoader.
const docs = await new TextLoader(
  "src/document_loaders/example_data/example.txt"
).load();
// Embed them into an in-memory HNSWLib index.
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
// Print the single most similar document to the query.
const result = await vectorStore.similaritySearch("hello world", 1);
console.log(result);
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/supabase_with_query_builder_metadata_filter.ts | import {
SupabaseFilterRPCCall,
SupabaseVectorStore,
} from "@langchain/community/vectorstores/supabase";
import { OpenAIEmbeddings } from "@langchain/openai";
import { createClient } from "@supabase/supabase-js";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
// Required Supabase connection settings; fail fast when either is missing.
const privateKey = process.env.SUPABASE_PRIVATE_KEY;
if (!privateKey) throw new Error(`Expected env var SUPABASE_PRIVATE_KEY`);
const url = process.env.SUPABASE_URL;
if (!url) throw new Error(`Expected env var SUPABASE_URL`);
/**
 * Demonstrates Supabase query-builder RPC filters on similarity search:
 * numeric metadata comparisons, full-text search, and string equality.
 */
export const run = async () => {
  const store = new SupabaseVectorStore(new OpenAIEmbeddings(), {
    client: createClient(url, privateKey),
    tableName: "documents",
  });
  const docs = [
    {
      pageContent:
        "This is a long text, but it actually means something because vector database does not understand Lorem Ipsum. So I would need to expand upon the notion of quantum fluff, a theorectical concept where subatomic particles coalesce to form transient multidimensional spaces. Yet, this abstraction holds no real-world application or comprehensible meaning, reflecting a cosmic puzzle.",
      metadata: { b: 1, c: 10, stuff: "right" },
    },
    {
      pageContent:
        "This is a long text, but it actually means something because vector database does not understand Lorem Ipsum. So I would need to proceed by discussing the echo of virtual tweets in the binary corridors of the digital universe. Each tweet, like a pixelated canary, hums in an unseen frequency, a fascinatingly perplexing phenomenon that, while conjuring vivid imagery, lacks any concrete implication or real-world relevance, portraying a paradox of multidimensional spaces in the age of cyber folklore.",
      metadata: { b: 2, c: 9, stuff: "right" },
    },
    { pageContent: "hello", metadata: { b: 1, c: 9, stuff: "right" } },
    { pageContent: "hello", metadata: { b: 1, c: 9, stuff: "wrong" } },
    { pageContent: "hi", metadata: { b: 2, c: 8, stuff: "right" } },
    { pageContent: "bye", metadata: { b: 3, c: 7, stuff: "right" } },
    { pageContent: "what's this", metadata: { b: 4, c: 6, stuff: "right" } },
  ];
  // Also supports an additional {ids: []} parameter for upsertion
  await store.addDocuments(docs);
  // Filter: b < 3 AND c > 7 AND text matches 'multidimensional' & 'spaces'.
  const textSearchFilter: SupabaseFilterRPCCall = (rpc) =>
    rpc
      .filter("metadata->b::int", "lt", 3)
      .filter("metadata->c::int", "gt", 7)
      .textSearch("content", `'multidimensional' & 'spaces'`, {
        config: "english",
      });
  const resultA = await store.similaritySearch("quantum", 4, textSearchFilter);
  // Filter: b < 3 AND c > 7 AND stuff == "right".
  const stuffFilter: SupabaseFilterRPCCall = (rpc) =>
    rpc
      .filter("metadata->b::int", "lt", 3)
      .filter("metadata->c::int", "gt", 7)
      .filter("metadata->>stuff", "eq", "right");
  const resultB = await store.similaritySearch("hello", 2, stuffFilter);
  console.log(resultA, resultB);
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/weaviate_fromTexts.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import weaviate, { ApiKey } from "weaviate-ts-client";
import { WeaviateStore } from "@langchain/weaviate";
import { OpenAIEmbeddings } from "@langchain/openai";
/**
 * Creates a Weaviate index named "Test" and fills it with four short
 * texts plus a "foo" metadata key.
 */
export async function run() {
  // Something wrong with the weaviate-ts-client types, so we need to disable
  const client = (weaviate as any).client({
    scheme: process.env.WEAVIATE_SCHEME || "https",
    host: process.env.WEAVIATE_HOST || "localhost",
    apiKey: new ApiKey(process.env.WEAVIATE_API_KEY || "default"),
  });
  const texts = ["hello world", "hi there", "how are you", "bye now"];
  const metadatas = [
    { foo: "bar" },
    { foo: "baz" },
    { foo: "qux" },
    { foo: "bar" },
  ];
  await WeaviateStore.fromTexts(texts, metadatas, new OpenAIEmbeddings(), {
    client,
    indexName: "Test",
    textKey: "text",
    metadataKeys: ["foo"],
  });
}
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hnswlib_filter.ts | import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { OpenAIEmbeddings } from "@langchain/openai";
// Build a small HNSWLib store, then search with a filter predicate so
// only the document whose metadata id is 3 can be returned.
const vectorStore = await HNSWLib.fromTexts(
  ["Hello world", "Bye bye", "hello nice world"],
  [{ id: 2 }, { id: 1 }, { id: 3 }],
  new OpenAIEmbeddings()
);
const matches = await vectorStore.similaritySearch(
  "hello world",
  10,
  (document) => document.metadata.id === 3
);
// only "hello nice world" will be returned
console.log(matches);
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/usearch_fromdocs.ts | import { USearch } from "@langchain/community/vectorstores/usearch";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";
// Create docs with a loader
// Read the example file into Document objects.
const docs = await new TextLoader(
  "src/document_loaders/example_data/example.txt"
).load();
// Index them with USearch.
const vectorStore = await USearch.fromDocuments(docs, new OpenAIEmbeddings());
// Print the single closest document to the query.
const resultOne = await vectorStore.similaritySearch("hello world", 1);
console.log(resultOne);
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/pgvector_vectorstore/pgvector_hnsw.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import {
DistanceStrategy,
PGVectorStore,
} from "@langchain/community/vectorstores/pgvector";
import { PoolConfig } from "pg";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/pgvector
// Store configuration: connection details, table name, column mapping,
// and distance strategy.
const config = {
  postgresConnectionOptions: {
    type: "postgres",
    host: "127.0.0.1",
    port: 5433,
    user: "myuser",
    password: "ChangeMe",
    database: "api",
  } as PoolConfig,
  tableName: "testlangchain",
  columns: {
    idColumnName: "id",
    vectorColumnName: "vector",
    contentColumnName: "content",
    metadataColumnName: "metadata",
  },
  // supported distance strategies: cosine (default), innerProduct, or euclidean
  distanceStrategy: "cosine" as DistanceStrategy,
};
const embeddings = new OpenAIEmbeddings();
const pgvectorStore = await PGVectorStore.initialize(embeddings, config);
// Build an HNSW index over the vector column for faster ANN search.
await pgvectorStore.createHnswIndex({
  dimensions: 1536,
  efConstruction: 64,
  m: 16,
});
await pgvectorStore.addDocuments([
  { pageContent: "what's this", metadata: { a: 2, b: ["tag1", "tag2"] } },
  { pageContent: "Cat drinks milk", metadata: { a: 1, b: ["tag2"] } },
]);
// Search by a raw query vector rather than by text.
const queryVector = await embeddings.embedQuery("water");
const results = await pgvectorStore.similaritySearchVectorWithScore(
  queryVector,
  1
);
console.log(results);
// Close the connection pool.
await pgvectorStore.end();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/pgvector_vectorstore/docker-compose.example.yml | services:
  # Single Postgres service with the pgvector extension preinstalled.
  db:
    image: pgvector/pgvector:pg16
    ports:
      # Host port 5433 -> container's default Postgres port 5432.
      - 5433:5432
    volumes:
      # Persist database files on the host between container restarts.
      - ./data:/var/lib/postgresql/data
    environment:
      # Credentials/database matching the pgvector example configs.
      - POSTGRES_PASSWORD=ChangeMe
      - POSTGRES_USER=myuser
      - POSTGRES_DB=api
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/pgvector_vectorstore/pgvector_pool.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { PGVectorStore } from "@langchain/community/vectorstores/pgvector";
import pg from "pg";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/pgvector
// A pg Pool shared between several PGVectorStore instances and closed
// exactly once at the end.
const reusablePool = new pg.Pool({
  host: "127.0.0.1",
  port: 5433,
  user: "myuser",
  password: "ChangeMe",
  database: "api",
});
// Column mapping shared by both stores; only the collection differs.
const sharedColumns = {
  idColumnName: "id",
  vectorColumnName: "vector",
  contentColumnName: "content",
  metadataColumnName: "metadata",
};
const originalConfig = {
  pool: reusablePool,
  tableName: "testlangchain",
  collectionName: "sample",
  collectionTableName: "collections",
  columns: sharedColumns,
};
// Set up the DB.
// Can skip this step if you've already initialized the DB.
// await PGVectorStore.initialize(new OpenAIEmbeddings(), originalConfig);
const pgvectorStore = new PGVectorStore(new OpenAIEmbeddings(), originalConfig);
await pgvectorStore.addDocuments([
  { pageContent: "what's this", metadata: { a: 2 } },
  { pageContent: "Cat drinks milk", metadata: { a: 1 } },
]);
const results = await pgvectorStore.similaritySearch("water", 1);
console.log(results);
/*
  [ Document { pageContent: 'Cat drinks milk', metadata: { a: 1 } } ]
*/
// A second store on the same pool but a different collection sees none
// of the documents added above.
const pgvectorStore2 = new PGVectorStore(new OpenAIEmbeddings(), {
  pool: reusablePool,
  tableName: "testlangchain",
  collectionTableName: "collections",
  collectionName: "some_other_collection",
  columns: sharedColumns,
});
const results2 = await pgvectorStore2.similaritySearch("water", 1);
console.log(results2);
/*
  []
*/
await reusablePool.end();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/pgvector_vectorstore/pgvector.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import {
DistanceStrategy,
PGVectorStore,
} from "@langchain/community/vectorstores/pgvector";
import { PoolConfig } from "pg";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/pgvector
// Store configuration: connection details, table name, column mapping,
// and distance strategy.
const config = {
  postgresConnectionOptions: {
    type: "postgres",
    host: "127.0.0.1",
    port: 5433,
    user: "myuser",
    password: "ChangeMe",
    database: "api",
  } as PoolConfig,
  tableName: "testlangchain",
  columns: {
    idColumnName: "id",
    vectorColumnName: "vector",
    contentColumnName: "content",
    metadataColumnName: "metadata",
  },
  // supported distance strategies: cosine (default), innerProduct, or euclidean
  distanceStrategy: "cosine" as DistanceStrategy,
};
const pgvectorStore = await PGVectorStore.initialize(
  new OpenAIEmbeddings(),
  config
);
await pgvectorStore.addDocuments([
  { pageContent: "what's this", metadata: { a: 2, b: ["tag1", "tag2"] } },
  { pageContent: "Cat drinks milk", metadata: { a: 1, b: ["tag2"] } },
]);
// Plain similarity search.
const results = await pgvectorStore.similaritySearch("water", 1);
console.log(results);
/*
  [ Document { pageContent: 'Cat drinks milk', metadata: { a: 1 } } ]
*/
// Exact-match metadata filter.
const results2 = await pgvectorStore.similaritySearch("water", 1, { a: 2 });
console.log(results2);
/*
  [ Document { pageContent: 'what's this', metadata: { a: 2 } } ]
*/
// "in" filter: matches any of the listed values.
const results3 = await pgvectorStore.similaritySearch("water", 1, {
  a: { in: [2] },
});
console.log(results3);
/*
  [ Document { pageContent: 'what's this', metadata: { a: 2 } } ]
*/
// Delete by metadata filter.
await pgvectorStore.delete({ filter: { a: 1 } });
const results4 = await pgvectorStore.similaritySearch("water", 1);
console.log(results4);
/*
  [ Document { pageContent: 'what's this', metadata: { a: 2 } } ]
*/
// arrayContains (?|) filter on array-valued metadata.
const results5 = await pgvectorStore.similaritySearch("water", 1, {
  b: { arrayContains: ["tag1"] },
});
console.log(results5);
/*
  [ Document { pageContent: "what's this", metadata: { a: 2, b: ['tag1', 'tag2'] } } } ]
*/
// Close the connection pool.
await pgvectorStore.end();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/momento_vector_index/fromDocs.ts | import { MomentoVectorIndex } from "@langchain/community/vectorstores/momento_vector_index";
// For browser/edge, adjust this to import from "@gomomento/sdk-web";
import {
PreviewVectorIndexClient,
VectorIndexConfigurations,
CredentialProvider,
} from "@gomomento/sdk";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { sleep } from "langchain/util/time";
// Create docs with a loader
const loader = new TextLoader("src/document_loaders/example_data/example.txt");
const docs = await loader.load();
const vectorStore = await MomentoVectorIndex.fromDocuments(
docs,
new OpenAIEmbeddings(),
{
client: new PreviewVectorIndexClient({
configuration: VectorIndexConfigurations.Laptop.latest(),
credentialProvider: CredentialProvider.fromEnvironmentVariable({
environmentVariableName: "MOMENTO_API_KEY",
}),
}),
indexName: "langchain-example-index",
}
);
// because indexing is async, wait for it to finish to search directly after
await sleep();
// Search for the most similar document
const response = await vectorStore.similaritySearch("hello", 1);
console.log(response);
/*
[
Document {
pageContent: 'Foo\nBar\nBaz\n\n',
metadata: { source: 'src/document_loaders/example_data/example.txt' }
}
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/momento_vector_index/fromTexts.ts | import { MomentoVectorIndex } from "@langchain/community/vectorstores/momento_vector_index";
// For browser/edge, adjust this to import from "@gomomento/sdk-web";
import {
PreviewVectorIndexClient,
VectorIndexConfigurations,
CredentialProvider,
} from "@gomomento/sdk";
import { OpenAIEmbeddings } from "@langchain/openai";
import { sleep } from "langchain/util/time";
// Build a Momento Vector Index store directly from raw strings.
// Second argument is the metadata (one shared empty object here); the final
// argument pins explicit ids for the four texts so re-runs upsert in place.
const vectorStore = await MomentoVectorIndex.fromTexts(
  ["hello world", "goodbye world", "salutations world", "farewell world"],
  {},
  new OpenAIEmbeddings(),
  {
    client: new PreviewVectorIndexClient({
      configuration: VectorIndexConfigurations.Laptop.latest(),
      credentialProvider: CredentialProvider.fromEnvironmentVariable({
        environmentVariableName: "MOMENTO_API_KEY",
      }),
    }),
    indexName: "langchain-example-index",
  },
  { ids: ["1", "2", "3", "4"] }
);
// because indexing is async, wait for it to finish to search directly after
await sleep();
// Top-2 matches for the query.
const response = await vectorStore.similaritySearch("hello", 2);
console.log(response);
/*
[
  Document { pageContent: 'hello world', metadata: {} },
  Document { pageContent: 'salutations world', metadata: {} }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/momento_vector_index/fromExisting.ts | import { MomentoVectorIndex } from "@langchain/community/vectorstores/momento_vector_index";
// For browser/edge, adjust this to import from "@gomomento/sdk-web";
import {
PreviewVectorIndexClient,
VectorIndexConfigurations,
CredentialProvider,
} from "@gomomento/sdk";
import { OpenAIEmbeddings } from "@langchain/openai";
// Attach to an index that was populated previously — no documents are
// added here, so construction is synchronous (plain `new`, no factory).
const existingIndexClient = new PreviewVectorIndexClient({
  configuration: VectorIndexConfigurations.Laptop.latest(),
  credentialProvider: CredentialProvider.fromEnvironmentVariable({
    environmentVariableName: "MOMENTO_API_KEY",
  }),
});
const existingStore = new MomentoVectorIndex(new OpenAIEmbeddings(), {
  client: existingIndexClient,
  indexName: "langchain-example-index",
});

// Query the pre-populated index for the single closest match.
const hits = await existingStore.similaritySearch("hello", 1);
console.log(hits);
/*
[
  Document {
    pageContent: 'Foo\nBar\nBaz\n\n',
    metadata: { source: 'src/document_loaders/example_data/example.txt' }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/elasticsearch/elasticsearch.ts | import { Client, ClientOptions } from "@elastic/elasticsearch";
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { VectorDBQAChain } from "langchain/chains";
import {
ElasticClientArgs,
ElasticVectorSearch,
} from "@langchain/community/vectorstores/elasticsearch";
import { Document } from "@langchain/core/documents";
// to run this first run Elastic's docker-container with `docker-compose up -d --build`
/**
 * End-to-end Elasticsearch example: index a few documents, run a similarity
 * search, use the store inside a retrieval QA chain, then delete the
 * documents by id again.
 */
export async function run() {
  // Client options: URL from env (or local default) plus optional auth —
  // API key takes precedence over username/password.
  const config: ClientOptions = {
    node: process.env.ELASTIC_URL ?? "http://127.0.0.1:9200",
  };
  if (process.env.ELASTIC_API_KEY) {
    config.auth = {
      apiKey: process.env.ELASTIC_API_KEY,
    };
  } else if (process.env.ELASTIC_USERNAME && process.env.ELASTIC_PASSWORD) {
    config.auth = {
      username: process.env.ELASTIC_USERNAME,
      password: process.env.ELASTIC_PASSWORD,
    };
  }
  const clientArgs: ElasticClientArgs = {
    client: new Client(config),
    indexName: process.env.ELASTIC_INDEX ?? "test_vectorstore",
  };
  // Index documents
  const docs = [
    new Document({
      metadata: { foo: "bar" },
      pageContent: "Elasticsearch is a powerful vector db",
    }),
    new Document({
      metadata: { foo: "bar" },
      pageContent: "the quick brown fox jumped over the lazy dog",
    }),
    new Document({
      metadata: { baz: "qux" },
      pageContent: "lorem ipsum dolor sit amet",
    }),
    new Document({
      metadata: { baz: "qux" },
      pageContent:
        "Elasticsearch a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads.",
    }),
  ];
  const embeddings = new OpenAIEmbeddings();
  // await ElasticVectorSearch.fromDocuments(docs, embeddings, clientArgs);
  const vectorStore = new ElasticVectorSearch(embeddings, clientArgs);
  // Also supports an additional {ids: []} parameter for upsertion
  const ids = await vectorStore.addDocuments(docs);
  /* Search the vector DB independently with meta filters */
  const results = await vectorStore.similaritySearch("fox jump", 1);
  console.log(JSON.stringify(results, null, 2));
  /* [
    {
      "pageContent": "the quick brown fox jumped over the lazy dog",
      "metadata": {
        "foo": "bar"
      }
    }
  ]
  */
  /* Use as part of a chain (currently no metadata filters) for LLM query */
  const model = new OpenAI();
  const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
    k: 1,
    returnSourceDocuments: true,
  });
  const response = await chain.invoke({ query: "What is Elasticsearch?" });
  console.log(JSON.stringify(response, null, 2));
  /*
    {
      "text": " Elasticsearch is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads.",
      "sourceDocuments": [
        {
          "pageContent": "Elasticsearch a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads.",
          "metadata": {
            "baz": "qux"
          }
        }
      ]
    }
  */
  // Deleting the documents by id empties the index again.
  await vectorStore.delete({ ids });
  const response2 = await chain.invoke({ query: "What is Elasticsearch?" });
  console.log(JSON.stringify(response2, null, 2));
  /*
    []
  */
}
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/zep/zep_with_openai_embeddings.ts | import { ZepVectorStore } from "@langchain/community/vectorstores/zep";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { randomUUID } from "crypto";
const loader = new TextLoader("src/document_loaders/example_data/example.txt");
const docs = await loader.load();
/**
 * Embeds the loaded example documents into a fresh Zep collection using
 * OpenAI embeddings, then runs a similarity search and an MMR search.
 */
export const run = async () => {
  // Random suffix keeps repeated runs from colliding on the collection name.
  const collectionName = `collection${randomUUID().split("-")[0]}`;
  const zepConfig = {
    apiUrl: "http://localhost:8000", // this should be the URL of your Zep implementation
    collectionName,
    embeddingDimensions: 1536, // this must match the width of the embeddings you're using
    isAutoEmbedded: false, // set to false to disable auto-embedding
  };
  // Embeddings are computed client-side here (auto-embedding disabled above).
  const embeddings = new OpenAIEmbeddings();
  const vectorStore = await ZepVectorStore.fromDocuments(
    docs,
    embeddings,
    zepConfig
  );
  const results = await vectorStore.similaritySearchWithScore("bar", 3);
  console.log("Similarity Results:");
  console.log(JSON.stringify(results));
  // Maximal-marginal-relevance search trades off relevance vs. diversity.
  const results2 = await vectorStore.maxMarginalRelevanceSearch("bar", {
    k: 3,
  });
  console.log("MMR Results:");
  console.log(JSON.stringify(results2));
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/zep/zep_cloud.ts | import { ZepCloudVectorStore } from "@langchain/community/vectorstores/zep_cloud";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { randomUUID } from "crypto";
const loader = new TextLoader("src/document_loaders/example_data/example.txt");
const docs = await loader.load();
// Random suffix keeps repeated runs from colliding on the collection name.
const collectionName = `collection${randomUUID().split("-")[0]}`;
const zepConfig = {
  // Your Zep Cloud Project API key https://help.getzep.com/projects
  apiKey: "<Zep Api Key>",
  collectionName,
};
// We're using fake embeddings here, because Zep Cloud handles embedding for you
const embeddings = new FakeEmbeddings();
const vectorStore = await ZepCloudVectorStore.fromDocuments(
  docs,
  embeddings,
  zepConfig
);
// Poll once per second until every document has been embedded server-side.
// eslint-disable-next-line no-constant-condition
while (true) {
  const c = await vectorStore.client.document.getCollection(collectionName);
  console.log(
    `Embedding status: ${c.documentEmbeddedCount}/${c.documentCount} documents embedded`
  );
  // eslint-disable-next-line no-promise-executor-return
  await new Promise((resolve) => setTimeout(resolve, 1000));
  if (c.documentEmbeddedCount === c.documentCount) {
    break;
  }
}
const results = await vectorStore.similaritySearchWithScore("bar", 3);
console.log("Similarity Results:");
console.log(JSON.stringify(results));
// Maximal-marginal-relevance search trades off relevance vs. diversity.
const results2 = await vectorStore.maxMarginalRelevanceSearch("bar", {
  k: 3,
});
console.log("MMR Results:");
console.log(JSON.stringify(results2));
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/zep/zep_from_docs.ts | import { ZepVectorStore } from "@langchain/community/vectorstores/zep";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { randomUUID } from "crypto";
const loader = new TextLoader("src/document_loaders/example_data/example.txt");
const docs = await loader.load();
/**
 * Embeds the loaded example documents into a fresh self-hosted Zep
 * collection (server-side auto-embedding), waits for the collection to
 * become ready, then runs a similarity search and an MMR search.
 */
export const run = async () => {
  // Random suffix keeps repeated runs from colliding on the collection name.
  const collectionName = `collection${randomUUID().split("-")[0]}`;
  const zepConfig = {
    apiUrl: "http://localhost:8000", // this should be the URL of your Zep implementation
    collectionName,
    embeddingDimensions: 1536, // this must match the width of the embeddings you're using
    isAutoEmbedded: true, // If true, the vector store will automatically embed documents when they are added
  };
  // FakeEmbeddings is fine here: Zep embeds server-side when isAutoEmbedded is true.
  const embeddings = new FakeEmbeddings();
  const vectorStore = await ZepVectorStore.fromDocuments(
    docs,
    embeddings,
    zepConfig
  );
  // Poll once per second until the collection reports it is ready.
  // eslint-disable-next-line no-constant-condition
  while (true) {
    const c = await vectorStore.client.document.getCollection(collectionName);
    console.log(
      `Embedding status: ${c.document_embedded_count}/${c.document_count} documents embedded`
    );
    // eslint-disable-next-line no-promise-executor-return
    await new Promise((resolve) => setTimeout(resolve, 1000));
    if (c.status === "ready") {
      break;
    }
  }
  const results = await vectorStore.similaritySearchWithScore("bar", 3);
  console.log("Similarity Results:");
  console.log(JSON.stringify(results));
  // Maximal-marginal-relevance search trades off relevance vs. diversity.
  const results2 = await vectorStore.maxMarginalRelevanceSearch("bar", {
    k: 3,
  });
  console.log("MMR Results:");
  console.log(JSON.stringify(results2));
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/zep/zep_with_metadata.ts | import { ZepVectorStore } from "@langchain/community/vectorstores/zep";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { randomUUID } from "crypto";
import { Document } from "@langchain/core/documents";
const docs = [
new Document({
metadata: { album: "Led Zeppelin IV", year: 1971 },
pageContent:
"Stairway to Heaven is one of the most iconic songs by Led Zeppelin.",
}),
new Document({
metadata: { album: "Led Zeppelin I", year: 1969 },
pageContent:
"Dazed and Confused was a standout track on Led Zeppelin's debut album.",
}),
new Document({
metadata: { album: "Physical Graffiti", year: 1975 },
pageContent:
"Kashmir, from Physical Graffiti, showcases Led Zeppelin's unique blend of rock and world music.",
}),
new Document({
metadata: { album: "Houses of the Holy", year: 1973 },
pageContent:
"The Rain Song is a beautiful, melancholic piece from Houses of the Holy.",
}),
new Document({
metadata: { band: "Black Sabbath", album: "Paranoid", year: 1970 },
pageContent:
"Paranoid is Black Sabbath's second studio album and includes some of their most notable songs.",
}),
new Document({
metadata: {
band: "Iron Maiden",
album: "The Number of the Beast",
year: 1982,
},
pageContent:
"The Number of the Beast is often considered Iron Maiden's best album.",
}),
new Document({
metadata: { band: "Metallica", album: "Master of Puppets", year: 1986 },
pageContent:
"Master of Puppets is widely regarded as Metallica's finest work.",
}),
new Document({
metadata: { band: "Megadeth", album: "Rust in Peace", year: 1990 },
pageContent:
"Rust in Peace is Megadeth's fourth studio album and features intricate guitar work.",
}),
];
/**
 * Indexes the music documents above into a fresh Zep collection, waits for
 * server-side embedding to finish, then demonstrates a JSONPath-filtered
 * similarity search and an (unfiltered) MMR search.
 */
export const run = async () => {
  // Random suffix keeps repeated runs from colliding on the collection name.
  const collectionName = `collection${randomUUID().split("-")[0]}`;
  const zepConfig = {
    apiUrl: "http://localhost:8000", // this should be the URL of your Zep implementation
    collectionName,
    embeddingDimensions: 1536, // this must match the width of the embeddings you're using
    isAutoEmbedded: true, // If true, the vector store will automatically embed documents when they are added
  };
  // FakeEmbeddings is fine here: Zep embeds server-side when isAutoEmbedded is true.
  const embeddings = new FakeEmbeddings();
  const vectorStore = await ZepVectorStore.fromDocuments(
    docs,
    embeddings,
    zepConfig
  );
  // Poll once per second until the collection reports it is ready.
  // eslint-disable-next-line no-constant-condition
  while (true) {
    const c = await vectorStore.client.document.getCollection(collectionName);
    console.log(
      `Embedding status: ${c.document_embedded_count}/${c.document_count} documents embedded`
    );
    // eslint-disable-next-line no-promise-executor-return
    await new Promise((resolve) => setTimeout(resolve, 1000));
    if (c.status === "ready") {
      break;
    }
  }
  // Metadata filtering uses a JSONPath expression evaluated by the server.
  vectorStore
    .similaritySearchWithScore("sad music", 3, {
      where: { jsonpath: "$[*] ? (@.year == 1973)" }, // We should see a single result: The Rain Song
    })
    .then((results) => {
      console.log(`\n\nSimilarity Results:\n${JSON.stringify(results)}`);
    })
    .catch((e) => {
      if (e.name === "NotFoundError") {
        console.log("No results found");
      } else {
        throw e;
      }
    });
  // We're not filtering here, but rather demonstrating MMR at work.
  // We could also add a filter to the MMR search, as we did with the similarity search above.
  vectorStore
    .maxMarginalRelevanceSearch("sad music", {
      k: 3,
    })
    .then((results) => {
      console.log(`\n\nMMR Results:\n${JSON.stringify(results)}`);
    })
    .catch((e) => {
      if (e.name === "NotFoundError") {
        console.log("No results found");
      } else {
        throw e;
      }
    });
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore/docker-compose.example.yml | services:
db:
image: ankane/pgvector
ports:
- 5432:5432
volumes:
- ./data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=
- POSTGRES_USER=
- POSTGRES_DB=
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore/prisma.ts | import { PrismaVectorStore } from "@langchain/community/vectorstores/prisma";
import { OpenAIEmbeddings } from "@langchain/openai";
import { PrismaClient, Prisma, Document } from "@prisma/client";
/**
 * Demonstrates the Prisma-backed vector store: indexes a few documents and
 * runs a similarity search, then repeats the flow with a store configured
 * with a default metadata filter.
 *
 * Fix: the final query previously used `vectorStore` (no filter), so the
 * "default filter" demonstration never exercised the filter; it now
 * queries `vectorStore2`.
 */
export const run = async () => {
  const db = new PrismaClient();

  // Use the `withModel` method to get proper type hints for `metadata` field:
  const vectorStore = PrismaVectorStore.withModel<Document>(db).create(
    new OpenAIEmbeddings(),
    {
      prisma: Prisma,
      tableName: "Document",
      vectorColumnName: "vector",
      columns: {
        id: PrismaVectorStore.IdColumn,
        content: PrismaVectorStore.ContentColumn,
      },
    }
  );

  const texts = ["Hello world", "Bye bye", "What's this?"];
  // Create the rows in one transaction, then embed and attach their vectors.
  await vectorStore.addModels(
    await db.$transaction(
      texts.map((content) => db.document.create({ data: { content } }))
    )
  );

  const resultOne = await vectorStore.similaritySearch("Hello world", 1);
  console.log(resultOne);

  // Create an instance with a default filter applied to every search.
  const vectorStore2 = PrismaVectorStore.withModel<Document>(db).create(
    new OpenAIEmbeddings(),
    {
      prisma: Prisma,
      tableName: "Document",
      vectorColumnName: "vector",
      columns: {
        id: PrismaVectorStore.IdColumn,
        content: PrismaVectorStore.ContentColumn,
      },
      filter: {
        content: {
          equals: "default",
        },
      },
    }
  );

  await vectorStore2.addModels(
    await db.$transaction(
      texts.map((content) => db.document.create({ data: { content } }))
    )
  );

  // Use the default filter a.k.a {"content": "default"}
  const resultTwo = await vectorStore2.similaritySearch("Hello world", 1);
  console.log(resultTwo);
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore/.env.example | # Add DATABASE_URL to .env file in this directory
DATABASE_URL=postgresql://[USERNAME]:[PASSWORD]@[ADDR]/[DBNAME] |
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore/prisma/schema.prisma | // This is your Prisma schema file,
// learn more about it in the docs: https://pris.ly/d/prisma-schema
generator client {
provider = "prisma-client-js"
}
datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
}
model Document {
id String @id @default(cuid())
content String
namespace String? @default("default")
vector Unsupported("vector")?
}
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore/prisma | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations/migration_lock.toml | # Please do not edit this file manually
# It should be added in your version-control system (i.e. Git)
provider = "postgresql" |
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations/00_init/migration.sql | -- CreateTable
CREATE EXTENSION IF NOT EXISTS vector;
CREATE TABLE "Document" (
"id" TEXT NOT NULL,
"content" TEXT NOT NULL,
"namespace" TEXT DEFAULT 'default',
"vector" vector,
CONSTRAINT "Document_pkey" PRIMARY KEY ("id")
);
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/neo4j_vector/neo4j_vector_existinggraph.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { Neo4jVectorStore } from "@langchain/community/vectorstores/neo4j_vector";
/**
* `fromExistingGraph` Method:
*
* Description:
* This method initializes a `Neo4jVectorStore` instance using an existing graph in the Neo4j database.
* It's designed to work with nodes that already have textual properties but might not have embeddings.
* The method will compute and store embeddings for nodes that lack them.
*
* Note:
* This method is particularly useful when you have a pre-existing graph with textual data and you want
* to enhance it with vector embeddings for similarity searches without altering the original data structure.
*/
// Configuration object for Neo4j connection and other related settings
const config = {
  url: "bolt://localhost:7687", // URL for the Neo4j instance
  username: "neo4j", // Username for Neo4j authentication
  password: "pleaseletmein", // Password for Neo4j authentication
  indexName: "wikipedia", // Vector index to create/use
  nodeLabel: "Wikipedia", // Label of the existing nodes to index
  textNodeProperties: ["title", "description"], // Text properties to embed
  embeddingNodeProperty: "embedding", // Where computed embeddings are stored
  searchType: "hybrid" as const, // Combine vector and keyword search
};
// You should have a populated Neo4j database to use this method
const neo4jVectorIndex = await Neo4jVectorStore.fromExistingGraph(
  new OpenAIEmbeddings(),
  config
);
// Release the underlying Neo4j driver connection.
await neo4jVectorIndex.close();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/neo4j_vector/docker-compose.example.yml | services:
database:
image: neo4j
ports:
- 7687:7687
- 7474:7474
environment:
- NEO4J_AUTH=neo4j/pleaseletmein
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/neo4j_vector/neo4j_vector_retrieval.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { Neo4jVectorStore } from "@langchain/community/vectorstores/neo4j_vector";
/*
* The retrievalQuery is a customizable Cypher query fragment used in the Neo4jVectorStore class to define how
* search results should be retrieved and presented from the Neo4j database. It allows developers to specify
* the format and structure of the data returned after a similarity search.
* Mandatory columns for `retrievalQuery`:
*
* 1. text:
* - Description: Represents the textual content of the node.
* - Type: String
*
* 2. score:
* - Description: Represents the similarity score of the node in relation to the search query. A
* higher score indicates a closer match.
* - Type: Float (ranging between 0 and 1, where 1 is a perfect match)
*
* 3. metadata:
* - Description: Contains additional properties and information about the node. This can include
* any other attributes of the node that might be relevant to the application.
* - Type: Object (key-value pairs)
* - Example: { "id": "12345", "category": "Books", "author": "John Doe" }
*
* Note: While you can customize the `retrievalQuery` to fetch additional columns or perform
* transformations, never omit the mandatory columns. The names of these columns (`text`, `score`,
* and `metadata`) should remain consistent. Renaming them might lead to errors or unexpected behavior.
*/
// Configuration object for Neo4j connection and other related settings
const config = {
  url: "bolt://localhost:7687", // URL for the Neo4j instance
  username: "neo4j", // Username for Neo4j authentication
  password: "pleaseletmein", // Password for Neo4j authentication
  // Custom retrieval fragment; it must return `text`, `score` and `metadata`.
  retrievalQuery: `
    RETURN node.text AS text, score, {a: node.a * 2} AS metadata
  `,
};
const documents = [
  { pageContent: "what's this", metadata: { a: 2 } },
  { pageContent: "Cat drinks milk", metadata: { a: 1 } },
];
const neo4jVectorIndex = await Neo4jVectorStore.fromDocuments(
  documents,
  new OpenAIEmbeddings(),
  config
);
const results = await neo4jVectorIndex.similaritySearch("water", 1);
console.log(results);
/*
Note: `a` is 2 in the output because the retrievalQuery doubles node.a (1 * 2).
[ Document { pageContent: 'Cat drinks milk', metadata: { a: 2 } } ]
*/
// Release the underlying Neo4j driver connection.
await neo4jVectorIndex.close();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/neo4j_vector/neo4j_vector_metadata.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { Neo4jVectorStore } from "@langchain/community/vectorstores/neo4j_vector";
/**
* `similaritySearch` Method with Metadata Filtering:
*
* Description:
* This method facilitates advanced similarity searches within a Neo4j vector index, leveraging both text embeddings and metadata attributes.
* The third parameter, `filter`, allows for the specification of metadata-based conditions that pre-filter the nodes before performing the similarity search.
* This approach enhances the search precision by allowing users to query based on complex metadata criteria alongside textual similarity.
* Metadata filtering also support the following operators:
*
* $eq: Equal
* $ne: Not Equal
* $lt: Less than
* $lte: Less than or equal
* $gt: Greater than
* $gte: Greater than or equal
* $in: In a list of values
* $nin: Not in a list of values
* $between: Between two values
* $like: Text contains value
* $ilike: lowered text contains value
*
* The filter supports a range of query operations such as equality checks, range queries, and compound conditions (using logical operators like $and, $or).
* This makes it highly adaptable to varied use cases requiring detailed and specific retrieval of documents based on both content and contextual information.
*
* Note:
* Effective use of this method requires a well-structured Neo4j database where nodes are enriched with both text and metadata properties.
* The method is particularly useful in scenarios where the integration of text analysis with detailed metadata querying is crucial, such as in content recommendation systems, detailed archival searches, or any application where contextual relevance is key.
*/
// Configuration object for Neo4j connection and other related settings
const config = {
  url: "bolt://localhost:7687", // URL for the Neo4j instance
  username: "neo4j", // Username for Neo4j authentication
  password: "pleaseletmein", // Password for Neo4j authentication
  indexName: "vector", // Name of the vector index
  keywordIndexName: "keyword", // Name of the keyword index if using hybrid search
  searchType: "vector" as const, // Type of search (e.g., vector, hybrid)
  nodeLabel: "Chunk", // Label for the nodes in the graph
  textNodeProperty: "text", // Property of the node containing text
  embeddingNodeProperty: "embedding", // Property of the node containing embedding
};
const documents = [
  { pageContent: "what's this", metadata: { a: 2 } },
  { pageContent: "Cat drinks milk", metadata: { a: 1 } },
];
const neo4jVectorIndex = await Neo4jVectorStore.fromDocuments(
  documents,
  new OpenAIEmbeddings(),
  config
);
// Pre-filter candidates on metadata with the $eq operator, then rank by similarity.
const filter = { a: { $eq: 1 } };
const results = await neo4jVectorIndex.similaritySearch("water", 1, { filter });
console.log(results);
/*
[ Document { pageContent: 'Cat drinks milk', metadata: { a: 1 } } ]
*/
// Release the underlying Neo4j driver connection.
await neo4jVectorIndex.close();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/neo4j_vector/neo4j_vector.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { Neo4jVectorStore } from "@langchain/community/vectorstores/neo4j_vector";
// Connection and index settings for the Neo4j-backed vector store.
const neo4jSettings = {
  url: "bolt://localhost:7687", // Bolt endpoint of the Neo4j instance
  username: "neo4j", // Auth user
  password: "pleaseletmein", // Auth password
  indexName: "vector", // Vector index name
  keywordIndexName: "keyword", // Keyword index (only used by hybrid search)
  searchType: "vector" as const, // "vector" or "hybrid"
  nodeLabel: "Chunk", // Node label for stored chunks
  textNodeProperty: "text", // Node property holding the text
  embeddingNodeProperty: "embedding", // Node property holding the embedding
};

// Seed documents to embed and store.
const seedDocs = [
  { pageContent: "what's this", metadata: { a: 2 } },
  { pageContent: "Cat drinks milk", metadata: { a: 1 } },
];

const store = await Neo4jVectorStore.fromDocuments(
  seedDocs,
  new OpenAIEmbeddings(),
  neo4jSettings
);

// Ask for the single closest match.
const topMatch = await store.similaritySearch("water", 1);
console.log(topMatch);
/*
[ Document { pageContent: 'Cat drinks milk', metadata: { a: 1 } } ]
*/

// Release the underlying Neo4j driver connection.
await store.close();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/upstash/create_client.ts | import { Index } from "@upstash/vector";
import { OpenAIEmbeddings } from "@langchain/openai";
import { UpstashVectorStore } from "@langchain/community/vectorstores/upstash";
// Embeddings model shared by both store instances below.
const embeddings = new OpenAIEmbeddings({});
// Creating the index with the provided credentials.
const indexWithCredentials = new Index({
  url: process.env.UPSTASH_VECTOR_REST_URL as string,
  token: process.env.UPSTASH_VECTOR_REST_TOKEN as string,
});
const storeWithCredentials = new UpstashVectorStore(embeddings, {
  index: indexWithCredentials,
});
// Creating the index from the environment variables automatically.
// (Index() with no args reads UPSTASH_VECTOR_REST_URL / UPSTASH_VECTOR_REST_TOKEN.)
const indexFromEnv = new Index();
const storeFromEnv = new UpstashVectorStore(embeddings, {
  index: indexFromEnv,
});
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/upstash/upstash_embeddings.ts | import { Index } from "@upstash/vector";
import { Document } from "@langchain/core/documents";
import { UpstashVectorStore } from "@langchain/community/vectorstores/upstash";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
const index = new Index({
  url: process.env.UPSTASH_VECTOR_REST_URL as string,
  token: process.env.UPSTASH_VECTOR_REST_TOKEN as string,
});
// Initializing the UpstashVectorStore with the Upstash Embeddings configuration.
// Passing FakeEmbeddings here will enable the store to generate embeddings using Upstash Embeddings.
const UpstashVector = new UpstashVectorStore(new FakeEmbeddings(), { index });
// Creating the docs to be indexed.
// Timestamp used as a per-run metadata marker for the documents.
const id = new Date().getTime();
const documents = [
  new Document({
    metadata: { name: id },
    pageContent: "Hello there!",
  }),
  new Document({
    metadata: { name: id },
    pageContent: "What are you building?",
  }),
  new Document({
    metadata: { time: id },
    pageContent: "Upstash Vector is great for building AI applications.",
  }),
  new Document({
    metadata: { time: id },
    pageContent: "To be, or not to be, that is the question.",
  }),
];
// Creating embeddings from the provided documents, and adding them to Upstash database.
await UpstashVector.addDocuments(documents);
// Waiting for the vectors to be indexed in the vector store.
// eslint-disable-next-line no-promise-executor-return
await new Promise((resolve) => setTimeout(resolve, 1000));
// Top-2 matches with their similarity scores attached.
const queryResult = await UpstashVector.similaritySearchWithScore(
  "Vector database",
  2
);
console.log(queryResult);
/**
[
  [
    Document {
      pageContent: 'Upstash Vector is great for building AI applications.',
      metadata: [Object]
    },
    0.9016147
  ],
  [
    Document {
      pageContent: 'What are you building?',
      metadata: [Object]
    },
    0.8613077
  ]
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/upstash/index_and_query_docs.ts | import { Index } from "@upstash/vector";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { UpstashVectorStore } from "@langchain/community/vectorstores/upstash";
const index = new Index({
url: process.env.UPSTASH_VECTOR_REST_URL as string,
token: process.env.UPSTASH_VECTOR_REST_TOKEN as string,
});
const embeddings = new OpenAIEmbeddings({});
const UpstashVector = new UpstashVectorStore(embeddings, { index });
// Creating the docs to be indexed.
const id = new Date().getTime();
const documents = [
new Document({
metadata: { name: id },
pageContent: "Hello there!",
}),
new Document({
metadata: { name: id },
pageContent: "What are you building?",
}),
new Document({
metadata: { time: id },
pageContent: "Upstash Vector is great for building AI applications.",
}),
new Document({
metadata: { time: id },
pageContent: "To be, or not to be, that is the question.",
}),
];
// Creating embeddings from the provided documents, and adding them to Upstash database.
await UpstashVector.addDocuments(documents);
// Waiting vectors to be indexed in the vector store.
// eslint-disable-next-line no-promise-executor-return
await new Promise((resolve) => setTimeout(resolve, 1000));
const queryResult = await UpstashVector.similaritySearchWithScore(
"Vector database",
2
);
console.log(queryResult);
/**
[
[
Document {
pageContent: 'Upstash Vector is great for building AI applications.',
metadata: [Object]
},
0.9016147
],
[
Document {
pageContent: 'What are you building?',
metadata: [Object]
},
0.8613077
]
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/upstash/namespaces.ts | import { Index } from "@upstash/vector";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { UpstashVectorStore } from "@langchain/community/vectorstores/upstash";
// Connect to the Upstash Vector index configured via environment variables.
const index = new Index({
  url: process.env.UPSTASH_VECTOR_REST_URL as string,
  token: process.env.UPSTASH_VECTOR_REST_TOKEN as string,
});
const embeddings = new OpenAIEmbeddings({});
// Scope this store to a single namespace, so documents added here are kept
// separate from vectors stored under the index's other namespaces.
const UpstashVector = new UpstashVectorStore(embeddings, {
  index,
  namespace: "test-namespace",
});
// Creating the docs to be indexed.
const id = new Date().getTime();
const documents = [
  new Document({
    metadata: { name: id },
    pageContent: "Vector databases are great!",
  }),
];
// Creating embeddings from the provided documents, and adding them to target namespace in Upstash Vector database.
await UpstashVector.addDocuments(documents);
// Waiting vectors to be indexed in the vector store.
// eslint-disable-next-line no-promise-executor-return
await new Promise((resolve) => setTimeout(resolve, 1000));
// Query within the same namespace; results are [document, score] pairs.
const queryResult = await UpstashVector.similaritySearchWithScore(
  "Vector database",
  1
);
console.log(queryResult);
/**
[
  [
    Document {
      pageContent: 'Vector databases are great!',
      metadata: [Object]
    },
    0.9016147
  ],
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/upstash/delete_docs.ts | import { Index } from "@upstash/vector";
import { OpenAIEmbeddings } from "@langchain/openai";
import { UpstashVectorStore } from "@langchain/community/vectorstores/upstash";
const index = new Index({
url: process.env.UPSTASH_VECTOR_REST_URL as string,
token: process.env.UPSTASH_VECTOR_REST_TOKEN as string,
});
const embeddings = new OpenAIEmbeddings({});
const UpstashVector = new UpstashVectorStore(embeddings, { index });
// Creating the docs to be indexed.
const createdAt = new Date().getTime();
const IDs = await UpstashVector.addDocuments([
{ pageContent: "hello", metadata: { a: createdAt + 1 } },
{ pageContent: "car", metadata: { a: createdAt } },
{ pageContent: "adjective", metadata: { a: createdAt } },
{ pageContent: "hi", metadata: { a: createdAt } },
]);
// Waiting vectors to be indexed in the vector store.
// eslint-disable-next-line no-promise-executor-return
await new Promise((resolve) => setTimeout(resolve, 1000));
await UpstashVector.delete({ ids: [IDs[0], IDs[2], IDs[3]] });
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/opensearch/docker-compose.yml | # Reference:
# https://opensearch.org/docs/latest/install-and-configure/install-opensearch/docker/#sample-docker-composeyml
version: "3"
services:
  opensearch:
    image: opensearchproject/opensearch:2.6.0
    container_name: opensearch
    # Single-node development setup: the demo config and security plugin are
    # disabled, so the cluster is unauthenticated — do not expose it publicly.
    environment:
      - cluster.name=opensearch
      - node.name=opensearch
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
      - "DISABLE_INSTALL_DEMO_CONFIG=true"
      - "DISABLE_SECURITY_PLUGIN=true"
    # Allow the container to lock memory (pairs with bootstrap.memory_lock).
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - opensearch_data:/usr/share/opensearch/data
    ports:
      - 9200:9200 # REST API
      - 9600:9600 # Performance Analyzer
    networks:
      - opensearch
  opensearch-dashboards:
    image: opensearchproject/opensearch-dashboards:latest # Make sure the version of opensearch-dashboards matches the version of opensearch installed on other nodes
    container_name: opensearch-dashboards
    ports:
      - 5601:5601 # Map host port 5601 to container port 5601
    expose:
      - "5601" # Expose port 5601 for web access to OpenSearch Dashboards
    environment:
      OPENSEARCH_HOSTS: '["http://opensearch:9200"]' # Define the OpenSearch nodes that OpenSearch Dashboards will query
      DISABLE_SECURITY_DASHBOARDS_PLUGIN: "true" # disables security dashboards plugin in OpenSearch Dashboards
    networks:
      - opensearch
networks:
  opensearch:
volumes:
  opensearch_data:
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/opensearch/opensearch.ts | import { Client } from "@opensearch-project/opensearch";
import { OpenAIEmbeddings } from "@langchain/openai";
import { OpenSearchVectorStore } from "@langchain/community/vectorstores/opensearch";
import * as uuid from "uuid";
import { Document } from "@langchain/core/documents";
/**
 * Demonstrates two ways of populating OpenSearch vector indexes:
 * 1. `fromTexts` — embeds raw strings with OpenAI and indexes them.
 * 2. `addVectors` — inserts precomputed vectors with explicit ids.
 */
export async function run() {
  // Connect to the OpenSearch cluster (defaults to a local single node).
  const client = new Client({
    nodes: [process.env.OPENSEARCH_URL ?? "http://127.0.0.1:9200"],
  });
  const embeddings = new OpenAIEmbeddings();
  // Embed three short texts and index them into the "documents" index.
  const vectorStore = await OpenSearchVectorStore.fromTexts(
    ["Hello world", "Bye bye", "What's this?"],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    embeddings,
    {
      client,
      indexName: "documents",
    }
  );
  const resultOne = await vectorStore.similaritySearch("Hello world", 1);
  console.log(resultOne);
  // Second store showing manual vector insertion into a separate index.
  const vectorStore2 = new OpenSearchVectorStore(embeddings, {
    client,
    indexName: "custom",
  });
  const documents = [
    new Document({
      pageContent: "Do I dare to eat an apple?",
      metadata: {
        foo: "baz",
      },
    }),
    new Document({
      pageContent: "There is no better place than the hotel lobby",
      metadata: {
        foo: "bar",
      },
    }),
    new Document({
      pageContent: "OpenSearch is a powerful vector db",
      metadata: {
        foo: "bat",
      },
    }),
  ];
  // Hand-rolled 3-dimensional vectors ([i, i+1, i+2]) instead of real
  // embeddings, paired one-to-one with the documents above.
  const vectors = Array.from({ length: documents.length }, (_, i) => [
    i,
    i + 1,
    i + 2,
  ]);
  // Explicit ids so the stored entries can be addressed later.
  const ids = Array.from({ length: documents.length }, () => uuid.v4());
  await vectorStore2.addVectors(vectors, documents, { ids });
  // Search by a raw vector; results are [document, score] pairs.
  const resultTwo = await vectorStore2.similaritySearchVectorWithScore(
    vectors[0],
    3
  );
  console.log(resultTwo);
}
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex/fromTexts.ts | "use node";
import { ConvexVectorStore } from "@langchain/community/vectorstores/convex";
import { OpenAIEmbeddings } from "@langchain/openai";
import { action } from "./_generated/server.js";
// Convex action that embeds three sample texts with OpenAI and stores the
// resulting vectors via ConvexVectorStore, using the action context `ctx`
// to write into the Convex database.
export const ingest = action({
  args: {},
  handler: async (ctx) => {
    await ConvexVectorStore.fromTexts(
      ["Hello world", "Bye bye", "What's this?"],
      [{ prop: 2 }, { prop: 1 }, { prop: 3 }],
      new OpenAIEmbeddings(),
      { ctx }
    );
  },
});
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex/search.ts | "use node";
import { ConvexVectorStore } from "@langchain/community/vectorstores/convex";
import { OpenAIEmbeddings } from "@langchain/openai";
import { v } from "convex/values";
import { action } from "./_generated/server.js";
// Convex action that embeds the incoming query string and logs the single
// closest document previously ingested into the ConvexVectorStore.
export const search = action({
  args: {
    query: v.string(),
  },
  handler: async (ctx, args) => {
    const vectorStore = new ConvexVectorStore(new OpenAIEmbeddings(), { ctx });
    const resultOne = await vectorStore.similaritySearch(args.query, 1);
    console.log(resultOne);
  },
});
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex/_generated/server.d.ts | /* eslint-disable */
/**
* Generated utilities for implementing server-side Convex query and mutation functions.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import {
ActionBuilder,
HttpActionBuilder,
MutationBuilder,
QueryBuilder,
GenericActionCtx,
GenericMutationCtx,
GenericQueryCtx,
GenericDatabaseReader,
GenericDatabaseWriter,
} from "convex/server";
import type { DataModel } from "./dataModel.js";
/**
* Define a query in this Convex app's public API.
*
* This function will be allowed to read your Convex database and will be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export declare const query: QueryBuilder<DataModel, "public">;
/**
* Define a query that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to read from your Convex database. It will not be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export declare const internalQuery: QueryBuilder<DataModel, "internal">;
/**
* Define a mutation in this Convex app's public API.
*
* This function will be allowed to modify your Convex database and will be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export declare const mutation: MutationBuilder<DataModel, "public">;
/**
* Define a mutation that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to modify your Convex database. It will not be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export declare const internalMutation: MutationBuilder<DataModel, "internal">;
/**
* Define an action in this Convex app's public API.
*
* An action is a function which can execute any JavaScript code, including non-deterministic
* code and code with side-effects, like calling third-party services.
* They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
* They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
*
* @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/
export declare const action: ActionBuilder<DataModel, "public">;
/**
* Define an action that is only accessible from other Convex functions (but not from the client).
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/
export declare const internalAction: ActionBuilder<DataModel, "internal">;
/**
* Define an HTTP action.
*
* This function will be used to respond to HTTP requests received by a Convex
* deployment if the requests matches the path and method where this action
* is routed. Be sure to route your action in `convex/http.js`.
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Import this function from `convex/http.js` and route it to hook it up.
*/
export declare const httpAction: HttpActionBuilder;
/**
* A set of services for use within Convex query functions.
*
* The query context is passed as the first argument to any Convex query
* function run on the server.
*
* This differs from the {@link MutationCtx} because all of the services are
* read-only.
*/
export type QueryCtx = GenericQueryCtx<DataModel>;
/**
* A set of services for use within Convex mutation functions.
*
* The mutation context is passed as the first argument to any Convex mutation
* function run on the server.
*/
export type MutationCtx = GenericMutationCtx<DataModel>;
/**
* A set of services for use within Convex action functions.
*
* The action context is passed as the first argument to any Convex action
* function run on the server.
*/
export type ActionCtx = GenericActionCtx<DataModel>;
/**
* An interface to read from the database within Convex query functions.
*
* The two entry points are {@link DatabaseReader.get}, which fetches a single
* document by its {@link Id}, or {@link DatabaseReader.query}, which starts
* building a query.
*/
export type DatabaseReader = GenericDatabaseReader<DataModel>;
/**
* An interface to read from and write to the database within Convex mutation
* functions.
*
* Convex guarantees that all writes within a single mutation are
* executed atomically, so you never have to worry about partial writes leaving
* your data in an inconsistent state. See [the Convex Guide](https://docs.convex.dev/understanding/convex-fundamentals/functions#atomicity-and-optimistic-concurrency-control)
* for the guarantees Convex provides your functions.
*/
export type DatabaseWriter = GenericDatabaseWriter<DataModel>;
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex/_generated/api.js | /* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import { anyApi } from "convex/server";
/**
* A utility for referencing Convex functions in your app's API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
export const api = anyApi;
export const internal = anyApi;
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex/_generated/server.js | /* eslint-disable */
/**
* Generated utilities for implementing server-side Convex query and mutation functions.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import {
actionGeneric,
httpActionGeneric,
queryGeneric,
mutationGeneric,
internalActionGeneric,
internalMutationGeneric,
internalQueryGeneric,
} from "convex/server";
/**
* Define a query in this Convex app's public API.
*
* This function will be allowed to read your Convex database and will be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export const query = queryGeneric;
/**
* Define a query that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to read from your Convex database. It will not be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export const internalQuery = internalQueryGeneric;
/**
* Define a mutation in this Convex app's public API.
*
* This function will be allowed to modify your Convex database and will be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export const mutation = mutationGeneric;
/**
* Define a mutation that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to modify your Convex database. It will not be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export const internalMutation = internalMutationGeneric;
/**
* Define an action in this Convex app's public API.
*
* An action is a function which can execute any JavaScript code, including non-deterministic
* code and code with side-effects, like calling third-party services.
* They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
* They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
*
* @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/
export const action = actionGeneric;
/**
* Define an action that is only accessible from other Convex functions (but not from the client).
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/
export const internalAction = internalActionGeneric;
/**
* Define a Convex HTTP action.
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument, and a `Request` object
* as its second.
* @returns The wrapped endpoint function. Route a URL path to this function in `convex/http.js`.
*/
export const httpAction = httpActionGeneric;
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex/_generated/dataModel.d.ts | /* eslint-disable */
/**
* Generated data model types.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import { AnyDataModel } from "convex/server";
import type { GenericId } from "convex/values";
/**
* No `schema.ts` file found!
*
* This generated code has permissive types like `Doc = any` because
* Convex doesn't know your schema. If you'd like more type safety, see
* https://docs.convex.dev/using/schemas for instructions on how to add a
* schema file.
*
* After you change a schema, rerun codegen with `npx convex dev`.
*/
/**
* The names of all of your Convex tables.
*/
export type TableNames = string;
/**
* The type of a document stored in Convex.
*/
export type Doc = any;
/**
* An identifier for a document in Convex.
*
* Convex documents are uniquely identified by their `Id`, which is accessible
* on the `_id` field. To learn more, see [Document IDs](https://docs.convex.dev/using/document-ids).
*
* Documents can be loaded using `db.get(id)` in query and mutation functions.
*
* IDs are just strings at runtime, but this type can be used to distinguish them from other
* strings when type checking.
*/
export type Id<TableName extends TableNames = TableNames> =
GenericId<TableName>;
/**
* A type describing your Convex data model.
*
* This type includes information about what tables you have, the type of
* documents stored in those tables, and the indexes defined on them.
*
* This type is used to parameterize methods like `queryGeneric` and
* `mutationGeneric` to make them type-safe.
*/
export type DataModel = AnyDataModel;
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/convex/_generated/api.d.ts | /* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* Generated by convex@1.3.1.
* To regenerate, run `npx convex dev`.
* @module
*/
import type {
ApiFromModules,
FilterApi,
FunctionReference,
} from "convex/server";
/**
* A utility for referencing Convex functions in your app's API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
declare const fullApi: ApiFromModules<{}>;
export declare const api: FilterApi<
typeof fullApi,
FunctionReference<any, "public">
>;
export declare const internal: FilterApi<
typeof fullApi,
FunctionReference<any, "internal">
>;
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/redis/docker-compose.yml | version: "3"
services:
  redis:
    container_name: redis-stack
    # redis-stack ships the search/vector modules that RedisVectorStore
    # needs for its indexes; the plain `redis` image lacks them.
    image: redis/redis-stack:latest
    ports:
      - 6379:6379
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/redis/redis_delete.ts | import { createClient } from "redis";
import { OpenAIEmbeddings } from "@langchain/openai";
import { RedisVectorStore } from "@langchain/redis";
import { Document } from "@langchain/core/documents";

// Connect to Redis Stack, defaulting to a local instance.
const client = createClient({
  url: process.env.REDIS_URL ?? "redis://localhost:6379",
});

await client.connect();

// Build the sample documents from (metadata, text) pairs.
const entries: Array<[Record<string, string>, string]> = [
  [{ foo: "bar" }, "redis is fast"],
  [{ foo: "bar" }, "the quick brown fox jumped over the lazy dog"],
  [{ baz: "qux" }, "lorem ipsum dolor sit amet"],
  [{ baz: "qux" }, "consectetur adipiscing elit"],
];
const docs = entries.map(
  ([metadata, pageContent]) => new Document({ metadata, pageContent })
);

// Embed and index the documents into "docs"...
const vectorStore = await RedisVectorStore.fromDocuments(
  docs,
  new OpenAIEmbeddings(),
  {
    redisClient: client,
    indexName: "docs",
  }
);

// ...then wipe everything from the index again.
await vectorStore.delete({ deleteAll: true });

await client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/redis/redis_query.ts | import { createClient } from "redis";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RedisVectorStore } from "@langchain/redis";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
const client = createClient({
  url: process.env.REDIS_URL ?? "redis://localhost:6379",
});
await client.connect();
// Attach to the existing "docs" index; no documents are added by this script.
const vectorStore = new RedisVectorStore(new OpenAIEmbeddings(), {
  redisClient: client,
  indexName: "docs",
});
/* Simple standalone search in the vector DB */
const simpleRes = await vectorStore.similaritySearch("redis", 1);
console.log(simpleRes);
/*
[
  Document {
    pageContent: "redis is fast",
    metadata: { foo: "bar" }
  }
]
*/
/* Search in the vector DB using filters */
// NOTE(review): the third argument narrows results by metadata value — the
// sample output shows only docs tagged "qux" coming back; confirm the exact
// filter semantics against the RedisVectorStore documentation.
const filterRes = await vectorStore.similaritySearch("redis", 3, ["qux"]);
console.log(filterRes);
/*
[
  Document {
    pageContent: "consectetur adipiscing elit",
    metadata: { baz: "qux" },
  },
  Document {
    pageContent: "lorem ipsum dolor sit amet",
    metadata: { baz: "qux" },
  }
]
*/
/* Usage as part of a chain */
const model = new ChatOpenAI({ model: "gpt-3.5-turbo-1106" });
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "Answer the user's questions based on the below context:\n\n{context}",
  ],
  ["human", "{input}"],
]);
// Retrieved documents are stuffed into {context} and answered by the LLM.
const combineDocsChain = await createStuffDocumentsChain({
  llm: model,
  prompt: questionAnsweringPrompt,
});
const chain = await createRetrievalChain({
  retriever: vectorStore.asRetriever(),
  combineDocsChain,
});
const chainRes = await chain.invoke({ input: "What did the fox do?" });
console.log(chainRes);
/*
  {
    input: 'What did the fox do?',
    chat_history: [],
    context: [
      Document {
        pageContent: 'the quick brown fox jumped over the lazy dog',
        metadata: [Object]
      },
      Document {
        pageContent: 'lorem ipsum dolor sit amet',
        metadata: [Object]
      },
      Document {
        pageContent: 'consectetur adipiscing elit',
        metadata: [Object]
      },
      Document { pageContent: 'redis is fast', metadata: [Object] }
    ],
    answer: 'The fox jumped over the lazy dog.'
  }
*/
await client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/redis/redis_index_options.ts | import { createClient } from "redis";
import { OpenAIEmbeddings } from "@langchain/openai";
import { RedisVectorStore } from "@langchain/redis";
import { Document } from "@langchain/core/documents";

// Connect to Redis Stack at REDIS_URL, defaulting to a local instance.
const client = createClient({
  url: process.env.REDIS_URL ?? "redis://localhost:6379",
});

await client.connect();

// Sample documents to embed and index.
const docs = [
  new Document({
    metadata: { foo: "bar" },
    pageContent: "redis is fast",
  }),
  new Document({
    metadata: { foo: "bar" },
    pageContent: "the quick brown fox jumped over the lazy dog",
  }),
  new Document({
    metadata: { baz: "qux" },
    pageContent: "lorem ipsum dolor sit amet",
  }),
  new Document({
    metadata: { baz: "qux" },
    pageContent: "consectetur adipiscing elit",
  }),
];

// Embed the documents and create the "docs" index, forwarding extra options
// to the underlying FT.CREATE call. TEMPORARY tells RediSearch to drop the
// index after it has been idle for the given number of seconds.
// The returned store instance is not needed afterwards, so the previously
// unused `vectorStore` binding has been removed.
await RedisVectorStore.fromDocuments(docs, new OpenAIEmbeddings(), {
  redisClient: client,
  indexName: "docs",
  createIndexOptions: {
    TEMPORARY: 1000,
  },
});

await client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/redis/redis.ts | import { createClient } from "redis";
import { OpenAIEmbeddings } from "@langchain/openai";
import { RedisVectorStore } from "@langchain/redis";
import { Document } from "@langchain/core/documents";

// Connect to Redis Stack at REDIS_URL, defaulting to a local instance.
const client = createClient({
  url: process.env.REDIS_URL ?? "redis://localhost:6379",
});

await client.connect();

// Sample documents to embed and index.
const docs = [
  new Document({
    metadata: { foo: "bar" },
    pageContent: "redis is fast",
  }),
  new Document({
    metadata: { foo: "bar" },
    pageContent: "the quick brown fox jumped over the lazy dog",
  }),
  new Document({
    metadata: { baz: "qux" },
    pageContent: "lorem ipsum dolor sit amet",
  }),
  new Document({
    metadata: { baz: "qux" },
    pageContent: "consectetur adipiscing elit",
  }),
];

// Embed the documents and index them into "docs". The returned store
// instance is not needed afterwards, so the previously unused `vectorStore`
// binding has been removed.
await RedisVectorStore.fromDocuments(docs, new OpenAIEmbeddings(), {
  redisClient: client,
  indexName: "docs",
});

await client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/azure_cosmosdb_nosql/azure_cosmosdb_nosql.ts | import { AzureCosmosDBNoSQLVectorStore } from "@langchain/azure-cosmosdb";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
// Load documents from file
const loader = new TextLoader("./state_of_the_union.txt");
const rawDocuments = await loader.load();
// Split into ~1000-character chunks with no overlap so each chunk is
// embedded independently.
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 1000,
  chunkOverlap: 0,
});
const documents = await splitter.splitDocuments(rawDocuments);
// Create Azure Cosmos DB vector store
const store = await AzureCosmosDBNoSQLVectorStore.fromDocuments(
  documents,
  new OpenAIEmbeddings(),
  {
    databaseName: "langchain",
    containerName: "documents",
  }
);
// Performs a similarity search
const resultDocuments = await store.similaritySearch(
  "What did the president say about Ketanji Brown Jackson?"
);
console.log("Similarity search results:");
console.log(resultDocuments[0].pageContent);
/*
  Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.

  Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.

  One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.

  And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
*/
// Use the store as part of a chain
// Retrieved documents are stuffed into {context} and answered by the LLM.
const model = new ChatOpenAI({ model: "gpt-3.5-turbo-1106" });
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "Answer the user's questions based on the below context:\n\n{context}",
  ],
  ["human", "{input}"],
]);
const combineDocsChain = await createStuffDocumentsChain({
  llm: model,
  prompt: questionAnsweringPrompt,
});
const chain = await createRetrievalChain({
  retriever: store.asRetriever(),
  combineDocsChain,
});
const res = await chain.invoke({
  input: "What is the president's top priority regarding prices?",
});
console.log("Chain response:");
console.log(res.answer);
/*
  The president's top priority is getting prices under control.
*/
// Clean up
// NOTE(review): store.delete() with no arguments presumably removes all of
// the demo's stored data — confirm before running against real data.
await store.delete();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/azure_cosmosdb_nosql/azure_cosmosdb_nosql-managed_identity.ts | import { AzureCosmosDBNoSQLVectorStore } from "@langchain/azure-cosmosdb";
import { OpenAIEmbeddings } from "@langchain/openai";
// Create Azure Cosmos DB vector store
// NOTE(review): only an endpoint (no key/connection string) is supplied, so
// authentication presumably goes through Azure AD (e.g. a managed identity)
// — confirm against the @langchain/azure-cosmosdb documentation.
const store = new AzureCosmosDBNoSQLVectorStore(new OpenAIEmbeddings(), {
  // Or use environment variable AZURE_COSMOSDB_NOSQL_ENDPOINT
  endpoint: "https://my-cosmosdb.documents.azure.com:443/",
  // Database and container must already exist
  databaseName: "my-database",
  containerName: "my-container",
});
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/azure_cosmosdb_nosql/.env.example | # Use connection string to authenticate
AZURE_COSMOSDB_NOSQL_CONNECTION_STRING=
# Use managed identity to authenticate
AZURE_COSMOSDB_NOSQL_ENDPOINT=
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/cloudflare_vectorize/example.ts | // @ts-nocheck
import type {
VectorizeIndex,
Fetcher,
Request,
} from "@cloudflare/workers-types";
import {
CloudflareVectorizeStore,
CloudflareWorkersAIEmbeddings,
} from "@langchain/cloudflare";
export interface Env {
VECTORIZE_INDEX: VectorizeIndex;
AI: Fetcher;
}
// Worker entrypoint: routes "/", "/load" and "/clear" to vector store
// operations against the bound Vectorize index.
export default {
  async fetch(request: Request, env: Env) {
    const { pathname } = new URL(request.url);
    // Embeddings are computed by the Workers AI binding.
    const embeddings = new CloudflareWorkersAIEmbeddings({
      binding: env.AI,
      model: "@cf/baai/bge-small-en-v1.5",
    });
    const store = new CloudflareVectorizeStore(embeddings, {
      index: env.VECTORIZE_INDEX,
    });
    switch (pathname) {
      case "/": {
        // Return the five nearest neighbours of "hello".
        const results = await store.similaritySearch("hello", 5);
        return Response.json(results);
      }
      case "/load": {
        // Upsertion by id is supported
        await store.addDocuments(
          [
            { pageContent: "hello", metadata: {} },
            { pageContent: "world", metadata: {} },
            { pageContent: "hi", metadata: {} },
          ],
          { ids: ["id1", "id2", "id3"] }
        );
        return Response.json({ success: true });
      }
      case "/clear": {
        await store.delete({ ids: ["id1", "id2", "id3"] });
        return Response.json({ success: true });
      }
      default:
        return Response.json({ error: "Not Found" }, { status: 404 });
    }
  },
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hana_vector/advancedFiltering.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import hanaClient from "hdb";
import { Document } from "@langchain/core/documents";
import {
HanaDB,
HanaDBArgs,
} from "@langchain/community/vectorstores/hanavector";
// SAP HANA connection settings, taken from the environment (see .env.example).
const connectionParams = {
  host: process.env.HANA_HOST,
  port: process.env.HANA_PORT,
  user: process.env.HANA_UID,
  password: process.env.HANA_PWD,
};
const client = hanaClient.createClient(connectionParams);
// Connect to SAP HANA. The driver's callback API is wrapped in a promise so
// the top-level await fails fast if the connection cannot be established.
await new Promise<void>((resolve, reject) => {
  client.connect((err: Error) => {
    if (err) {
      reject(err);
    } else {
      console.log("Connected to SAP HANA successfully.");
      resolve();
    }
  });
});
// Sample documents; their metadata fields (name, is_active, id, height) are
// what the advanced-filter demos below query against.
const docs: Document[] = [
  {
    pageContent: "First",
    metadata: { name: "adam", is_active: true, id: 1, height: 10.0 },
  },
  {
    pageContent: "Second",
    metadata: { name: "bob", is_active: false, id: 2, height: 5.7 },
  },
  {
    pageContent: "Third",
    metadata: { name: "jane", is_active: true, id: 3, height: 2.4 },
  },
];
// Initialize embeddings
const embeddings = new OpenAIEmbeddings();
const args: HanaDBArgs = {
  connection: client,
  tableName: "testAdvancedFilters",
};
// Create a LangChain VectorStore interface for the HANA database and specify the table (collection) to use in args.
const vectorStore = new HanaDB(embeddings, args);
// need to initialize once an instance is created.
await vectorStore.initialize();
// Delete already existing documents from the table so each run starts clean.
await vectorStore.delete({ filter: {} });
await vectorStore.addDocuments(docs);
// Helper: log the metadata of every matched document, or a placeholder
// string when the search returned nothing.
function printFilterResult(result: Document[]) {
  if (!result.length) {
    console.log("<empty result>");
    return;
  }
  for (const doc of result) {
    console.log(doc.metadata);
  }
}
// Each demo below builds a filter object, runs a similarity search with it,
// and prints the metadata of the matching documents.
let advancedFilter;
// Not equal
advancedFilter = { id: { $ne: 1 } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"id":{"$ne":1}}
  { name: 'bob', is_active: false, id: 2, height: 5.7 }
  { name: 'jane', is_active: true, id: 3, height: 2.4 }
*/
// Between range
advancedFilter = { id: { $between: [1, 2] } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"id":{"$between":[1,2]}}
  { name: 'adam', is_active: true, id: 1, height: 10 }
  { name: 'bob', is_active: false, id: 2, height: 5.7 } */
// In list
advancedFilter = { name: { $in: ["adam", "bob"] } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"name":{"$in":["adam","bob"]}}
  { name: 'adam', is_active: true, id: 1, height: 10 }
  { name: 'bob', is_active: false, id: 2, height: 5.7 } */
// Not in list
advancedFilter = { name: { $nin: ["adam", "bob"] } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"name":{"$nin":["adam","bob"]}}
  { name: 'jane', is_active: true, id: 3, height: 2.4 } */
// Greater than
advancedFilter = { id: { $gt: 1 } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"id":{"$gt":1}}
  { name: 'bob', is_active: false, id: 2, height: 5.7 }
  { name: 'jane', is_active: true, id: 3, height: 2.4 } */
// Greater than or equal to
advancedFilter = { id: { $gte: 1 } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"id":{"$gte":1}}
  { name: 'adam', is_active: true, id: 1, height: 10 }
  { name: 'bob', is_active: false, id: 2, height: 5.7 }
  { name: 'jane', is_active: true, id: 3, height: 2.4 } */
// Less than
advancedFilter = { id: { $lt: 1 } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"id":{"$lt":1}}
  <empty result> */
// Less than or equal to
advancedFilter = { id: { $lte: 1 } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"id":{"$lte":1}}
  { name: 'adam', is_active: true, id: 1, height: 10 } */
// Text filtering with $like (SQL LIKE semantics: % matches any substring)
advancedFilter = { name: { $like: "a%" } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"name":{"$like":"a%"}}
  { name: 'adam', is_active: true, id: 1, height: 10 } */
advancedFilter = { name: { $like: "%a%" } };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"name":{"$like":"%a%"}}
  { name: 'adam', is_active: true, id: 1, height: 10 }
  { name: 'jane', is_active: true, id: 3, height: 2.4 } */
// Combined filtering with $or
advancedFilter = { $or: [{ id: 1 }, { name: "bob" }] };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"$or":[{"id":1},{"name":"bob"}]}
  { name: 'adam', is_active: true, id: 1, height: 10 }
  { name: 'bob', is_active: false, id: 2, height: 5.7 } */
// Combined filtering with $and
advancedFilter = { $and: [{ id: 1 }, { id: 2 }] };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"$and":[{"id":1},{"id":2}]}
  <empty result> */
advancedFilter = { $or: [{ id: 1 }, { id: 2 }, { id: 3 }] };
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"$or":[{"id":1},{"id":2},{"id":3}]}
  { name: 'adam', is_active: true, id: 1, height: 10 }
  { name: 'bob', is_active: false, id: 2, height: 5.7 }
  { name: 'jane', is_active: true, id: 3, height: 2.4 } */
// You can also define a nested filter with $and and $or.
advancedFilter = {
  $and: [{ $or: [{ id: 1 }, { id: 2 }] }, { height: { $gte: 5.0 } }],
};
console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
printFilterResult(
  await vectorStore.similaritySearch("just testing", 5, advancedFilter)
);
/* Filter: {"$and":[{"$or":[{"id":1},{"id":2}]},{"height":{"$gte":5.0}}]}
  { name: 'adam', is_active: true, id: 1, height: 10 }
  { name: 'bob', is_active: false, id: 2, height: 5.7 } */
// Disconnect from SAP HANA after the operations
client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hana_vector/fromDocs.ts | import hanaClient from "hdb";
import {
HanaDB,
HanaDBArgs,
} from "@langchain/community/vectorstores/hanavector";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { CharacterTextSplitter } from "@langchain/textsplitters";
// SAP HANA connection settings, taken from the environment (see .env.example).
const connectionParams = {
  host: process.env.HANA_HOST,
  port: process.env.HANA_PORT,
  user: process.env.HANA_UID,
  password: process.env.HANA_PWD,
  // useCesu8 : false
};
const client = hanaClient.createClient(connectionParams);
// Connect to SAP HANA; the callback API is wrapped in a promise so the
// top-level await fails fast when the connection cannot be established.
await new Promise<void>((resolve, reject) => {
  client.connect((err: Error) => {
    if (err) {
      reject(err);
    } else {
      console.log("Connected to SAP HANA successfully.");
      resolve();
    }
  });
});
const embeddings = new OpenAIEmbeddings();
const args: HanaDBArgs = {
  connection: client,
  tableName: "test_fromDocs",
};
// Load documents from file
const loader = new TextLoader("./state_of_the_union.txt");
const rawDocuments = await loader.load();
const splitter = new CharacterTextSplitter({
  chunkSize: 500,
  chunkOverlap: 0,
});
const documents = await splitter.splitDocuments(rawDocuments);
// Create a LangChain VectorStore interface for the HANA database and specify the table (collection) to use in args.
const vectorStore = new HanaDB(embeddings, args);
await vectorStore.initialize();
// Delete already existing documents from the table so each run starts clean.
await vectorStore.delete({ filter: {} });
// add the loaded document chunks
await vectorStore.addDocuments(documents);
// similarity search (default: "Cosine Similarity", options: ["euclidean", "cosine"])
const query = "What did the president say about Ketanji Brown Jackson";
const docs = await vectorStore.similaritySearch(query, 2);
docs.forEach((doc) => {
  console.log("-".repeat(80));
  console.log(doc.pageContent);
});
/*
  --------------------------------------------------------------------------------
  One of the most serious constitutional responsibilities a President has is nominating
  someone to serve on the United States Supreme Court.
  And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson.
  One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
  --------------------------------------------------------------------------------
  As I said last year, especially to our younger transgender Americans, I will always have your back as your President,
  so you can be yourself and reach your God-given potential.
  While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year.
  From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice
*/
// similarity search using the euclidean distance method
const argsL2d: HanaDBArgs = {
  connection: client,
  tableName: "test_fromDocs",
  distanceStrategy: "euclidean",
};
const vectorStoreL2d = new HanaDB(embeddings, argsL2d);
const docsL2d = await vectorStoreL2d.similaritySearch(query, 2);
docsL2d.forEach((docsL2d) => {
  console.log("-".repeat(80));
  console.log(docsL2d.pageContent);
});
// Output should be the same as the cosine similarity search method.
// Maximal Marginal Relevance Search (MMR): fetches fetchK candidates, then
// picks k results balancing relevance and diversity.
const docsMMR = await vectorStore.maxMarginalRelevanceSearch(query, {
  k: 2,
  fetchK: 20,
});
docsMMR.forEach((docsMMR) => {
  console.log("-".repeat(80));
  console.log(docsMMR.pageContent);
});
/*
  --------------------------------------------------------------------------------
  One of the most serious constitutional responsibilities a President has is nominating someone
  to serve on the United States Supreme Court.
  And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson.
  One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
  --------------------------------------------------------------------------------
  Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned
  soldiers defending their homeland.
  In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.”
  The Ukrainian Ambassador to the United States is here tonight.
  Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world.
*/
client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hana_vector/basics.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import hanaClient from "hdb";
// or import another node.js driver
// import hanaClient from "@sap/hana-client";
import { Document } from "@langchain/core/documents";
import {
HanaDB,
HanaDBArgs,
} from "@langchain/community/vectorstores/hanavector";
// SAP HANA connection settings, taken from the environment (see .env.example).
const connectionParams = {
  host: process.env.HANA_HOST,
  port: process.env.HANA_PORT,
  user: process.env.HANA_UID,
  password: process.env.HANA_PWD,
  // useCesu8 : false
};
const client = hanaClient.createClient(connectionParams);
// Connect to SAP HANA; the callback API is wrapped in a promise so the
// top-level await fails fast when the connection cannot be established.
await new Promise<void>((resolve, reject) => {
  client.connect((err: Error) => {
    if (err) {
      reject(err);
    } else {
      console.log("Connected to SAP HANA successfully.");
      resolve();
    }
  });
});
const embeddings = new OpenAIEmbeddings();
// define instance args
const args: HanaDBArgs = {
  connection: client,
  tableName: "testBasics",
};
// Add documents with metadata; the "quality" field is used for filtering below.
const docs: Document[] = [
  {
    pageContent: "foo",
    metadata: { start: 100, end: 150, docName: "foo.txt", quality: "bad" },
  },
  {
    pageContent: "bar",
    metadata: { start: 200, end: 250, docName: "bar.txt", quality: "good" },
  },
];
// Create a LangChain VectorStore interface for the HANA database and specify the table (collection) to use in args.
const vectorStore = new HanaDB(embeddings, args);
// need to initialize once an instance is created.
await vectorStore.initialize();
// Delete already existing documents from the table so each run starts clean.
await vectorStore.delete({ filter: {} });
await vectorStore.addDocuments(docs);
// Query documents with specific metadata.
const filterMeta = { quality: "bad" };
const query = "foobar";
// With filtering on {"quality": "bad"}, only one document should be returned
const results = await vectorStore.similaritySearch(query, 1, filterMeta);
console.log(results);
/*
  [ {
      pageContent: "foo",
      metadata: { start: 100, end: 150, docName: "foo.txt", quality: "bad" }
    }
  ]
*/
// Delete documents with specific metadata.
await vectorStore.delete({ filter: filterMeta });
// Now the similarity search with the same filter will return no results
const resultsAfterFilter = await vectorStore.similaritySearch(
  query,
  1,
  filterMeta
);
console.log(resultsAfterFilter);
/*
  []
*/
client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hana_vector/hdb.d.ts | declare module "hdb"; // Ambient declaration: the "hdb" driver ships no TypeScript types, so this silences the missing-types error for the examples.
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hana_vector/chains.ts | import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
import hanaClient from "hdb";
import {
HanaDB,
HanaDBArgs,
} from "@langchain/community/vectorstores/hanavector";
// Connection parameters, taken from the environment (see .env.example).
const connectionParams = {
  host: process.env.HANA_HOST,
  port: process.env.HANA_PORT,
  user: process.env.HANA_UID,
  password: process.env.HANA_PWD,
  // useCesu8 : false
};
const client = hanaClient.createClient(connectionParams);
// Connect to SAP HANA; the callback API is wrapped in a promise so the
// top-level await fails fast when the connection cannot be established.
await new Promise<void>((resolve, reject) => {
  client.connect((err: Error) => {
    if (err) {
      reject(err);
    } else {
      console.log("Connected to SAP HANA successfully.");
      resolve();
    }
  });
});
const embeddings = new OpenAIEmbeddings();
// Re-open the table populated by the fromDocs.ts example; this script assumes
// "test_fromDocs" already exists and contains the chunked documents.
const args: HanaDBArgs = {
  connection: client,
  tableName: "test_fromDocs",
};
const vectorStore = new HanaDB(embeddings, args);
await vectorStore.initialize();
// Use the store as part of a chain, under the premise that "test_fromDocs" exists and contains the chunked docs.
// NOTE: `model` replaces the deprecated `modelName` alias so this example uses
// the same option name as the other chat-model examples in this repo.
const model = new ChatOpenAI({ model: "gpt-3.5-turbo-1106" });
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are an expert in state of the union topics. You are provided multiple context items that are related to the prompt you have to answer. Use the following pieces of context to answer the question at the end.\n\n{context}",
  ],
  ["human", "{input}"],
]);
// "Stuffs" all retrieved documents into the {context} slot of the prompt.
const combineDocsChain = await createStuffDocumentsChain({
  llm: model,
  prompt: questionAnsweringPrompt,
});
// Retrieval chain: fetches relevant chunks from the vector store, then
// answers with the combine-docs chain above.
const chain = await createRetrievalChain({
  retriever: vectorStore.asRetriever(),
  combineDocsChain,
});
// Ask the first question (and verify how many text chunks have been used).
const response = await chain.invoke({
  input: "What about Mexico and Guatemala?",
});
console.log("Chain response:");
console.log(response.answer);
console.log(
  `Number of used source document chunks: ${response.context.length}`
);
/*
  The United States has set up joint patrols with Mexico and Guatemala to catch more human traffickers.
  Number of used source document chunks: 4
*/
const responseOther = await chain.invoke({
  input: "What about other countries?",
});
console.log("Chain response:");
console.log(responseOther.answer);
/* Ask another question on the same conversational chain. The answer should relate to the previous answer given.
  ....including members of NATO, the European Union, and other allies such as Canada....
*/
client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hana_vector/fromTexts.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import hanaClient from "hdb";
import {
HanaDB,
HanaDBArgs,
} from "@langchain/community/vectorstores/hanavector";
// SAP HANA connection settings, taken from the environment (see .env.example).
const connectionParams = {
  host: process.env.HANA_HOST,
  port: process.env.HANA_PORT,
  user: process.env.HANA_UID,
  password: process.env.HANA_PWD,
  // useCesu8 : false
};
const client = hanaClient.createClient(connectionParams);
// Connect to SAP HANA; the callback API is wrapped in a promise so the
// top-level await fails fast when the connection cannot be established.
await new Promise<void>((resolve, reject) => {
  client.connect((err: Error) => {
    if (err) {
      reject(err);
    } else {
      console.log("Connected to SAP HANA successfully.");
      resolve();
    }
  });
});
const embeddings = new OpenAIEmbeddings();
const args: HanaDBArgs = {
  connection: client,
  tableName: "test_fromTexts",
};
// fromTexts creates the table "test_fromTexts" if it does not exist; if it
// already exists, the new values are appended to it.
const vectorStore = await HanaDB.fromTexts(
  ["Bye bye", "Hello world", "hello nice world"],
  [
    { id: 2, name: "2" },
    { id: 1, name: "1" },
    { id: 3, name: "3" },
  ],
  embeddings,
  args
);
const response = await vectorStore.similaritySearch("hello world", 2);
console.log(response);
/* This result is based on no table "test_fromTexts" existing in the database.
  [
    { pageContent: 'Hello world', metadata: { id: 1, name: '1' } },
    { pageContent: 'hello nice world', metadata: { id: 3, name: '3' } }
  ]
*/
client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hana_vector/createHnswIndex.ts | import hanaClient from "hdb";
import {
HanaDB,
HanaDBArgs,
} from "@langchain/community/vectorstores/hanavector";
import { OpenAIEmbeddings } from "@langchain/openai";
// table "test_fromDocs" is already created with the previous example.
// Now, we will use this existing table to create indexes and perform similarity search.
// SAP HANA connection settings, taken from the environment (see .env.example).
const connectionParams = {
  host: process.env.HANA_HOST,
  port: process.env.HANA_PORT,
  user: process.env.HANA_UID,
  password: process.env.HANA_PWD,
};
const client = hanaClient.createClient(connectionParams);
// Connect to SAP HANA; the callback API is wrapped in a promise so the
// top-level await fails fast when the connection cannot be established.
await new Promise<void>((resolve, reject) => {
  client.connect((err: Error) => {
    if (err) {
      reject(err);
    } else {
      console.log("Connected to SAP HANA successfully.");
      resolve();
    }
  });
});
// Initialize embeddings
const embeddings = new OpenAIEmbeddings();
// First instance using the existing table "test_fromDocs" (default: Cosine similarity)
const argsCosine: HanaDBArgs = {
  connection: client,
  tableName: "test_fromDocs",
};
// Second instance using the existing table "test_fromDocs" but with L2 Euclidean distance
const argsL2: HanaDBArgs = {
  connection: client,
  tableName: "test_fromDocs",
  distanceStrategy: "euclidean", // Use Euclidean distance for this instance
};
// Initialize both HanaDB instances
const vectorStoreCosine = new HanaDB(embeddings, argsCosine);
const vectorStoreL2 = new HanaDB(embeddings, argsL2);
// Create HNSW index with Cosine similarity (default).
// efSearch/m/efConstruction are the HNSW tuning knobs passed through to HANA.
await vectorStoreCosine.createHnswIndex({
  indexName: "hnsw_cosine_index",
  efSearch: 400,
  m: 50,
  efConstruction: 150,
});
// Create HNSW index with Euclidean (L2) distance
await vectorStoreL2.createHnswIndex({
  indexName: "hnsw_l2_index",
  efSearch: 400,
  m: 50,
  efConstruction: 150,
});
// Query text for similarity search
const query = "What did the president say about Ketanji Brown Jackson";
// Perform similarity search using the default Cosine index
const docsCosine = await vectorStoreCosine.similaritySearch(query, 2);
console.log("Cosine Similarity Results:");
docsCosine.forEach((doc) => {
  console.log("-".repeat(80));
  console.log(doc.pageContent);
});
/*
  Cosine Similarity Results:
  ----------------------------------------------------------------------
  One of the most serious constitutional ...
  And I did that 4 days ago, when I ...
  ----------------------------------------------------------------------
  As I said last year, especially ...
  While it often appears that we never agree, that isn’t true...
*/
// Perform similarity search using Euclidean distance (L2 index)
const docsL2 = await vectorStoreL2.similaritySearch(query, 2);
console.log("Euclidean (L2) Distance Results:");
docsL2.forEach((doc) => {
  console.log("-".repeat(80));
  console.log(doc.pageContent);
});
// The L2 distance results should be the same as cosine search results.
// Disconnect from SAP HANA after the operations
client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/hana_vector/.env.example | OPENAI_API_KEY = "Your OpenAI API key"
HANA_HOST = "HANA_DB_ADDRESS"
HANA_PORT = "HANA_DB_PORT"
HANA_UID = "HANA_DB_USER"
HANA_PWD = "HANA_DB_PASSWORD" |
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/azure_aisearch/azure_aisearch.ts | import {
AzureAISearchVectorStore,
AzureAISearchQueryType,
} from "@langchain/community/vectorstores/azure_aisearch";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
// Load documents from file
const loader = new TextLoader("./state_of_the_union.txt");
const rawDocuments = await loader.load();
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 1000,
  chunkOverlap: 0,
});
const documents = await splitter.splitDocuments(rawDocuments);
// Create Azure AI Search vector store.
// SimilarityHybrid combines vector similarity with keyword search.
const store = await AzureAISearchVectorStore.fromDocuments(
  documents,
  new OpenAIEmbeddings(),
  {
    search: {
      type: AzureAISearchQueryType.SimilarityHybrid,
    },
  }
);
// The first time you run this, the index will be created.
// You may need to wait a bit for the index to be created before you can perform
// a search, or you can create the index manually beforehand.
// Performs a similarity search (default k is used since no count is passed).
const resultDocuments = await store.similaritySearch(
  "What did the president say about Ketanji Brown Jackson?"
);
console.log("Similarity search results:");
console.log(resultDocuments[0].pageContent);
/*
  Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.
  Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.
  One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.
  And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
*/
// Use the store as part of a chain
const model = new ChatOpenAI({ model: "gpt-3.5-turbo-1106" });
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "Answer the user's questions based on the below context:\n\n{context}",
  ],
  ["human", "{input}"],
]);
const combineDocsChain = await createStuffDocumentsChain({
  llm: model,
  prompt: questionAnsweringPrompt,
});
const chain = await createRetrievalChain({
  retriever: store.asRetriever(),
  combineDocsChain,
});
const response = await chain.invoke({
  input: "What is the president's top priority regarding prices?",
});
console.log("Chain response:");
console.log(response.answer);
/*
  The president's top priority is getting prices under control.
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/azure_aisearch/.env.example | # Azure AI Search connection settings
AZURE_AISEARCH_ENDPOINT=
AZURE_AISEARCH_KEY=
# If you're using Azure OpenAI API, you'll need to set these variables
AZURE_OPENAI_API_KEY=
AZURE_OPENAI_API_INSTANCE_NAME=
AZURE_OPENAI_API_DEPLOYMENT_NAME=
AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=
AZURE_OPENAI_API_VERSION=
# Or you can use the OpenAI API directly
OPENAI_API_KEY=
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/lancedb/fromDocs.ts | import { LanceDB } from "@langchain/community/vectorstores/lancedb";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";
import fs from "node:fs/promises";
import path from "node:path";
import os from "node:os";
// Load the example text file into LangChain documents (shared by both demos below).
const loader = new TextLoader("src/document_loaders/example_data/example.txt");
const docs = await loader.load();
/** Build an in-memory LanceDB store from the loaded docs and run one search. */
export const run = async () => {
  const embeddings = new OpenAIEmbeddings();
  const store = await LanceDB.fromDocuments(docs, embeddings);
  const topMatch = await store.similaritySearch("hello world", 1);
  console.log(topMatch);
  // [
  //   Document {
  //     pageContent: 'Foo\nBar\nBaz\n\n',
  //     metadata: { source: 'src/document_loaders/example_data/example.txt' }
  //   }
  // ]
};
/**
 * Variant of `run` intended to demonstrate reusing an existing LanceDB table.
 * NOTE(review): the previous version created a temp directory with fs.mkdtemp
 * that was never passed to LanceDB and never cleaned up — a temp-dir leak —
 * so that dead allocation has been removed. To genuinely target an existing
 * table, open it via `connect(dir)` / `db.openTable(...)` and pass `{ table }`
 * (see load.ts for that pattern).
 */
export const run_with_existing_table = async () => {
  const vectorStore = await LanceDB.fromDocuments(docs, new OpenAIEmbeddings());
  const resultOne = await vectorStore.similaritySearch("hello world", 1);
  console.log(resultOne);
  // [
  //   Document {
  //     pageContent: 'Foo\nBar\nBaz\n\n',
  //     metadata: { source: 'src/document_loaders/example_data/example.txt' }
  //   }
  // ]
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/lancedb/fromTexts.ts | import { LanceDB } from "@langchain/community/vectorstores/lancedb";
import { OpenAIEmbeddings } from "@langchain/openai";
import * as fs from "node:fs/promises";
import * as path from "node:path";
import os from "node:os";
/** Create a LanceDB store from raw strings plus metadata and run one search. */
export const run = async () => {
  const texts = ["Hello world", "Bye bye", "hello nice world"];
  const metadatas = [{ id: 2 }, { id: 1 }, { id: 3 }];
  const store = await LanceDB.fromTexts(texts, metadatas, new OpenAIEmbeddings());
  const topMatch = await store.similaritySearch("hello world", 1);
  console.log(topMatch);
  // [ Document { pageContent: 'hello nice world', metadata: { id: 3 } } ]
};
/**
 * Variant of `run` intended to demonstrate reusing an existing LanceDB table.
 * NOTE(review): the previous version created a temp directory with fs.mkdtemp
 * that was never used and never cleaned up — a temp-dir leak — so that dead
 * allocation has been removed. To genuinely target an existing table, open it
 * via `connect(dir)` / `db.openTable(...)` and pass `{ table }` (see load.ts).
 */
export const run_with_existing_table = async () => {
  const vectorStore = await LanceDB.fromTexts(
    ["Hello world", "Bye bye", "hello nice world"],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings()
  );
  const resultOne = await vectorStore.similaritySearch("hello world", 1);
  console.log(resultOne);
  // [ Document { pageContent: 'hello nice world', metadata: { id: 3 } } ]
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/lancedb/load.ts | import { LanceDB } from "@langchain/community/vectorstores/lancedb";
import { OpenAIEmbeddings } from "@langchain/openai";
import { connect } from "@lancedb/lancedb";
import * as fs from "node:fs/promises";
import * as path from "node:path";
import os from "node:os";
//
// You can open a LanceDB dataset created elsewhere, such as LangChain Python, by opening
// an existing table
//
/** Open a pre-existing LanceDB table and wrap it in a LangChain vector store. */
export const run = async () => {
  const dbPath = await createdTestDb();
  const connection = await connect(dbPath);
  const vectorsTable = await connection.openTable("vectors");
  const store = new LanceDB(new OpenAIEmbeddings(), { table: vectorsTable });
  const topMatch = await store.similaritySearch("hello world", 1);
  console.log(topMatch);
  // [ Document { pageContent: 'Hello world', metadata: { id: 1 } } ]
};
/**
 * Creates a throwaway LanceDB database in a temp directory with a "vectors"
 * table of three rows, and returns the directory path.
 * FIX: the rows previously used `Array(1536)`, which is a *sparse* array of
 * 1536 empty slots (every element undefined) rather than a usable embedding;
 * each row now carries an explicit 1536-dimensional zero vector.
 */
async function createdTestDb(): Promise<string> {
  const dir = await fs.mkdtemp(path.join(os.tmpdir(), "lancedb-"));
  const db = await connect(dir);
  const zeroVector = () => new Array(1536).fill(0);
  await db.createTable("vectors", [
    { vector: zeroVector(), text: "Hello world", id: 1 },
    { vector: zeroVector(), text: "Bye bye", id: 2 },
    { vector: zeroVector(), text: "hello nice world", id: 3 },
  ]);
  return dir;
}
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/typeorm_vectorstore/docker-compose.example.yml | services:
db:
image: ankane/pgvector
ports:
- 5432:5432
volumes:
- ./data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=ChangeMe
- POSTGRES_USER=myuser
- POSTGRES_DB=api
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/typeorm_vectorstore/typeorm.ts | import { DataSourceOptions } from "typeorm";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TypeORMVectorStore } from "@langchain/community/vectorstores/typeorm";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/typeorm
export const run = async () => {
const args = {
postgresConnectionOptions: {
type: "postgres",
host: "localhost",
port: 5432,
username: "myuser",
password: "ChangeMe",
database: "api",
} as DataSourceOptions,
};
const typeormVectorStore = await TypeORMVectorStore.fromDataSource(
new OpenAIEmbeddings(),
args
);
await typeormVectorStore.ensureTableInDatabase();
await typeormVectorStore.addDocuments([
{ pageContent: "what's this", metadata: { a: 2 } },
{ pageContent: "Cat drinks milk", metadata: { a: 1 } },
]);
const results = await typeormVectorStore.similaritySearch("hello", 2);
console.log(results);
};
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/vercel_postgres/example.ts | import { CohereEmbeddings } from "@langchain/cohere";
import { VercelPostgres } from "@langchain/community/vectorstores/vercel_postgres";
// Config is only required if you want to override default values.
const config = {
  // tableName: "testvercelvectorstorelangchain",
  // postgresConnectionOptions: {
  //   connectionString: "postgres://<username>:<password>@<hostname>:<port>/<dbname>",
  // },
  // columns: {
  //   idColumnName: "id",
  //   vectorColumnName: "vector",
  //   contentColumnName: "content",
  //   metadataColumnName: "metadata",
  // },
};
// Creates the backing table if needed and returns a ready-to-use store.
const vercelPostgresStore = await VercelPostgres.initialize(
  new CohereEmbeddings({ model: "embed-english-v3.0" }),
  config
);
const docHello = {
  pageContent: "hello",
  metadata: { topic: "nonsense" },
};
const docHi = { pageContent: "hi", metadata: { topic: "nonsense" } };
const docMitochondria = {
  pageContent: "Mitochondria is the powerhouse of the cell",
  metadata: { topic: "science" },
};
// addDocuments returns the generated row ids; ids[2] is reused later for
// the upsert and delete demos.
const ids = await vercelPostgresStore.addDocuments([
  docHello,
  docHi,
  docMitochondria,
]);
// Plain similarity search: top 2 matches for "hello".
const results = await vercelPostgresStore.similaritySearch("hello", 2);
console.log(results);
/*
  [
    Document { pageContent: 'hello', metadata: { topic: 'nonsense' } },
    Document { pageContent: 'hi', metadata: { topic: 'nonsense' } }
  ]
*/
// Metadata filtering
const results2 = await vercelPostgresStore.similaritySearch(
  "Irrelevant query, metadata filtering",
  2,
  {
    topic: "science",
  }
);
console.log(results2);
/*
  [
    Document {
      pageContent: 'Mitochondria is the powerhouse of the cell',
      metadata: { topic: 'science' }
    }
  ]
*/
// Metadata filtering with IN-filters works as well
const results3 = await vercelPostgresStore.similaritySearch(
  "Irrelevant query, metadata filtering",
  3,
  {
    topic: { in: ["science", "nonsense"] },
  }
);
console.log(results3);
/*
  [
    Document {
      pageContent: 'hello',
      metadata: { topic: 'nonsense' }
    },
    Document {
      pageContent: 'hi',
      metadata: { topic: 'nonsense' }
    },
    Document {
      pageContent: 'Mitochondria is the powerhouse of the cell',
      metadata: { topic: 'science' }
    }
  ]
*/
// Upserting is supported as well
await vercelPostgresStore.addDocuments(
[
{
pageContent: "ATP is the powerhouse of the cell",
metadata: { topic: "science" },
},
],
{ ids: [ids[2]] }
);
const results4 = await vercelPostgresStore.similaritySearch(
"What is the powerhouse of the cell?",
1
);
console.log(results4);
/*
[
Document {
pageContent: 'ATP is the powerhouse of the cell',
metadata: { topic: 'science' }
}
]
*/
await vercelPostgresStore.delete({ ids: [ids[2]] });
const results5 = await vercelPostgresStore.similaritySearch(
"No more metadata",
2,
{
topic: "science",
}
);
console.log(results5);
/*
[]
*/
// Remember to call .end() to close the connection!
await vercelPostgresStore.end();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/couchbase/.env.example | # Couchbase connection params
COUCHBASE_DB_CONN_STR=
COUCHBASE_DB_USERNAME=
COUCHBASE_DB_PASSWORD=
# Couchbase vector store args
COUCHBASE_DB_BUCKET_NAME=
COUCHBASE_DB_SCOPE_NAME=
COUCHBASE_DB_COLLECTION_NAME=
COUCHBASE_DB_INDEX_NAME=
# OpenAI API key for embeddings
OPENAI_API_KEY= |
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/azure_cosmosdb_mongodb/azure_cosmosdb_mongodb.ts | import {
AzureCosmosDBMongoDBVectorStore,
AzureCosmosDBMongoDBSimilarityType,
} from "@langchain/azure-cosmosdb";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
// Load documents from file
const loader = new TextLoader("./state_of_the_union.txt");
const rawDocuments = await loader.load();

// Split into 1000-character chunks with no overlap before indexing.
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 1000,
  chunkOverlap: 0,
});
const documents = await splitter.splitDocuments(rawDocuments);

// Create Azure Cosmos DB for MongoDB vCore vector store
// NOTE(review): `dimensions: 1536` must match the output size of the
// embeddings model in use — confirm if you swap the embedding model.
const store = await AzureCosmosDBMongoDBVectorStore.fromDocuments(
  documents,
  new OpenAIEmbeddings(),
  {
    databaseName: "langchain",
    collectionName: "documents",
    indexOptions: {
      numLists: 100,
      dimensions: 1536,
      similarity: AzureCosmosDBMongoDBSimilarityType.COS,
    },
  }
);

// Performs a similarity search
const resultDocuments = await store.similaritySearch(
  "What did the president say about Ketanji Brown Jackson?"
);
console.log("Similarity search results:");
console.log(resultDocuments[0].pageContent);
/*
  Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.

  Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.

  One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.

  And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
*/

// Use the store as part of a chain
const model = new ChatOpenAI({ model: "gpt-3.5-turbo-1106" });
const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "Answer the user's questions based on the below context:\n\n{context}",
  ],
  ["human", "{input}"],
]);

// Chain that "stuffs" the retrieved documents into the {context} slot.
const combineDocsChain = await createStuffDocumentsChain({
  llm: model,
  prompt: questionAnsweringPrompt,
});
const chain = await createRetrievalChain({
  retriever: store.asRetriever(),
  combineDocsChain,
});
const res = await chain.invoke({
  input: "What is the president's top priority regarding prices?",
});
console.log("Chain response:");
console.log(res.answer);
/*
  The president's top priority is getting prices under control.
*/

// Clean up: remove the indexed data and close the client connection.
await store.delete();
await store.close();
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/azure_cosmosdb_mongodb/.env.example | AZURE_COSMOSDB_MONGODB_CONNECTION_STRING=
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/chroma/fromDocs.ts | import { Chroma } from "@langchain/community/vectorstores/chroma";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";

// Load a plain-text file into Document objects.
const textLoader = new TextLoader(
  "src/document_loaders/example_data/example.txt"
);
const loadedDocs = await textLoader.load();

// Embed the documents and index them into a named Chroma collection.
const store = await Chroma.fromDocuments(loadedDocs, new OpenAIEmbeddings(), {
  collectionName: "a-test-collection",
  url: "http://localhost:8000", // Optional, will default to this value
  // Optional: selects the distance function of the embedding space.
  // https://docs.trychroma.com/guides#changing-the-distance-function
  collectionMetadata: {
    "hnsw:space": "cosine",
  },
});

// Retrieve the single most similar document to the query.
const hits = await store.similaritySearch("hello", 1);
console.log(hits);
/*
[
  Document {
    pageContent: 'Foo\nBar\nBaz\n\n',
    metadata: { source: 'src/document_loaders/example_data/example.txt' }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/chroma/delete.ts | import { Chroma } from "@langchain/community/vectorstores/chroma";
import { OpenAIEmbeddings } from "@langchain/openai";

// Dedicated collection so deletions here cannot affect other examples.
const embeddings = new OpenAIEmbeddings();
const vectorStore = new Chroma(embeddings, {
  collectionName: "test-deletion",
});

// Dialogue excerpts used as sample documents; each carries a `speaker`
// metadata field.
const documents = [
  {
    pageContent: `Tortoise: Labyrinth? Labyrinth? Could it Are we in the notorious Little
Harmonic Labyrinth of the dreaded Majotaur?`,
    metadata: {
      speaker: "Tortoise",
    },
  },
  {
    pageContent: "Achilles: Yiikes! What is that?",
    metadata: {
      speaker: "Achilles",
    },
  },
  {
    pageContent: `Tortoise: They say-although I person never believed it myself-that an I
Majotaur has created a tiny labyrinth sits in a pit in the middle of
it, waiting innocent victims to get lost in its fears complexity.
Then, when they wander and dazed into the center, he laughs and
laughs at them-so hard, that he laughs them to death!`,
    metadata: {
      speaker: "Tortoise",
    },
  },
  {
    pageContent: "Achilles: Oh, no!",
    metadata: {
      speaker: "Achilles",
    },
  },
  {
    pageContent: "Tortoise: But it's only a myth. Courage, Achilles.",
    metadata: {
      speaker: "Tortoise",
    },
  },
];

// Also supports an additional {ids: []} parameter for upsertion
const ids = await vectorStore.addDocuments(documents);

const response = await vectorStore.similaritySearch("scared", 2);
console.log(response);
/*
  [
    Document {
      pageContent: 'Achilles: Oh, no!',
      metadata: { speaker: 'Achilles' }
    },
    Document {
      pageContent: 'Achilles: Yiikes! What is that?',
      metadata: { speaker: 'Achilles' }
    }
  ]
*/

// You can also pass a "filter" parameter instead
await vectorStore.delete({ ids });

// After deleting every id we added, the same query returns nothing.
const response2 = await vectorStore.similaritySearch("scared", 2);
console.log(response2);
/*
  []
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/chroma/fromTexts.ts | import { Chroma } from "@langchain/community/vectorstores/chroma";
import { OpenAIEmbeddings } from "@langchain/openai";

// text sample from Godel, Escher, Bach
// The metadata array must have one entry per text — the original example
// supplied only 3 entries for 5 texts, leaving the last two documents
// without metadata and impossible to filter on. Every document now gets
// an `id`, mirroring the equivalent Qdrant example.
const vectorStore = await Chroma.fromTexts(
  [
    `Tortoise: Labyrinth? Labyrinth? Could it Are we in the notorious Little
Harmonic Labyrinth of the dreaded Majotaur?`,
    "Achilles: Yiikes! What is that?",
    `Tortoise: They say-although I person never believed it myself-that an I
Majotaur has created a tiny labyrinth sits in a pit in the middle of
it, waiting innocent victims to get lost in its fears complexity.
Then, when they wander and dazed into the center, he laughs and
laughs at them-so hard, that he laughs them to death!`,
    "Achilles: Oh, no!",
    "Tortoise: But it's only a myth. Courage, Achilles.",
  ],
  [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
  new OpenAIEmbeddings(),
  {
    collectionName: "godel-escher-bach",
  }
);

const response = await vectorStore.similaritySearch("scared", 2);
console.log(response);
/*
[
  Document {
    pageContent: 'Achilles: Oh, no!',
    metadata: { id: 4 }
  },
  Document {
    pageContent: 'Achilles: Yiikes! What is that?',
    metadata: { id: 1 }
  }
]
*/

// You can also filter by metadata
const filteredResponse = await vectorStore.similaritySearch("scared", 2, {
  id: 1,
});
console.log(filteredResponse);
/*
[
  Document {
    pageContent: 'Achilles: Yiikes! What is that?',
    metadata: { id: 1 }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/chroma/search.ts | import { Chroma } from "@langchain/community/vectorstores/chroma";
import { OpenAIEmbeddings } from "@langchain/openai";

// Reconnect to a collection that was populated earlier and query it.
// The embeddings instance must be the same model used at indexing time,
// so query vectors live in the same embedding space.
const store = await Chroma.fromExistingCollection(new OpenAIEmbeddings(), {
  collectionName: "godel-escher-bach",
});

const matches = await store.similaritySearch("scared", 2);
console.log(matches);
/*
[
  Document { pageContent: 'Achilles: Oh, no!', metadata: {} },
  Document {
    pageContent: 'Achilles: Yiikes! What is that?',
    metadata: { id: 1 }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/qdrant/fromDocs.ts | import { QdrantVectorStore } from "@langchain/qdrant";
import { OpenAIEmbeddings } from "@langchain/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";

// Load a plain-text file into Document objects.
const textLoader = new TextLoader(
  "src/document_loaders/example_data/example.txt"
);
const loadedDocs = await textLoader.load();

// Embed the documents and index them into a Qdrant collection.
const qdrantStore = await QdrantVectorStore.fromDocuments(
  loadedDocs,
  new OpenAIEmbeddings(),
  {
    url: process.env.QDRANT_URL,
    collectionName: "a_test_collection",
  }
);

// Retrieve the single closest document to the query.
const searchResult = await qdrantStore.similaritySearch("hello", 1);
console.log(searchResult);
/*
[
  Document {
    pageContent: 'Foo\nBar\nBaz\n\n',
    metadata: { source: 'src/document_loaders/example_data/example.txt' }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/qdrant/fromTexts.ts | import { QdrantVectorStore } from "@langchain/qdrant";
import { OpenAIEmbeddings } from "@langchain/openai";

// text sample from Godel, Escher, Bach
// Each text is paired positionally with the metadata entry in the second
// argument, so all five documents carry an `id` field.
const vectorStore = await QdrantVectorStore.fromTexts(
  [
    `Tortoise: Labyrinth? Labyrinth? Could it Are we in the notorious Little
Harmonic Labyrinth of the dreaded Majotaur?`,
    `Achilles: Yiikes! What is that?`,
    `Tortoise: They say-although I person never believed it myself-that an I
Majotaur has created a tiny labyrinth sits in a pit in the middle of
it, waiting innocent victims to get lost in its fears complexity.
Then, when they wander and dazed into the center, he laughs and
laughs at them-so hard, that he laughs them to death!`,
    `Achilles: Oh, no!`,
    `Tortoise: But it's only a myth. Courage, Achilles.`,
  ],
  [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
  new OpenAIEmbeddings(),
  {
    url: process.env.QDRANT_URL,
    collectionName: "goldel_escher_bach",
  }
);

// Top-2 nearest documents to the query.
const response = await vectorStore.similaritySearch("scared", 2);
console.log(response);
/*
[
  Document {
    pageContent: 'Achilles: Oh, no!',
    metadata: { id: 4 }
  },
  Document {
    pageContent: 'Achilles: Yiikes! What is that?',
    metadata: { id: 1 }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/qdrant/fromExisting.ts | import { QdrantVectorStore } from "@langchain/qdrant";
import { OpenAIEmbeddings } from "@langchain/openai";

// Attach to a Qdrant collection that already contains vectors instead of
// re-indexing. Use the same embedding model that built the collection.
const store = await QdrantVectorStore.fromExistingCollection(
  new OpenAIEmbeddings(),
  {
    url: process.env.QDRANT_URL,
    collectionName: "goldel_escher_bach",
  }
);

const matches = await store.similaritySearch("scared", 2);
console.log(matches);
/*
[
  Document { pageContent: 'Achilles: Oh, no!', metadata: {} },
  Document {
    pageContent: 'Achilles: Yiikes! What is that?',
    metadata: { id: 1 }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/neon/example.ts | import { OpenAIEmbeddings } from "@langchain/openai";
import { NeonPostgres } from "@langchain/community/vectorstores/neon";

// Initialize an embeddings instance
// NOTE(review): `dimensions: 256` shortens the embedding vectors — the Neon
// table's vector column must use the same dimensionality; confirm on setup.
const embeddings = new OpenAIEmbeddings({
  apiKey: process.env.OPENAI_API_KEY,
  dimensions: 256,
  model: "text-embedding-3-small",
});

// Initialize a NeonPostgres instance to store embedding vectors
const vectorStore = await NeonPostgres.initialize(embeddings, {
  connectionString: process.env.DATABASE_URL as string,
});

// You can add documents to the store, strings in the `pageContent` field will be embedded
// and stored in the database
const documents = [
  { pageContent: "Hello world", metadata: { topic: "greeting" } },
  { pageContent: "Bye bye", metadata: { topic: "greeting" } },
  {
    pageContent: "Mitochondria is the powerhouse of the cell",
    metadata: { topic: "science" },
  },
];
// Keep the returned ids: idsInserted[2] is reused below to upsert.
const idsInserted = await vectorStore.addDocuments(documents);

// You can now query the store for similar documents to the input query
const resultOne = await vectorStore.similaritySearch("hola", 1);
console.log(resultOne);
/*
[
  Document {
    pageContent: 'Hello world',
    metadata: { topic: 'greeting' }
  }
]
*/

// You can also filter by metadata
const resultTwo = await vectorStore.similaritySearch("Irrelevant query", 2, {
  topic: "science",
});
console.log(resultTwo);
/*
[
  Document {
    pageContent: 'Mitochondria is the powerhouse of the cell',
    metadata: { topic: 'science' }
  }
]
*/

// Metadata filtering with IN-filters works as well
const resultsThree = await vectorStore.similaritySearch("Irrelevant query", 2, {
  topic: { in: ["greeting"] },
});
console.log(resultsThree);
/*
[
  Document { pageContent: 'Bye bye', metadata: { topic: 'greeting' } },
  Document {
    pageContent: 'Hello world',
    metadata: { topic: 'greeting' }
  }
]
*/

// Upserting is supported as well: reusing an existing id replaces that
// document instead of inserting a new row.
await vectorStore.addDocuments(
  [
    {
      pageContent: "ATP is the powerhouse of the cell",
      metadata: { topic: "science" },
    },
  ],
  { ids: [idsInserted[2]] }
);

const resultsFour = await vectorStore.similaritySearch(
  "powerhouse of the cell",
  1
);
console.log(resultsFour);
/*
[
  Document {
    pageContent: 'ATP is the powerhouse of the cell',
    metadata: { topic: 'science' }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/pinecone/mmr.ts | /* eslint-disable @typescript-eslint/no-non-null-assertion */
import { Pinecone } from "@pinecone-database/pinecone";
import { OpenAIEmbeddings } from "@langchain/openai";
import { PineconeStore } from "@langchain/pinecone";

// The Pinecone client reads PINECONE_API_KEY from the environment
// (available in the dashboard at https://app.pinecone.io).
const pineconeClient = new Pinecone();
const index = pineconeClient.Index(process.env.PINECONE_INDEX!);

/**
 * Pinecone allows you to partition the records in an index into namespaces.
 * Queries and other operations are then limited to one namespace,
 * so different requests can search different subsets of your index.
 * Read more about namespaces here: https://docs.pinecone.io/guides/indexes/use-namespaces
 *
 * NOTE: If you have namespace enabled in your Pinecone index, you must provide the namespace when creating the PineconeStore.
 */
const store = await PineconeStore.fromExistingIndex(new OpenAIEmbeddings(), {
  pineconeIndex: index,
  namespace: "pinecone",
});

// Max-marginal-relevance search: fetch `fetchK` candidates, then keep the
// `k` results that balance relevance against diversity.
const mmrResults = await store.maxMarginalRelevanceSearch("pinecone", {
  k: 5,
  fetchK: 20, // Default value for the number of initial documents to fetch for reranking.
  // You can pass a filter as well
  // filter: {},
});
console.log(mmrResults);
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/pinecone/index_docs.ts | /* eslint-disable @typescript-eslint/no-non-null-assertion */
import { Pinecone } from "@pinecone-database/pinecone";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings } from "@langchain/openai";
import { PineconeStore } from "@langchain/pinecone";
// import { Index } from "@upstash/vector";

// Instantiate a new Pinecone client, which will automatically read the
// env vars: PINECONE_API_KEY which comes from
// the Pinecone dashboard at https://app.pinecone.io
const pinecone = new Pinecone();

// If index already exists:
// const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);

// If index does not exist, create it:
// NOTE(review): `dimension: 1536` must match the output size of the
// embeddings model used below — confirm if you change the model.
await pinecone.createIndex({
  name: process.env.PINECONE_INDEX!,
  dimension: 1536,
  metric: "cosine",
  spec: {
    serverless: {
      cloud: "aws",
      region: "us-east-1",
    },
  },
  deletionProtection: "disabled", // Note: deletion protection disabled https://docs.pinecone.io/guides/indexes/prevent-index-deletion#disable-deletion-protection
});
const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);

// Two documents tagged { foo: "bar" } and two tagged { baz: "qux" }, so
// metadata filtering can be demonstrated against this index.
const docs = [
  new Document({
    metadata: { foo: "bar" },
    pageContent: "pinecone is a vector db",
  }),
  new Document({
    metadata: { foo: "bar" },
    pageContent: "the quick brown fox jumped over the lazy dog",
  }),
  new Document({
    metadata: { baz: "qux" },
    pageContent: "lorem ipsum dolor sit amet",
  }),
  new Document({
    metadata: { baz: "qux" },
    pageContent: "pinecones are the woody fruiting body and of a pine tree",
  }),
];

// Embed the documents and upsert them into the index.
await PineconeStore.fromDocuments(docs, new OpenAIEmbeddings(), {
  pineconeIndex,
  maxConcurrency: 5, // Maximum number of batch requests to allow at once. Each batch is 1000 vectors.
});
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/pinecone/query_docs.ts | /* eslint-disable @typescript-eslint/no-non-null-assertion */
import { Pinecone } from "@pinecone-database/pinecone";
import { OpenAIEmbeddings } from "@langchain/openai";
import { PineconeStore } from "@langchain/pinecone";

// The Pinecone client reads PINECONE_API_KEY from the environment
// (available in the dashboard at https://app.pinecone.io).
const client = new Pinecone();
const index = client.Index(process.env.PINECONE_INDEX!);

/**
 * Pinecone allows you to partition the records in an index into namespaces.
 * Queries and other operations are then limited to one namespace,
 * so different requests can search different subsets of your index.
 * Read more about namespaces here: https://docs.pinecone.io/guides/indexes/use-namespaces
 *
 * NOTE: If you have namespace enabled in your Pinecone index, you must provide the namespace when creating the PineconeStore.
 */
const store = await PineconeStore.fromExistingIndex(new OpenAIEmbeddings(), {
  pineconeIndex: index,
  namespace: "pinecone",
});

// Query the vector DB directly, restricted by a metadata filter.
const matches = await store.similaritySearch("pinecone", 1, { foo: "bar" });
console.log(matches);
/*
[
  Document {
    pageContent: 'pinecone is a vector db',
    metadata: { foo: 'bar' }
  }
]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes/vector_stores | lc_public_repos/langchainjs/examples/src/indexes/vector_stores/pinecone/delete_docs.ts | /* eslint-disable @typescript-eslint/no-non-null-assertion */
import { Pinecone } from "@pinecone-database/pinecone";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings } from "@langchain/openai";
import { PineconeStore } from "@langchain/pinecone";

// Instantiate a new Pinecone client, which will automatically read the
// env vars: PINECONE_API_KEY which comes from
// the Pinecone dashboard at https://app.pinecone.io
const pinecone = new Pinecone();
const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);
const embeddings = new OpenAIEmbeddings();
const pineconeStore = new PineconeStore(embeddings, { pineconeIndex });

// Two documents match { foo: "bar" }; both are deleted again further down.
const docs = [
  new Document({
    metadata: { foo: "bar" },
    pageContent: "pinecone is a vector db",
  }),
  new Document({
    metadata: { foo: "bar" },
    pageContent: "the quick brown fox jumped over the lazy dog",
  }),
  new Document({
    metadata: { baz: "qux" },
    pageContent: "lorem ipsum dolor sit amet",
  }),
  new Document({
    metadata: { baz: "qux" },
    pageContent: "pinecones are the woody fruiting body and of a pine tree",
  }),
];
const pageContent = "some arbitrary content";

// Also takes an additional {ids: []} parameter for upsertion
const ids = await pineconeStore.addDocuments(docs);

const results = await pineconeStore.similaritySearch(pageContent, 2, {
  foo: "bar",
});
console.log(results);
/*
[
  Document {
    pageContent: 'pinecone is a vector db',
    metadata: { foo: 'bar' },
  },
  Document {
    pageContent: "the quick brown fox jumped over the lazy dog",
    metadata: { foo: "bar" },
  }
]
*/

// Delete the two { foo: "bar" } documents by their returned ids...
await pineconeStore.delete({
  ids: [ids[0], ids[1]],
});

// ...so the same filtered query now returns nothing.
const results2 = await pineconeStore.similaritySearch(pageContent, 2, {
  foo: "bar",
});
console.log(results2);
/*
[]
*/
|
0 | lc_public_repos/langchainjs/examples/src/indexes | lc_public_repos/langchainjs/examples/src/indexes/indexing_api/indexing.ts | import { PostgresRecordManager } from "@langchain/community/indexes/postgres";
import { index } from "langchain/indexes";
import { PGVectorStore } from "@langchain/community/vectorstores/pgvector";
import { PoolConfig } from "pg";
import { OpenAIEmbeddings } from "@langchain/openai";
import { CharacterTextSplitter } from "@langchain/textsplitters";
import { BaseDocumentLoader } from "@langchain/core/document_loaders/base";
// First, follow set-up instructions at
// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/pgvector
const config = {
postgresConnectionOptions: {
type: "postgres",
host: "127.0.0.1",
port: 5432,
user: "myuser",
password: "ChangeMe",
database: "api",
} as PoolConfig,
tableName: "testlangchain",
columns: {
idColumnName: "id",
vectorColumnName: "vector",
contentColumnName: "content",
metadataColumnName: "metadata",
},
};
const vectorStore = await PGVectorStore.initialize(
new OpenAIEmbeddings(),
config
);
// Create a new record manager
const recordManagerConfig = {
postgresConnectionOptions: {
type: "postgres",
host: "127.0.0.1",
port: 5432,
user: "myuser",
password: "ChangeMe",
database: "api",
} as PoolConfig,
tableName: "upsertion_records",
};
const recordManager = new PostgresRecordManager(
"test_namespace",
recordManagerConfig
);
// Create the schema if it doesn't exist
await recordManager.createSchema();
// Index some documents
const doc1 = {
pageContent: "kitty",
metadata: { source: "kitty.txt" },
};
const doc2 = {
pageContent: "doggy",
metadata: { source: "doggy.txt" },
};
/**
 * Hacky helper method to clear content. See the `full` mode section to understand
 * why it works: indexing an empty docsSource with cleanup "full" deletes every
 * record previously indexed under this record manager.
 */
async function clear() {
  await index({
    docsSource: [],
    recordManager,
    vectorStore,
    options: {
      cleanup: "full",
      sourceIdKey: "source",
    },
  });
}
// No cleanup
await clear();
// This mode does not do automatic clean up of old versions of content; however, it still takes care of content de-duplication.
console.log(
await index({
docsSource: [doc1, doc1, doc1, doc1, doc1, doc1],
recordManager,
vectorStore,
options: {
cleanup: undefined,
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 1,
numUpdated: 0,
numDeleted: 0,
numSkipped: 0,
}
*/
await clear();
console.log(
await index({
docsSource: [doc1, doc2],
recordManager,
vectorStore,
options: {
cleanup: undefined,
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 2,
numUpdated: 0,
numDeleted: 0,
numSkipped: 0,
}
*/
// Second time around all content will be skipped
console.log(
await index({
docsSource: [doc1, doc2],
recordManager,
vectorStore,
options: {
cleanup: undefined,
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 0,
numUpdated: 0,
numDeleted: 0,
numSkipped: 2,
}
*/
// Updated content will be added, but old won't be deleted
const doc1Updated = {
pageContent: "kitty updated",
metadata: { source: "kitty.txt" },
};
console.log(
await index({
docsSource: [doc1Updated, doc2],
recordManager,
vectorStore,
options: {
cleanup: undefined,
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 1,
numUpdated: 0,
numDeleted: 0,
numSkipped: 1,
}
*/
/*
Resulting records in the database:
[
{
pageContent: "kitty",
metadata: { source: "kitty.txt" },
},
{
pageContent: "doggy",
metadata: { source: "doggy.txt" },
},
{
pageContent: "kitty updated",
metadata: { source: "kitty.txt" },
}
]
*/
// Incremental mode
await clear();
console.log(
await index({
docsSource: [doc1, doc2],
recordManager,
vectorStore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 2,
numUpdated: 0,
numDeleted: 0,
numSkipped: 0,
}
*/
// Indexing again should result in both documents getting skipped – also skipping the embedding operation!
console.log(
await index({
docsSource: [doc1, doc2],
recordManager,
vectorStore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 0,
numUpdated: 0,
numDeleted: 0,
numSkipped: 2,
}
*/
// If we provide no documents with incremental indexing mode, nothing will change.
console.log(
await index({
docsSource: [],
recordManager,
vectorStore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 0,
numUpdated: 0,
numDeleted: 0,
numSkipped: 0,
}
*/
// If we mutate a document, the new version will be written and all old versions sharing the same source will be deleted.
// This only affects the documents with the same source id!
const changedDoc1 = {
pageContent: "kitty updated",
metadata: { source: "kitty.txt" },
};
console.log(
await index({
docsSource: [changedDoc1],
recordManager,
vectorStore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 1,
numUpdated: 0,
numDeleted: 1,
numSkipped: 0,
}
*/
// Full mode
await clear();
// In full mode the user should pass the full universe of content that should be indexed into the indexing function.
// Any documents that are not passed into the indexing function and are present in the vectorStore will be deleted!
// This behavior is useful to handle deletions of source documents.
const allDocs = [doc1, doc2];
console.log(
await index({
docsSource: allDocs,
recordManager,
vectorStore,
options: {
cleanup: "full",
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 2,
numUpdated: 0,
numDeleted: 0,
numSkipped: 0,
}
*/
// Say someone deleted the first doc:
const doc2Only = [doc2];
// Using full mode will clean up the deleted content as well.
// This affects all documents regardless of source id!
console.log(
await index({
docsSource: doc2Only,
recordManager,
vectorStore,
options: {
cleanup: "full",
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 0,
numUpdated: 0,
numDeleted: 1,
numSkipped: 1,
}
*/
await clear();
const newDoc1 = {
pageContent: "kitty kitty kitty kitty kitty",
metadata: { source: "kitty.txt" },
};
const newDoc2 = {
pageContent: "doggy doggy the doggy",
metadata: { source: "doggy.txt" },
};
const splitter = new CharacterTextSplitter({
separator: "t",
keepSeparator: true,
chunkSize: 12,
chunkOverlap: 2,
});
const newDocs = await splitter.splitDocuments([newDoc1, newDoc2]);
console.log(newDocs);
/*
[
{
pageContent: 'kitty kit',
metadata: {source: 'kitty.txt'}
},
{
pageContent: 'tty kitty ki',
metadata: {source: 'kitty.txt'}
},
{
pageContent: 'tty kitty',
metadata: {source: 'kitty.txt'},
},
{
pageContent: 'doggy doggy',
metadata: {source: 'doggy.txt'},
{
pageContent: 'the doggy',
metadata: {source: 'doggy.txt'},
}
]
*/
console.log(
await index({
docsSource: newDocs,
recordManager,
vectorStore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 5,
numUpdated: 0,
numDeleted: 0,
numSkipped: 0,
}
*/
const changedDoggyDocs = [
{
pageContent: "woof woof",
metadata: { source: "doggy.txt" },
},
{
pageContent: "woof woof woof",
metadata: { source: "doggy.txt" },
},
];
console.log(
await index({
docsSource: changedDoggyDocs,
recordManager,
vectorStore,
options: {
cleanup: "incremental",
sourceIdKey: "source",
},
})
);
/*
{
numAdded: 2,
numUpdated: 0,
numDeleted: 2,
numSkipped: 0,
}
*/
// Usage with document loaders
// Create a document loader
// Minimal loader yielding a fixed pair of documents, each tagged with a
// `source` so the indexing modes above can track them.
class MyCustomDocumentLoader extends BaseDocumentLoader {
  async load() {
    const kitty = {
      pageContent: "kitty",
      metadata: { source: "kitty.txt" },
    };
    const doggy = {
      pageContent: "doggy",
      metadata: { source: "doggy.txt" },
    };
    return [kitty, doggy];
  }
}
await clear();
const loader = new MyCustomDocumentLoader();
// `index` accepts a document loader directly as `docsSource`;
// its `load()` result is indexed just like a plain document array.
console.log(
  await index({
    docsSource: loader,
    recordManager,
    vectorStore,
    options: {
      cleanup: "incremental",
      sourceIdKey: "source",
    },
  })
);
/*
  {
    numAdded: 2,
    numUpdated: 0,
    numDeleted: 0,
    numSkipped: 0,
  }
*/

// Closing resources
await recordManager.end();
await vectorStore.end();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/stores/cassandra_storage.ts | import { CassandraKVStore } from "@langchain/community/storage/cassandra";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
// Cassandra driver connection config; the example targets AstraDB but
// any valid Cassandra connection can be used.
const configConnection = {
  serviceProviderArgs: {
    astra: {
      token: "YOUR_TOKEN_OR_LOAD_FROM_ENV" as string,
      endpoint: "YOUR_ENDPOINT_OR_LOAD_FROM_ENV" as string,
    },
  },
};

const store = new CassandraKVStore({
  ...configConnection,
  keyspace: "test", // keyspace must exist
  table: "test_kv", // table will be created if it does not exist
  keyDelimiter: ":", // optional, default is "/"
});

// Define our encoder/decoder for converting between strings and Uint8Arrays;
// the mset call below stores values as encoded bytes.
const encoder = new TextEncoder();
const decoder = new TextDecoder();

/**
 * Here you would define your LLM and chat chain, call
 * the LLM and eventually get a list of messages.
 * For this example, we'll assume we already have a list.
 */
const messages = Array.from({ length: 5 }).map((_, index) => {
  if (index % 2 === 0) {
    return new AIMessage("ai stuff...");
  }
  return new HumanMessage("human stuff...");
});

// Set your messages in the store
// The key will be prefixed with `message:id:` and end
// with the index.
await store.mset(
  messages.map((message, index) => [
    `message:id:${index}`,
    encoder.encode(JSON.stringify(message)),
  ])
);

// Now you can get your messages from the store
const retrievedMessages = await store.mget(["message:id:0", "message:id:1"]);
// Make sure to decode the values
console.log(retrievedMessages.map((v) => decoder.decode(v)));
/**
[
  '{"id":["langchain","AIMessage"],"kwargs":{"content":"ai stuff..."}}',
  '{"id":["langchain","HumanMessage"],"kwargs":{"content":"human stuff..."}}'
]
*/

// Or, if you want to get back all the keys you can call
// the `yieldKeys` method.
// Optionally, you can pass a key prefix to only get back
// keys which match that prefix.
// Note the keys arrive in no particular order (see the sample output).
const yieldedKeys = [];
for await (const key of store.yieldKeys("message:id:")) {
  yieldedKeys.push(key);
}
// The keys are not encoded, so no decoding is necessary
console.log(yieldedKeys);
/**
[
  'message:id:2',
  'message:id:1',
  'message:id:3',
  'message:id:0',
  'message:id:4'
]
*/

// Finally, let's delete the keys from the store
await store.mdelete(yieldedKeys);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/stores/in_memory_storage.ts | import { InMemoryStore } from "@langchain/core/stores";
import { AIMessage, BaseMessage, HumanMessage } from "@langchain/core/messages";
// Instantiate an in-memory store typed to hold chat messages.
const store = new InMemoryStore<BaseMessage>();
/**
 * Here you would define your LLM and chat chain, call
 * the LLM and eventually get a list of messages.
 * For this example, we'll assume we already have a list.
 */
const messages = Array.from({ length: 5 }).map((_, index) => {
  if (index % 2 === 0) {
    return new AIMessage("ai stuff...");
  }
  return new HumanMessage("human stuff...");
});
// Set your messages in the store
// The key will be prefixed with `message:id:` and end
// with the index.
await store.mset(
  messages.map((message, index) => [`message:id:${index}`, message])
);
// Now you can get your messages from the store
const retrievedMessages = await store.mget(["message:id:0", "message:id:1"]);
console.log(retrievedMessages);
/**
[
  AIMessage {
    lc_kwargs: { content: 'ai stuff...', additional_kwargs: {} },
    content: 'ai stuff...',
    ...
  },
  HumanMessage {
    lc_kwargs: { content: 'human stuff...', additional_kwargs: {} },
    content: 'human stuff...',
    ...
  }
]
*/
// Or, if you want to get back all the keys you can call
// the `yieldKeys` method.
// Optionally, you can pass a key prefix to only get back
// keys which match that prefix.
const yieldedKeys = [];
for await (const key of store.yieldKeys("message:id:")) {
  yieldedKeys.push(key);
}
// Keys are plain strings in this store, so they can be logged directly.
console.log(yieldedKeys);
/**
[
  'message:id:0',
  'message:id:1',
  'message:id:2',
  'message:id:3',
  'message:id:4'
]
*/
// Finally, let's delete the keys from the store
await store.mdelete(yieldedKeys);
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/stores/ioredis_storage.ts | import { Redis } from "ioredis";
import { RedisByteStore } from "@langchain/community/storage/ioredis";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
// Create the Redis client and wrap it in a byte store.
const client = new Redis({});
const store = new RedisByteStore({
  client,
});
// Helpers for converting between strings and Uint8Arrays,
// since this store persists raw bytes.
const textEncoder = new TextEncoder();
const textDecoder = new TextDecoder();
/**
 * Here you would define your LLM and chat chain, call
 * the LLM and eventually get a list of messages.
 * For this example, we'll assume we already have a list.
 */
const messages = Array.from({ length: 5 }, (_, index) =>
  index % 2 === 0
    ? new AIMessage("ai stuff...")
    : new HumanMessage("human stuff...")
);
// Persist each message under a `message:id:<index>` key,
// serialized to JSON and encoded to bytes.
const entries: [string, Uint8Array][] = messages.map((message, index) => [
  `message:id:${index}`,
  textEncoder.encode(JSON.stringify(message)),
]);
await store.mset(entries);
// Fetch a couple of messages back out of the store.
const retrievedMessages = await store.mget(["message:id:0", "message:id:1"]);
// Values come back as bytes, so decode them before logging.
console.log(retrievedMessages.map((value) => textDecoder.decode(value)));
/**
[
  '{"id":["langchain","AIMessage"],"kwargs":{"content":"ai stuff..."}}',
  '{"id":["langchain","HumanMessage"],"kwargs":{"content":"human stuff..."}}'
]
*/
// `yieldKeys` streams back every key, optionally filtered
// by the given prefix.
const yieldedKeys: string[] = [];
for await (const key of store.yieldKeys("message:id:")) {
  yieldedKeys.push(key);
}
// Keys are plain strings, so no decoding is needed.
console.log(yieldedKeys);
/**
[
  'message:id:2',
  'message:id:1',
  'message:id:3',
  'message:id:0',
  'message:id:4'
]
*/
// Clean up: remove the keys from the store
// and close the Redis connection.
await store.mdelete(yieldedKeys);
client.disconnect();
|
0 | lc_public_repos/langchainjs/examples/src | lc_public_repos/langchainjs/examples/src/stores/vercel_kv_storage.ts | import { createClient } from "@vercel/kv";
import { VercelKVStore } from "@langchain/community/storage/vercel_kv";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
// Pro tip: define a helper function for getting your client
// along with handling the case where your environment variables
// are not set.
const getClient = () => {
  if (!process.env.VERCEL_KV_API_URL || !process.env.VERCEL_KV_API_TOKEN) {
    throw new Error(
      "VERCEL_KV_API_URL and VERCEL_KV_API_TOKEN must be set in the environment"
    );
  }
  return createClient({
    url: process.env.VERCEL_KV_API_URL,
    token: process.env.VERCEL_KV_API_TOKEN,
  });
};
// Build the client and wrap it in a byte store.
const client = getClient();
const store = new VercelKVStore({
  client,
});
// Helpers for converting between strings and Uint8Arrays,
// since this store persists raw bytes.
const textEncoder = new TextEncoder();
const textDecoder = new TextDecoder();
/**
 * Here you would define your LLM and chat chain, call
 * the LLM and eventually get a list of messages.
 * For this example, we'll assume we already have a list.
 */
const messages = Array.from({ length: 5 }, (_, index) =>
  index % 2 === 0
    ? new AIMessage("ai stuff...")
    : new HumanMessage("human stuff...")
);
// Persist each message under a `message:id:<index>` key,
// serialized to JSON and encoded to bytes.
await store.mset(
  messages.map((message, index) => [
    `message:id:${index}`,
    textEncoder.encode(JSON.stringify(message)),
  ])
);
// Fetch a couple of messages back out of the store.
const retrievedMessages = await store.mget(["message:id:0", "message:id:1"]);
// Values come back as bytes, so decode them before logging.
console.log(retrievedMessages.map((value) => textDecoder.decode(value)));
/**
[
  '{"id":["langchain","AIMessage"],"kwargs":{"content":"ai stuff..."}}',
  '{"id":["langchain","HumanMessage"],"kwargs":{"content":"human stuff..."}}'
]
*/
// `yieldKeys` streams back every key, optionally filtered
// by the given prefix.
const yieldedKeys: string[] = [];
for await (const key of store.yieldKeys("message:id:")) {
  yieldedKeys.push(key);
}
// Keys are plain strings, so no decoding is needed.
console.log(yieldedKeys);
/**
[
  'message:id:2',
  'message:id:1',
  'message:id:3',
  'message:id:0',
  'message:id:4'
]
*/
// Finally, let's delete the keys from the store
await store.mdelete(yieldedKeys);
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.