index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { "^.+\\.tsx?$": ["@swc/jest"], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], testTimeout: 20_000, collectCoverageFrom: ["src/**/*.ts"], };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node"); class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { constructor(config, context) { // Make `instanceof Float32Array` return true in tests // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 super(config, context); this.global.Float32Array = Float32Array; } } module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/README.md
# @langchain/google-genai This package contains the LangChain.js integrations for Gemini through their generative-ai SDK. ## Installation ```bash npm2yarn npm install @langchain/google-genai @langchain/core ``` This package, along with the main LangChain package, depends on [`@langchain/core`](https://npmjs.com/package/@langchain/core/). If you are using this package with other LangChain packages, you should make sure that all of the packages depend on the same instance of @langchain/core. You can do so by adding appropriate field to your project's `package.json` like this: ```json { "name": "your-project", "version": "0.0.0", "dependencies": { "@langchain/core": "^0.3.0", "@langchain/google-genai": "^0.0.0" }, "resolutions": { "@langchain/core": "^0.3.0" }, "overrides": { "@langchain/core": "^0.3.0" }, "pnpm": { "overrides": { "@langchain/core": "^0.3.0" } } } ``` The field you need depends on the package manager you're using, but we recommend adding a field for the common `yarn`, `npm`, and `pnpm` to maximize compatibility. ## Chat Models This package contains the `ChatGoogleGenerativeAI` class, which is the recommended way to interface with the Google Gemini series of models. To use, install the requirements, and configure your environment. ```bash export GOOGLE_API_KEY=your-api-key ``` Then initialize ```typescript import { ChatGoogleGenerativeAI } from "@langchain/google-genai"; const model = new ChatGoogleGenerativeAI({ modelName: "gemini-pro", maxOutputTokens: 2048, }); const response = await model.invoke(new HumanMessage("Hello world!")); ``` #### Multimodal inputs Gemini vision model supports image inputs when providing a single chat message. 
Example: ```bash npm2yarn npm install @langchain/core ``` ```typescript import fs from "fs"; import { ChatGoogleGenerativeAI } from "@langchain/google-genai"; import { HumanMessage } from "@langchain/core/messages"; const vision = new ChatGoogleGenerativeAI({ modelName: "gemini-pro-vision", maxOutputTokens: 2048, }); const image = fs.readFileSync("./hotdog.jpg").toString("base64"); const input = [ new HumanMessage({ content: [ { type: "text", text: "Describe the following image.", }, { type: "image_url", image_url: `data:image/png;base64,${image}`, }, ], }), ]; const res = await vision.invoke(input); ``` The value of `image_url` can be any of the following: - A public image URL - An accessible gcs file (e.g., "gcs://path/to/file.png") - A base64 encoded image (e.g., `data:image/png;base64,abcd124`) - A PIL image ## Embeddings This package also adds support for google's embeddings models. ```typescript import { GoogleGenerativeAIEmbeddings } from "@langchain/google-genai"; import { TaskType } from "@google/generative-ai"; const embeddings = new GoogleGenerativeAIEmbeddings({ modelName: "embedding-001", // 768 dimensions taskType: TaskType.RETRIEVAL_DOCUMENT, title: "Document title", }); const res = await embeddings.embedQuery("OK Google"); ``` ## Development To develop the Google GenAI package, you'll need to follow these instructions: ### Install dependencies ```bash yarn install ``` ### Build the package ```bash yarn build ``` Or from the repo root: ```bash yarn build --filter=@langchain/google-genai ``` ### Run tests Test files should live within a `tests/` file in the `src/` folder. 
Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`: ```bash $ yarn test $ yarn test:int ``` ### Lint & Format Run the linter & formatter to ensure your code is up to standard: ```bash yarn lint && yarn format ``` ### Adding new entrypoints If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//], entrypoints: { index: "index", }, tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/package.json
{ "name": "@langchain/google-genai", "version": "0.1.5", "description": "Google Generative AI integration for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-google-genai/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/google-genai", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard": "yarn test:standard:unit && yarn test:standard:int", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "@google/generative-ai": "^0.21.0", 
"zod-to-json-schema": "^3.22.4" }, "peerDependencies": { "@langchain/core": ">=0.3.17 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@langchain/standard-tests": "0.0.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", "dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "hnswlib-node": "^3.0.0", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0", "zod": "^3.22.4" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-genai/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-google-genai
lc_public_repos/langchainjs/libs/langchain-google-genai/src/output_parsers.ts
import type { z } from "zod"; import { BaseLLMOutputParser, OutputParserException, } from "@langchain/core/output_parsers"; import { JsonOutputKeyToolsParserParams } from "@langchain/core/output_parsers/openai_tools"; import { ChatGeneration } from "@langchain/core/outputs"; import { ToolCall } from "@langchain/core/messages/tool"; interface GoogleGenerativeAIToolsOutputParserParams< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends Record<string, any> > extends JsonOutputKeyToolsParserParams<T> {} export class GoogleGenerativeAIToolsOutputParser< // eslint-disable-next-line @typescript-eslint/no-explicit-any T extends Record<string, any> = Record<string, any> > extends BaseLLMOutputParser<T> { static lc_name() { return "GoogleGenerativeAIToolsOutputParser"; } lc_namespace = ["langchain", "google_genai", "output_parsers"]; returnId = false; /** The type of tool calls to return. */ keyName: string; /** Whether to return only the first tool call. */ returnSingle = false; zodSchema?: z.ZodType<T>; constructor(params: GoogleGenerativeAIToolsOutputParserParams<T>) { super(params); this.keyName = params.keyName; this.returnSingle = params.returnSingle ?? this.returnSingle; this.zodSchema = params.zodSchema; } protected async _validateResult(result: unknown): Promise<T> { if (this.zodSchema === undefined) { return result as T; } const zodParsedResult = await this.zodSchema.safeParseAsync(result); if (zodParsedResult.success) { return zodParsedResult.data; } else { throw new OutputParserException( `Failed to parse. Text: "${JSON.stringify( result, null, 2 )}". 
Error: ${JSON.stringify(zodParsedResult.error.errors)}`, JSON.stringify(result, null, 2) ); } } async parseResult(generations: ChatGeneration[]): Promise<T> { const tools = generations.flatMap((generation) => { const { message } = generation; if (!("tool_calls" in message) || !Array.isArray(message.tool_calls)) { return []; } return message.tool_calls as ToolCall[]; }); if (tools[0] === undefined) { throw new Error( "No parseable tool calls provided to GoogleGenerativeAIToolsOutputParser." ); } const [tool] = tools; const validatedResult = await this._validateResult(tool.args); return validatedResult; } }
0
lc_public_repos/langchainjs/libs/langchain-google-genai
lc_public_repos/langchainjs/libs/langchain-google-genai/src/types.ts
import { CodeExecutionTool, FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, GoogleSearchRetrievalTool, } from "@google/generative-ai"; import { BindToolsInput } from "@langchain/core/language_models/chat_models"; export type GoogleGenerativeAIToolType = | BindToolsInput | GoogleGenerativeAIFunctionDeclarationsTool | CodeExecutionTool | GoogleSearchRetrievalTool;
0
lc_public_repos/langchainjs/libs/langchain-google-genai
lc_public_repos/langchainjs/libs/langchain-google-genai/src/index.ts
export * from "./chat_models.js"; export * from "./embeddings.js";
0
lc_public_repos/langchainjs/libs/langchain-google-genai
lc_public_repos/langchainjs/libs/langchain-google-genai/src/chat_models.ts
import { GenerativeModel, GoogleGenerativeAI as GenerativeAI, FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, FunctionDeclaration as GenerativeAIFunctionDeclaration, type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema, GenerateContentRequest, SafetySetting, Part as GenerativeAIPart, ModelParams, RequestOptions, type CachedContent, } from "@google/generative-ai"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { AIMessageChunk, BaseMessage, UsageMetadata, } from "@langchain/core/messages"; import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { BaseChatModel, type BaseChatModelCallOptions, type LangSmithParams, type BaseChatModelParams, } from "@langchain/core/language_models/chat_models"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { BaseLanguageModelInput, StructuredOutputMethodOptions, } from "@langchain/core/language_models/base"; import { Runnable, RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import type { z } from "zod"; import { isZodSchema } from "@langchain/core/utils/types"; import { BaseLLMOutputParser } from "@langchain/core/output_parsers"; import { zodToGenerativeAIParameters } from "./utils/zod_to_genai_parameters.js"; import { convertBaseMessagesToContent, convertResponseContentToChatGenerationChunk, mapGenerateContentResultToChatResult, } from "./utils/common.js"; import { GoogleGenerativeAIToolsOutputParser } from "./output_parsers.js"; import { GoogleGenerativeAIToolType } from "./types.js"; import { convertToolsToGenAI } from "./utils/tools.js"; interface TokenUsage { completionTokens?: number; promptTokens?: number; totalTokens?: number; } export type BaseMessageExamplePair = { input: BaseMessage; output: BaseMessage; }; export interface GoogleGenerativeAIChatCallOptions extends BaseChatModelCallOptions { tools?: 
GoogleGenerativeAIToolType[]; /** * Allowed functions to call when the mode is "any". * If empty, any one of the provided functions are called. */ allowedFunctionNames?: string[]; /** * Whether or not to include usage data, like token counts * in the streamed response chunks. * @default true */ streamUsage?: boolean; } /** * An interface defining the input to the ChatGoogleGenerativeAI class. */ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, "streamUsage"> { /** * @deprecated Use "model" instead. * * Model Name to use * * Alias for `model` * * Note: The format must follow the pattern - `{model}` */ modelName?: string; /** * Model Name to use * * Note: The format must follow the pattern - `{model}` */ model?: string; /** * Controls the randomness of the output. * * Values can range from [0.0,1.0], inclusive. A value closer to 1.0 * will produce responses that are more varied and creative, while * a value closer to 0.0 will typically result in less surprising * responses from the model. * * Note: The default value varies by model */ temperature?: number; /** * Maximum number of tokens to generate in the completion. */ maxOutputTokens?: number; /** * Top-p changes how the model selects tokens for output. * * Tokens are selected from most probable to least until the sum * of their probabilities equals the top-p value. * * For example, if tokens A, B, and C have a probability of * .3, .2, and .1 and the top-p value is .5, then the model will * select either A or B as the next token (using temperature). * * Note: The default value varies by model */ topP?: number; /** * Top-k changes how the model selects tokens for output. * * A top-k of 1 means the selected token is the most probable among * all tokens in the model’s vocabulary (also called greedy decoding), * while a top-k of 3 means that the next token is selected from * among the 3 most probable tokens (using temperature). 
* * Note: The default value varies by model */ topK?: number; /** * The set of character sequences (up to 5) that will stop output generation. * If specified, the API will stop at the first appearance of a stop * sequence. * * Note: The stop sequence will not be included as part of the response. * Note: stopSequences is only supported for Gemini models */ stopSequences?: string[]; /** * A list of unique `SafetySetting` instances for blocking unsafe content. The API will block * any prompts and responses that fail to meet the thresholds set by these settings. If there * is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use * the default safety setting for that category. */ safetySettings?: SafetySetting[]; /** * Google API key to use */ apiKey?: string; /** * Google API version to use */ apiVersion?: string; /** * Google API base URL to use */ baseUrl?: string; /** Whether to stream the results or not */ streaming?: boolean; /** * Whether or not to force the model to respond with JSON. * Available for `gemini-1.5` models and later. * @default false */ json?: boolean; /** * Whether or not model supports system instructions. * The following models support system instructions: * - All Gemini 1.5 Pro model versions * - All Gemini 1.5 Flash model versions * - Gemini 1.0 Pro version gemini-1.0-pro-002 */ convertSystemMessageToHumanContent?: boolean | undefined; } /** * Google Generative AI chat model integration. * * Setup: * Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`. 
* * ```bash * npm install @langchain/google-genai * export GOOGLE_API_KEY="your-api-key" * ``` * * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor) * * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html) * * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc. * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below: * * ```typescript * // When calling `.bind`, call options should be passed via the first argument * const llmWithArgsBound = llm.bind({ * stop: ["\n"], * tools: [...], * }); * * // When calling `.bindTools`, call options should be passed via the second argument * const llmWithTools = llm.bindTools( * [...], * { * stop: ["\n"], * } * ); * ``` * * ## Examples * * <details open> * <summary><strong>Instantiate</strong></summary> * * ```typescript * import { ChatGoogleGenerativeAI } from '@langchain/google-genai'; * * const llm = new ChatGoogleGenerativeAI({ * model: "gemini-1.5-flash", * temperature: 0, * maxRetries: 2, * // apiKey: "...", * // other params... 
* }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Invoking</strong></summary> * * ```typescript * const input = `Translate "I love programming" into French.`; * * // Models also accept a list of chat messages or a formatted prompt * const result = await llm.invoke(input); * console.log(result); * ``` * * ```txt * AIMessage { * "content": "There are a few ways to translate \"I love programming\" into French, depending on the level of formality and nuance you want to convey:\n\n**Formal:**\n\n* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and your intended audience. 
\n", * "response_metadata": { * "finishReason": "STOP", * "index": 0, * "safetyRatings": [ * { * "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", * "probability": "NEGLIGIBLE" * }, * { * "category": "HARM_CATEGORY_HATE_SPEECH", * "probability": "NEGLIGIBLE" * }, * { * "category": "HARM_CATEGORY_HARASSMENT", * "probability": "NEGLIGIBLE" * }, * { * "category": "HARM_CATEGORY_DANGEROUS_CONTENT", * "probability": "NEGLIGIBLE" * } * ] * }, * "usage_metadata": { * "input_tokens": 10, * "output_tokens": 149, * "total_tokens": 159 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Streaming Chunks</strong></summary> * * ```typescript * for await (const chunk of await llm.stream(input)) { * console.log(chunk); * } * ``` * * ```txt * AIMessageChunk { * "content": "There", * "response_metadata": { * "index": 0 * } * "usage_metadata": { * "input_tokens": 10, * "output_tokens": 1, * "total_tokens": 11 * } * } * AIMessageChunk { * "content": " are a few ways to translate \"I love programming\" into French, depending on", * } * AIMessageChunk { * "content": " the level of formality and nuance you want to convey:\n\n**Formal:**\n\n", * } * AIMessageChunk { * "content": "* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This", * } * AIMessageChunk { * "content": " is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More", * } * AIMessageChunk { * "content": " specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and", * } * AIMessageChunk { * "content": " your intended audience. 
\n", * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Aggregate Streamed Chunks</strong></summary> * * ```typescript * import { AIMessageChunk } from '@langchain/core/messages'; * import { concat } from '@langchain/core/utils/stream'; * * const stream = await llm.stream(input); * let full: AIMessageChunk | undefined; * for await (const chunk of stream) { * full = !full ? chunk : concat(full, chunk); * } * console.log(full); * ``` * * ```txt * AIMessageChunk { * "content": "There are a few ways to translate \"I love programming\" into French, depending on the level of formality and nuance you want to convey:\n\n**Formal:**\n\n* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and your intended audience. \n", * "usage_metadata": { * "input_tokens": 10, * "output_tokens": 277, * "total_tokens": 287 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Bind tools</strong></summary> * * ```typescript * import { z } from 'zod'; * * const GetWeather = { * name: "GetWeather", * description: "Get the current weather in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const GetPopulation = { * name: "GetPopulation", * description: "Get the current population in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. 
San Francisco, CA") * }), * } * * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]); * const aiMsg = await llmWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?" * ); * console.log(aiMsg.tool_calls); * ``` * * ```txt * [ * { * name: 'GetWeather', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call' * }, * { * name: 'GetWeather', * args: { location: 'New York, NY' }, * type: 'tool_call' * }, * { * name: 'GetPopulation', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call' * }, * { * name: 'GetPopulation', * args: { location: 'New York, NY' }, * type: 'tool_call' * } * ] * ``` * </details> * * <br /> * * <details> * <summary><strong>Structured Output</strong></summary> * * ```typescript * const Joke = z.object({ * setup: z.string().describe("The setup of the joke"), * punchline: z.string().describe("The punchline to the joke"), * rating: z.number().optional().describe("How funny the joke is, from 1 to 10") * }).describe('Joke to tell user.'); * * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" }); * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); * console.log(jokeResult); * ``` * * ```txt * { * setup: "Why don\\'t cats play poker?", * punchline: "Why don\\'t cats play poker? Because they always have an ace up their sleeve!" 
* } * ``` * </details> * * <br /> * * <details> * <summary><strong>Multimodal</strong></summary> * * ```typescript * import { HumanMessage } from '@langchain/core/messages'; * * const imageUrl = "https://example.com/image.jpg"; * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer()); * const base64Image = Buffer.from(imageData).toString('base64'); * * const message = new HumanMessage({ * content: [ * { type: "text", text: "describe the weather in this image" }, * { * type: "image_url", * image_url: { url: `data:image/jpeg;base64,${base64Image}` }, * }, * ] * }); * * const imageDescriptionAiMsg = await llm.invoke([message]); * console.log(imageDescriptionAiMsg.content); * ``` * * ```txt * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions. 
* ``` * </details> * * <br /> * * <details> * <summary><strong>Usage Metadata</strong></summary> * * ```typescript * const aiMsgForMetadata = await llm.invoke(input); * console.log(aiMsgForMetadata.usage_metadata); * ``` * * ```txt * { input_tokens: 10, output_tokens: 149, total_tokens: 159 } * ``` * </details> * * <br /> * * <details> * <summary><strong>Response Metadata</strong></summary> * * ```typescript * const aiMsgForResponseMetadata = await llm.invoke(input); * console.log(aiMsgForResponseMetadata.response_metadata); * ``` * * ```txt * { * finishReason: 'STOP', * index: 0, * safetyRatings: [ * { * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', * probability: 'NEGLIGIBLE' * }, * { * category: 'HARM_CATEGORY_HATE_SPEECH', * probability: 'NEGLIGIBLE' * }, * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' }, * { * category: 'HARM_CATEGORY_DANGEROUS_CONTENT', * probability: 'NEGLIGIBLE' * } * ] * } * ``` * </details> * * <br /> */ export class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk> implements GoogleGenerativeAIChatInput { static lc_name() { return "ChatGoogleGenerativeAI"; } lc_serializable = true; get lc_secrets(): { [key: string]: string } | undefined { return { apiKey: "GOOGLE_API_KEY", }; } lc_namespace = ["langchain", "chat_models", "google_genai"]; get lc_aliases() { return { apiKey: "google_api_key", }; } modelName = "gemini-pro"; model = "gemini-pro"; temperature?: number; // default value chosen based on model maxOutputTokens?: number; topP?: number; // default value chosen based on model topK?: number; // default value chosen based on model stopSequences: string[] = []; safetySettings?: SafetySetting[]; apiKey?: string; streaming = false; streamUsage = true; convertSystemMessageToHumanContent: boolean | undefined; private client: GenerativeModel; get _isMultimodalModel() { return this.model.includes("vision") || this.model.startsWith("gemini-1.5"); } constructor(fields?: 
GoogleGenerativeAIChatInput) { super(fields ?? {}); this.modelName = fields?.model?.replace(/^models\//, "") ?? fields?.modelName?.replace(/^models\//, "") ?? this.model; this.model = this.modelName; this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens; if (this.maxOutputTokens && this.maxOutputTokens < 0) { throw new Error("`maxOutputTokens` must be a positive integer"); } this.temperature = fields?.temperature ?? this.temperature; if (this.temperature && (this.temperature < 0 || this.temperature > 1)) { throw new Error("`temperature` must be in the range of [0.0,1.0]"); } this.topP = fields?.topP ?? this.topP; if (this.topP && this.topP < 0) { throw new Error("`topP` must be a positive integer"); } if (this.topP && this.topP > 1) { throw new Error("`topP` must be below 1."); } this.topK = fields?.topK ?? this.topK; if (this.topK && this.topK < 0) { throw new Error("`topK` must be a positive integer"); } this.stopSequences = fields?.stopSequences ?? this.stopSequences; this.apiKey = fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY"); if (!this.apiKey) { throw new Error( "Please set an API key for Google GenerativeAI " + "in the environment variable GOOGLE_API_KEY " + "or in the `apiKey` field of the " + "ChatGoogleGenerativeAI constructor" ); } this.safetySettings = fields?.safetySettings ?? this.safetySettings; if (this.safetySettings && this.safetySettings.length > 0) { const safetySettingsSet = new Set( this.safetySettings.map((s) => s.category) ); if (safetySettingsSet.size !== this.safetySettings.length) { throw new Error( "The categories in `safetySettings` array must be unique" ); } } this.streaming = fields?.streaming ?? 
this.streaming; this.client = new GenerativeAI(this.apiKey).getGenerativeModel( { model: this.model, safetySettings: this.safetySettings as SafetySetting[], generationConfig: { candidateCount: 1, stopSequences: this.stopSequences, maxOutputTokens: this.maxOutputTokens, temperature: this.temperature, topP: this.topP, topK: this.topK, ...(fields?.json ? { responseMimeType: "application/json" } : {}), }, }, { apiVersion: fields?.apiVersion, baseUrl: fields?.baseUrl, } ); this.streamUsage = fields?.streamUsage ?? this.streamUsage; } useCachedContent( cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions ): void { if (!this.apiKey) return; this.client = new GenerativeAI( this.apiKey ).getGenerativeModelFromCachedContent( cachedContent, modelParams, requestOptions ); } get useSystemInstruction(): boolean { return typeof this.convertSystemMessageToHumanContent === "boolean" ? !this.convertSystemMessageToHumanContent : this.computeUseSystemInstruction; } get computeUseSystemInstruction(): boolean { // This works on models from April 2024 and later // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later // AI Studio: gemini-1.5-pro-latest if (this.modelName === "gemini-1.0-pro-001") { return false; } else if (this.modelName.startsWith("gemini-pro-vision")) { return false; } else if (this.modelName.startsWith("gemini-1.0-pro-vision")) { return false; } else if (this.modelName === "gemini-pro") { // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001 return false; } return true; } getLsParams(options: this["ParsedCallOptions"]): LangSmithParams { return { ls_provider: "google_genai", ls_model_name: this.model, ls_model_type: "chat", ls_temperature: this.client.generationConfig.temperature, ls_max_tokens: this.client.generationConfig.maxOutputTokens, ls_stop: options.stop, }; } _combineLLMOutput() { return []; } _llmType() { return "googlegenerativeai"; } override bindTools( tools: GoogleGenerativeAIToolType[], kwargs?: 
Partial<GoogleGenerativeAIChatCallOptions> ): Runnable< BaseLanguageModelInput, AIMessageChunk, GoogleGenerativeAIChatCallOptions > { return this.bind({ tools: convertToolsToGenAI(tools)?.tools, ...kwargs }); } invocationParams( options?: this["ParsedCallOptions"] ): Omit<GenerateContentRequest, "contents"> { const toolsAndConfig = options?.tools?.length ? convertToolsToGenAI(options.tools, { toolChoice: options.tool_choice, allowedFunctionNames: options.allowedFunctionNames, }) : undefined; return { ...(toolsAndConfig?.tools ? { tools: toolsAndConfig.tools } : {}), ...(toolsAndConfig?.toolConfig ? { toolConfig: toolsAndConfig.toolConfig } : {}), }; } async _generate( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { const prompt = convertBaseMessagesToContent( messages, this._isMultimodalModel, this.useSystemInstruction ); let actualPrompt = prompt; if (prompt[0].role === "system") { const [systemInstruction] = prompt; this.client.systemInstruction = systemInstruction; actualPrompt = prompt.slice(1); } const parameters = this.invocationParams(options); // Handle streaming if (this.streaming) { const tokenUsage: TokenUsage = {}; const stream = this._streamResponseChunks(messages, options, runManager); const finalChunks: Record<number, ChatGenerationChunk> = {}; for await (const chunk of stream) { const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 
0; if (finalChunks[index] === undefined) { finalChunks[index] = chunk; } else { finalChunks[index] = finalChunks[index].concat(chunk); } } const generations = Object.entries(finalChunks) .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10)) .map(([_, value]) => value); return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }; } const res = await this.completionWithRetry({ ...parameters, contents: actualPrompt, }); let usageMetadata: UsageMetadata | undefined; if ("usageMetadata" in res.response) { const genAIUsageMetadata = res.response.usageMetadata as { promptTokenCount: number | undefined; candidatesTokenCount: number | undefined; totalTokenCount: number | undefined; }; usageMetadata = { input_tokens: genAIUsageMetadata.promptTokenCount ?? 0, output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0, total_tokens: genAIUsageMetadata.totalTokenCount ?? 0, }; } const generationResult = mapGenerateContentResultToChatResult( res.response, { usageMetadata, } ); await runManager?.handleLLMNewToken( generationResult.generations[0].text ?? 
"" ); return generationResult; } async *_streamResponseChunks( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): AsyncGenerator<ChatGenerationChunk> { const prompt = convertBaseMessagesToContent( messages, this._isMultimodalModel, this.useSystemInstruction ); let actualPrompt = prompt; if (prompt[0].role === "system") { const [systemInstruction] = prompt; this.client.systemInstruction = systemInstruction; actualPrompt = prompt.slice(1); } const parameters = this.invocationParams(options); const request = { ...parameters, contents: actualPrompt, }; const stream = await this.caller.callWithOptions( { signal: options?.signal }, async () => { const { stream } = await this.client.generateContentStream(request); return stream; } ); let usageMetadata: UsageMetadata | undefined; let index = 0; for await (const response of stream) { if ( "usageMetadata" in response && this.streamUsage !== false && options.streamUsage !== false ) { const genAIUsageMetadata = response.usageMetadata as { promptTokenCount: number; candidatesTokenCount: number; totalTokenCount: number; }; if (!usageMetadata) { usageMetadata = { input_tokens: genAIUsageMetadata.promptTokenCount, output_tokens: genAIUsageMetadata.candidatesTokenCount, total_tokens: genAIUsageMetadata.totalTokenCount, }; } else { // Under the hood, LangChain combines the prompt tokens. Google returns the updated // total each time, so we need to find the difference between the tokens. const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount - usageMetadata.output_tokens; usageMetadata = { input_tokens: 0, output_tokens: outputTokenDiff, total_tokens: outputTokenDiff, }; } } const chunk = convertResponseContentToChatGenerationChunk(response, { usageMetadata, index, }); index += 1; if (!chunk) { continue; } yield chunk; await runManager?.handleLLMNewToken(chunk.text ?? 
""); } } async completionWithRetry( request: string | GenerateContentRequest | (string | GenerativeAIPart)[], options?: this["ParsedCallOptions"] ) { return this.caller.callWithOptions( { signal: options?.signal }, async () => { try { return await this.client.generateContent(request); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { // TODO: Improve error handling if (e.message?.includes("400 Bad Request")) { e.status = 400; } throw e; } } ); } withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: StructuredOutputMethodOptions<false> ): Runnable<BaseLanguageModelInput, RunOutput>; withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: StructuredOutputMethodOptions<true> ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>; withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: StructuredOutputMethodOptions<boolean> ): | Runnable<BaseLanguageModelInput, RunOutput> | Runnable< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput } > { // eslint-disable-next-line @typescript-eslint/no-explicit-any const schema: z.ZodType<RunOutput> | Record<string, any> = outputSchema; const name = config?.name; const method = config?.method; const includeRaw = config?.includeRaw; if (method === "jsonMode") { throw new Error( `ChatGoogleGenerativeAI only 
supports "functionCalling" as a method.` ); } let functionName = name ?? "extract"; let outputParser: BaseLLMOutputParser<RunOutput>; let tools: GoogleGenerativeAIFunctionDeclarationsTool[]; if (isZodSchema(schema)) { const jsonSchema = zodToGenerativeAIParameters(schema); tools = [ { functionDeclarations: [ { name: functionName, description: jsonSchema.description ?? "A function available to call.", parameters: jsonSchema as GenerativeAIFunctionDeclarationSchema, }, ], }, ]; outputParser = new GoogleGenerativeAIToolsOutputParser< z.infer<typeof schema> >({ returnSingle: true, keyName: functionName, zodSchema: schema, }); } else { let geminiFunctionDefinition: GenerativeAIFunctionDeclaration; if ( typeof schema.name === "string" && typeof schema.parameters === "object" && schema.parameters != null ) { geminiFunctionDefinition = schema as GenerativeAIFunctionDeclaration; functionName = schema.name; } else { geminiFunctionDefinition = { name: functionName, description: schema.description ?? 
"", parameters: schema as GenerativeAIFunctionDeclarationSchema, }; } tools = [ { functionDeclarations: [geminiFunctionDefinition], }, ]; outputParser = new GoogleGenerativeAIToolsOutputParser<RunOutput>({ returnSingle: true, keyName: functionName, }); } const llm = this.bind({ tools, tool_choice: functionName, }); if (!includeRaw) { return llm.pipe(outputParser).withConfig({ runName: "ChatGoogleGenerativeAIStructuredOutput", }) as Runnable<BaseLanguageModelInput, RunOutput>; } const parserAssign = RunnablePassthrough.assign({ // eslint-disable-next-line @typescript-eslint/no-explicit-any parsed: (input: any, config) => outputParser.invoke(input.raw, config), }); const parserNone = RunnablePassthrough.assign({ parsed: () => null, }); const parsedWithFallback = parserAssign.withFallbacks({ fallbacks: [parserNone], }); return RunnableSequence.from< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput } >([ { raw: llm, }, parsedWithFallback, ]).withConfig({ runName: "StructuredOutputRunnable", }); } }
0
lc_public_repos/langchainjs/libs/langchain-google-genai
lc_public_repos/langchainjs/libs/langchain-google-genai/src/embeddings.ts
import { GoogleGenerativeAI, GenerativeModel } from "@google/generative-ai"; import type { TaskType, EmbedContentRequest } from "@google/generative-ai"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings"; import { chunkArray } from "@langchain/core/utils/chunk_array"; /** * Interface that extends EmbeddingsParams and defines additional * parameters specific to the GoogleGenerativeAIEmbeddings class. */ export interface GoogleGenerativeAIEmbeddingsParams extends EmbeddingsParams { /** * Model Name to use * * Alias for `model` * * Note: The format must follow the pattern - `{model}` */ modelName?: string; /** * Model Name to use * * Note: The format must follow the pattern - `{model}` */ model?: string; /** * Type of task for which the embedding will be used * * Note: currently only supported by `embedding-001` model */ taskType?: TaskType; /** * An optional title for the text. Only applicable when TaskType is * `RETRIEVAL_DOCUMENT` * * Note: currently only supported by `embedding-001` model */ title?: string; /** * Whether to strip new lines from the input text. Default to true */ stripNewLines?: boolean; /** * Google API key to use */ apiKey?: string; } /** * Class that extends the Embeddings class and provides methods for * generating embeddings using the Google Palm API. * @example * ```typescript * const model = new GoogleGenerativeAIEmbeddings({ * apiKey: "<YOUR API KEY>", * modelName: "embedding-001", * }); * * // Embed a single query * const res = await model.embedQuery( * "What would be a good company name for a company that makes colorful socks?" 
* ); * console.log({ res }); * * // Embed multiple documents * const documentRes = await model.embedDocuments(["Hello world", "Bye bye"]); * console.log({ documentRes }); * ``` */ export class GoogleGenerativeAIEmbeddings extends Embeddings implements GoogleGenerativeAIEmbeddingsParams { apiKey?: string; modelName = "embedding-001"; model = "embedding-001"; taskType?: TaskType; title?: string; stripNewLines = true; maxBatchSize = 100; // Max batch size for embedDocuments set by GenerativeModel client's batchEmbedContents call private client: GenerativeModel; constructor(fields?: GoogleGenerativeAIEmbeddingsParams) { super(fields ?? {}); this.modelName = fields?.model?.replace(/^models\//, "") ?? fields?.modelName?.replace(/^models\//, "") ?? this.modelName; this.model = this.modelName; this.taskType = fields?.taskType ?? this.taskType; this.title = fields?.title ?? this.title; if (this.title && this.taskType !== "RETRIEVAL_DOCUMENT") { throw new Error( "title can only be sepcified with TaskType.RETRIEVAL_DOCUMENT" ); } this.apiKey = fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY"); if (!this.apiKey) { throw new Error( "Please set an API key for Google GenerativeAI " + "in the environmentb variable GOOGLE_API_KEY " + "or in the `apiKey` field of the " + "GoogleGenerativeAIEmbeddings constructor" ); } this.client = new GoogleGenerativeAI(this.apiKey).getGenerativeModel({ model: this.model, }); } private _convertToContent(text: string): EmbedContentRequest { const cleanedText = this.stripNewLines ? text.replace(/\n/g, " ") : text; return { content: { role: "user", parts: [{ text: cleanedText }] }, taskType: this.taskType, title: this.title, }; } protected async _embedQueryContent(text: string): Promise<number[]> { const req = this._convertToContent(text); const res = await this.client.embedContent(req); return res.embedding.values ?? 
[]; } protected async _embedDocumentsContent( documents: string[] ): Promise<number[][]> { const batchEmbedChunks: string[][] = chunkArray<string>( documents, this.maxBatchSize ); const batchEmbedRequests = batchEmbedChunks.map((chunk) => ({ requests: chunk.map((doc) => this._convertToContent(doc)), })); const responses = await Promise.allSettled( batchEmbedRequests.map((req) => this.client.batchEmbedContents(req)) ); const embeddings = responses.flatMap((res, idx) => { if (res.status === "fulfilled") { return res.value.embeddings.map((e) => e.values || []); } else { return Array(batchEmbedChunks[idx].length).fill([]); } }); return embeddings; } /** * Method that takes a document as input and returns a promise that * resolves to an embedding for the document. It calls the _embedText * method with the document as the input. * @param document Document for which to generate an embedding. * @returns Promise that resolves to an embedding for the input document. */ embedQuery(document: string): Promise<number[]> { return this.caller.call(this._embedQueryContent.bind(this), document); } /** * Method that takes an array of documents as input and returns a promise * that resolves to a 2D array of embeddings for each document. It calls * the _embedText method for each document in the array. * @param documents Array of documents for which to generate embeddings. * @returns Promise that resolves to a 2D array of embeddings for each input document. */ embedDocuments(documents: string[]): Promise<number[][]> { return this.caller.call(this._embedDocumentsContent.bind(this), documents); } }
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/tests/chat_models.standard.int.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import {
  ChatGoogleGenerativeAI,
  GoogleGenerativeAIChatCallOptions,
} from "../chat_models.js";

/**
 * Runs the shared LangChain standard integration test suite against
 * ChatGoogleGenerativeAI. Requires a real GOOGLE_API_KEY in the environment.
 */
class ChatGoogleGenerativeAIStandardIntegrationTests extends ChatModelIntegrationTests<
  GoogleGenerativeAIChatCallOptions,
  AIMessageChunk
> {
  constructor() {
    // Fail fast with an accurate message when credentials are missing.
    // (Fixed: the message previously said "is set" for the missing-key case.)
    if (!process.env.GOOGLE_API_KEY) {
      throw new Error(
        "Can not run Google Generative AI integration tests because GOOGLE_API_KEY is not set"
      );
    }
    super({
      Cls: ChatGoogleGenerativeAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      supportsParallelToolCalls: true,
      constructorArgs: {
        maxRetries: 1,
        model: "gemini-1.5-pro",
      },
    });
  }

  /**
   * Skipped: Gemini rejects tool schemas containing objects with
   * unknown/any-typed parameters, which this standard test requires.
   */
  async testInvokeMoreComplexTools() {
    this.skipTestMessage(
      "testInvokeMoreComplexTools",
      "ChatGoogleGenerativeAI",
      "ChatGoogleGenerativeAI does not support tool schemas which contain object with unknown/any parameters." +
        "ChatGoogleGenerativeAI only supports objects in schemas when the parameters are defined."
    );
  }
}

const testClass = new ChatGoogleGenerativeAIStandardIntegrationTests();

test("ChatGoogleGenerativeAIStandardIntegrationTests", async () => {
  const testResults = await testClass.runTests();
  expect(testResults).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/tests/chat_models-extended.int.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { z } from "zod"; import { ChatGoogleGenerativeAI } from "../chat_models.js"; const baseSchema = z.object({ name: z.string(), age: z.number(), }); test("Google AI - Generate structured output without errors", async () => { const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 0.7, }); const structuredLlm = model.withStructuredOutput(baseSchema); const request = "Generate a structured response for a user."; const result = await structuredLlm.invoke(request); console.log("Valid Schema Result:", result); expect(result).toBeDefined(); expect(result).toHaveProperty("name"); expect(result).toHaveProperty("age"); }); test("Google AI - Validate nested schema structures", async () => { const schema = z.object({ name: z.string(), details: z.object({ age: z.number(), address: z.string(), }), }); const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 0.7, }); const structuredLlm = model.withStructuredOutput(schema); const request = "Generate structured data with nested schema."; const result = await structuredLlm.invoke(request); console.log("Nested Schema Result:", result); expect(result).toBeDefined(); expect(result.details).toHaveProperty("age"); expect(result.details).toHaveProperty("address"); }); test("Google AI - Handle optional fields in schema", async () => { const schema = z.object({ name: z.string(), age: z.number(), email: z.string().optional(), }); const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 0.7, }); const structuredLlm = model.withStructuredOutput(schema); const request = "Generate structured data with optional fields."; const result = await structuredLlm.invoke(request); console.log("Optional Fields Result:", result); expect(result).toBeDefined(); expect(result).toHaveProperty("name"); expect(result).toHaveProperty("age"); expect(result).toHaveProperty("email"); }); test("Google 
AI - Validate schema with large payloads", async () => { const schema = z.object({ name: z.string(), age: z.number(), address: z.string(), phone: z.string(), email: z.string(), }); const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 0.7, }); const structuredLlm = model.withStructuredOutput(schema); const request = "Generate structured data for a user with many fields."; const result = await structuredLlm.invoke(request); console.log("Large Payload Result:", result); expect(result).toBeDefined(); expect(result).toHaveProperty("name"); expect(result).toHaveProperty("age"); expect(result).toHaveProperty("address"); expect(result).toHaveProperty("phone"); expect(result).toHaveProperty("email"); }); test("Google AI - Handle schema with deeply nested structures", async () => { const schema = z.object({ user: z.object({ id: z.string(), profile: z.object({ details: z.object({ name: z.string(), age: z.number(), preferences: z.object({ favoriteColor: z.string(), hobbies: z.array(z.string()), }), }), }), }), }); const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 0.7, }); const structuredLlm = model.withStructuredOutput(schema); const request = "Generate a deeply nested user profile structure."; const result = await structuredLlm.invoke(request); console.log("Deeply Nested Schema Result:", result); expect(result).toBeDefined(); expect(result.user.profile.details.preferences).toHaveProperty( "favoriteColor" ); expect(Array.isArray(result.user.profile.details.preferences.hobbies)).toBe( true ); }); test("Google AI - Handle schema with enum fields", async () => { const schema = z.object({ name: z.string(), role: z.enum(["admin", "editor", "viewer"]), }); const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 0.7, }); const structuredLlm = model.withStructuredOutput(schema); const request = "Generate structured data with a name and a role (admin, editor, or viewer)."; const result = await 
structuredLlm.invoke(request); console.log("Enum Fields Result:", result); expect(result).toBeDefined(); expect(result).toHaveProperty("name"); expect(result).toHaveProperty("role"); expect(["admin", "editor", "viewer"]).toContain(result.role); });
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/tests/context_caching.int.test.ts
/* eslint-disable no-process-env */ import { test } from "@jest/globals"; import { fileURLToPath } from "node:url"; import * as path from "node:path"; import { FileState, UploadFileResponse, GoogleAIFileManager, GoogleAICacheManager, } from "@google/generative-ai/server"; import { ChatGoogleGenerativeAI } from "../chat_models.js"; const model = new ChatGoogleGenerativeAI({}); let fileResult: UploadFileResponse; beforeAll(async () => { // Download video file and save in src/tests/data // curl -O https://storage.googleapis.com/generativeai-downloads/data/Sherlock_Jr_FullMovie.mp4 const displayName = "Sherlock Jr. video"; const filename = fileURLToPath(import.meta.url); const dirname = path.dirname(filename); const pathToVideoFile = path.join(dirname, "/data/Sherlock_Jr_FullMovie.mp4"); const contextCache = new GoogleAICacheManager( process.env.GOOGLE_API_KEY || "" ); const fileCache = new GoogleAIFileManager(process.env.GOOGLE_API_KEY || ""); fileResult = await fileCache.uploadFile(pathToVideoFile, { displayName, mimeType: "video/mp4", }); const { name } = fileResult.file; // Poll getFile() on a set interval (2 seconds here) to check file state. 
let file = await fileCache.getFile(name); while (file.state === FileState.PROCESSING) { // Sleep for 2 seconds await new Promise((resolve) => { setTimeout(resolve, 2_000); }); file = await fileCache.getFile(name); } const systemInstruction = "You are an expert video analyzer, and your job is to answer " + "the user's query based on the video file you have access to."; const cachedContent = await contextCache.create({ model: "models/gemini-1.5-flash-001", displayName: "gettysburg audio", systemInstruction, contents: [ { role: "user", parts: [ { fileData: { mimeType: fileResult.file.mimeType, fileUri: fileResult.file.uri, }, }, ], }, ], ttlSeconds: 300, }); model.useCachedContent(cachedContent); }, 10 * 60 * 1000); // Set timeout to 10 minutes to upload file test("Test Google AI", async () => { const res = await model.invoke( "Introduce different characters in the movie by describing " + "their personality, looks, and names. Also list the " + "timestamps they were introduced for the first time." ); console.log(res); expect(res).toBeTruthy(); });
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/tests/chat_models.standard.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { ChatModelUnitTests } from "@langchain/standard-tests"; import { AIMessageChunk } from "@langchain/core/messages"; import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatCallOptions, } from "../chat_models.js"; class ChatGoogleGenerativeAIStandardUnitTests extends ChatModelUnitTests< GoogleGenerativeAIChatCallOptions, AIMessageChunk > { constructor() { super({ Cls: ChatGoogleGenerativeAI, chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: {}, }); // This must be set so method like `.bindTools` or `.withStructuredOutput` // which we call after instantiating the model will work. // (constructor will throw if API key is not set) process.env.GOOGLE_API_KEY = "test"; } testChatModelInitApiKey() { // Unset the API key env var here so this test can properly check // the API key class arg. process.env.GOOGLE_API_KEY = ""; super.testChatModelInitApiKey(); // Re-set the API key env var here so other tests can run properly. process.env.GOOGLE_API_KEY = "test"; } } const testClass = new ChatGoogleGenerativeAIStandardUnitTests(); test("ChatGoogleGenerativeAIStandardUnitTests", () => { const testResults = testClass.runTests(); expect(testResults).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/tests/chat_models.int.test.ts
/* eslint-disable no-process-env */ import { test } from "@jest/globals"; import * as fs from "node:fs/promises"; import { fileURLToPath } from "node:url"; import * as path from "node:path"; import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, ToolMessage, } from "@langchain/core/messages"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { StructuredTool } from "@langchain/core/tools"; import { z } from "zod"; import { CodeExecutionTool, DynamicRetrievalMode, SchemaType as FunctionDeclarationSchemaType, GoogleSearchRetrievalTool, } from "@google/generative-ai"; import { concat } from "@langchain/core/utils/stream"; import { ChatGoogleGenerativeAI } from "../chat_models.js"; // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; const dummyToolResponse = `[{"title":"Weather in New York City","url":"https://www.weatherapi.com/","content":"{'location': {'name': 'New York', 'region': 'New York', 'country': 'United States of America', 'lat': 40.71, 'lon': -74.01, 'tz_id': 'America/New_York', 'localtime_epoch': 1718659486, 'localtime': '2024-06-17 17:24'}, 'current': {'last_updated_epoch': 1718658900, 'last_updated': '2024-06-17 17:15', 'temp_c': 27.8, 'temp_f': 82.0, 'is_day': 1, 'condition': {'text': 'Partly cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 159, 'wind_dir': 'SSE', 'pressure_mb': 1021.0, 'pressure_in': 30.15, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 58, 'cloud': 25, 'feelslike_c': 29.0, 'feelslike_f': 84.2, 'windchill_c': 26.9, 'windchill_f': 80.5, 'heatindex_c': 27.9, 'heatindex_f': 82.2, 'dewpoint_c': 17.1, 'dewpoint_f': 62.8, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 7.0, 'gust_mph': 18.3, 'gust_kph': 29.4}}","score":0.98192,"raw_content":null},{"title":"New York, NY Monthly Weather | 
AccuWeather","url":"https://www.accuweather.com/en/us/new-york/10021/june-weather/349727","content":"Get the monthly weather forecast for New York, NY, including daily high/low, historical averages, to help you plan ahead.","score":0.97504,"raw_content":null}]`; test("Test Google AI", async () => { const model = new ChatGoogleGenerativeAI({}); const res = await model.invoke("what is 1 + 1?"); expect(res).toBeTruthy(); }); test("Test Google AI generation", async () => { const model = new ChatGoogleGenerativeAI({}); const res = await model.generate([ [["human", `Translate "I love programming" into Korean.`]], ]); expect(res).toBeTruthy(); }); test("Test Google AI generation with a stop sequence", async () => { const model = new ChatGoogleGenerativeAI({ stopSequences: ["two", "2"], }); const res = await model.invoke([ ["human", `What are the first three positive whole numbers?`], ]); expect(res).toBeTruthy(); expect(res.additional_kwargs.finishReason).toBe("STOP"); expect(res.content).not.toContain("2"); expect(res.content).not.toContain("two"); }); test("Test Google AI generation with a system message", async () => { const model = new ChatGoogleGenerativeAI({}); const res = await model.generate([ [ ["system", `You are an amazing translator.`], ["human", `Translate "I love programming" into Korean.`], ], ]); expect(res).toBeTruthy(); }); test("Test Google AI multimodal generation", async () => { const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); const imageData = ( await fs.readFile(path.join(__dirname, "/data/hotdog.jpg")) ).toString("base64"); const model = new ChatGoogleGenerativeAI({ modelName: "gemini-1.5-flash", }); const res = await model.invoke([ new HumanMessage({ content: [ { type: "text", text: "Describe the following image:", }, { type: "image_url", image_url: `data:image/png;base64,${imageData}`, }, ], }), ]); expect(res).toBeTruthy(); }); test("Test Google AI handleLLMNewToken callback", async () => { // 
Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { const model = new ChatGoogleGenerativeAI({}); let tokens = ""; const res = await model.call( [new HumanMessage("what is 1 + 1?")], undefined, [ { handleLLMNewToken(token: string) { tokens += token; }, }, ] ); const responseContent = typeof res.content === "string" ? res.content : ""; expect(tokens).toBe(responseContent); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test Google AI handleLLMNewToken callback with streaming", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { const model = new ChatGoogleGenerativeAI({}); let tokens = ""; const res = await model.stream([new HumanMessage("what is 1 + 1?")], { callbacks: [ { handleLLMNewToken(token: string) { tokens += token; }, }, ], }); let responseContent = ""; for await (const streamItem of res) { responseContent += streamItem.content; } expect(tokens).toBe(responseContent); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test Google AI in streaming mode", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { const model = new ChatGoogleGenerativeAI({ streaming: true }); let tokens = ""; let nrNewTokens = 0; const res = await model.invoke([new HumanMessage("Write a haiku?")], { callbacks: [ { handleLLMNewToken(token: string) { nrNewTokens += 1; tokens += token; }, }, ], }); expect(nrNewTokens).toBeGreaterThanOrEqual(1); expect(res.content).toBe(tokens); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); async function fileToBase64(filePath: string): Promise<string> { const fileData = await fs.readFile(filePath); const base64String = Buffer.from(fileData).toString("base64"); return base64String; } test("Gemini can understand audio", async () => { // Update this with the correct path to an audio file on your machine. const audioPath = "./src/tests/data/gettysburg10.wav"; const audioMimeType = "audio/wav"; const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 0, maxRetries: 0, }); const audioBase64 = await fileToBase64(audioPath); const prompt = ChatPromptTemplate.fromMessages([ new MessagesPlaceholder("audio"), ]); const chain = prompt.pipe(model); const response = await chain.invoke({ audio: new HumanMessage({ content: [ { type: "media", mimeType: audioMimeType, data: audioBase64, }, { type: "text", text: "Summarize the content in this audio. 
ALso, what is the speaker's tone?", }, ], }), }); expect(typeof response.content).toBe("string"); expect((response.content as string).length).toBeGreaterThan(15); }); class FakeBrowserTool extends StructuredTool { schema = z.object({ url: z.string(), query: z.string().optional(), }); name = "fake_browser_tool"; description = "useful for when you need to find something on the web or summarize a webpage."; async _call(_: z.infer<this["schema"]>): Promise<string> { return "fake_browser_tool"; } } const googleGenAITool = { functionDeclarations: [ { name: "fake_browser_tool", description: "useful for when you need to find something on the web or summarize a webpage.", parameters: { type: FunctionDeclarationSchemaType.OBJECT, required: ["url"], properties: { url: { type: FunctionDeclarationSchemaType.STRING, }, query: { type: FunctionDeclarationSchemaType.STRING, }, }, }, }, ], }; const prompt = new HumanMessage( "Search the web and tell me what the weather will be like tonight in new york. use weather.com" ); test("ChatGoogleGenerativeAI can bind and invoke langchain tools", async () => { const model = new ChatGoogleGenerativeAI({}); const modelWithTools = model.bind({ tools: [new FakeBrowserTool()], }); const res = await modelWithTools.invoke([prompt]); const toolCalls = res.tool_calls; expect(toolCalls).toBeDefined(); if (!toolCalls) { throw new Error("tool_calls not in response"); } expect(toolCalls.length).toBe(1); expect(toolCalls[0].name).toBe("fake_browser_tool"); expect("url" in toolCalls[0].args).toBe(true); }); test("ChatGoogleGenerativeAI can bind and stream langchain tools", async () => { const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro", }); const modelWithTools = model.bind({ tools: [new FakeBrowserTool()], }); let finalChunk: AIMessageChunk | undefined; for await (const chunk of await modelWithTools.stream([prompt])) { if (!finalChunk) { finalChunk = chunk; } else { finalChunk = finalChunk.concat(chunk); } } if (!finalChunk) { throw new 
Error("finalChunk is undefined"); } const toolCalls = finalChunk.tool_calls; expect(toolCalls).toBeDefined(); if (!toolCalls) { throw new Error("tool_calls not in response"); } expect(toolCalls.length).toBe(1); expect(toolCalls[0].name).toBe("fake_browser_tool"); expect("url" in toolCalls[0].args).toBe(true); }); test("ChatGoogleGenerativeAI can handle streaming tool messages.", async () => { const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro", maxRetries: 1, }); const browserTool = new FakeBrowserTool(); const modelWithTools = model.bind({ tools: [browserTool], }); let finalChunk: AIMessageChunk | undefined; const fullPrompt = [ new SystemMessage( "You are a helpful assistant. If the chat history contains the tool results, you should use that and not call the tool again." ), prompt, new AIMessage({ content: "", tool_calls: [ { name: browserTool.name, args: { query: "weather tonight new york", url: "https://weather.com", }, }, ], }), new ToolMessage(dummyToolResponse, "id", browserTool.name), ]; for await (const chunk of await modelWithTools.stream(fullPrompt)) { if (!finalChunk) { finalChunk = chunk; } else { finalChunk = finalChunk.concat(chunk); } } if (!finalChunk) { throw new Error("finalChunk is undefined"); } expect(typeof finalChunk.content).toBe("string"); expect(finalChunk.content.length).toBeGreaterThan(1); expect(finalChunk.tool_calls).toHaveLength(0); }); test("ChatGoogleGenerativeAI can handle invoking tool messages.", async () => { const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro", maxRetries: 1, }); const browserTool = new FakeBrowserTool(); const modelWithTools = model.bind({ tools: [browserTool], }); const fullPrompt = [ new SystemMessage( "You are a helpful assistant. If the chat history contains the tool results, you should use that and not call the tool again." 
), prompt, new AIMessage({ content: "", tool_calls: [ { name: browserTool.name, args: { query: "weather tonight new york", url: "https://weather.com", }, }, ], }), new ToolMessage(dummyToolResponse, "id", browserTool.name), ]; const response = await modelWithTools.invoke(fullPrompt); expect(typeof response.content).toBe("string"); expect(response.content.length).toBeGreaterThan(1); expect(response.tool_calls).toHaveLength(0); }); test("ChatGoogleGenerativeAI can bind and invoke genai tools", async () => { const model = new ChatGoogleGenerativeAI({}); const modelWithTools = model.bind({ tools: [googleGenAITool], }); const res = await modelWithTools.invoke([prompt]); const toolCalls = res.tool_calls; expect(toolCalls).toBeDefined(); if (!toolCalls) { throw new Error("tool_calls not in response"); } expect(toolCalls.length).toBe(1); expect(toolCalls[0].name).toBe("fake_browser_tool"); expect("url" in toolCalls[0].args).toBe(true); }); test("ChatGoogleGenerativeAI can bindTools with langchain tools and invoke", async () => { const model = new ChatGoogleGenerativeAI({}); const modelWithTools = model.bindTools([new FakeBrowserTool()]); const res = await modelWithTools.invoke([prompt]); const toolCalls = res.tool_calls; expect(toolCalls).toBeDefined(); if (!toolCalls) { throw new Error("tool_calls not in response"); } expect(toolCalls.length).toBe(1); expect(toolCalls[0].name).toBe("fake_browser_tool"); expect("url" in toolCalls[0].args).toBe(true); }); test("ChatGoogleGenerativeAI can bindTools with genai tools and invoke", async () => { const model = new ChatGoogleGenerativeAI({}); const modelWithTools = model.bindTools([googleGenAITool]); const res = await modelWithTools.invoke([prompt]); const toolCalls = res.tool_calls; expect(toolCalls).toBeDefined(); if (!toolCalls) { throw new Error("tool_calls not in response"); } expect(toolCalls.length).toBe(1); expect(toolCalls[0].name).toBe("fake_browser_tool"); expect("url" in toolCalls[0].args).toBe(true); }); 
test("ChatGoogleGenerativeAI can call withStructuredOutput langchain tools and invoke", async () => { const model = new ChatGoogleGenerativeAI({}); const tool = new FakeBrowserTool(); const modelWithTools = model.withStructuredOutput< z.infer<typeof tool.schema> >(tool.schema); const res = await modelWithTools.invoke([prompt]); expect(typeof res.url === "string").toBe(true); }); test("ChatGoogleGenerativeAI can call withStructuredOutput genai tools and invoke", async () => { const model = new ChatGoogleGenerativeAI({}); type GeminiTool = { url: string; query?: string; }; const modelWithTools = model.withStructuredOutput<GeminiTool>( googleGenAITool.functionDeclarations[0].parameters ); const res = await modelWithTools.invoke([prompt]); expect(typeof res.url === "string").toBe(true); }); test("Stream token count usage_metadata", async () => { const model = new ChatGoogleGenerativeAI({ temperature: 0, maxOutputTokens: 10, }); let res: AIMessageChunk | null = null; for await (const chunk of await model.stream( "Why is the sky blue? Be concise." )) { if (!res) { res = chunk; } else { res = res.concat(chunk); } } expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; } expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); expect(res.usage_metadata.output_tokens).toBeGreaterThan(1); expect(res.usage_metadata.total_tokens).toBe( res.usage_metadata.input_tokens + res.usage_metadata.output_tokens ); }); test("streamUsage excludes token usage", async () => { const model = new ChatGoogleGenerativeAI({ temperature: 0, streamUsage: false, }); let res: AIMessageChunk | null = null; for await (const chunk of await model.stream( "Why is the sky blue? Be concise." 
)) { if (!res) { res = chunk; } else { res = res.concat(chunk); } } expect(res?.usage_metadata).not.toBeDefined(); }); test("Invoke token count usage_metadata", async () => { const model = new ChatGoogleGenerativeAI({ temperature: 0, maxOutputTokens: 10, }); const res = await model.invoke("Why is the sky blue? Be concise."); expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; } expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); expect(res.usage_metadata.output_tokens).toBeGreaterThan(1); expect(res.usage_metadata.total_tokens).toBe( res.usage_metadata.input_tokens + res.usage_metadata.output_tokens ); }); test("Invoke with JSON mode", async () => { const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 0, maxOutputTokens: 10, json: true, }); const res = await model.invoke("Why is the sky blue? Be concise."); expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; } expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); expect(res.usage_metadata.output_tokens).toBeGreaterThan(1); expect(res.usage_metadata.total_tokens).toBe( res.usage_metadata.input_tokens + res.usage_metadata.output_tokens ); }); test("Supports tool_choice", async () => { const model = new ChatGoogleGenerativeAI({}); const tools = [ { name: "get_weather", description: "Get the weather", schema: z.object({ location: z.string(), }), }, { name: "calculator", description: "Preform calculations", schema: z.object({ expression: z.string(), }), }, ]; const modelWithTools = model.bindTools(tools, { tool_choice: "calculator", allowedFunctionNames: ["calculator"], }); const response = await modelWithTools.invoke( "What is 27725327 times 283683? Also whats the weather in New York?" 
); expect(response.tool_calls?.length).toBe(1); }); describe("GoogleSearchRetrievalTool", () => { test("Supports GoogleSearchRetrievalTool", async () => { const searchRetrievalTool: GoogleSearchRetrievalTool = { googleSearchRetrieval: { dynamicRetrievalConfig: { mode: DynamicRetrievalMode.MODE_DYNAMIC, dynamicThreshold: 0.7, // default is 0.7 }, }, }; const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro", temperature: 0, maxRetries: 0, }).bindTools([searchRetrievalTool]); const result = await model.invoke("Who won the 2024 MLB World Series?"); expect(result.response_metadata?.groundingMetadata).toBeDefined(); expect(result.content as string).toContain("Dodgers"); }); test("Can stream GoogleSearchRetrievalTool", async () => { const searchRetrievalTool: GoogleSearchRetrievalTool = { googleSearchRetrieval: { dynamicRetrievalConfig: { mode: DynamicRetrievalMode.MODE_DYNAMIC, dynamicThreshold: 0.7, // default is 0.7 }, }, }; const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro", temperature: 0, maxRetries: 0, }).bindTools([searchRetrievalTool]); const stream = await model.stream("Who won the 2024 MLB World Series?"); let finalMsg: AIMessageChunk | undefined; for await (const msg of stream) { finalMsg = finalMsg ? concat(finalMsg, msg) : msg; } if (!finalMsg) { throw new Error("finalMsg is undefined"); } expect(finalMsg.response_metadata?.groundingMetadata).toBeDefined(); expect(finalMsg.content as string).toContain("Dodgers"); }); }); describe("CodeExecutionTool", () => { test("Supports CodeExecutionTool", async () => { const codeExecutionTool: CodeExecutionTool = { codeExecution: {}, // Simply pass an empty object to enable it. 
}; const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro", temperature: 0, maxRetries: 0, }).bindTools([codeExecutionTool]); const result = await model.invoke( "Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]" ); expect(Array.isArray(result.content)).toBeTruthy(); if (!Array.isArray(result.content)) { throw new Error("Content is not an array"); } const texts = result.content .flatMap((item) => ("text" in item ? [item.text] : [])) .join("\n"); expect(texts).toContain("21"); const executableCode = result.content.find( (item) => item.type === "executableCode" ); expect(executableCode).toBeDefined(); const codeResult = result.content.find( (item) => item.type === "codeExecutionResult" ); expect(codeResult).toBeDefined(); }); test("CodeExecutionTool contents can be passed in chat history", async () => { const codeExecutionTool: CodeExecutionTool = { codeExecution: {}, // Simply pass an empty object to enable it. }; const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro", temperature: 0, maxRetries: 0, }).bindTools([codeExecutionTool]); const codeResult = await model.invoke( "Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]" ); const explanation = await model.invoke([ codeResult, { role: "user", content: "Please explain the question I asked, the code you wrote, and the answer you got.", }, ]); expect(typeof explanation.content).toBe("string"); expect(explanation.content.length).toBeGreaterThan(10); }); test("Can stream CodeExecutionTool", async () => { const codeExecutionTool: CodeExecutionTool = { codeExecution: {}, // Simply pass an empty object to enable it. 
}; const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro", temperature: 0, maxRetries: 0, }).bindTools([codeExecutionTool]); const stream = await model.stream( "Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]" ); let finalMsg: AIMessageChunk | undefined; for await (const msg of stream) { finalMsg = finalMsg ? concat(finalMsg, msg) : msg; } if (!finalMsg) { throw new Error("finalMsg is undefined"); } expect(Array.isArray(finalMsg.content)).toBeTruthy(); if (!Array.isArray(finalMsg.content)) { throw new Error("Content is not an array"); } const texts = finalMsg.content .flatMap((item) => ("text" in item ? [item.text] : [])) .join("\n"); expect(texts).toContain("21"); const executableCode = finalMsg.content.find( (item) => item.type === "executableCode" ); expect(executableCode).toBeDefined(); const codeResult = finalMsg.content.find( (item) => item.type === "codeExecutionResult" ); expect(codeResult).toBeDefined(); }); });
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/tests/embeddings.int.test.ts
import { test, expect } from "@jest/globals"; import { GoogleGenerativeAIEmbeddings } from "../embeddings.js"; test("Test GooglePalmEmbeddings.embedQuery", async () => { const embeddings = new GoogleGenerativeAIEmbeddings({ maxRetries: 1, }); const res = await embeddings.embedQuery("Hello world"); // console.log(res); expect(typeof res[0]).toBe("number"); }); test("Test GooglePalmEmbeddings.embedDocuments", async () => { const embeddings = new GoogleGenerativeAIEmbeddings({ maxRetries: 1, }); const res = await embeddings.embedDocuments([ "Hello world", "Bye bye", "we need", "at least", "six documents", "to test pagination", ]); // console.log(res); expect(res).toHaveLength(6); res.forEach((r) => { expect(typeof r[0]).toBe("number"); }); });
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/tests/chat_models.test.ts
import { test } from "@jest/globals"; import type { HarmBlockThreshold, HarmCategory } from "@google/generative-ai"; import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { AIMessage, HumanMessage, SystemMessage, ToolMessage, } from "@langchain/core/messages"; import { ChatGoogleGenerativeAI } from "../chat_models.js"; import { removeAdditionalProperties } from "../utils/zod_to_genai_parameters.js"; import { convertBaseMessagesToContent, convertMessageContentToParts, } from "../utils/common.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any function extractKeys(obj: Record<string, any>, keys: string[] = []) { for (const key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { keys.push(key); if (typeof obj[key] === "object" && obj[key] !== null) { extractKeys(obj[key], keys); } } } return keys; } test("Google AI - `temperature` must be in range [0.0,1.0]", async () => { expect( () => new ChatGoogleGenerativeAI({ temperature: -1.0, }) ).toThrow(); expect( () => new ChatGoogleGenerativeAI({ temperature: 1.1, }) ).toThrow(); }); test("Google AI - `maxOutputTokens` must be positive", async () => { expect( () => new ChatGoogleGenerativeAI({ maxOutputTokens: -1, }) ).toThrow(); }); test("Google AI - `topP` must be positive", async () => { expect( () => new ChatGoogleGenerativeAI({ topP: -1, }) ).toThrow(); }); test("Google AI - `topP` must be in the range [0,1]", async () => { expect( () => new ChatGoogleGenerativeAI({ topP: 3, }) ).toThrow(); }); test("Google AI - `topK` must be positive", async () => { expect( () => new ChatGoogleGenerativeAI({ topK: -1, }) ).toThrow(); }); test("Google AI - `safetySettings` category array must be unique", async () => { expect( () => new ChatGoogleGenerativeAI({ safetySettings: [ { category: "HARM_CATEGORY_HARASSMENT" as HarmCategory, threshold: "BLOCK_MEDIUM_AND_ABOVE" as HarmBlockThreshold, }, { category: "HARM_CATEGORY_HARASSMENT" as HarmCategory, threshold: 
"BLOCK_LOW_AND_ABOVE" as HarmBlockThreshold, }, { category: "HARM_CATEGORY_DEROGATORY" as HarmCategory, threshold: "BLOCK_ONLY_HIGH" as HarmBlockThreshold, }, ], }) ).toThrow(); }); test("removeAdditionalProperties can remove all instances of additionalProperties", async () => { const idealResponseSchema = z.object({ idealResponse: z .string() .optional() .describe("The ideal response to the question"), }); const questionSchema = z.object({ question: z.string().describe("Question text"), type: z.enum(["singleChoice", "multiChoice"]).describe("Question type"), options: z.array(z.string()).describe("List of possible answers"), correctAnswer: z .string() .optional() .describe("correct answer from the possible answers"), idealResponses: z .array(idealResponseSchema) .describe("Array of ideal responses to the question"), }); const schema = z.object({ questions: z.array(questionSchema).describe("Array of question objects"), }); const parsedSchemaArr = removeAdditionalProperties(zodToJsonSchema(schema)); const arrSchemaKeys = extractKeys(parsedSchemaArr); expect( arrSchemaKeys.find((key) => key === "additionalProperties") ).toBeUndefined(); const parsedSchemaObj = removeAdditionalProperties( zodToJsonSchema(questionSchema) ); const arrSchemaObj = extractKeys(parsedSchemaObj); expect( arrSchemaObj.find((key) => key === "additionalProperties") ).toBeUndefined(); const analysisSchema = z.object({ decision: z.enum(["UseAPI", "UseFallback"]), explanation: z.string(), apiDetails: z .object({ serviceName: z.string(), endpointName: z.string(), parameters: z.record(z.unknown()), extractionPath: z.string(), }) .optional(), }); const parsedAnalysisSchema = removeAdditionalProperties( zodToJsonSchema(analysisSchema) ); const analysisSchemaObj = extractKeys(parsedAnalysisSchema); expect( analysisSchemaObj.find((key) => key === "additionalProperties") ).toBeUndefined(); }); test("convertMessageContentToParts correctly handles message types", () => { const messages = [ new 
SystemMessage("You are a helpful assistant"), new HumanMessage("What's the weather like in new york?"), new AIMessage({ content: "", tool_calls: [ { name: "get_current_weather", args: { location: "New York", }, id: "123", }, ], }), new ToolMessage( "{ weather: '28 °C', location: 'New York, NY' }", "get_current_weather", "123" ), ]; const messagesAsGoogleParts = messages .map((msg) => convertMessageContentToParts(msg, false)) .flat(); // console.log(messagesAsGoogleParts); expect(messagesAsGoogleParts).toEqual([ { text: "You are a helpful assistant" }, { text: "What's the weather like in new york?" }, { functionCall: { name: "get_current_weather", args: { location: "New York", }, }, }, { text: "{ weather: '28 °C', location: 'New York, NY' }" }, ]); }); test("convertBaseMessagesToContent correctly creates properly formatted content", async () => { const toolResponse = "{ weather: '28 °C', location: 'New York, NY' }"; const toolName = "get_current_weather"; const toolId = "123"; const toolArgs = { location: "New York", }; const messages = [ new SystemMessage("You are a helpful assistant"), new HumanMessage("What's the weather like in new york?"), new AIMessage({ content: "", tool_calls: [ { name: toolName, args: toolArgs, id: toolId, }, ], }), new ToolMessage(toolResponse, toolName, toolId), ]; const messagesAsGoogleContent = convertBaseMessagesToContent(messages, false); // console.log(messagesAsGoogleContent); // Google Generative AI API only allows for 'model' and 'user' roles // This means that 'system', 'human' and 'tool' messages are converted // to 'user' messages, and ai messages are converted to 'model' messages. expect(messagesAsGoogleContent).toEqual([ { role: "user", parts: [ { text: "You are a helpful assistant" }, { text: "What's the weather like in new york?" 
}, ], }, { role: "model", parts: [ { functionCall: { name: toolName, args: toolArgs, }, }, ], }, { role: "user", parts: [{ text: toolResponse }], }, ]); }); test("Input has single system message followed by one user message, convert system message is false", async () => { const messages = [ new SystemMessage("You are a helpful assistant"), new HumanMessage("What's the weather like in new york?"), ]; const messagesAsGoogleContent = convertBaseMessagesToContent( messages, false, false ); expect(messagesAsGoogleContent).toEqual([ { role: "user", parts: [ { text: "You are a helpful assistant" }, { text: "What's the weather like in new york?" }, ], }, ]); }); test("Input has a system message that is not the first message, convert system message is false", async () => { const messages = [ new HumanMessage("What's the weather like in new york?"), new SystemMessage("You are a helpful assistant"), ]; expect(() => { convertBaseMessagesToContent(messages, false, false); }).toThrow("System message should be the first one"); }); test("Input has multiple system messages, convert system message is false", async () => { const messages = [ new SystemMessage("You are a helpful assistant"), new SystemMessage("You are not a helpful assistant"), ]; expect(() => { convertBaseMessagesToContent(messages, false, false); }).toThrow("System message should be the first one"); }); test("Input has no system message and one user message, convert system message is false", async () => { const messages = [new HumanMessage("What's the weather like in new york?")]; const messagesAsGoogleContent = convertBaseMessagesToContent( messages, false, false ); expect(messagesAsGoogleContent).toEqual([ { role: "user", parts: [{ text: "What's the weather like in new york?" 
}], }, ]); }); test("Input has no system message and multiple user message, convert system message is false", async () => { const messages = [ new HumanMessage("What's the weather like in new york?"), new HumanMessage("What's the weather like in toronto?"), new HumanMessage("What's the weather like in los angeles?"), ]; const messagesAsGoogleContent = convertBaseMessagesToContent( messages, false, false ); expect(messagesAsGoogleContent).toEqual([ { role: "user", parts: [{ text: "What's the weather like in new york?" }], }, { role: "user", parts: [{ text: "What's the weather like in toronto?" }], }, { role: "user", parts: [{ text: "What's the weather like in los angeles?" }], }, ]); }); test("Input has single system message followed by one user message, convert system message is true", async () => { const messages = [ new SystemMessage("You are a helpful assistant"), new HumanMessage("What's the weather like in new york?"), ]; const messagesAsGoogleContent = convertBaseMessagesToContent( messages, false, true ); expect(messagesAsGoogleContent).toEqual([ { role: "system", parts: [{ text: "You are a helpful assistant" }], }, { role: "user", parts: [{ text: "What's the weather like in new york?" 
}], }, ]); }); test("Input has single system message that is not the first message, convert system message is true", async () => { const messages = [ new HumanMessage("What's the weather like in new york?"), new SystemMessage("You are a helpful assistant"), ]; expect(() => convertBaseMessagesToContent(messages, false, true)).toThrow( "System message should be the first one" ); }); test("Input has multiple system message, convert system message is true", async () => { const messages = [ new SystemMessage("What's the weather like in new york?"), new SystemMessage("You are a helpful assistant"), ]; expect(() => convertBaseMessagesToContent(messages, false, true)).toThrow( "System message should be the first one" ); }); test("Input has no system message and one user message, convert system message is true", async () => { const messages = [new HumanMessage("What's the weather like in new york?")]; const messagesAsGoogleContent = convertBaseMessagesToContent( messages, false, true ); expect(messagesAsGoogleContent).toEqual([ { role: "user", parts: [{ text: "What's the weather like in new york?" }], }, ]); }); test("Input has no system message and multiple user messages, convert system message is true", async () => { const messages = [ new HumanMessage("What's the weather like in new york?"), new HumanMessage("Will it rain today?"), new HumanMessage("How about next week?"), ]; const messagesAsGoogleContent = convertBaseMessagesToContent( messages, false, true ); expect(messagesAsGoogleContent).toEqual([ { role: "user", parts: [{ text: "What's the weather like in new york?" }], }, { role: "user", parts: [{ text: "Will it rain today?" }], }, { role: "user", parts: [{ text: "How about next week?" }], }, ]); });
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/utils/tools.ts
import {
  Tool as GenerativeAITool,
  ToolConfig,
  FunctionCallingMode,
  FunctionDeclaration,
  FunctionDeclarationsTool,
  FunctionDeclarationSchema,
} from "@google/generative-ai";
import { ToolChoice } from "@langchain/core/language_models/chat_models";
import { StructuredToolInterface } from "@langchain/core/tools";
import { isLangChainTool } from "@langchain/core/utils/function_calling";
import {
  isOpenAITool,
  ToolDefinition,
} from "@langchain/core/language_models/base";
import { convertToGenerativeAITools } from "./common.js";
import { GoogleGenerativeAIToolType } from "../types.js";
import { removeAdditionalProperties } from "./zod_to_genai_parameters.js";

/**
 * Converts a heterogeneous list of tools (LangChain structured tools,
 * OpenAI-format tool definitions, or native GenAI tools such as
 * `codeExecution` / `googleSearchRetrieval`) into the `tools` +
 * `toolConfig` pair expected by the Google Generative AI SDK.
 *
 * @param tools - Mixed-format tools to convert.
 * @param extra - Optional tool-choice constraints forwarded to
 *   {@link createToolConfig}.
 * @returns The converted tools and, when constraints were given, a
 *   matching `toolConfig`.
 */
export function convertToolsToGenAI(
  tools: GoogleGenerativeAIToolType[],
  extra?: {
    toolChoice?: ToolChoice;
    allowedFunctionNames?: string[];
  }
): {
  tools: GenerativeAITool[];
  toolConfig?: ToolConfig;
} {
  // Extract function declaration processing to a separate function
  const genAITools = processTools(tools);
  // Simplify tool config creation
  const toolConfig = createToolConfig(genAITools, extra);

  return { tools: genAITools, toolConfig };
}

/**
 * Normalizes each input tool into GenAI form. LangChain and OpenAI tools
 * become `FunctionDeclaration`s which are collected separately; anything
 * else (native GenAI tools) is passed through untouched. If one of the
 * pass-through tools already carries `functionDeclarations`, the collected
 * declarations are merged into the FIRST such tool (the Google API accepts
 * only one functionDeclarations tool); otherwise a new
 * `{ functionDeclarations }` entry is appended.
 */
function processTools(tools: GoogleGenerativeAIToolType[]): GenerativeAITool[] {
  // Declarations converted from LangChain/OpenAI tools, merged in below.
  let functionDeclarationTools: FunctionDeclaration[] = [];
  // Native GenAI tools kept as-is.
  const genAITools: GenerativeAITool[] = [];

  tools.forEach((tool) => {
    if (isLangChainTool(tool)) {
      const [convertedTool] = convertToGenerativeAITools([
        tool as StructuredToolInterface,
      ]);
      if (convertedTool.functionDeclarations) {
        functionDeclarationTools.push(...convertedTool.functionDeclarations);
      }
    } else if (isOpenAITool(tool)) {
      const { functionDeclarations } = convertOpenAIToolToGenAI(tool);
      if (functionDeclarations) {
        functionDeclarationTools.push(...functionDeclarations);
      } else {
        throw new Error(
          "Failed to convert OpenAI structured tool to GenerativeAI tool"
        );
      }
    } else {
      genAITools.push(tool as GenerativeAITool);
    }
  });

  const genAIFunctionDeclaration = genAITools.find(
    (t) => "functionDeclarations" in t
  );
  if (genAIFunctionDeclaration) {
    // Merge the converted declarations into the first tool that already
    // has functionDeclarations. NOTE: this map mutates the captured
    // functionDeclarationTools array mid-iteration — order-sensitive.
    return genAITools.map((tool) => {
      if (
        functionDeclarationTools?.length > 0 &&
        "functionDeclarations" in tool
      ) {
        const newTool = {
          functionDeclarations: [
            ...(tool.functionDeclarations || []),
            ...functionDeclarationTools,
          ],
        };
        // Clear the functionDeclarationTools array so it is not passed again
        functionDeclarationTools = [];
        return newTool;
      }
      return tool;
    });
  }

  return [
    ...genAITools,
    // Only append a declarations tool if anything was actually converted.
    ...(functionDeclarationTools.length > 0
      ? [
          {
            functionDeclarations: functionDeclarationTools,
          },
        ]
      : []),
  ];
}

/**
 * Converts a single OpenAI-format tool definition into a GenAI
 * `FunctionDeclarationsTool`, stripping JSON-schema attributes
 * (`additionalProperties`, `$schema`) that the Google API rejects.
 */
function convertOpenAIToolToGenAI(
  tool: ToolDefinition
): FunctionDeclarationsTool {
  return {
    functionDeclarations: [
      {
        name: tool.function.name,
        description: tool.function.description,
        parameters: removeAdditionalProperties(
          tool.function.parameters
        ) as FunctionDeclarationSchema,
      },
    ],
  };
}

/**
 * Builds a `ToolConfig` from the caller's `toolChoice` /
 * `allowedFunctionNames` hints.
 *
 * - "any" | "auto" | "none" map directly onto `FunctionCallingMode`.
 * - Any other string is treated as a specific function name to force:
 *   mode becomes ANY and the name is added to `allowedFunctionNames`.
 * - Returns `undefined` when there are no tools or no constraints.
 */
function createToolConfig(
  genAITools: GenerativeAITool[],
  extra?: {
    toolChoice?: ToolChoice;
    allowedFunctionNames?: string[];
  }
): ToolConfig | undefined {
  if (!genAITools.length || !extra) return undefined;

  const { toolChoice, allowedFunctionNames } = extra;

  const modeMap: Record<string, FunctionCallingMode> = {
    any: FunctionCallingMode.ANY,
    auto: FunctionCallingMode.AUTO,
    none: FunctionCallingMode.NONE,
  };

  if (toolChoice && ["any", "auto", "none"].includes(toolChoice as string)) {
    return {
      functionCallingConfig: {
        // Membership was just checked above, so the "MODE_UNSPECIFIED"
        // fallback should be unreachable; kept as a safety net.
        mode: modeMap[toolChoice as keyof typeof modeMap] ?? "MODE_UNSPECIFIED",
        allowedFunctionNames,
      },
    };
  }

  // A non-keyword string toolChoice names a specific function to force.
  if (typeof toolChoice === "string" || allowedFunctionNames) {
    return {
      functionCallingConfig: {
        mode: FunctionCallingMode.ANY,
        allowedFunctionNames: [
          ...(allowedFunctionNames ?? []),
          ...(toolChoice && typeof toolChoice === "string" ? [toolChoice] : []),
        ],
      },
    };
  }
  return undefined;
}
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts
/* eslint-disable @typescript-eslint/no-unused-vars */ import type { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema, type SchemaType as FunctionDeclarationSchemaType, } from "@google/generative-ai"; export interface GenerativeAIJsonSchema extends Record<string, unknown> { properties?: Record<string, GenerativeAIJsonSchema>; type: FunctionDeclarationSchemaType; } export interface GenerativeAIJsonSchemaDirty extends GenerativeAIJsonSchema { properties?: Record<string, GenerativeAIJsonSchemaDirty>; additionalProperties?: boolean; } export function removeAdditionalProperties( // eslint-disable-next-line @typescript-eslint/no-explicit-any obj: Record<string, any> ): GenerativeAIJsonSchema { if (typeof obj === "object" && obj !== null) { const newObj = { ...obj }; if ("additionalProperties" in newObj) { delete newObj.additionalProperties; } if ("$schema" in newObj) { delete newObj.$schema; } for (const key in newObj) { if (key in newObj) { if (Array.isArray(newObj[key])) { newObj[key] = newObj[key].map(removeAdditionalProperties); } else if (typeof newObj[key] === "object" && newObj[key] !== null) { newObj[key] = removeAdditionalProperties(newObj[key]); } } } return newObj as GenerativeAIJsonSchema; } return obj as GenerativeAIJsonSchema; } export function zodToGenerativeAIParameters( // eslint-disable-next-line @typescript-eslint/no-explicit-any zodObj: z.ZodType<any> ): GenerativeAIFunctionDeclarationSchema { // GenerativeAI doesn't accept either the $schema or additionalProperties // attributes, so we need to explicitly remove them. 
const jsonSchema = removeAdditionalProperties(zodToJsonSchema(zodObj)); const { $schema, ...rest } = jsonSchema; return rest as GenerativeAIFunctionDeclarationSchema; } export function jsonSchemaToGeminiParameters( // eslint-disable-next-line @typescript-eslint/no-explicit-any schema: Record<string, any> ): GenerativeAIFunctionDeclarationSchema { // Gemini doesn't accept either the $schema or additionalProperties // attributes, so we need to explicitly remove them. // eslint-disable-next-line @typescript-eslint/no-explicit-any const jsonSchema = removeAdditionalProperties( schema as GenerativeAIJsonSchemaDirty ); const { $schema, ...rest } = jsonSchema; return rest as GenerativeAIFunctionDeclarationSchema; }
0
lc_public_repos/langchainjs/libs/langchain-google-genai/src
lc_public_repos/langchainjs/libs/langchain-google-genai/src/utils/common.ts
import { EnhancedGenerateContentResponse, Content, Part, type FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, type FunctionDeclaration as GenerativeAIFunctionDeclaration, POSSIBLE_ROLES, FunctionResponsePart, FunctionCallPart, } from "@google/generative-ai"; import { AIMessage, AIMessageChunk, BaseMessage, ChatMessage, MessageContent, MessageContentComplex, UsageMetadata, isBaseMessage, } from "@langchain/core/messages"; import { ChatGeneration, ChatGenerationChunk, ChatResult, } from "@langchain/core/outputs"; import { isLangChainTool } from "@langchain/core/utils/function_calling"; import { isOpenAITool } from "@langchain/core/language_models/base"; import { ToolCallChunk } from "@langchain/core/messages/tool"; import { jsonSchemaToGeminiParameters, zodToGenerativeAIParameters, } from "./zod_to_genai_parameters.js"; import { GoogleGenerativeAIToolType } from "../types.js"; export function getMessageAuthor(message: BaseMessage) { const type = message._getType(); if (ChatMessage.isInstance(message)) { return message.role; } if (type === "tool") { return type; } return message.name ?? type; } /** * Maps a message type to a Google Generative AI chat author. * @param message The message to map. * @param model The model to use for mapping. * @returns The message type mapped to a Google Generative AI chat author. */ export function convertAuthorToRole( author: string ): (typeof POSSIBLE_ROLES)[number] { switch (author) { /** * Note: Gemini currently is not supporting system messages * we will convert them to human messages and merge with following * */ case "ai": case "model": // getMessageAuthor returns message.name. code ex.: return message.name ?? 
type; return "model"; case "system": return "system"; case "human": return "user"; case "tool": case "function": return "function"; default: throw new Error(`Unknown / unsupported author: ${author}`); } } function messageContentMedia(content: MessageContentComplex): Part { if ("mimeType" in content && "data" in content) { return { inlineData: { mimeType: content.mimeType, data: content.data, }, }; } throw new Error("Invalid media content"); } export function convertMessageContentToParts( message: BaseMessage, isMultimodalModel: boolean ): Part[] { if (typeof message.content === "string" && message.content !== "") { return [{ text: message.content }]; } let functionCalls: FunctionCallPart[] = []; let functionResponses: FunctionResponsePart[] = []; let messageParts: Part[] = []; if ( "tool_calls" in message && Array.isArray(message.tool_calls) && message.tool_calls.length > 0 ) { functionCalls = message.tool_calls.map((tc) => ({ functionCall: { name: tc.name, args: tc.args, }, })); } else if (message.getType() === "tool" && message.name && message.content) { functionResponses = [ { functionResponse: { name: message.name, response: message.content, }, }, ]; } else if (Array.isArray(message.content)) { messageParts = message.content.map((c) => { if (c.type === "text") { return { text: c.text, }; } else if (c.type === "executableCode") { return { executableCode: c.executableCode, }; } else if (c.type === "codeExecutionResult") { return { codeExecutionResult: c.codeExecutionResult, }; } if (c.type === "image_url") { if (!isMultimodalModel) { throw new Error(`This model does not support images`); } let source; if (typeof c.image_url === "string") { source = c.image_url; } else if (typeof c.image_url === "object" && "url" in c.image_url) { source = c.image_url.url; } else { throw new Error("Please provide image as base64 encoded data URL"); } const [dm, data] = source.split(","); if (!dm.startsWith("data:")) { throw new Error("Please provide image as base64 encoded data 
URL"); } const [mimeType, encoding] = dm.replace(/^data:/, "").split(";"); if (encoding !== "base64") { throw new Error("Please provide image as base64 encoded data URL"); } return { inlineData: { data, mimeType, }, }; } else if (c.type === "media") { return messageContentMedia(c); } else if (c.type === "tool_use") { return { functionCall: { name: c.name, args: c.input, }, }; } throw new Error(`Unknown content type ${(c as { type: string }).type}`); }); } return [...messageParts, ...functionCalls, ...functionResponses]; } export function convertBaseMessagesToContent( messages: BaseMessage[], isMultimodalModel: boolean, convertSystemMessageToHumanContent: boolean = false ) { return messages.reduce<{ content: Content[]; mergeWithPreviousContent: boolean; }>( (acc, message, index) => { if (!isBaseMessage(message)) { throw new Error("Unsupported message input"); } const author = getMessageAuthor(message); if (author === "system" && index !== 0) { throw new Error("System message should be the first one"); } const role = convertAuthorToRole(author); const prevContent = acc.content[acc.content.length]; if ( !acc.mergeWithPreviousContent && prevContent && prevContent.role === role ) { throw new Error( "Google Generative AI requires alternate messages between authors" ); } const parts = convertMessageContentToParts(message, isMultimodalModel); if (acc.mergeWithPreviousContent) { const prevContent = acc.content[acc.content.length - 1]; if (!prevContent) { throw new Error( "There was a problem parsing your system message. Please try a prompt without one." ); } prevContent.parts.push(...parts); return { mergeWithPreviousContent: false, content: acc.content, }; } let actualRole = role; if ( actualRole === "function" || (actualRole === "system" && !convertSystemMessageToHumanContent) ) { // GenerativeAI API will throw an error if the role is not "user" or "model." 
actualRole = "user"; } const content: Content = { role: actualRole, parts, }; return { mergeWithPreviousContent: author === "system" && !convertSystemMessageToHumanContent, content: [...acc.content, content], }; }, { content: [], mergeWithPreviousContent: false } ).content; } export function mapGenerateContentResultToChatResult( response: EnhancedGenerateContentResponse, extra?: { usageMetadata: UsageMetadata | undefined; } ): ChatResult { // if rejected or error, return empty generations with reason in filters if ( !response.candidates || response.candidates.length === 0 || !response.candidates[0] ) { return { generations: [], llmOutput: { filters: response.promptFeedback, }, }; } const functionCalls = response.functionCalls(); const [candidate] = response.candidates; const { content: candidateContent, ...generationInfo } = candidate; let content: MessageContent; if (candidateContent?.parts.length === 1 && candidateContent.parts[0].text) { content = candidateContent.parts[0].text; } else { content = candidateContent.parts.map((p) => { if ("text" in p) { return { type: "text", text: p.text, }; } else if ("executableCode" in p) { return { type: "executableCode", executableCode: p.executableCode, }; } else if ("codeExecutionResult" in p) { return { type: "codeExecutionResult", codeExecutionResult: p.codeExecutionResult, }; } return p; }); } let text = ""; if (typeof content === "string") { text = content; } else if ("text" in content[0]) { text = content[0].text; } const generation: ChatGeneration = { text, message: new AIMessage({ content, tool_calls: functionCalls?.map((fc) => ({ ...fc, type: "tool_call", })), additional_kwargs: { ...generationInfo, }, usage_metadata: extra?.usageMetadata, }), generationInfo, }; return { generations: [generation], }; } export function convertResponseContentToChatGenerationChunk( response: EnhancedGenerateContentResponse, extra: { usageMetadata?: UsageMetadata | undefined; index: number; } ): ChatGenerationChunk | null { if 
(!response.candidates || response.candidates.length === 0) { return null; } const functionCalls = response.functionCalls(); const [candidate] = response.candidates; const { content: candidateContent, ...generationInfo } = candidate; let content: MessageContent | undefined; // Checks if some parts do not have text. If false, it means that the content is a string. if ( candidateContent?.parts && candidateContent.parts.every((p) => "text" in p) ) { content = candidateContent.parts.map((p) => p.text).join(""); } else if (candidateContent.parts) { content = candidateContent.parts.map((p) => { if ("text" in p) { return { type: "text", text: p.text, }; } else if ("executableCode" in p) { return { type: "executableCode", executableCode: p.executableCode, }; } else if ("codeExecutionResult" in p) { return { type: "codeExecutionResult", codeExecutionResult: p.codeExecutionResult, }; } return p; }); } let text = ""; if (content && typeof content === "string") { text = content; } else if (content && typeof content === "object" && "text" in content[0]) { text = content[0].text; } const toolCallChunks: ToolCallChunk[] = []; if (functionCalls) { toolCallChunks.push( ...functionCalls.map((fc) => ({ ...fc, args: JSON.stringify(fc.args), index: extra.index, type: "tool_call_chunk" as const, })) ); } return new ChatGenerationChunk({ text, message: new AIMessageChunk({ content: content || "", name: !candidateContent ? undefined : candidateContent.role, tool_call_chunks: toolCallChunks, // Each chunk can have unique "generationInfo", and merging strategy is unclear, // so leave blank for now. 
additional_kwargs: {}, usage_metadata: extra.usageMetadata, }), generationInfo, }); } export function convertToGenerativeAITools( tools: GoogleGenerativeAIToolType[] ): GoogleGenerativeAIFunctionDeclarationsTool[] { if ( tools.every( (tool) => "functionDeclarations" in tool && Array.isArray(tool.functionDeclarations) ) ) { return tools as GoogleGenerativeAIFunctionDeclarationsTool[]; } return [ { functionDeclarations: tools.map( (tool): GenerativeAIFunctionDeclaration => { if (isLangChainTool(tool)) { const jsonSchema = zodToGenerativeAIParameters(tool.schema); return { name: tool.name, description: tool.description, parameters: jsonSchema, }; } if (isOpenAITool(tool)) { return { name: tool.function.name, description: tool.function.description ?? `A function available to call.`, parameters: jsonSchemaToGeminiParameters( tool.function.parameters ), }; } return tool as unknown as GenerativeAIFunctionDeclaration; } ), }, ]; }
0
lc_public_repos/langchainjs/libs/langchain-google-genai
lc_public_repos/langchainjs/libs/langchain-google-genai/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";
import { afterAll, jest } from "@jest/globals";

// Make sure every pending LangChain callback has settled before Jest exits.
afterAll(awaitAllCallbacks);

// Tests can opt out of console noise by setting DISABLE_CONSOLE_LOGS=true.
const logsDisabled = process.env.DISABLE_CONSOLE_LOGS === "true";
if (logsDisabled) {
  console.log = jest.fn();
}
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { "^.+\\.tsx?$": ["@swc/jest"], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], testTimeout: 20_000, collectCoverageFrom: ["src/**/*.ts"], };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node");

/**
 * Node test environment that exposes the host Float32Array inside the test
 * sandbox, so `instanceof Float32Array` checks succeed in tests.
 * Works around https://github.com/xenova/transformers.js/issues/57 and
 * https://github.com/jestjs/jest/issues/2549
 */
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(config, context) {
    super(config, context);
    // Share the realm's Float32Array with the sandboxed globals.
    Object.assign(this.global, { Float32Array });
  }
}

module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/README.md
You can do so by adding an appropriate field to your project's `package.json`, like this:
## 💁 Contributing As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation. For detailed information on how to contribute, see [here](../../CONTRIBUTING.md).
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/.eslintrc.cjs
// ESLint configuration for this package: airbnb-base + typescript-eslint,
// with Prettier disabling conflicting formatting rules.
module.exports = {
  extends: [
    "airbnb-base",
    "eslint:recommended",
    "prettier",
    "plugin:@typescript-eslint/recommended",
  ],
  parserOptions: {
    ecmaVersion: 12,
    parser: "@typescript-eslint/parser",
    // Type-aware linting (required by no-floating-promises etc. below).
    project: "./tsconfig.json",
    sourceType: "module",
  },
  plugins: ["@typescript-eslint", "no-instanceof"],
  ignorePatterns: [
    ".eslintrc.cjs",
    "scripts",
    "node_modules",
    "dist",
    "dist-cjs",
    "*.js",
    "*.cjs",
    "*.d.ts",
  ],
  rules: {
    // Environment variables must go through the config layer, not process.env.
    "no-process-env": 2,
    "no-instanceof/no-instanceof": 2,
    "@typescript-eslint/explicit-module-boundary-types": 0,
    "@typescript-eslint/no-empty-function": 0,
    "@typescript-eslint/no-shadow": 0,
    "@typescript-eslint/no-empty-interface": 0,
    "@typescript-eslint/no-use-before-define": ["error", "nofunc"],
    "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
    "@typescript-eslint/no-floating-promises": "error",
    "@typescript-eslint/no-misused-promises": "error",
    camelcase: 0,
    "class-methods-use-this": 0,
    "import/extensions": [2, "ignorePackages"],
    "import/no-extraneous-dependencies": [
      "error",
      { devDependencies: ["**/*.test.ts"] },
    ],
    "import/no-unresolved": 0,
    "import/prefer-default-export": 0,
    "keyword-spacing": "error",
    "max-classes-per-file": 0,
    "max-len": 0,
    "no-await-in-loop": 0,
    "no-bitwise": 0,
    "no-console": 0,
    "no-restricted-syntax": 0,
    "no-shadow": 0,
    "no-continue": 0,
    "no-void": 0,
    "no-underscore-dangle": 0,
    "no-use-before-define": 0,
    "no-useless-constructor": 0,
    "no-return-await": 0,
    "consistent-return": 0,
    "no-else-return": 0,
    "func-names": 0,
    "no-lonely-if": 0,
    "prefer-template": 0,
    "prefer-rest-params": 0,
    "new-cap": ["error", { properties: false, capIsNew: false }],
    "arrow-body-style": 0,
    "prefer-destructuring": 0
  },
  overrides: [
    // Unused-variable warnings are noise in test files.
    { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } }
  ]
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [ /node\:/, /@langchain\/core\//, /langchain\//, "@rockset/client/dist/codegen/api.js", "convex", "convex/server", "convex/values", "discord.js", "duck-duck-scrape", "firebase-admin/app", "firebase-admin/firestore", "lunary/langchain", "mysql2/promise", "pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js", "web-auth-library/google", "notion-to-md/build/utils/notion.js", "@getzep/zep-cloud/api", "@supabase/postgrest-js", "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js", ], entrypoints: { load: "load/index", // tools "load/serializable": "load/serializable", "tools/aiplugin": "tools/aiplugin", "tools/aws_lambda": "tools/aws_lambda", "tools/aws_sfn": "tools/aws_sfn", "tools/bingserpapi": "tools/bingserpapi", "tools/brave_search": "tools/brave_search", "tools/duckduckgo_search": "tools/duckduckgo_search", "tools/calculator": "tools/calculator", "tools/connery": "tools/connery", "tools/dadjokeapi": "tools/dadjokeapi", "tools/discord": "tools/discord", "tools/dynamic": "tools/dynamic", "tools/dataforseo_api_search": "tools/dataforseo_api_search", "tools/gmail": "tools/gmail/index", "tools/google_calendar": "tools/google_calendar/index", "tools/google_custom_search": "tools/google_custom_search", "tools/google_places": "tools/google_places", "tools/google_routes": "tools/google_routes", "tools/ifttt": "tools/ifttt", "tools/searchapi": "tools/searchapi", "tools/searxng_search": "tools/searxng_search", "tools/serpapi": "tools/serpapi", "tools/serper": "tools/serper", "tools/stackexchange": "tools/stackexchange", "tools/tavily_search": "tools/tavily_search", "tools/wikipedia_query_run": "tools/wikipedia_query_run", "tools/wolframalpha": "tools/wolframalpha", // toolkits "agents/toolkits/aws_sfn": 
"agents/toolkits/aws_sfn", "agents/toolkits/base": "agents/toolkits/base", "agents/toolkits/connery": "agents/toolkits/connery/index", "agents/toolkits/stagehand": "agents/toolkits/stagehand", // embeddings "embeddings/alibaba_tongyi": "embeddings/alibaba_tongyi", "embeddings/baidu_qianfan": "embeddings/baidu_qianfan", "embeddings/bedrock": "embeddings/bedrock", "embeddings/cloudflare_workersai": "embeddings/cloudflare_workersai", "embeddings/cohere": "embeddings/cohere", "embeddings/deepinfra": "embeddings/deepinfra", "embeddings/fireworks": "embeddings/fireworks", "embeddings/gradient_ai": "embeddings/gradient_ai", "embeddings/hf": "embeddings/hf", "embeddings/hf_transformers": "embeddings/hf_transformers", "embeddings/ibm": "embeddings/ibm", "embeddings/jina": "embeddings/jina", "embeddings/llama_cpp": "embeddings/llama_cpp", "embeddings/minimax": "embeddings/minimax", "embeddings/ollama": "embeddings/ollama", "embeddings/premai": "embeddings/premai", "embeddings/tensorflow": "embeddings/tensorflow", "embeddings/tencent_hunyuan": "embeddings/tencent_hunyuan/index", "embeddings/tencent_hunyuan/web": "embeddings/tencent_hunyuan/web", "embeddings/togetherai": "embeddings/togetherai", "embeddings/voyage": "embeddings/voyage", "embeddings/zhipuai": "embeddings/zhipuai", // llms "llms/ai21": "llms/ai21", "llms/aleph_alpha": "llms/aleph_alpha", "llms/arcjet": "llms/arcjet", "llms/bedrock": "llms/bedrock/index", "llms/bedrock/web": "llms/bedrock/web", "llms/cloudflare_workersai": "llms/cloudflare_workersai", "llms/cohere": "llms/cohere", "llms/deepinfra": "llms/deepinfra", "llms/fireworks": "llms/fireworks", "llms/friendli": "llms/friendli", "llms/gradient_ai": "llms/gradient_ai", "llms/hf": "llms/hf", "llms/ibm": "llms/ibm", "llms/llama_cpp": "llms/llama_cpp", "llms/ollama": "llms/ollama", "llms/portkey": "llms/portkey", "llms/raycast": "llms/raycast", "llms/replicate": "llms/replicate", "llms/sagemaker_endpoint": "llms/sagemaker_endpoint", "llms/togetherai": 
"llms/togetherai", "llms/watsonx_ai": "llms/watsonx_ai", "llms/writer": "llms/writer", "llms/yandex": "llms/yandex", "llms/layerup_security": "llms/layerup_security", // vectorstores "vectorstores/analyticdb": "vectorstores/analyticdb", "vectorstores/astradb": "vectorstores/astradb", "vectorstores/azure_aisearch": "vectorstores/azure_aisearch", "vectorstores/azure_cosmosdb": "vectorstores/azure_cosmosdb", "vectorstores/cassandra": "vectorstores/cassandra", "vectorstores/chroma": "vectorstores/chroma", "vectorstores/clickhouse": "vectorstores/clickhouse", "vectorstores/closevector/node": "vectorstores/closevector/node", "vectorstores/closevector/web": "vectorstores/closevector/web", "vectorstores/cloudflare_vectorize": "vectorstores/cloudflare_vectorize", "vectorstores/convex": "vectorstores/convex", "vectorstores/couchbase": "vectorstores/couchbase", "vectorstores/elasticsearch": "vectorstores/elasticsearch", "vectorstores/faiss": "vectorstores/faiss", "vectorstores/googlevertexai": "vectorstores/googlevertexai", "vectorstores/hnswlib": "vectorstores/hnswlib", "vectorstores/hanavector": "vectorstores/hanavector", "vectorstores/lancedb": "vectorstores/lancedb", "vectorstores/libsql": "vectorstores/libsql", "vectorstores/milvus": "vectorstores/milvus", "vectorstores/momento_vector_index": "vectorstores/momento_vector_index", "vectorstores/mongodb_atlas": "vectorstores/mongodb_atlas", "vectorstores/myscale": "vectorstores/myscale", "vectorstores/neo4j_vector": "vectorstores/neo4j_vector", "vectorstores/neon": "vectorstores/neon", "vectorstores/opensearch": "vectorstores/opensearch", "vectorstores/pgvector": "vectorstores/pgvector", "vectorstores/pinecone": "vectorstores/pinecone", "vectorstores/prisma": "vectorstores/prisma", "vectorstores/qdrant": "vectorstores/qdrant", "vectorstores/redis": "vectorstores/redis", "vectorstores/rockset": "vectorstores/rockset", "vectorstores/singlestore": "vectorstores/singlestore", "vectorstores/supabase": "vectorstores/supabase", 
"vectorstores/tigris": "vectorstores/tigris", "vectorstores/turbopuffer": "vectorstores/turbopuffer", "vectorstores/typeorm": "vectorstores/typeorm", "vectorstores/typesense": "vectorstores/typesense", "vectorstores/upstash": "vectorstores/upstash", "vectorstores/usearch": "vectorstores/usearch", "vectorstores/vectara": "vectorstores/vectara", "vectorstores/vercel_postgres": "vectorstores/vercel_postgres", "vectorstores/voy": "vectorstores/voy", "vectorstores/weaviate": "vectorstores/weaviate", "vectorstores/xata": "vectorstores/xata", "vectorstores/zep": "vectorstores/zep", "vectorstores/zep_cloud": "vectorstores/zep_cloud", // chat_models "chat_models/alibaba_tongyi": "chat_models/alibaba_tongyi", "chat_models/arcjet": "chat_models/arcjet", "chat_models/baiduwenxin": "chat_models/baiduwenxin", "chat_models/bedrock": "chat_models/bedrock/index", "chat_models/bedrock/web": "chat_models/bedrock/web", "chat_models/cloudflare_workersai": "chat_models/cloudflare_workersai", "chat_models/deepinfra": "chat_models/deepinfra", "chat_models/fireworks": "chat_models/fireworks", "chat_models/friendli": "chat_models/friendli", "chat_models/ibm": "chat_models/ibm", "chat_models/iflytek_xinghuo": "chat_models/iflytek_xinghuo/index", "chat_models/iflytek_xinghuo/web": "chat_models/iflytek_xinghuo/web", "chat_models/llama_cpp": "chat_models/llama_cpp", "chat_models/minimax": "chat_models/minimax", "chat_models/moonshot": "chat_models/moonshot", "chat_models/novita": "chat_models/novita", "chat_models/ollama": "chat_models/ollama", "chat_models/portkey": "chat_models/portkey", "chat_models/premai": "chat_models/premai", "chat_models/tencent_hunyuan": "chat_models/tencent_hunyuan/index", "chat_models/tencent_hunyuan/web": "chat_models/tencent_hunyuan/web", "chat_models/togetherai": "chat_models/togetherai", "chat_models/webllm": "chat_models/webllm", "chat_models/yandex": "chat_models/yandex", "chat_models/zhipuai": "chat_models/zhipuai", // callbacks "callbacks/handlers/llmonitor": 
"callbacks/handlers/llmonitor", "callbacks/handlers/lunary": "callbacks/handlers/lunary", "callbacks/handlers/upstash_ratelimit": "callbacks/handlers/upstash_ratelimit", // retrievers "retrievers/amazon_kendra": "retrievers/amazon_kendra", "retrievers/amazon_knowledge_base": "retrievers/amazon_knowledge_base", "retrievers/bm25": "retrievers/bm25", "retrievers/chaindesk": "retrievers/chaindesk", "retrievers/databerry": "retrievers/databerry", "retrievers/dria": "retrievers/dria", "retrievers/metal": "retrievers/metal", "retrievers/remote": "retrievers/remote/index", "retrievers/supabase": "retrievers/supabase", "retrievers/tavily_search_api": "retrievers/tavily_search_api", "retrievers/vectara_summary": "retrievers/vectara_summary", "retrievers/vespa": "retrievers/vespa", "retrievers/zep": "retrievers/zep", // query translators "structured_query/chroma": "structured_query/chroma", "structured_query/qdrant": "structured_query/qdrant", "structured_query/supabase": "structured_query/supabase", "structured_query/vectara": "structured_query/vectara", "retrievers/zep_cloud": "retrievers/zep_cloud", // cache "caches/cloudflare_kv": "caches/cloudflare_kv", "caches/ioredis": "caches/ioredis", "caches/momento": "caches/momento", "caches/upstash_redis": "caches/upstash_redis", // graphs "graphs/neo4j_graph": "graphs/neo4j_graph", "graphs/memgraph_graph": "graphs/memgraph_graph", // document_compressors "document_compressors/ibm": "document_compressors/ibm", // document transformers "document_transformers/html_to_text": "document_transformers/html_to_text", "document_transformers/mozilla_readability": "document_transformers/mozilla_readability", // storage "storage/cassandra": "storage/cassandra", "storage/convex": "storage/convex", "storage/ioredis": "storage/ioredis", "storage/upstash_redis": "storage/upstash_redis", "storage/vercel_kv": "storage/vercel_kv", // stores "stores/doc/base": "stores/doc/base", "stores/doc/gcs": "stores/doc/gcs", "stores/doc/in_memory": 
"stores/doc/in_memory", "stores/message/astradb": "stores/message/astradb", "stores/message/cassandra": "stores/message/cassandra", "stores/message/cloudflare_d1": "stores/message/cloudflare_d1", "stores/message/convex": "stores/message/convex", "stores/message/dynamodb": "stores/message/dynamodb", "stores/message/firestore": "stores/message/firestore", "stores/message/file_system": "stores/message/file_system", "stores/message/in_memory": "stores/message/in_memory", "stores/message/ipfs_datastore": "stores/message/ipfs_datastore", "stores/message/ioredis": "stores/message/ioredis", "stores/message/momento": "stores/message/momento", "stores/message/mongodb": "stores/message/mongodb", "stores/message/planetscale": "stores/message/planetscale", "stores/message/postgres": "stores/message/postgres", "stores/message/redis": "stores/message/redis", "stores/message/upstash_redis": "stores/message/upstash_redis", "stores/message/xata": "stores/message/xata", "stores/message/zep_cloud": "stores/message/zep_cloud", // memory "memory/chat_memory": "memory/chat_memory", "memory/motorhead_memory": "memory/motorhead_memory", "memory/zep": "memory/zep", "memory/zep_cloud": "memory/zep_cloud", // indexes "indexes/base": "indexes/base", "indexes/postgres": "indexes/postgres", "indexes/memory": "indexes/memory", "indexes/sqlite": "indexes/sqlite", // document_loaders "document_loaders/web/airtable": "document_loaders/web/airtable", "document_loaders/web/apify_dataset": "document_loaders/web/apify_dataset", "document_loaders/web/assemblyai": "document_loaders/web/assemblyai", "document_loaders/web/azure_blob_storage_container": "document_loaders/web/azure_blob_storage_container", "document_loaders/web/azure_blob_storage_file": "document_loaders/web/azure_blob_storage_file", "document_loaders/web/browserbase": "document_loaders/web/browserbase", "document_loaders/web/cheerio": "document_loaders/web/cheerio", "document_loaders/web/html": "document_loaders/web/html", 
"document_loaders/web/puppeteer": "document_loaders/web/puppeteer", "document_loaders/web/playwright": "document_loaders/web/playwright", "document_loaders/web/college_confidential": "document_loaders/web/college_confidential", "document_loaders/web/gitbook": "document_loaders/web/gitbook", "document_loaders/web/hn": "document_loaders/web/hn", "document_loaders/web/imsdb": "document_loaders/web/imsdb", "document_loaders/web/figma": "document_loaders/web/figma", "document_loaders/web/firecrawl": "document_loaders/web/firecrawl", "document_loaders/web/github": "document_loaders/web/github", "document_loaders/web/taskade": "document_loaders/web/taskade", "document_loaders/web/notionapi": "document_loaders/web/notionapi", "document_loaders/web/pdf": "document_loaders/web/pdf", "document_loaders/web/recursive_url": "document_loaders/web/recursive_url", "document_loaders/web/s3": "document_loaders/web/s3", "document_loaders/web/sitemap": "document_loaders/web/sitemap", "document_loaders/web/sonix_audio": "document_loaders/web/sonix_audio", "document_loaders/web/confluence": "document_loaders/web/confluence", "document_loaders/web/couchbase": "document_loaders/web/couchbase", "document_loaders/web/searchapi": "document_loaders/web/searchapi", "document_loaders/web/serpapi": "document_loaders/web/serpapi", "document_loaders/web/sort_xyz_blockchain": "document_loaders/web/sort_xyz_blockchain", "document_loaders/web/spider": "document_loaders/web/spider", "document_loaders/web/youtube": "document_loaders/web/youtube", "document_loaders/fs/chatgpt": "document_loaders/fs/chatgpt", "document_loaders/fs/srt": "document_loaders/fs/srt", "document_loaders/fs/pdf": "document_loaders/fs/pdf", "document_loaders/fs/docx": "document_loaders/fs/docx", "document_loaders/fs/epub": "document_loaders/fs/epub", "document_loaders/fs/csv": "document_loaders/fs/csv", "document_loaders/fs/notion": "document_loaders/fs/notion", "document_loaders/fs/obsidian": "document_loaders/fs/obsidian", 
"document_loaders/fs/unstructured": "document_loaders/fs/unstructured", "document_loaders/fs/openai_whisper_audio": "document_loaders/fs/openai_whisper_audio", "document_loaders/fs/pptx": "document_loaders/fs/pptx", // utils "utils/convex": "utils/convex", "utils/event_source_parse": "utils/event_source_parse", "utils/cassandra": "utils/cassandra", // experimental "experimental/callbacks/handlers/datadog": "experimental/callbacks/handlers/datadog", "experimental/graph_transformers/llm": "experimental/graph_transformers/llm", "experimental/multimodal_embeddings/googlevertexai": "experimental/multimodal_embeddings/googlevertexai", "experimental/hubs/makersuite/googlemakersuitehub": "experimental/hubs/makersuite/googlemakersuitehub", "experimental/chat_models/ollama_functions": "experimental/chat_models/ollama_functions", "experimental/llms/chrome_ai": "experimental/llms/chrome_ai", "experimental/tools/pyinterpreter": "experimental/tools/pyinterpreter", // chains "chains/graph_qa/cypher": "chains/graph_qa/cypher", }, requiresOptionalDependency: [ "tools/aws_sfn", "tools/aws_lambda", "tools/duckduckgo_search", "tools/discord", "tools/gmail", "tools/google_calendar", "agents/toolkits/aws_sfn", "agents/toolkits/stagehand", "callbacks/handlers/llmonitor", "callbacks/handlers/lunary", "callbacks/handlers/upstash_ratelimit", "embeddings/bedrock", "embeddings/cloudflare_workersai", "embeddings/cohere", "embeddings/tensorflow", "embeddings/hf", "embeddings/hf_transformers", "embeddings/ibm", "embeddings/jina", "embeddings/llama_cpp", "embeddings/gradient_ai", "embeddings/premai", "embeddings/tencent_hunyuan", "embeddings/tencent_hunyuan/web", "embeddings/zhipuai", "llms/load", "llms/arcjet", "llms/cohere", "llms/gradient_ai", "llms/hf", "llms/raycast", "llms/ibm", "llms/replicate", "llms/sagemaker_endpoint", "llms/watsonx_ai", "llms/bedrock", "llms/bedrock/web", "llms/llama_cpp", "llms/writer", "llms/portkey", "llms/layerup_security", "vectorstores/analyticdb", 
"vectorstores/astradb", "vectorstores/azure_aisearch", "vectorstores/azure_cosmosdb", "vectorstores/cassandra", "vectorstores/chroma", "vectorstores/clickhouse", "vectorstores/closevector/node", "vectorstores/closevector/web", "vectorstores/cloudflare_vectorize", "vectorstores/convex", "vectorstores/couchbase", "vectorstores/elasticsearch", "vectorstores/faiss", "vectorstores/googlevertexai", "vectorstores/hnswlib", "vectorstores/hanavector", "vectorstores/lancedb", "vectorstores/libsql", "vectorstores/milvus", "vectorstores/momento_vector_index", "vectorstores/mongodb_atlas", "vectorstores/myscale", "vectorstores/neo4j_vector", "vectorstores/neon", "vectorstores/opensearch", "vectorstores/pgvector", "vectorstores/pinecone", "vectorstores/qdrant", "vectorstores/redis", "vectorstores/rockset", "vectorstores/singlestore", "vectorstores/supabase", "vectorstores/tigris", "vectorstores/typeorm", "vectorstores/typesense", "vectorstores/upstash", "vectorstores/usearch", "vectorstores/vercel_postgres", "vectorstores/voy", "vectorstores/weaviate", "vectorstores/xata", "vectorstores/zep", "vectorstores/zep_cloud", "chat_models/arcjet", "chat_models/bedrock", "chat_models/bedrock/web", "chat_models/llama_cpp", "chat_models/portkey", "chat_models/premai", "chat_models/tencent_hunyuan", "chat_models/tencent_hunyuan/web", "chat_models/ibm", "chat_models/iflytek_xinghuo", "chat_models/iflytek_xinghuo/web", "chat_models/webllm", "chat_models/zhipuai", "retrievers/amazon_kendra", "retrievers/amazon_knowledge_base", "retrievers/dria", "retrievers/metal", "retrievers/supabase", "retrievers/vectara_summary", "retrievers/zep", // query translators "structured_query/chroma", "structured_query/qdrant", "structured_query/supabase", "structured_query/vectara", "retrievers/zep_cloud", "cache/cloudflare_kv", "cache/momento", "cache/upstash_redis", "graphs/neo4j_graph", "graphs/memgraph_graph", // document_compressors "document_compressors/ibm", // document_transformers 
"document_transformers/html_to_text", "document_transformers/mozilla_readability", // storage "storage/cassandra", "storage/convex", "storage/ioredis", "storage/upstash_redis", "storage/vercel_kv", // stores "stores/message/astradb", "stores/message/cassandra", "stores/message/cloudflare_d1", "stores/message/convex", "stores/message/dynamodb", "stores/message/firestore", "stores/message/ioredis", "stores/message/ipfs_datastore", "stores/message/momento", "stores/message/mongodb", "stores/message/planetscale", "stores/message/postgres", "stores/message/redis", "stores/message/upstash_redis", "stores/message/xata", "stores/message/zep_cloud", // memory "memory/motorhead_memory", "memory/zep", "memory/zep_cloud", // utils "utils/convex", "utils/cassandra", // indexes "indexes/postgres", "indexes/sqlite", // document loaders "document_loaders/web/apify_dataset", "document_loaders/web/assemblyai", "document_loaders/web/azure_blob_storage_container", "document_loaders/web/azure_blob_storage_file", "document_loaders/web/browserbase", "document_loaders/web/cheerio", "document_loaders/web/puppeteer", "document_loaders/web/playwright", "document_loaders/web/college_confidential", "document_loaders/web/gitbook", "document_loaders/web/hn", "document_loaders/web/imsdb", "document_loaders/web/figma", "document_loaders/web/firecrawl", "document_loaders/web/github", "document_loaders/web/pdf", "document_loaders/web/taskade", "document_loaders/web/notionapi", "document_loaders/web/recursive_url", "document_loaders/web/s3", "document_loaders/web/sitemap", "document_loaders/web/sonix_audio", "document_loaders/web/spider", "document_loaders/web/confluence", "document_loaders/web/couchbase", "document_loaders/web/youtube", "document_loaders/fs/chatgpt", "document_loaders/fs/srt", "document_loaders/fs/pdf", "document_loaders/fs/docx", "document_loaders/fs/epub", "document_loaders/fs/csv", "document_loaders/fs/notion", "document_loaders/fs/obsidian", "document_loaders/fs/unstructured", 
"document_loaders/fs/openai_whisper_audio", "document_loaders/fs/pptx", // experimental "experimental/multimodal_embeddings/googlevertexai", "experimental/hubs/makersuite/googlemakersuitehub", "experimental/tools/pyinterpreter", // chains "chains/graph_qa/cypher", // langgraph checkpointers "langgraph/checkpointers/vercel_kv", ], packageSuffix: "community", tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/.prettierignore
src/load/import_map.ts
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/package.json
{ "name": "@langchain/community", "version": "0.3.17", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-community/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/community", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking --gen-maps", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard": "yarn test:standard:unit && yarn test:standard:int", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/openai": ">=0.2.0 <0.4.0", 
"binary-extensions": "^2.2.0", "expr-eval": "^2.0.2", "flat": "^5.0.2", "js-yaml": "^4.1.0", "langchain": ">=0.2.3 <0.3.0 || >=0.3.4 <0.4.0", "langsmith": "^0.2.8", "uuid": "^10.0.0", "zod": "^3.22.3", "zod-to-json-schema": "^3.22.5" }, "devDependencies": { "@arcjet/redact": "^v1.0.0-alpha.23", "@aws-crypto/sha256-js": "^5.0.0", "@aws-sdk/client-bedrock-agent-runtime": "^3.583.0", "@aws-sdk/client-bedrock-runtime": "^3.422.0", "@aws-sdk/client-dynamodb": "^3.310.0", "@aws-sdk/client-kendra": "^3.352.0", "@aws-sdk/client-lambda": "^3.310.0", "@aws-sdk/client-s3": "^3.310.0", "@aws-sdk/client-sagemaker-runtime": "^3.414.0", "@aws-sdk/client-sfn": "^3.362.0", "@aws-sdk/credential-provider-node": "^3.388.0", "@aws-sdk/types": "^3.357.0", "@azure/search-documents": "^12.0.0", "@azure/storage-blob": "^12.15.0", "@browserbasehq/sdk": "^1.1.5", "@browserbasehq/stagehand": "^1.0.0", "@clickhouse/client": "^0.2.5", "@cloudflare/ai": "1.0.12", "@cloudflare/workers-types": "^4.20230922.0", "@datastax/astra-db-ts": "^1.0.1", "@elastic/elasticsearch": "^8.4.0", "@faker-js/faker": "8.4.1", "@getmetal/metal-sdk": "^4.0.0", "@getzep/zep-cloud": "^1.0.6", "@getzep/zep-js": "^0.9.0", "@gomomento/sdk": "^1.51.1", "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "^2.5.0", "@google-cloud/storage": "^7.7.0", "@gradientai/nodejs-sdk": "^1.2.0", "@huggingface/inference": "^2.6.4", "@ibm-cloud/watsonx-ai": "^1.1.0", "@jest/globals": "^29.5.0", "@lancedb/lancedb": "^0.13.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@langchain/standard-tests": "0.0.0", "@layerup/layerup-security": "^1.5.12", "@libsql/client": "^0.14.0", "@mendable/firecrawl-js": "^1.4.3", "@mlc-ai/web-llm": ">=0.2.62 <0.3.0", "@mozilla/readability": "^0.4.4", "@neondatabase/serverless": "^0.9.1", "@notionhq/client": "^2.2.10", "@opensearch-project/opensearch": "^2.2.0", "@planetscale/database": "^1.8.0", "@playwright/test": "^1.48.2", "@premai/prem-sdk": "^0.3.25", 
"@qdrant/js-client-rest": "^1.8.2", "@raycast/api": "^1.83.1", "@rockset/client": "^0.9.1", "@smithy/eventstream-codec": "^2.0.5", "@smithy/protocol-http": "^3.0.6", "@smithy/signature-v4": "^2.0.10", "@smithy/util-utf8": "^2.0.0", "@spider-cloud/spider-client": "^0.0.21", "@supabase/supabase-js": "^2.45.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tensorflow-models/universal-sentence-encoder": "^1.3.3", "@tensorflow/tfjs-backend-cpu": "^3", "@tensorflow/tfjs-converter": "^3.6.0", "@tensorflow/tfjs-core": "^3.6.0", "@tsconfig/recommended": "^1.0.2", "@types/better-sqlite3": "^7.6.10", "@types/crypto-js": "^4.2.2", "@types/d3-dsv": "^3.0.7", "@types/flat": "^5.0.2", "@types/html-to-text": "^9", "@types/jsdom": "^21.1.1", "@types/jsonwebtoken": "^9", "@types/lodash": "^4", "@types/mozilla-readability": "^0.2.1", "@types/pdf-parse": "^1.1.1", "@types/pg": "^8.11.0", "@types/pg-copy-streams": "^1.2.2", "@types/uuid": "^9", "@types/ws": "^8", "@typescript-eslint/eslint-plugin": "^5.58.0", "@typescript-eslint/parser": "^5.58.0", "@upstash/ratelimit": "^2.0.3", "@upstash/redis": "^1.32.0", "@upstash/vector": "^1.1.1", "@vercel/kv": "^0.2.3", "@vercel/postgres": "^0.5.0", "@writerai/writer-sdk": "^0.40.2", "@xata.io/client": "^0.28.0", "@xenova/transformers": "^2.17.2", "@zilliz/milvus2-sdk-node": ">=2.3.5", "apify-client": "^2.7.1", "assemblyai": "^4.6.0", "better-sqlite3": "9.5.0", "cassandra-driver": "^4.7.2", "cborg": "^4.1.1", "cheerio": "^1.0.0-rc.12", "chromadb": "^1.9.1", "closevector-common": "0.1.3", "closevector-node": "0.1.6", "closevector-web": "0.1.6", "cohere-ai": ">=6.0.0", "convex": "^1.3.1", "couchbase": "^4.3.0", "crypto-js": "^4.2.0", "d3-dsv": "^2.0.0", "datastore-core": "^9.2.9", "discord.js": "^14.14.1", "dotenv": "^16.0.3", "dpdm": "^3.12.0", "dria": "^0.0.3", "duck-duck-scrape": "^2.2.5", "epub2": "^3.0.1", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", 
"eslint-plugin-jest": "^27.6.0", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "faiss-node": "^0.5.1", "firebase-admin": "^11.9.0 || ^12.0.0", "google-auth-library": "^9.10.0", "googleapis": "^126.0.1", "graphql": "^16.6.0", "hdb": "0.19.8", "hnswlib-node": "^3.0.0", "html-to-text": "^9.0.5", "ibm-cloud-sdk-core": "^5.0.2", "ignore": "^5.2.0", "interface-datastore": "^8.2.11", "ioredis": "^5.3.2", "it-all": "^3.0.4", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "jsdom": "^22.1.0", "jsonwebtoken": "^9.0.2", "llmonitor": "^0.5.9", "lodash": "^4.17.21", "lunary": "^0.7.10", "mammoth": "^1.6.0", "mongodb": "^5.2.0", "mysql2": "^3.9.8", "neo4j-driver": "^5.17.0", "node-llama-cpp": "3.1.1", "notion-to-md": "^3.1.0", "officeparser": "^4.0.4", "openai": "*", "pdf-parse": "1.1.1", "pg": "^8.11.0", "pg-copy-streams": "^6.0.5", "pickleparser": "^0.2.1", "playwright": "^1.32.1", "portkey-ai": "^0.1.11", "prettier": "^2.8.3", "puppeteer": "^22.0.0", "pyodide": "^0.26.2", "redis": "^4.6.6", "release-it": "^17.6.0", "replicate": "^0.29.4", "rollup": "^3.19.1", "sonix-speech-recognition": "^2.1.1", "srt-parser-2": "^1.2.3", "ts-jest": "^29.1.0", "typeorm": "^0.3.20", "typescript": "~5.1.6", "typesense": "^1.5.3", "usearch": "^1.1.1", "voy-search": "0.6.2", "weaviate-ts-client": "^1.4.0", "web-auth-library": "^1.0.3", "youtube-transcript": "^1.0.6", "youtubei.js": "^9.1.0" }, "peerDependencies": { "@arcjet/redact": "^v1.0.0-alpha.23", "@aws-crypto/sha256-js": "^5.0.0", "@aws-sdk/client-bedrock-agent-runtime": "^3.583.0", "@aws-sdk/client-bedrock-runtime": "^3.422.0", "@aws-sdk/client-dynamodb": "^3.310.0", "@aws-sdk/client-kendra": "^3.352.0", "@aws-sdk/client-lambda": "^3.310.0", "@aws-sdk/client-s3": "^3.310.0", "@aws-sdk/client-sagemaker-runtime": "^3.310.0", "@aws-sdk/client-sfn": "^3.310.0", "@aws-sdk/credential-provider-node": "^3.388.0", "@azure/search-documents": "^12.0.0", "@azure/storage-blob": "^12.15.0", "@browserbasehq/sdk": 
"*", "@browserbasehq/stagehand": "^1.0.0", "@clickhouse/client": "^0.2.5", "@cloudflare/ai": "*", "@datastax/astra-db-ts": "^1.0.0", "@elastic/elasticsearch": "^8.4.0", "@getmetal/metal-sdk": "*", "@getzep/zep-cloud": "^1.0.6", "@getzep/zep-js": "^0.9.0", "@gomomento/sdk": "^1.51.1", "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "*", "@google-cloud/storage": "^6.10.1 || ^7.7.0", "@gradientai/nodejs-sdk": "^1.2.0", "@huggingface/inference": "^2.6.4", "@ibm-cloud/watsonx-ai": "*", "@lancedb/lancedb": "^0.12.0", "@langchain/core": ">=0.2.21 <0.4.0", "@layerup/layerup-security": "^1.5.12", "@libsql/client": "^0.14.0", "@mendable/firecrawl-js": "^1.4.3", "@mlc-ai/web-llm": "*", "@mozilla/readability": "*", "@neondatabase/serverless": "*", "@notionhq/client": "^2.2.10", "@opensearch-project/opensearch": "*", "@pinecone-database/pinecone": "*", "@planetscale/database": "^1.8.0", "@premai/prem-sdk": "^0.3.25", "@qdrant/js-client-rest": "^1.8.2", "@raycast/api": "^1.55.2", "@rockset/client": "^0.9.1", "@smithy/eventstream-codec": "^2.0.5", "@smithy/protocol-http": "^3.0.6", "@smithy/signature-v4": "^2.0.10", "@smithy/util-utf8": "^2.0.0", "@spider-cloud/spider-client": "^0.0.21", "@supabase/supabase-js": "^2.45.0", "@tensorflow-models/universal-sentence-encoder": "*", "@tensorflow/tfjs-converter": "*", "@tensorflow/tfjs-core": "*", "@upstash/ratelimit": "^1.1.3 || ^2.0.3", "@upstash/redis": "^1.20.6", "@upstash/vector": "^1.1.1", "@vercel/kv": "^0.2.3", "@vercel/postgres": "^0.5.0", "@writerai/writer-sdk": "^0.40.2", "@xata.io/client": "^0.28.0", "@xenova/transformers": "^2.17.2", "@zilliz/milvus2-sdk-node": ">=2.3.5", "apify-client": "^2.7.1", "assemblyai": "^4.6.0", "better-sqlite3": ">=9.4.0 <12.0.0", "cassandra-driver": "^4.7.2", "cborg": "^4.1.1", "cheerio": "^1.0.0-rc.12", "chromadb": "*", "closevector-common": "0.1.3", "closevector-node": "0.1.6", "closevector-web": "0.1.6", "cohere-ai": "*", "convex": "^1.3.1", "crypto-js": "^4.2.0", "d3-dsv": 
"^2.0.0", "discord.js": "^14.14.1", "dria": "^0.0.3", "duck-duck-scrape": "^2.2.5", "epub2": "^3.0.1", "faiss-node": "^0.5.1", "firebase-admin": "^11.9.0 || ^12.0.0", "google-auth-library": "*", "googleapis": "*", "hnswlib-node": "^3.0.0", "html-to-text": "^9.0.5", "ibm-cloud-sdk-core": "*", "ignore": "^5.2.0", "interface-datastore": "^8.2.11", "ioredis": "^5.3.2", "it-all": "^3.0.4", "jsdom": "*", "jsonwebtoken": "^9.0.2", "llmonitor": "^0.5.9", "lodash": "^4.17.21", "lunary": "^0.7.10", "mammoth": "^1.6.0", "mongodb": ">=5.2.0", "mysql2": "^3.9.8", "neo4j-driver": "*", "notion-to-md": "^3.1.0", "officeparser": "^4.0.4", "openai": "*", "pdf-parse": "1.1.1", "pg": "^8.11.0", "pg-copy-streams": "^6.0.5", "pickleparser": "^0.2.1", "playwright": "^1.32.1", "portkey-ai": "^0.1.11", "puppeteer": "*", "pyodide": ">=0.24.1 <0.27.0", "redis": "*", "replicate": "^0.29.4", "sonix-speech-recognition": "^2.1.1", "srt-parser-2": "^1.2.3", "typeorm": "^0.3.20", "typesense": "^1.5.3", "usearch": "^1.1.1", "voy-search": "0.6.2", "weaviate-ts-client": "*", "web-auth-library": "^1.0.3", "ws": "^8.14.2", "youtube-transcript": "^1.0.6", "youtubei.js": "^9.1.0" }, "peerDependenciesMeta": { "@arcjet/redact": { "optional": true }, "@aws-crypto/sha256-js": { "optional": true }, "@aws-sdk/client-bedrock-agent-runtime": { "optional": true }, "@aws-sdk/client-bedrock-runtime": { "optional": true }, "@aws-sdk/client-dynamodb": { "optional": true }, "@aws-sdk/client-kendra": { "optional": true }, "@aws-sdk/client-lambda": { "optional": true }, "@aws-sdk/client-s3": { "optional": true }, "@aws-sdk/client-sagemaker-runtime": { "optional": true }, "@aws-sdk/client-sfn": { "optional": true }, "@aws-sdk/credential-provider-node": { "optional": true }, "@azure/search-documents": { "optional": true }, "@azure/storage-blob": { "optional": true }, "@browserbasehq/sdk": { "optional": true }, "@clickhouse/client": { "optional": true }, "@cloudflare/ai": { "optional": true }, "@datastax/astra-db-ts": { 
"optional": true }, "@elastic/elasticsearch": { "optional": true }, "@getmetal/metal-sdk": { "optional": true }, "@getzep/zep-cloud": { "optional": true }, "@getzep/zep-js": { "optional": true }, "@gomomento/sdk": { "optional": true }, "@gomomento/sdk-core": { "optional": true }, "@google-ai/generativelanguage": { "optional": true }, "@google-cloud/storage": { "optional": true }, "@gradientai/nodejs-sdk": { "optional": true }, "@huggingface/inference": { "optional": true }, "@lancedb/lancedb": { "optional": true }, "@layerup/layerup-security": { "optional": true }, "@libsql/client": { "optional": true }, "@mendable/firecrawl-js": { "optional": true }, "@mlc-ai/web-llm": { "optional": true }, "@mozilla/readability": { "optional": true }, "@neondatabase/serverless": { "optional": true }, "@notionhq/client": { "optional": true }, "@opensearch-project/opensearch": { "optional": true }, "@pinecone-database/pinecone": { "optional": true }, "@planetscale/database": { "optional": true }, "@premai/prem-sdk": { "optional": true }, "@qdrant/js-client-rest": { "optional": true }, "@raycast/api": { "optional": true }, "@rockset/client": { "optional": true }, "@smithy/eventstream-codec": { "optional": true }, "@smithy/protocol-http": { "optional": true }, "@smithy/signature-v4": { "optional": true }, "@smithy/util-utf8": { "optional": true }, "@spider-cloud/spider-client": { "optional": true }, "@supabase/supabase-js": { "optional": true }, "@tensorflow-models/universal-sentence-encoder": { "optional": true }, "@tensorflow/tfjs-converter": { "optional": true }, "@tensorflow/tfjs-core": { "optional": true }, "@upstash/ratelimit": { "optional": true }, "@upstash/redis": { "optional": true }, "@upstash/vector": { "optional": true }, "@vercel/kv": { "optional": true }, "@vercel/postgres": { "optional": true }, "@writerai/writer-sdk": { "optional": true }, "@xata.io/client": { "optional": true }, "@xenova/transformers": { "optional": true }, "@zilliz/milvus2-sdk-node": { "optional": 
true }, "apify-client": { "optional": true }, "assemblyai": { "optional": true }, "better-sqlite3": { "optional": true }, "cassandra-driver": { "optional": true }, "cborg": { "optional": true }, "cheerio": { "optional": true }, "chromadb": { "optional": true }, "closevector-common": { "optional": true }, "closevector-node": { "optional": true }, "closevector-web": { "optional": true }, "cohere-ai": { "optional": true }, "convex": { "optional": true }, "crypto-js": { "optional": true }, "d3-dsv": { "optional": true }, "discord.js": { "optional": true }, "dria": { "optional": true }, "duck-duck-scrape": { "optional": true }, "epub2": { "optional": true }, "faiss-node": { "optional": true }, "firebase-admin": { "optional": true }, "google-auth-library": { "optional": true }, "googleapis": { "optional": true }, "hnswlib-node": { "optional": true }, "html-to-text": { "optional": true }, "ignore": { "optional": true }, "interface-datastore": { "optional": true }, "ioredis": { "optional": true }, "it-all": { "optional": true }, "jsdom": { "optional": true }, "jsonwebtoken": { "optional": true }, "llmonitor": { "optional": true }, "lodash": { "optional": true }, "lunary": { "optional": true }, "mammoth": { "optional": true }, "mongodb": { "optional": true }, "mysql2": { "optional": true }, "neo4j-driver": { "optional": true }, "notion-to-md": { "optional": true }, "officeparser": { "optional": true }, "pdf-parse": { "optional": true }, "pg": { "optional": true }, "pg-copy-streams": { "optional": true }, "pickleparser": { "optional": true }, "playwright": { "optional": true }, "portkey-ai": { "optional": true }, "puppeteer": { "optional": true }, "pyodide": { "optional": true }, "redis": { "optional": true }, "replicate": { "optional": true }, "sonix-speech-recognition": { "optional": true }, "srt-parser-2": { "optional": true }, "typeorm": { "optional": true }, "typesense": { "optional": true }, "usearch": { "optional": true }, "voy-search": { "optional": true }, 
"weaviate-ts-client": { "optional": true }, "web-auth-library": { "optional": true }, "ws": { "optional": true }, "youtube-transcript": { "optional": true }, "youtubei.js": { "optional": true } }, "publishConfig": { "access": "public" }, "exports": { "./load": { "types": { "import": "./load.d.ts", "require": "./load.d.cts", "default": "./load.d.ts" }, "import": "./load.js", "require": "./load.cjs" }, "./load/serializable": { "types": { "import": "./load/serializable.d.ts", "require": "./load/serializable.d.cts", "default": "./load/serializable.d.ts" }, "import": "./load/serializable.js", "require": "./load/serializable.cjs" }, "./tools/aiplugin": { "types": { "import": "./tools/aiplugin.d.ts", "require": "./tools/aiplugin.d.cts", "default": "./tools/aiplugin.d.ts" }, "import": "./tools/aiplugin.js", "require": "./tools/aiplugin.cjs" }, "./tools/aws_lambda": { "types": { "import": "./tools/aws_lambda.d.ts", "require": "./tools/aws_lambda.d.cts", "default": "./tools/aws_lambda.d.ts" }, "import": "./tools/aws_lambda.js", "require": "./tools/aws_lambda.cjs" }, "./tools/aws_sfn": { "types": { "import": "./tools/aws_sfn.d.ts", "require": "./tools/aws_sfn.d.cts", "default": "./tools/aws_sfn.d.ts" }, "import": "./tools/aws_sfn.js", "require": "./tools/aws_sfn.cjs" }, "./tools/bingserpapi": { "types": { "import": "./tools/bingserpapi.d.ts", "require": "./tools/bingserpapi.d.cts", "default": "./tools/bingserpapi.d.ts" }, "import": "./tools/bingserpapi.js", "require": "./tools/bingserpapi.cjs" }, "./tools/brave_search": { "types": { "import": "./tools/brave_search.d.ts", "require": "./tools/brave_search.d.cts", "default": "./tools/brave_search.d.ts" }, "import": "./tools/brave_search.js", "require": "./tools/brave_search.cjs" }, "./tools/duckduckgo_search": { "types": { "import": "./tools/duckduckgo_search.d.ts", "require": "./tools/duckduckgo_search.d.cts", "default": "./tools/duckduckgo_search.d.ts" }, "import": "./tools/duckduckgo_search.js", "require": 
"./tools/duckduckgo_search.cjs" }, "./tools/calculator": { "types": { "import": "./tools/calculator.d.ts", "require": "./tools/calculator.d.cts", "default": "./tools/calculator.d.ts" }, "import": "./tools/calculator.js", "require": "./tools/calculator.cjs" }, "./tools/connery": { "types": { "import": "./tools/connery.d.ts", "require": "./tools/connery.d.cts", "default": "./tools/connery.d.ts" }, "import": "./tools/connery.js", "require": "./tools/connery.cjs" }, "./tools/dadjokeapi": { "types": { "import": "./tools/dadjokeapi.d.ts", "require": "./tools/dadjokeapi.d.cts", "default": "./tools/dadjokeapi.d.ts" }, "import": "./tools/dadjokeapi.js", "require": "./tools/dadjokeapi.cjs" }, "./tools/discord": { "types": { "import": "./tools/discord.d.ts", "require": "./tools/discord.d.cts", "default": "./tools/discord.d.ts" }, "import": "./tools/discord.js", "require": "./tools/discord.cjs" }, "./tools/dynamic": { "types": { "import": "./tools/dynamic.d.ts", "require": "./tools/dynamic.d.cts", "default": "./tools/dynamic.d.ts" }, "import": "./tools/dynamic.js", "require": "./tools/dynamic.cjs" }, "./tools/dataforseo_api_search": { "types": { "import": "./tools/dataforseo_api_search.d.ts", "require": "./tools/dataforseo_api_search.d.cts", "default": "./tools/dataforseo_api_search.d.ts" }, "import": "./tools/dataforseo_api_search.js", "require": "./tools/dataforseo_api_search.cjs" }, "./tools/gmail": { "types": { "import": "./tools/gmail.d.ts", "require": "./tools/gmail.d.cts", "default": "./tools/gmail.d.ts" }, "import": "./tools/gmail.js", "require": "./tools/gmail.cjs" }, "./tools/google_calendar": { "types": { "import": "./tools/google_calendar.d.ts", "require": "./tools/google_calendar.d.cts", "default": "./tools/google_calendar.d.ts" }, "import": "./tools/google_calendar.js", "require": "./tools/google_calendar.cjs" }, "./tools/google_custom_search": { "types": { "import": "./tools/google_custom_search.d.ts", "require": "./tools/google_custom_search.d.cts", "default": 
"./tools/google_custom_search.d.ts" }, "import": "./tools/google_custom_search.js", "require": "./tools/google_custom_search.cjs" }, "./tools/google_places": { "types": { "import": "./tools/google_places.d.ts", "require": "./tools/google_places.d.cts", "default": "./tools/google_places.d.ts" }, "import": "./tools/google_places.js", "require": "./tools/google_places.cjs" }, "./tools/google_routes": { "types": { "import": "./tools/google_routes.d.ts", "require": "./tools/google_routes.d.cts", "default": "./tools/google_routes.d.ts" }, "import": "./tools/google_routes.js", "require": "./tools/google_routes.cjs" }, "./tools/ifttt": { "types": { "import": "./tools/ifttt.d.ts", "require": "./tools/ifttt.d.cts", "default": "./tools/ifttt.d.ts" }, "import": "./tools/ifttt.js", "require": "./tools/ifttt.cjs" }, "./tools/searchapi": { "types": { "import": "./tools/searchapi.d.ts", "require": "./tools/searchapi.d.cts", "default": "./tools/searchapi.d.ts" }, "import": "./tools/searchapi.js", "require": "./tools/searchapi.cjs" }, "./tools/searxng_search": { "types": { "import": "./tools/searxng_search.d.ts", "require": "./tools/searxng_search.d.cts", "default": "./tools/searxng_search.d.ts" }, "import": "./tools/searxng_search.js", "require": "./tools/searxng_search.cjs" }, "./tools/serpapi": { "types": { "import": "./tools/serpapi.d.ts", "require": "./tools/serpapi.d.cts", "default": "./tools/serpapi.d.ts" }, "import": "./tools/serpapi.js", "require": "./tools/serpapi.cjs" }, "./tools/serper": { "types": { "import": "./tools/serper.d.ts", "require": "./tools/serper.d.cts", "default": "./tools/serper.d.ts" }, "import": "./tools/serper.js", "require": "./tools/serper.cjs" }, "./tools/stackexchange": { "types": { "import": "./tools/stackexchange.d.ts", "require": "./tools/stackexchange.d.cts", "default": "./tools/stackexchange.d.ts" }, "import": "./tools/stackexchange.js", "require": "./tools/stackexchange.cjs" }, "./tools/tavily_search": { "types": { "import": 
"./tools/tavily_search.d.ts", "require": "./tools/tavily_search.d.cts", "default": "./tools/tavily_search.d.ts" }, "import": "./tools/tavily_search.js", "require": "./tools/tavily_search.cjs" }, "./tools/wikipedia_query_run": { "types": { "import": "./tools/wikipedia_query_run.d.ts", "require": "./tools/wikipedia_query_run.d.cts", "default": "./tools/wikipedia_query_run.d.ts" }, "import": "./tools/wikipedia_query_run.js", "require": "./tools/wikipedia_query_run.cjs" }, "./tools/wolframalpha": { "types": { "import": "./tools/wolframalpha.d.ts", "require": "./tools/wolframalpha.d.cts", "default": "./tools/wolframalpha.d.ts" }, "import": "./tools/wolframalpha.js", "require": "./tools/wolframalpha.cjs" }, "./agents/toolkits/aws_sfn": { "types": { "import": "./agents/toolkits/aws_sfn.d.ts", "require": "./agents/toolkits/aws_sfn.d.cts", "default": "./agents/toolkits/aws_sfn.d.ts" }, "import": "./agents/toolkits/aws_sfn.js", "require": "./agents/toolkits/aws_sfn.cjs" }, "./agents/toolkits/base": { "types": { "import": "./agents/toolkits/base.d.ts", "require": "./agents/toolkits/base.d.cts", "default": "./agents/toolkits/base.d.ts" }, "import": "./agents/toolkits/base.js", "require": "./agents/toolkits/base.cjs" }, "./agents/toolkits/connery": { "types": { "import": "./agents/toolkits/connery.d.ts", "require": "./agents/toolkits/connery.d.cts", "default": "./agents/toolkits/connery.d.ts" }, "import": "./agents/toolkits/connery.js", "require": "./agents/toolkits/connery.cjs" }, "./agents/toolkits/stagehand": { "types": { "import": "./agents/toolkits/stagehand.d.ts", "require": "./agents/toolkits/stagehand.d.cts", "default": "./agents/toolkits/stagehand.d.ts" }, "import": "./agents/toolkits/stagehand.js", "require": "./agents/toolkits/stagehand.cjs" }, "./embeddings/alibaba_tongyi": { "types": { "import": "./embeddings/alibaba_tongyi.d.ts", "require": "./embeddings/alibaba_tongyi.d.cts", "default": "./embeddings/alibaba_tongyi.d.ts" }, "import": 
"./embeddings/alibaba_tongyi.js", "require": "./embeddings/alibaba_tongyi.cjs" }, "./embeddings/baidu_qianfan": { "types": { "import": "./embeddings/baidu_qianfan.d.ts", "require": "./embeddings/baidu_qianfan.d.cts", "default": "./embeddings/baidu_qianfan.d.ts" }, "import": "./embeddings/baidu_qianfan.js", "require": "./embeddings/baidu_qianfan.cjs" }, "./embeddings/bedrock": { "types": { "import": "./embeddings/bedrock.d.ts", "require": "./embeddings/bedrock.d.cts", "default": "./embeddings/bedrock.d.ts" }, "import": "./embeddings/bedrock.js", "require": "./embeddings/bedrock.cjs" }, "./embeddings/cloudflare_workersai": { "types": { "import": "./embeddings/cloudflare_workersai.d.ts", "require": "./embeddings/cloudflare_workersai.d.cts", "default": "./embeddings/cloudflare_workersai.d.ts" }, "import": "./embeddings/cloudflare_workersai.js", "require": "./embeddings/cloudflare_workersai.cjs" }, "./embeddings/cohere": { "types": { "import": "./embeddings/cohere.d.ts", "require": "./embeddings/cohere.d.cts", "default": "./embeddings/cohere.d.ts" }, "import": "./embeddings/cohere.js", "require": "./embeddings/cohere.cjs" }, "./embeddings/deepinfra": { "types": { "import": "./embeddings/deepinfra.d.ts", "require": "./embeddings/deepinfra.d.cts", "default": "./embeddings/deepinfra.d.ts" }, "import": "./embeddings/deepinfra.js", "require": "./embeddings/deepinfra.cjs" }, "./embeddings/fireworks": { "types": { "import": "./embeddings/fireworks.d.ts", "require": "./embeddings/fireworks.d.cts", "default": "./embeddings/fireworks.d.ts" }, "import": "./embeddings/fireworks.js", "require": "./embeddings/fireworks.cjs" }, "./embeddings/gradient_ai": { "types": { "import": "./embeddings/gradient_ai.d.ts", "require": "./embeddings/gradient_ai.d.cts", "default": "./embeddings/gradient_ai.d.ts" }, "import": "./embeddings/gradient_ai.js", "require": "./embeddings/gradient_ai.cjs" }, "./embeddings/hf": { "types": { "import": "./embeddings/hf.d.ts", "require": "./embeddings/hf.d.cts", 
"default": "./embeddings/hf.d.ts" }, "import": "./embeddings/hf.js", "require": "./embeddings/hf.cjs" }, "./embeddings/hf_transformers": { "types": { "import": "./embeddings/hf_transformers.d.ts", "require": "./embeddings/hf_transformers.d.cts", "default": "./embeddings/hf_transformers.d.ts" }, "import": "./embeddings/hf_transformers.js", "require": "./embeddings/hf_transformers.cjs" }, "./embeddings/ibm": { "types": { "import": "./embeddings/ibm.d.ts", "require": "./embeddings/ibm.d.cts", "default": "./embeddings/ibm.d.ts" }, "import": "./embeddings/ibm.js", "require": "./embeddings/ibm.cjs" }, "./embeddings/jina": { "types": { "import": "./embeddings/jina.d.ts", "require": "./embeddings/jina.d.cts", "default": "./embeddings/jina.d.ts" }, "import": "./embeddings/jina.js", "require": "./embeddings/jina.cjs" }, "./embeddings/llama_cpp": { "types": { "import": "./embeddings/llama_cpp.d.ts", "require": "./embeddings/llama_cpp.d.cts", "default": "./embeddings/llama_cpp.d.ts" }, "import": "./embeddings/llama_cpp.js", "require": "./embeddings/llama_cpp.cjs" }, "./embeddings/minimax": { "types": { "import": "./embeddings/minimax.d.ts", "require": "./embeddings/minimax.d.cts", "default": "./embeddings/minimax.d.ts" }, "import": "./embeddings/minimax.js", "require": "./embeddings/minimax.cjs" }, "./embeddings/ollama": { "types": { "import": "./embeddings/ollama.d.ts", "require": "./embeddings/ollama.d.cts", "default": "./embeddings/ollama.d.ts" }, "import": "./embeddings/ollama.js", "require": "./embeddings/ollama.cjs" }, "./embeddings/premai": { "types": { "import": "./embeddings/premai.d.ts", "require": "./embeddings/premai.d.cts", "default": "./embeddings/premai.d.ts" }, "import": "./embeddings/premai.js", "require": "./embeddings/premai.cjs" }, "./embeddings/tensorflow": { "types": { "import": "./embeddings/tensorflow.d.ts", "require": "./embeddings/tensorflow.d.cts", "default": "./embeddings/tensorflow.d.ts" }, "import": "./embeddings/tensorflow.js", "require": 
"./embeddings/tensorflow.cjs" }, "./embeddings/tencent_hunyuan": { "types": { "import": "./embeddings/tencent_hunyuan.d.ts", "require": "./embeddings/tencent_hunyuan.d.cts", "default": "./embeddings/tencent_hunyuan.d.ts" }, "import": "./embeddings/tencent_hunyuan.js", "require": "./embeddings/tencent_hunyuan.cjs" }, "./embeddings/tencent_hunyuan/web": { "types": { "import": "./embeddings/tencent_hunyuan/web.d.ts", "require": "./embeddings/tencent_hunyuan/web.d.cts", "default": "./embeddings/tencent_hunyuan/web.d.ts" }, "import": "./embeddings/tencent_hunyuan/web.js", "require": "./embeddings/tencent_hunyuan/web.cjs" }, "./embeddings/togetherai": { "types": { "import": "./embeddings/togetherai.d.ts", "require": "./embeddings/togetherai.d.cts", "default": "./embeddings/togetherai.d.ts" }, "import": "./embeddings/togetherai.js", "require": "./embeddings/togetherai.cjs" }, "./embeddings/voyage": { "types": { "import": "./embeddings/voyage.d.ts", "require": "./embeddings/voyage.d.cts", "default": "./embeddings/voyage.d.ts" }, "import": "./embeddings/voyage.js", "require": "./embeddings/voyage.cjs" }, "./embeddings/zhipuai": { "types": { "import": "./embeddings/zhipuai.d.ts", "require": "./embeddings/zhipuai.d.cts", "default": "./embeddings/zhipuai.d.ts" }, "import": "./embeddings/zhipuai.js", "require": "./embeddings/zhipuai.cjs" }, "./llms/ai21": { "types": { "import": "./llms/ai21.d.ts", "require": "./llms/ai21.d.cts", "default": "./llms/ai21.d.ts" }, "import": "./llms/ai21.js", "require": "./llms/ai21.cjs" }, "./llms/aleph_alpha": { "types": { "import": "./llms/aleph_alpha.d.ts", "require": "./llms/aleph_alpha.d.cts", "default": "./llms/aleph_alpha.d.ts" }, "import": "./llms/aleph_alpha.js", "require": "./llms/aleph_alpha.cjs" }, "./llms/arcjet": { "types": { "import": "./llms/arcjet.d.ts", "require": "./llms/arcjet.d.cts", "default": "./llms/arcjet.d.ts" }, "import": "./llms/arcjet.js", "require": "./llms/arcjet.cjs" }, "./llms/bedrock": { "types": { "import": 
"./llms/bedrock.d.ts", "require": "./llms/bedrock.d.cts", "default": "./llms/bedrock.d.ts" }, "import": "./llms/bedrock.js", "require": "./llms/bedrock.cjs" }, "./llms/bedrock/web": { "types": { "import": "./llms/bedrock/web.d.ts", "require": "./llms/bedrock/web.d.cts", "default": "./llms/bedrock/web.d.ts" }, "import": "./llms/bedrock/web.js", "require": "./llms/bedrock/web.cjs" }, "./llms/cloudflare_workersai": { "types": { "import": "./llms/cloudflare_workersai.d.ts", "require": "./llms/cloudflare_workersai.d.cts", "default": "./llms/cloudflare_workersai.d.ts" }, "import": "./llms/cloudflare_workersai.js", "require": "./llms/cloudflare_workersai.cjs" }, "./llms/cohere": { "types": { "import": "./llms/cohere.d.ts", "require": "./llms/cohere.d.cts", "default": "./llms/cohere.d.ts" }, "import": "./llms/cohere.js", "require": "./llms/cohere.cjs" }, "./llms/deepinfra": { "types": { "import": "./llms/deepinfra.d.ts", "require": "./llms/deepinfra.d.cts", "default": "./llms/deepinfra.d.ts" }, "import": "./llms/deepinfra.js", "require": "./llms/deepinfra.cjs" }, "./llms/fireworks": { "types": { "import": "./llms/fireworks.d.ts", "require": "./llms/fireworks.d.cts", "default": "./llms/fireworks.d.ts" }, "import": "./llms/fireworks.js", "require": "./llms/fireworks.cjs" }, "./llms/friendli": { "types": { "import": "./llms/friendli.d.ts", "require": "./llms/friendli.d.cts", "default": "./llms/friendli.d.ts" }, "import": "./llms/friendli.js", "require": "./llms/friendli.cjs" }, "./llms/gradient_ai": { "types": { "import": "./llms/gradient_ai.d.ts", "require": "./llms/gradient_ai.d.cts", "default": "./llms/gradient_ai.d.ts" }, "import": "./llms/gradient_ai.js", "require": "./llms/gradient_ai.cjs" }, "./llms/hf": { "types": { "import": "./llms/hf.d.ts", "require": "./llms/hf.d.cts", "default": "./llms/hf.d.ts" }, "import": "./llms/hf.js", "require": "./llms/hf.cjs" }, "./llms/ibm": { "types": { "import": "./llms/ibm.d.ts", "require": "./llms/ibm.d.cts", "default": 
"./llms/ibm.d.ts" }, "import": "./llms/ibm.js", "require": "./llms/ibm.cjs" }, "./llms/llama_cpp": { "types": { "import": "./llms/llama_cpp.d.ts", "require": "./llms/llama_cpp.d.cts", "default": "./llms/llama_cpp.d.ts" }, "import": "./llms/llama_cpp.js", "require": "./llms/llama_cpp.cjs" }, "./llms/ollama": { "types": { "import": "./llms/ollama.d.ts", "require": "./llms/ollama.d.cts", "default": "./llms/ollama.d.ts" }, "import": "./llms/ollama.js", "require": "./llms/ollama.cjs" }, "./llms/portkey": { "types": { "import": "./llms/portkey.d.ts", "require": "./llms/portkey.d.cts", "default": "./llms/portkey.d.ts" }, "import": "./llms/portkey.js", "require": "./llms/portkey.cjs" }, "./llms/raycast": { "types": { "import": "./llms/raycast.d.ts", "require": "./llms/raycast.d.cts", "default": "./llms/raycast.d.ts" }, "import": "./llms/raycast.js", "require": "./llms/raycast.cjs" }, "./llms/replicate": { "types": { "import": "./llms/replicate.d.ts", "require": "./llms/replicate.d.cts", "default": "./llms/replicate.d.ts" }, "import": "./llms/replicate.js", "require": "./llms/replicate.cjs" }, "./llms/sagemaker_endpoint": { "types": { "import": "./llms/sagemaker_endpoint.d.ts", "require": "./llms/sagemaker_endpoint.d.cts", "default": "./llms/sagemaker_endpoint.d.ts" }, "import": "./llms/sagemaker_endpoint.js", "require": "./llms/sagemaker_endpoint.cjs" }, "./llms/togetherai": { "types": { "import": "./llms/togetherai.d.ts", "require": "./llms/togetherai.d.cts", "default": "./llms/togetherai.d.ts" }, "import": "./llms/togetherai.js", "require": "./llms/togetherai.cjs" }, "./llms/watsonx_ai": { "types": { "import": "./llms/watsonx_ai.d.ts", "require": "./llms/watsonx_ai.d.cts", "default": "./llms/watsonx_ai.d.ts" }, "import": "./llms/watsonx_ai.js", "require": "./llms/watsonx_ai.cjs" }, "./llms/writer": { "types": { "import": "./llms/writer.d.ts", "require": "./llms/writer.d.cts", "default": "./llms/writer.d.ts" }, "import": "./llms/writer.js", "require": "./llms/writer.cjs" 
}, "./llms/yandex": { "types": { "import": "./llms/yandex.d.ts", "require": "./llms/yandex.d.cts", "default": "./llms/yandex.d.ts" }, "import": "./llms/yandex.js", "require": "./llms/yandex.cjs" }, "./llms/layerup_security": { "types": { "import": "./llms/layerup_security.d.ts", "require": "./llms/layerup_security.d.cts", "default": "./llms/layerup_security.d.ts" }, "import": "./llms/layerup_security.js", "require": "./llms/layerup_security.cjs" }, "./vectorstores/analyticdb": { "types": { "import": "./vectorstores/analyticdb.d.ts", "require": "./vectorstores/analyticdb.d.cts", "default": "./vectorstores/analyticdb.d.ts" }, "import": "./vectorstores/analyticdb.js", "require": "./vectorstores/analyticdb.cjs" }, "./vectorstores/astradb": { "types": { "import": "./vectorstores/astradb.d.ts", "require": "./vectorstores/astradb.d.cts", "default": "./vectorstores/astradb.d.ts" }, "import": "./vectorstores/astradb.js", "require": "./vectorstores/astradb.cjs" }, "./vectorstores/azure_aisearch": { "types": { "import": "./vectorstores/azure_aisearch.d.ts", "require": "./vectorstores/azure_aisearch.d.cts", "default": "./vectorstores/azure_aisearch.d.ts" }, "import": "./vectorstores/azure_aisearch.js", "require": "./vectorstores/azure_aisearch.cjs" }, "./vectorstores/azure_cosmosdb": { "types": { "import": "./vectorstores/azure_cosmosdb.d.ts", "require": "./vectorstores/azure_cosmosdb.d.cts", "default": "./vectorstores/azure_cosmosdb.d.ts" }, "import": "./vectorstores/azure_cosmosdb.js", "require": "./vectorstores/azure_cosmosdb.cjs" }, "./vectorstores/cassandra": { "types": { "import": "./vectorstores/cassandra.d.ts", "require": "./vectorstores/cassandra.d.cts", "default": "./vectorstores/cassandra.d.ts" }, "import": "./vectorstores/cassandra.js", "require": "./vectorstores/cassandra.cjs" }, "./vectorstores/chroma": { "types": { "import": "./vectorstores/chroma.d.ts", "require": "./vectorstores/chroma.d.cts", "default": "./vectorstores/chroma.d.ts" }, "import": 
"./vectorstores/chroma.js", "require": "./vectorstores/chroma.cjs" }, "./vectorstores/clickhouse": { "types": { "import": "./vectorstores/clickhouse.d.ts", "require": "./vectorstores/clickhouse.d.cts", "default": "./vectorstores/clickhouse.d.ts" }, "import": "./vectorstores/clickhouse.js", "require": "./vectorstores/clickhouse.cjs" }, "./vectorstores/closevector/node": { "types": { "import": "./vectorstores/closevector/node.d.ts", "require": "./vectorstores/closevector/node.d.cts", "default": "./vectorstores/closevector/node.d.ts" }, "import": "./vectorstores/closevector/node.js", "require": "./vectorstores/closevector/node.cjs" }, "./vectorstores/closevector/web": { "types": { "import": "./vectorstores/closevector/web.d.ts", "require": "./vectorstores/closevector/web.d.cts", "default": "./vectorstores/closevector/web.d.ts" }, "import": "./vectorstores/closevector/web.js", "require": "./vectorstores/closevector/web.cjs" }, "./vectorstores/cloudflare_vectorize": { "types": { "import": "./vectorstores/cloudflare_vectorize.d.ts", "require": "./vectorstores/cloudflare_vectorize.d.cts", "default": "./vectorstores/cloudflare_vectorize.d.ts" }, "import": "./vectorstores/cloudflare_vectorize.js", "require": "./vectorstores/cloudflare_vectorize.cjs" }, "./vectorstores/convex": { "types": { "import": "./vectorstores/convex.d.ts", "require": "./vectorstores/convex.d.cts", "default": "./vectorstores/convex.d.ts" }, "import": "./vectorstores/convex.js", "require": "./vectorstores/convex.cjs" }, "./vectorstores/couchbase": { "types": { "import": "./vectorstores/couchbase.d.ts", "require": "./vectorstores/couchbase.d.cts", "default": "./vectorstores/couchbase.d.ts" }, "import": "./vectorstores/couchbase.js", "require": "./vectorstores/couchbase.cjs" }, "./vectorstores/elasticsearch": { "types": { "import": "./vectorstores/elasticsearch.d.ts", "require": "./vectorstores/elasticsearch.d.cts", "default": "./vectorstores/elasticsearch.d.ts" }, "import": 
"./vectorstores/elasticsearch.js", "require": "./vectorstores/elasticsearch.cjs" }, "./vectorstores/faiss": { "types": { "import": "./vectorstores/faiss.d.ts", "require": "./vectorstores/faiss.d.cts", "default": "./vectorstores/faiss.d.ts" }, "import": "./vectorstores/faiss.js", "require": "./vectorstores/faiss.cjs" }, "./vectorstores/googlevertexai": { "types": { "import": "./vectorstores/googlevertexai.d.ts", "require": "./vectorstores/googlevertexai.d.cts", "default": "./vectorstores/googlevertexai.d.ts" }, "import": "./vectorstores/googlevertexai.js", "require": "./vectorstores/googlevertexai.cjs" }, "./vectorstores/hnswlib": { "types": { "import": "./vectorstores/hnswlib.d.ts", "require": "./vectorstores/hnswlib.d.cts", "default": "./vectorstores/hnswlib.d.ts" }, "import": "./vectorstores/hnswlib.js", "require": "./vectorstores/hnswlib.cjs" }, "./vectorstores/hanavector": { "types": { "import": "./vectorstores/hanavector.d.ts", "require": "./vectorstores/hanavector.d.cts", "default": "./vectorstores/hanavector.d.ts" }, "import": "./vectorstores/hanavector.js", "require": "./vectorstores/hanavector.cjs" }, "./vectorstores/lancedb": { "types": { "import": "./vectorstores/lancedb.d.ts", "require": "./vectorstores/lancedb.d.cts", "default": "./vectorstores/lancedb.d.ts" }, "import": "./vectorstores/lancedb.js", "require": "./vectorstores/lancedb.cjs" }, "./vectorstores/libsql": { "types": { "import": "./vectorstores/libsql.d.ts", "require": "./vectorstores/libsql.d.cts", "default": "./vectorstores/libsql.d.ts" }, "import": "./vectorstores/libsql.js", "require": "./vectorstores/libsql.cjs" }, "./vectorstores/milvus": { "types": { "import": "./vectorstores/milvus.d.ts", "require": "./vectorstores/milvus.d.cts", "default": "./vectorstores/milvus.d.ts" }, "import": "./vectorstores/milvus.js", "require": "./vectorstores/milvus.cjs" }, "./vectorstores/momento_vector_index": { "types": { "import": "./vectorstores/momento_vector_index.d.ts", "require": 
"./vectorstores/momento_vector_index.d.cts", "default": "./vectorstores/momento_vector_index.d.ts" }, "import": "./vectorstores/momento_vector_index.js", "require": "./vectorstores/momento_vector_index.cjs" }, "./vectorstores/mongodb_atlas": { "types": { "import": "./vectorstores/mongodb_atlas.d.ts", "require": "./vectorstores/mongodb_atlas.d.cts", "default": "./vectorstores/mongodb_atlas.d.ts" }, "import": "./vectorstores/mongodb_atlas.js", "require": "./vectorstores/mongodb_atlas.cjs" }, "./vectorstores/myscale": { "types": { "import": "./vectorstores/myscale.d.ts", "require": "./vectorstores/myscale.d.cts", "default": "./vectorstores/myscale.d.ts" }, "import": "./vectorstores/myscale.js", "require": "./vectorstores/myscale.cjs" }, "./vectorstores/neo4j_vector": { "types": { "import": "./vectorstores/neo4j_vector.d.ts", "require": "./vectorstores/neo4j_vector.d.cts", "default": "./vectorstores/neo4j_vector.d.ts" }, "import": "./vectorstores/neo4j_vector.js", "require": "./vectorstores/neo4j_vector.cjs" }, "./vectorstores/neon": { "types": { "import": "./vectorstores/neon.d.ts", "require": "./vectorstores/neon.d.cts", "default": "./vectorstores/neon.d.ts" }, "import": "./vectorstores/neon.js", "require": "./vectorstores/neon.cjs" }, "./vectorstores/opensearch": { "types": { "import": "./vectorstores/opensearch.d.ts", "require": "./vectorstores/opensearch.d.cts", "default": "./vectorstores/opensearch.d.ts" }, "import": "./vectorstores/opensearch.js", "require": "./vectorstores/opensearch.cjs" }, "./vectorstores/pgvector": { "types": { "import": "./vectorstores/pgvector.d.ts", "require": "./vectorstores/pgvector.d.cts", "default": "./vectorstores/pgvector.d.ts" }, "import": "./vectorstores/pgvector.js", "require": "./vectorstores/pgvector.cjs" }, "./vectorstores/pinecone": { "types": { "import": "./vectorstores/pinecone.d.ts", "require": "./vectorstores/pinecone.d.cts", "default": "./vectorstores/pinecone.d.ts" }, "import": "./vectorstores/pinecone.js", "require": 
"./vectorstores/pinecone.cjs" }, "./vectorstores/prisma": { "types": { "import": "./vectorstores/prisma.d.ts", "require": "./vectorstores/prisma.d.cts", "default": "./vectorstores/prisma.d.ts" }, "import": "./vectorstores/prisma.js", "require": "./vectorstores/prisma.cjs" }, "./vectorstores/qdrant": { "types": { "import": "./vectorstores/qdrant.d.ts", "require": "./vectorstores/qdrant.d.cts", "default": "./vectorstores/qdrant.d.ts" }, "import": "./vectorstores/qdrant.js", "require": "./vectorstores/qdrant.cjs" }, "./vectorstores/redis": { "types": { "import": "./vectorstores/redis.d.ts", "require": "./vectorstores/redis.d.cts", "default": "./vectorstores/redis.d.ts" }, "import": "./vectorstores/redis.js", "require": "./vectorstores/redis.cjs" }, "./vectorstores/rockset": { "types": { "import": "./vectorstores/rockset.d.ts", "require": "./vectorstores/rockset.d.cts", "default": "./vectorstores/rockset.d.ts" }, "import": "./vectorstores/rockset.js", "require": "./vectorstores/rockset.cjs" }, "./vectorstores/singlestore": { "types": { "import": "./vectorstores/singlestore.d.ts", "require": "./vectorstores/singlestore.d.cts", "default": "./vectorstores/singlestore.d.ts" }, "import": "./vectorstores/singlestore.js", "require": "./vectorstores/singlestore.cjs" }, "./vectorstores/supabase": { "types": { "import": "./vectorstores/supabase.d.ts", "require": "./vectorstores/supabase.d.cts", "default": "./vectorstores/supabase.d.ts" }, "import": "./vectorstores/supabase.js", "require": "./vectorstores/supabase.cjs" }, "./vectorstores/tigris": { "types": { "import": "./vectorstores/tigris.d.ts", "require": "./vectorstores/tigris.d.cts", "default": "./vectorstores/tigris.d.ts" }, "import": "./vectorstores/tigris.js", "require": "./vectorstores/tigris.cjs" }, "./vectorstores/turbopuffer": { "types": { "import": "./vectorstores/turbopuffer.d.ts", "require": "./vectorstores/turbopuffer.d.cts", "default": "./vectorstores/turbopuffer.d.ts" }, "import": 
"./vectorstores/turbopuffer.js", "require": "./vectorstores/turbopuffer.cjs" }, "./vectorstores/typeorm": { "types": { "import": "./vectorstores/typeorm.d.ts", "require": "./vectorstores/typeorm.d.cts", "default": "./vectorstores/typeorm.d.ts" }, "import": "./vectorstores/typeorm.js", "require": "./vectorstores/typeorm.cjs" }, "./vectorstores/typesense": { "types": { "import": "./vectorstores/typesense.d.ts", "require": "./vectorstores/typesense.d.cts", "default": "./vectorstores/typesense.d.ts" }, "import": "./vectorstores/typesense.js", "require": "./vectorstores/typesense.cjs" }, "./vectorstores/upstash": { "types": { "import": "./vectorstores/upstash.d.ts", "require": "./vectorstores/upstash.d.cts", "default": "./vectorstores/upstash.d.ts" }, "import": "./vectorstores/upstash.js", "require": "./vectorstores/upstash.cjs" }, "./vectorstores/usearch": { "types": { "import": "./vectorstores/usearch.d.ts", "require": "./vectorstores/usearch.d.cts", "default": "./vectorstores/usearch.d.ts" }, "import": "./vectorstores/usearch.js", "require": "./vectorstores/usearch.cjs" }, "./vectorstores/vectara": { "types": { "import": "./vectorstores/vectara.d.ts", "require": "./vectorstores/vectara.d.cts", "default": "./vectorstores/vectara.d.ts" }, "import": "./vectorstores/vectara.js", "require": "./vectorstores/vectara.cjs" }, "./vectorstores/vercel_postgres": { "types": { "import": "./vectorstores/vercel_postgres.d.ts", "require": "./vectorstores/vercel_postgres.d.cts", "default": "./vectorstores/vercel_postgres.d.ts" }, "import": "./vectorstores/vercel_postgres.js", "require": "./vectorstores/vercel_postgres.cjs" }, "./vectorstores/voy": { "types": { "import": "./vectorstores/voy.d.ts", "require": "./vectorstores/voy.d.cts", "default": "./vectorstores/voy.d.ts" }, "import": "./vectorstores/voy.js", "require": "./vectorstores/voy.cjs" }, "./vectorstores/weaviate": { "types": { "import": "./vectorstores/weaviate.d.ts", "require": "./vectorstores/weaviate.d.cts", "default": 
"./vectorstores/weaviate.d.ts" }, "import": "./vectorstores/weaviate.js", "require": "./vectorstores/weaviate.cjs" }, "./vectorstores/xata": { "types": { "import": "./vectorstores/xata.d.ts", "require": "./vectorstores/xata.d.cts", "default": "./vectorstores/xata.d.ts" }, "import": "./vectorstores/xata.js", "require": "./vectorstores/xata.cjs" }, "./vectorstores/zep": { "types": { "import": "./vectorstores/zep.d.ts", "require": "./vectorstores/zep.d.cts", "default": "./vectorstores/zep.d.ts" }, "import": "./vectorstores/zep.js", "require": "./vectorstores/zep.cjs" }, "./vectorstores/zep_cloud": { "types": { "import": "./vectorstores/zep_cloud.d.ts", "require": "./vectorstores/zep_cloud.d.cts", "default": "./vectorstores/zep_cloud.d.ts" }, "import": "./vectorstores/zep_cloud.js", "require": "./vectorstores/zep_cloud.cjs" }, "./chat_models/alibaba_tongyi": { "types": { "import": "./chat_models/alibaba_tongyi.d.ts", "require": "./chat_models/alibaba_tongyi.d.cts", "default": "./chat_models/alibaba_tongyi.d.ts" }, "import": "./chat_models/alibaba_tongyi.js", "require": "./chat_models/alibaba_tongyi.cjs" }, "./chat_models/arcjet": { "types": { "import": "./chat_models/arcjet.d.ts", "require": "./chat_models/arcjet.d.cts", "default": "./chat_models/arcjet.d.ts" }, "import": "./chat_models/arcjet.js", "require": "./chat_models/arcjet.cjs" }, "./chat_models/baiduwenxin": { "types": { "import": "./chat_models/baiduwenxin.d.ts", "require": "./chat_models/baiduwenxin.d.cts", "default": "./chat_models/baiduwenxin.d.ts" }, "import": "./chat_models/baiduwenxin.js", "require": "./chat_models/baiduwenxin.cjs" }, "./chat_models/bedrock": { "types": { "import": "./chat_models/bedrock.d.ts", "require": "./chat_models/bedrock.d.cts", "default": "./chat_models/bedrock.d.ts" }, "import": "./chat_models/bedrock.js", "require": "./chat_models/bedrock.cjs" }, "./chat_models/bedrock/web": { "types": { "import": "./chat_models/bedrock/web.d.ts", "require": "./chat_models/bedrock/web.d.cts", 
"default": "./chat_models/bedrock/web.d.ts" }, "import": "./chat_models/bedrock/web.js", "require": "./chat_models/bedrock/web.cjs" }, "./chat_models/cloudflare_workersai": { "types": { "import": "./chat_models/cloudflare_workersai.d.ts", "require": "./chat_models/cloudflare_workersai.d.cts", "default": "./chat_models/cloudflare_workersai.d.ts" }, "import": "./chat_models/cloudflare_workersai.js", "require": "./chat_models/cloudflare_workersai.cjs" }, "./chat_models/deepinfra": { "types": { "import": "./chat_models/deepinfra.d.ts", "require": "./chat_models/deepinfra.d.cts", "default": "./chat_models/deepinfra.d.ts" }, "import": "./chat_models/deepinfra.js", "require": "./chat_models/deepinfra.cjs" }, "./chat_models/fireworks": { "types": { "import": "./chat_models/fireworks.d.ts", "require": "./chat_models/fireworks.d.cts", "default": "./chat_models/fireworks.d.ts" }, "import": "./chat_models/fireworks.js", "require": "./chat_models/fireworks.cjs" }, "./chat_models/friendli": { "types": { "import": "./chat_models/friendli.d.ts", "require": "./chat_models/friendli.d.cts", "default": "./chat_models/friendli.d.ts" }, "import": "./chat_models/friendli.js", "require": "./chat_models/friendli.cjs" }, "./chat_models/ibm": { "types": { "import": "./chat_models/ibm.d.ts", "require": "./chat_models/ibm.d.cts", "default": "./chat_models/ibm.d.ts" }, "import": "./chat_models/ibm.js", "require": "./chat_models/ibm.cjs" }, "./chat_models/iflytek_xinghuo": { "types": { "import": "./chat_models/iflytek_xinghuo.d.ts", "require": "./chat_models/iflytek_xinghuo.d.cts", "default": "./chat_models/iflytek_xinghuo.d.ts" }, "import": "./chat_models/iflytek_xinghuo.js", "require": "./chat_models/iflytek_xinghuo.cjs" }, "./chat_models/iflytek_xinghuo/web": { "types": { "import": "./chat_models/iflytek_xinghuo/web.d.ts", "require": "./chat_models/iflytek_xinghuo/web.d.cts", "default": "./chat_models/iflytek_xinghuo/web.d.ts" }, "import": "./chat_models/iflytek_xinghuo/web.js", "require": 
"./chat_models/iflytek_xinghuo/web.cjs" }, "./chat_models/llama_cpp": { "types": { "import": "./chat_models/llama_cpp.d.ts", "require": "./chat_models/llama_cpp.d.cts", "default": "./chat_models/llama_cpp.d.ts" }, "import": "./chat_models/llama_cpp.js", "require": "./chat_models/llama_cpp.cjs" }, "./chat_models/minimax": { "types": { "import": "./chat_models/minimax.d.ts", "require": "./chat_models/minimax.d.cts", "default": "./chat_models/minimax.d.ts" }, "import": "./chat_models/minimax.js", "require": "./chat_models/minimax.cjs" }, "./chat_models/moonshot": { "types": { "import": "./chat_models/moonshot.d.ts", "require": "./chat_models/moonshot.d.cts", "default": "./chat_models/moonshot.d.ts" }, "import": "./chat_models/moonshot.js", "require": "./chat_models/moonshot.cjs" }, "./chat_models/novita": { "types": { "import": "./chat_models/novita.d.ts", "require": "./chat_models/novita.d.cts", "default": "./chat_models/novita.d.ts" }, "import": "./chat_models/novita.js", "require": "./chat_models/novita.cjs" }, "./chat_models/ollama": { "types": { "import": "./chat_models/ollama.d.ts", "require": "./chat_models/ollama.d.cts", "default": "./chat_models/ollama.d.ts" }, "import": "./chat_models/ollama.js", "require": "./chat_models/ollama.cjs" }, "./chat_models/portkey": { "types": { "import": "./chat_models/portkey.d.ts", "require": "./chat_models/portkey.d.cts", "default": "./chat_models/portkey.d.ts" }, "import": "./chat_models/portkey.js", "require": "./chat_models/portkey.cjs" }, "./chat_models/premai": { "types": { "import": "./chat_models/premai.d.ts", "require": "./chat_models/premai.d.cts", "default": "./chat_models/premai.d.ts" }, "import": "./chat_models/premai.js", "require": "./chat_models/premai.cjs" }, "./chat_models/tencent_hunyuan": { "types": { "import": "./chat_models/tencent_hunyuan.d.ts", "require": "./chat_models/tencent_hunyuan.d.cts", "default": "./chat_models/tencent_hunyuan.d.ts" }, "import": "./chat_models/tencent_hunyuan.js", "require": 
"./chat_models/tencent_hunyuan.cjs" }, "./chat_models/tencent_hunyuan/web": { "types": { "import": "./chat_models/tencent_hunyuan/web.d.ts", "require": "./chat_models/tencent_hunyuan/web.d.cts", "default": "./chat_models/tencent_hunyuan/web.d.ts" }, "import": "./chat_models/tencent_hunyuan/web.js", "require": "./chat_models/tencent_hunyuan/web.cjs" }, "./chat_models/togetherai": { "types": { "import": "./chat_models/togetherai.d.ts", "require": "./chat_models/togetherai.d.cts", "default": "./chat_models/togetherai.d.ts" }, "import": "./chat_models/togetherai.js", "require": "./chat_models/togetherai.cjs" }, "./chat_models/webllm": { "types": { "import": "./chat_models/webllm.d.ts", "require": "./chat_models/webllm.d.cts", "default": "./chat_models/webllm.d.ts" }, "import": "./chat_models/webllm.js", "require": "./chat_models/webllm.cjs" }, "./chat_models/yandex": { "types": { "import": "./chat_models/yandex.d.ts", "require": "./chat_models/yandex.d.cts", "default": "./chat_models/yandex.d.ts" }, "import": "./chat_models/yandex.js", "require": "./chat_models/yandex.cjs" }, "./chat_models/zhipuai": { "types": { "import": "./chat_models/zhipuai.d.ts", "require": "./chat_models/zhipuai.d.cts", "default": "./chat_models/zhipuai.d.ts" }, "import": "./chat_models/zhipuai.js", "require": "./chat_models/zhipuai.cjs" }, "./callbacks/handlers/llmonitor": { "types": { "import": "./callbacks/handlers/llmonitor.d.ts", "require": "./callbacks/handlers/llmonitor.d.cts", "default": "./callbacks/handlers/llmonitor.d.ts" }, "import": "./callbacks/handlers/llmonitor.js", "require": "./callbacks/handlers/llmonitor.cjs" }, "./callbacks/handlers/lunary": { "types": { "import": "./callbacks/handlers/lunary.d.ts", "require": "./callbacks/handlers/lunary.d.cts", "default": "./callbacks/handlers/lunary.d.ts" }, "import": "./callbacks/handlers/lunary.js", "require": "./callbacks/handlers/lunary.cjs" }, "./callbacks/handlers/upstash_ratelimit": { "types": { "import": 
"./callbacks/handlers/upstash_ratelimit.d.ts", "require": "./callbacks/handlers/upstash_ratelimit.d.cts", "default": "./callbacks/handlers/upstash_ratelimit.d.ts" }, "import": "./callbacks/handlers/upstash_ratelimit.js", "require": "./callbacks/handlers/upstash_ratelimit.cjs" }, "./retrievers/amazon_kendra": { "types": { "import": "./retrievers/amazon_kendra.d.ts", "require": "./retrievers/amazon_kendra.d.cts", "default": "./retrievers/amazon_kendra.d.ts" }, "import": "./retrievers/amazon_kendra.js", "require": "./retrievers/amazon_kendra.cjs" }, "./retrievers/amazon_knowledge_base": { "types": { "import": "./retrievers/amazon_knowledge_base.d.ts", "require": "./retrievers/amazon_knowledge_base.d.cts", "default": "./retrievers/amazon_knowledge_base.d.ts" }, "import": "./retrievers/amazon_knowledge_base.js", "require": "./retrievers/amazon_knowledge_base.cjs" }, "./retrievers/bm25": { "types": { "import": "./retrievers/bm25.d.ts", "require": "./retrievers/bm25.d.cts", "default": "./retrievers/bm25.d.ts" }, "import": "./retrievers/bm25.js", "require": "./retrievers/bm25.cjs" }, "./retrievers/chaindesk": { "types": { "import": "./retrievers/chaindesk.d.ts", "require": "./retrievers/chaindesk.d.cts", "default": "./retrievers/chaindesk.d.ts" }, "import": "./retrievers/chaindesk.js", "require": "./retrievers/chaindesk.cjs" }, "./retrievers/databerry": { "types": { "import": "./retrievers/databerry.d.ts", "require": "./retrievers/databerry.d.cts", "default": "./retrievers/databerry.d.ts" }, "import": "./retrievers/databerry.js", "require": "./retrievers/databerry.cjs" }, "./retrievers/dria": { "types": { "import": "./retrievers/dria.d.ts", "require": "./retrievers/dria.d.cts", "default": "./retrievers/dria.d.ts" }, "import": "./retrievers/dria.js", "require": "./retrievers/dria.cjs" }, "./retrievers/metal": { "types": { "import": "./retrievers/metal.d.ts", "require": "./retrievers/metal.d.cts", "default": "./retrievers/metal.d.ts" }, "import": "./retrievers/metal.js", 
"require": "./retrievers/metal.cjs" }, "./retrievers/remote": { "types": { "import": "./retrievers/remote.d.ts", "require": "./retrievers/remote.d.cts", "default": "./retrievers/remote.d.ts" }, "import": "./retrievers/remote.js", "require": "./retrievers/remote.cjs" }, "./retrievers/supabase": { "types": { "import": "./retrievers/supabase.d.ts", "require": "./retrievers/supabase.d.cts", "default": "./retrievers/supabase.d.ts" }, "import": "./retrievers/supabase.js", "require": "./retrievers/supabase.cjs" }, "./retrievers/tavily_search_api": { "types": { "import": "./retrievers/tavily_search_api.d.ts", "require": "./retrievers/tavily_search_api.d.cts", "default": "./retrievers/tavily_search_api.d.ts" }, "import": "./retrievers/tavily_search_api.js", "require": "./retrievers/tavily_search_api.cjs" }, "./retrievers/vectara_summary": { "types": { "import": "./retrievers/vectara_summary.d.ts", "require": "./retrievers/vectara_summary.d.cts", "default": "./retrievers/vectara_summary.d.ts" }, "import": "./retrievers/vectara_summary.js", "require": "./retrievers/vectara_summary.cjs" }, "./retrievers/vespa": { "types": { "import": "./retrievers/vespa.d.ts", "require": "./retrievers/vespa.d.cts", "default": "./retrievers/vespa.d.ts" }, "import": "./retrievers/vespa.js", "require": "./retrievers/vespa.cjs" }, "./retrievers/zep": { "types": { "import": "./retrievers/zep.d.ts", "require": "./retrievers/zep.d.cts", "default": "./retrievers/zep.d.ts" }, "import": "./retrievers/zep.js", "require": "./retrievers/zep.cjs" }, "./structured_query/chroma": { "types": { "import": "./structured_query/chroma.d.ts", "require": "./structured_query/chroma.d.cts", "default": "./structured_query/chroma.d.ts" }, "import": "./structured_query/chroma.js", "require": "./structured_query/chroma.cjs" }, "./structured_query/qdrant": { "types": { "import": "./structured_query/qdrant.d.ts", "require": "./structured_query/qdrant.d.cts", "default": "./structured_query/qdrant.d.ts" }, "import": 
"./structured_query/qdrant.js", "require": "./structured_query/qdrant.cjs" }, "./structured_query/supabase": { "types": { "import": "./structured_query/supabase.d.ts", "require": "./structured_query/supabase.d.cts", "default": "./structured_query/supabase.d.ts" }, "import": "./structured_query/supabase.js", "require": "./structured_query/supabase.cjs" }, "./structured_query/vectara": { "types": { "import": "./structured_query/vectara.d.ts", "require": "./structured_query/vectara.d.cts", "default": "./structured_query/vectara.d.ts" }, "import": "./structured_query/vectara.js", "require": "./structured_query/vectara.cjs" }, "./retrievers/zep_cloud": { "types": { "import": "./retrievers/zep_cloud.d.ts", "require": "./retrievers/zep_cloud.d.cts", "default": "./retrievers/zep_cloud.d.ts" }, "import": "./retrievers/zep_cloud.js", "require": "./retrievers/zep_cloud.cjs" }, "./caches/cloudflare_kv": { "types": { "import": "./caches/cloudflare_kv.d.ts", "require": "./caches/cloudflare_kv.d.cts", "default": "./caches/cloudflare_kv.d.ts" }, "import": "./caches/cloudflare_kv.js", "require": "./caches/cloudflare_kv.cjs" }, "./caches/ioredis": { "types": { "import": "./caches/ioredis.d.ts", "require": "./caches/ioredis.d.cts", "default": "./caches/ioredis.d.ts" }, "import": "./caches/ioredis.js", "require": "./caches/ioredis.cjs" }, "./caches/momento": { "types": { "import": "./caches/momento.d.ts", "require": "./caches/momento.d.cts", "default": "./caches/momento.d.ts" }, "import": "./caches/momento.js", "require": "./caches/momento.cjs" }, "./caches/upstash_redis": { "types": { "import": "./caches/upstash_redis.d.ts", "require": "./caches/upstash_redis.d.cts", "default": "./caches/upstash_redis.d.ts" }, "import": "./caches/upstash_redis.js", "require": "./caches/upstash_redis.cjs" }, "./graphs/neo4j_graph": { "types": { "import": "./graphs/neo4j_graph.d.ts", "require": "./graphs/neo4j_graph.d.cts", "default": "./graphs/neo4j_graph.d.ts" }, "import": "./graphs/neo4j_graph.js", 
"require": "./graphs/neo4j_graph.cjs" }, "./graphs/memgraph_graph": { "types": { "import": "./graphs/memgraph_graph.d.ts", "require": "./graphs/memgraph_graph.d.cts", "default": "./graphs/memgraph_graph.d.ts" }, "import": "./graphs/memgraph_graph.js", "require": "./graphs/memgraph_graph.cjs" }, "./document_compressors/ibm": { "types": { "import": "./document_compressors/ibm.d.ts", "require": "./document_compressors/ibm.d.cts", "default": "./document_compressors/ibm.d.ts" }, "import": "./document_compressors/ibm.js", "require": "./document_compressors/ibm.cjs" }, "./document_transformers/html_to_text": { "types": { "import": "./document_transformers/html_to_text.d.ts", "require": "./document_transformers/html_to_text.d.cts", "default": "./document_transformers/html_to_text.d.ts" }, "import": "./document_transformers/html_to_text.js", "require": "./document_transformers/html_to_text.cjs" }, "./document_transformers/mozilla_readability": { "types": { "import": "./document_transformers/mozilla_readability.d.ts", "require": "./document_transformers/mozilla_readability.d.cts", "default": "./document_transformers/mozilla_readability.d.ts" }, "import": "./document_transformers/mozilla_readability.js", "require": "./document_transformers/mozilla_readability.cjs" }, "./storage/cassandra": { "types": { "import": "./storage/cassandra.d.ts", "require": "./storage/cassandra.d.cts", "default": "./storage/cassandra.d.ts" }, "import": "./storage/cassandra.js", "require": "./storage/cassandra.cjs" }, "./storage/convex": { "types": { "import": "./storage/convex.d.ts", "require": "./storage/convex.d.cts", "default": "./storage/convex.d.ts" }, "import": "./storage/convex.js", "require": "./storage/convex.cjs" }, "./storage/ioredis": { "types": { "import": "./storage/ioredis.d.ts", "require": "./storage/ioredis.d.cts", "default": "./storage/ioredis.d.ts" }, "import": "./storage/ioredis.js", "require": "./storage/ioredis.cjs" }, "./storage/upstash_redis": { "types": { "import": 
"./storage/upstash_redis.d.ts", "require": "./storage/upstash_redis.d.cts", "default": "./storage/upstash_redis.d.ts" }, "import": "./storage/upstash_redis.js", "require": "./storage/upstash_redis.cjs" }, "./storage/vercel_kv": { "types": { "import": "./storage/vercel_kv.d.ts", "require": "./storage/vercel_kv.d.cts", "default": "./storage/vercel_kv.d.ts" }, "import": "./storage/vercel_kv.js", "require": "./storage/vercel_kv.cjs" }, "./stores/doc/base": { "types": { "import": "./stores/doc/base.d.ts", "require": "./stores/doc/base.d.cts", "default": "./stores/doc/base.d.ts" }, "import": "./stores/doc/base.js", "require": "./stores/doc/base.cjs" }, "./stores/doc/gcs": { "types": { "import": "./stores/doc/gcs.d.ts", "require": "./stores/doc/gcs.d.cts", "default": "./stores/doc/gcs.d.ts" }, "import": "./stores/doc/gcs.js", "require": "./stores/doc/gcs.cjs" }, "./stores/doc/in_memory": { "types": { "import": "./stores/doc/in_memory.d.ts", "require": "./stores/doc/in_memory.d.cts", "default": "./stores/doc/in_memory.d.ts" }, "import": "./stores/doc/in_memory.js", "require": "./stores/doc/in_memory.cjs" }, "./stores/message/astradb": { "types": { "import": "./stores/message/astradb.d.ts", "require": "./stores/message/astradb.d.cts", "default": "./stores/message/astradb.d.ts" }, "import": "./stores/message/astradb.js", "require": "./stores/message/astradb.cjs" }, "./stores/message/cassandra": { "types": { "import": "./stores/message/cassandra.d.ts", "require": "./stores/message/cassandra.d.cts", "default": "./stores/message/cassandra.d.ts" }, "import": "./stores/message/cassandra.js", "require": "./stores/message/cassandra.cjs" }, "./stores/message/cloudflare_d1": { "types": { "import": "./stores/message/cloudflare_d1.d.ts", "require": "./stores/message/cloudflare_d1.d.cts", "default": "./stores/message/cloudflare_d1.d.ts" }, "import": "./stores/message/cloudflare_d1.js", "require": "./stores/message/cloudflare_d1.cjs" }, "./stores/message/convex": { "types": { "import": 
"./stores/message/convex.d.ts", "require": "./stores/message/convex.d.cts", "default": "./stores/message/convex.d.ts" }, "import": "./stores/message/convex.js", "require": "./stores/message/convex.cjs" }, "./stores/message/dynamodb": { "types": { "import": "./stores/message/dynamodb.d.ts", "require": "./stores/message/dynamodb.d.cts", "default": "./stores/message/dynamodb.d.ts" }, "import": "./stores/message/dynamodb.js", "require": "./stores/message/dynamodb.cjs" }, "./stores/message/firestore": { "types": { "import": "./stores/message/firestore.d.ts", "require": "./stores/message/firestore.d.cts", "default": "./stores/message/firestore.d.ts" }, "import": "./stores/message/firestore.js", "require": "./stores/message/firestore.cjs" }, "./stores/message/file_system": { "types": { "import": "./stores/message/file_system.d.ts", "require": "./stores/message/file_system.d.cts", "default": "./stores/message/file_system.d.ts" }, "import": "./stores/message/file_system.js", "require": "./stores/message/file_system.cjs" }, "./stores/message/in_memory": { "types": { "import": "./stores/message/in_memory.d.ts", "require": "./stores/message/in_memory.d.cts", "default": "./stores/message/in_memory.d.ts" }, "import": "./stores/message/in_memory.js", "require": "./stores/message/in_memory.cjs" }, "./stores/message/ipfs_datastore": { "types": { "import": "./stores/message/ipfs_datastore.d.ts", "require": "./stores/message/ipfs_datastore.d.cts", "default": "./stores/message/ipfs_datastore.d.ts" }, "import": "./stores/message/ipfs_datastore.js", "require": "./stores/message/ipfs_datastore.cjs" }, "./stores/message/ioredis": { "types": { "import": "./stores/message/ioredis.d.ts", "require": "./stores/message/ioredis.d.cts", "default": "./stores/message/ioredis.d.ts" }, "import": "./stores/message/ioredis.js", "require": "./stores/message/ioredis.cjs" }, "./stores/message/momento": { "types": { "import": "./stores/message/momento.d.ts", "require": "./stores/message/momento.d.cts", 
"default": "./stores/message/momento.d.ts" }, "import": "./stores/message/momento.js", "require": "./stores/message/momento.cjs" }, "./stores/message/mongodb": { "types": { "import": "./stores/message/mongodb.d.ts", "require": "./stores/message/mongodb.d.cts", "default": "./stores/message/mongodb.d.ts" }, "import": "./stores/message/mongodb.js", "require": "./stores/message/mongodb.cjs" }, "./stores/message/planetscale": { "types": { "import": "./stores/message/planetscale.d.ts", "require": "./stores/message/planetscale.d.cts", "default": "./stores/message/planetscale.d.ts" }, "import": "./stores/message/planetscale.js", "require": "./stores/message/planetscale.cjs" }, "./stores/message/postgres": { "types": { "import": "./stores/message/postgres.d.ts", "require": "./stores/message/postgres.d.cts", "default": "./stores/message/postgres.d.ts" }, "import": "./stores/message/postgres.js", "require": "./stores/message/postgres.cjs" }, "./stores/message/redis": { "types": { "import": "./stores/message/redis.d.ts", "require": "./stores/message/redis.d.cts", "default": "./stores/message/redis.d.ts" }, "import": "./stores/message/redis.js", "require": "./stores/message/redis.cjs" }, "./stores/message/upstash_redis": { "types": { "import": "./stores/message/upstash_redis.d.ts", "require": "./stores/message/upstash_redis.d.cts", "default": "./stores/message/upstash_redis.d.ts" }, "import": "./stores/message/upstash_redis.js", "require": "./stores/message/upstash_redis.cjs" }, "./stores/message/xata": { "types": { "import": "./stores/message/xata.d.ts", "require": "./stores/message/xata.d.cts", "default": "./stores/message/xata.d.ts" }, "import": "./stores/message/xata.js", "require": "./stores/message/xata.cjs" }, "./stores/message/zep_cloud": { "types": { "import": "./stores/message/zep_cloud.d.ts", "require": "./stores/message/zep_cloud.d.cts", "default": "./stores/message/zep_cloud.d.ts" }, "import": "./stores/message/zep_cloud.js", "require": 
"./stores/message/zep_cloud.cjs" }, "./memory/chat_memory": { "types": { "import": "./memory/chat_memory.d.ts", "require": "./memory/chat_memory.d.cts", "default": "./memory/chat_memory.d.ts" }, "import": "./memory/chat_memory.js", "require": "./memory/chat_memory.cjs" }, "./memory/motorhead_memory": { "types": { "import": "./memory/motorhead_memory.d.ts", "require": "./memory/motorhead_memory.d.cts", "default": "./memory/motorhead_memory.d.ts" }, "import": "./memory/motorhead_memory.js", "require": "./memory/motorhead_memory.cjs" }, "./memory/zep": { "types": { "import": "./memory/zep.d.ts", "require": "./memory/zep.d.cts", "default": "./memory/zep.d.ts" }, "import": "./memory/zep.js", "require": "./memory/zep.cjs" }, "./memory/zep_cloud": { "types": { "import": "./memory/zep_cloud.d.ts", "require": "./memory/zep_cloud.d.cts", "default": "./memory/zep_cloud.d.ts" }, "import": "./memory/zep_cloud.js", "require": "./memory/zep_cloud.cjs" }, "./indexes/base": { "types": { "import": "./indexes/base.d.ts", "require": "./indexes/base.d.cts", "default": "./indexes/base.d.ts" }, "import": "./indexes/base.js", "require": "./indexes/base.cjs" }, "./indexes/postgres": { "types": { "import": "./indexes/postgres.d.ts", "require": "./indexes/postgres.d.cts", "default": "./indexes/postgres.d.ts" }, "import": "./indexes/postgres.js", "require": "./indexes/postgres.cjs" }, "./indexes/memory": { "types": { "import": "./indexes/memory.d.ts", "require": "./indexes/memory.d.cts", "default": "./indexes/memory.d.ts" }, "import": "./indexes/memory.js", "require": "./indexes/memory.cjs" }, "./indexes/sqlite": { "types": { "import": "./indexes/sqlite.d.ts", "require": "./indexes/sqlite.d.cts", "default": "./indexes/sqlite.d.ts" }, "import": "./indexes/sqlite.js", "require": "./indexes/sqlite.cjs" }, "./document_loaders/web/airtable": { "types": { "import": "./document_loaders/web/airtable.d.ts", "require": "./document_loaders/web/airtable.d.cts", "default": 
"./document_loaders/web/airtable.d.ts" }, "import": "./document_loaders/web/airtable.js", "require": "./document_loaders/web/airtable.cjs" }, "./document_loaders/web/apify_dataset": { "types": { "import": "./document_loaders/web/apify_dataset.d.ts", "require": "./document_loaders/web/apify_dataset.d.cts", "default": "./document_loaders/web/apify_dataset.d.ts" }, "import": "./document_loaders/web/apify_dataset.js", "require": "./document_loaders/web/apify_dataset.cjs" }, "./document_loaders/web/assemblyai": { "types": { "import": "./document_loaders/web/assemblyai.d.ts", "require": "./document_loaders/web/assemblyai.d.cts", "default": "./document_loaders/web/assemblyai.d.ts" }, "import": "./document_loaders/web/assemblyai.js", "require": "./document_loaders/web/assemblyai.cjs" }, "./document_loaders/web/azure_blob_storage_container": { "types": { "import": "./document_loaders/web/azure_blob_storage_container.d.ts", "require": "./document_loaders/web/azure_blob_storage_container.d.cts", "default": "./document_loaders/web/azure_blob_storage_container.d.ts" }, "import": "./document_loaders/web/azure_blob_storage_container.js", "require": "./document_loaders/web/azure_blob_storage_container.cjs" }, "./document_loaders/web/azure_blob_storage_file": { "types": { "import": "./document_loaders/web/azure_blob_storage_file.d.ts", "require": "./document_loaders/web/azure_blob_storage_file.d.cts", "default": "./document_loaders/web/azure_blob_storage_file.d.ts" }, "import": "./document_loaders/web/azure_blob_storage_file.js", "require": "./document_loaders/web/azure_blob_storage_file.cjs" }, "./document_loaders/web/browserbase": { "types": { "import": "./document_loaders/web/browserbase.d.ts", "require": "./document_loaders/web/browserbase.d.cts", "default": "./document_loaders/web/browserbase.d.ts" }, "import": "./document_loaders/web/browserbase.js", "require": "./document_loaders/web/browserbase.cjs" }, "./document_loaders/web/cheerio": { "types": { "import": 
"./document_loaders/web/cheerio.d.ts", "require": "./document_loaders/web/cheerio.d.cts", "default": "./document_loaders/web/cheerio.d.ts" }, "import": "./document_loaders/web/cheerio.js", "require": "./document_loaders/web/cheerio.cjs" }, "./document_loaders/web/html": { "types": { "import": "./document_loaders/web/html.d.ts", "require": "./document_loaders/web/html.d.cts", "default": "./document_loaders/web/html.d.ts" }, "import": "./document_loaders/web/html.js", "require": "./document_loaders/web/html.cjs" }, "./document_loaders/web/puppeteer": { "types": { "import": "./document_loaders/web/puppeteer.d.ts", "require": "./document_loaders/web/puppeteer.d.cts", "default": "./document_loaders/web/puppeteer.d.ts" }, "import": "./document_loaders/web/puppeteer.js", "require": "./document_loaders/web/puppeteer.cjs" }, "./document_loaders/web/playwright": { "types": { "import": "./document_loaders/web/playwright.d.ts", "require": "./document_loaders/web/playwright.d.cts", "default": "./document_loaders/web/playwright.d.ts" }, "import": "./document_loaders/web/playwright.js", "require": "./document_loaders/web/playwright.cjs" }, "./document_loaders/web/college_confidential": { "types": { "import": "./document_loaders/web/college_confidential.d.ts", "require": "./document_loaders/web/college_confidential.d.cts", "default": "./document_loaders/web/college_confidential.d.ts" }, "import": "./document_loaders/web/college_confidential.js", "require": "./document_loaders/web/college_confidential.cjs" }, "./document_loaders/web/gitbook": { "types": { "import": "./document_loaders/web/gitbook.d.ts", "require": "./document_loaders/web/gitbook.d.cts", "default": "./document_loaders/web/gitbook.d.ts" }, "import": "./document_loaders/web/gitbook.js", "require": "./document_loaders/web/gitbook.cjs" }, "./document_loaders/web/hn": { "types": { "import": "./document_loaders/web/hn.d.ts", "require": "./document_loaders/web/hn.d.cts", "default": "./document_loaders/web/hn.d.ts" }, 
"import": "./document_loaders/web/hn.js", "require": "./document_loaders/web/hn.cjs" }, "./document_loaders/web/imsdb": { "types": { "import": "./document_loaders/web/imsdb.d.ts", "require": "./document_loaders/web/imsdb.d.cts", "default": "./document_loaders/web/imsdb.d.ts" }, "import": "./document_loaders/web/imsdb.js", "require": "./document_loaders/web/imsdb.cjs" }, "./document_loaders/web/figma": { "types": { "import": "./document_loaders/web/figma.d.ts", "require": "./document_loaders/web/figma.d.cts", "default": "./document_loaders/web/figma.d.ts" }, "import": "./document_loaders/web/figma.js", "require": "./document_loaders/web/figma.cjs" }, "./document_loaders/web/firecrawl": { "types": { "import": "./document_loaders/web/firecrawl.d.ts", "require": "./document_loaders/web/firecrawl.d.cts", "default": "./document_loaders/web/firecrawl.d.ts" }, "import": "./document_loaders/web/firecrawl.js", "require": "./document_loaders/web/firecrawl.cjs" }, "./document_loaders/web/github": { "types": { "import": "./document_loaders/web/github.d.ts", "require": "./document_loaders/web/github.d.cts", "default": "./document_loaders/web/github.d.ts" }, "import": "./document_loaders/web/github.js", "require": "./document_loaders/web/github.cjs" }, "./document_loaders/web/taskade": { "types": { "import": "./document_loaders/web/taskade.d.ts", "require": "./document_loaders/web/taskade.d.cts", "default": "./document_loaders/web/taskade.d.ts" }, "import": "./document_loaders/web/taskade.js", "require": "./document_loaders/web/taskade.cjs" }, "./document_loaders/web/notionapi": { "types": { "import": "./document_loaders/web/notionapi.d.ts", "require": "./document_loaders/web/notionapi.d.cts", "default": "./document_loaders/web/notionapi.d.ts" }, "import": "./document_loaders/web/notionapi.js", "require": "./document_loaders/web/notionapi.cjs" }, "./document_loaders/web/pdf": { "types": { "import": "./document_loaders/web/pdf.d.ts", "require": "./document_loaders/web/pdf.d.cts", 
"default": "./document_loaders/web/pdf.d.ts" }, "import": "./document_loaders/web/pdf.js", "require": "./document_loaders/web/pdf.cjs" }, "./document_loaders/web/recursive_url": { "types": { "import": "./document_loaders/web/recursive_url.d.ts", "require": "./document_loaders/web/recursive_url.d.cts", "default": "./document_loaders/web/recursive_url.d.ts" }, "import": "./document_loaders/web/recursive_url.js", "require": "./document_loaders/web/recursive_url.cjs" }, "./document_loaders/web/s3": { "types": { "import": "./document_loaders/web/s3.d.ts", "require": "./document_loaders/web/s3.d.cts", "default": "./document_loaders/web/s3.d.ts" }, "import": "./document_loaders/web/s3.js", "require": "./document_loaders/web/s3.cjs" }, "./document_loaders/web/sitemap": { "types": { "import": "./document_loaders/web/sitemap.d.ts", "require": "./document_loaders/web/sitemap.d.cts", "default": "./document_loaders/web/sitemap.d.ts" }, "import": "./document_loaders/web/sitemap.js", "require": "./document_loaders/web/sitemap.cjs" }, "./document_loaders/web/sonix_audio": { "types": { "import": "./document_loaders/web/sonix_audio.d.ts", "require": "./document_loaders/web/sonix_audio.d.cts", "default": "./document_loaders/web/sonix_audio.d.ts" }, "import": "./document_loaders/web/sonix_audio.js", "require": "./document_loaders/web/sonix_audio.cjs" }, "./document_loaders/web/confluence": { "types": { "import": "./document_loaders/web/confluence.d.ts", "require": "./document_loaders/web/confluence.d.cts", "default": "./document_loaders/web/confluence.d.ts" }, "import": "./document_loaders/web/confluence.js", "require": "./document_loaders/web/confluence.cjs" }, "./document_loaders/web/couchbase": { "types": { "import": "./document_loaders/web/couchbase.d.ts", "require": "./document_loaders/web/couchbase.d.cts", "default": "./document_loaders/web/couchbase.d.ts" }, "import": "./document_loaders/web/couchbase.js", "require": "./document_loaders/web/couchbase.cjs" }, 
"./document_loaders/web/searchapi": { "types": { "import": "./document_loaders/web/searchapi.d.ts", "require": "./document_loaders/web/searchapi.d.cts", "default": "./document_loaders/web/searchapi.d.ts" }, "import": "./document_loaders/web/searchapi.js", "require": "./document_loaders/web/searchapi.cjs" }, "./document_loaders/web/serpapi": { "types": { "import": "./document_loaders/web/serpapi.d.ts", "require": "./document_loaders/web/serpapi.d.cts", "default": "./document_loaders/web/serpapi.d.ts" }, "import": "./document_loaders/web/serpapi.js", "require": "./document_loaders/web/serpapi.cjs" }, "./document_loaders/web/sort_xyz_blockchain": { "types": { "import": "./document_loaders/web/sort_xyz_blockchain.d.ts", "require": "./document_loaders/web/sort_xyz_blockchain.d.cts", "default": "./document_loaders/web/sort_xyz_blockchain.d.ts" }, "import": "./document_loaders/web/sort_xyz_blockchain.js", "require": "./document_loaders/web/sort_xyz_blockchain.cjs" }, "./document_loaders/web/spider": { "types": { "import": "./document_loaders/web/spider.d.ts", "require": "./document_loaders/web/spider.d.cts", "default": "./document_loaders/web/spider.d.ts" }, "import": "./document_loaders/web/spider.js", "require": "./document_loaders/web/spider.cjs" }, "./document_loaders/web/youtube": { "types": { "import": "./document_loaders/web/youtube.d.ts", "require": "./document_loaders/web/youtube.d.cts", "default": "./document_loaders/web/youtube.d.ts" }, "import": "./document_loaders/web/youtube.js", "require": "./document_loaders/web/youtube.cjs" }, "./document_loaders/fs/chatgpt": { "types": { "import": "./document_loaders/fs/chatgpt.d.ts", "require": "./document_loaders/fs/chatgpt.d.cts", "default": "./document_loaders/fs/chatgpt.d.ts" }, "import": "./document_loaders/fs/chatgpt.js", "require": "./document_loaders/fs/chatgpt.cjs" }, "./document_loaders/fs/srt": { "types": { "import": "./document_loaders/fs/srt.d.ts", "require": "./document_loaders/fs/srt.d.cts", "default": 
"./document_loaders/fs/srt.d.ts" }, "import": "./document_loaders/fs/srt.js", "require": "./document_loaders/fs/srt.cjs" }, "./document_loaders/fs/pdf": { "types": { "import": "./document_loaders/fs/pdf.d.ts", "require": "./document_loaders/fs/pdf.d.cts", "default": "./document_loaders/fs/pdf.d.ts" }, "import": "./document_loaders/fs/pdf.js", "require": "./document_loaders/fs/pdf.cjs" }, "./document_loaders/fs/docx": { "types": { "import": "./document_loaders/fs/docx.d.ts", "require": "./document_loaders/fs/docx.d.cts", "default": "./document_loaders/fs/docx.d.ts" }, "import": "./document_loaders/fs/docx.js", "require": "./document_loaders/fs/docx.cjs" }, "./document_loaders/fs/epub": { "types": { "import": "./document_loaders/fs/epub.d.ts", "require": "./document_loaders/fs/epub.d.cts", "default": "./document_loaders/fs/epub.d.ts" }, "import": "./document_loaders/fs/epub.js", "require": "./document_loaders/fs/epub.cjs" }, "./document_loaders/fs/csv": { "types": { "import": "./document_loaders/fs/csv.d.ts", "require": "./document_loaders/fs/csv.d.cts", "default": "./document_loaders/fs/csv.d.ts" }, "import": "./document_loaders/fs/csv.js", "require": "./document_loaders/fs/csv.cjs" }, "./document_loaders/fs/notion": { "types": { "import": "./document_loaders/fs/notion.d.ts", "require": "./document_loaders/fs/notion.d.cts", "default": "./document_loaders/fs/notion.d.ts" }, "import": "./document_loaders/fs/notion.js", "require": "./document_loaders/fs/notion.cjs" }, "./document_loaders/fs/obsidian": { "types": { "import": "./document_loaders/fs/obsidian.d.ts", "require": "./document_loaders/fs/obsidian.d.cts", "default": "./document_loaders/fs/obsidian.d.ts" }, "import": "./document_loaders/fs/obsidian.js", "require": "./document_loaders/fs/obsidian.cjs" }, "./document_loaders/fs/unstructured": { "types": { "import": "./document_loaders/fs/unstructured.d.ts", "require": "./document_loaders/fs/unstructured.d.cts", "default": "./document_loaders/fs/unstructured.d.ts" 
}, "import": "./document_loaders/fs/unstructured.js", "require": "./document_loaders/fs/unstructured.cjs" }, "./document_loaders/fs/openai_whisper_audio": { "types": { "import": "./document_loaders/fs/openai_whisper_audio.d.ts", "require": "./document_loaders/fs/openai_whisper_audio.d.cts", "default": "./document_loaders/fs/openai_whisper_audio.d.ts" }, "import": "./document_loaders/fs/openai_whisper_audio.js", "require": "./document_loaders/fs/openai_whisper_audio.cjs" }, "./document_loaders/fs/pptx": { "types": { "import": "./document_loaders/fs/pptx.d.ts", "require": "./document_loaders/fs/pptx.d.cts", "default": "./document_loaders/fs/pptx.d.ts" }, "import": "./document_loaders/fs/pptx.js", "require": "./document_loaders/fs/pptx.cjs" }, "./utils/convex": { "types": { "import": "./utils/convex.d.ts", "require": "./utils/convex.d.cts", "default": "./utils/convex.d.ts" }, "import": "./utils/convex.js", "require": "./utils/convex.cjs" }, "./utils/event_source_parse": { "types": { "import": "./utils/event_source_parse.d.ts", "require": "./utils/event_source_parse.d.cts", "default": "./utils/event_source_parse.d.ts" }, "import": "./utils/event_source_parse.js", "require": "./utils/event_source_parse.cjs" }, "./utils/cassandra": { "types": { "import": "./utils/cassandra.d.ts", "require": "./utils/cassandra.d.cts", "default": "./utils/cassandra.d.ts" }, "import": "./utils/cassandra.js", "require": "./utils/cassandra.cjs" }, "./experimental/callbacks/handlers/datadog": { "types": { "import": "./experimental/callbacks/handlers/datadog.d.ts", "require": "./experimental/callbacks/handlers/datadog.d.cts", "default": "./experimental/callbacks/handlers/datadog.d.ts" }, "import": "./experimental/callbacks/handlers/datadog.js", "require": "./experimental/callbacks/handlers/datadog.cjs" }, "./experimental/graph_transformers/llm": { "types": { "import": "./experimental/graph_transformers/llm.d.ts", "require": "./experimental/graph_transformers/llm.d.cts", "default": 
"./experimental/graph_transformers/llm.d.ts" }, "import": "./experimental/graph_transformers/llm.js", "require": "./experimental/graph_transformers/llm.cjs" }, "./experimental/multimodal_embeddings/googlevertexai": { "types": { "import": "./experimental/multimodal_embeddings/googlevertexai.d.ts", "require": "./experimental/multimodal_embeddings/googlevertexai.d.cts", "default": "./experimental/multimodal_embeddings/googlevertexai.d.ts" }, "import": "./experimental/multimodal_embeddings/googlevertexai.js", "require": "./experimental/multimodal_embeddings/googlevertexai.cjs" }, "./experimental/hubs/makersuite/googlemakersuitehub": { "types": { "import": "./experimental/hubs/makersuite/googlemakersuitehub.d.ts", "require": "./experimental/hubs/makersuite/googlemakersuitehub.d.cts", "default": "./experimental/hubs/makersuite/googlemakersuitehub.d.ts" }, "import": "./experimental/hubs/makersuite/googlemakersuitehub.js", "require": "./experimental/hubs/makersuite/googlemakersuitehub.cjs" }, "./experimental/chat_models/ollama_functions": { "types": { "import": "./experimental/chat_models/ollama_functions.d.ts", "require": "./experimental/chat_models/ollama_functions.d.cts", "default": "./experimental/chat_models/ollama_functions.d.ts" }, "import": "./experimental/chat_models/ollama_functions.js", "require": "./experimental/chat_models/ollama_functions.cjs" }, "./experimental/llms/chrome_ai": { "types": { "import": "./experimental/llms/chrome_ai.d.ts", "require": "./experimental/llms/chrome_ai.d.cts", "default": "./experimental/llms/chrome_ai.d.ts" }, "import": "./experimental/llms/chrome_ai.js", "require": "./experimental/llms/chrome_ai.cjs" }, "./experimental/tools/pyinterpreter": { "types": { "import": "./experimental/tools/pyinterpreter.d.ts", "require": "./experimental/tools/pyinterpreter.d.cts", "default": "./experimental/tools/pyinterpreter.d.ts" }, "import": "./experimental/tools/pyinterpreter.js", "require": "./experimental/tools/pyinterpreter.cjs" }, 
"./chains/graph_qa/cypher": { "types": { "import": "./chains/graph_qa/cypher.d.ts", "require": "./chains/graph_qa/cypher.d.cts", "default": "./chains/graph_qa/cypher.d.ts" }, "import": "./chains/graph_qa/cypher.js", "require": "./chains/graph_qa/cypher.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "load.cjs", "load.js", "load.d.ts", "load.d.cts", "load/serializable.cjs", "load/serializable.js", "load/serializable.d.ts", "load/serializable.d.cts", "tools/aiplugin.cjs", "tools/aiplugin.js", "tools/aiplugin.d.ts", "tools/aiplugin.d.cts", "tools/aws_lambda.cjs", "tools/aws_lambda.js", "tools/aws_lambda.d.ts", "tools/aws_lambda.d.cts", "tools/aws_sfn.cjs", "tools/aws_sfn.js", "tools/aws_sfn.d.ts", "tools/aws_sfn.d.cts", "tools/bingserpapi.cjs", "tools/bingserpapi.js", "tools/bingserpapi.d.ts", "tools/bingserpapi.d.cts", "tools/brave_search.cjs", "tools/brave_search.js", "tools/brave_search.d.ts", "tools/brave_search.d.cts", "tools/duckduckgo_search.cjs", "tools/duckduckgo_search.js", "tools/duckduckgo_search.d.ts", "tools/duckduckgo_search.d.cts", "tools/calculator.cjs", "tools/calculator.js", "tools/calculator.d.ts", "tools/calculator.d.cts", "tools/connery.cjs", "tools/connery.js", "tools/connery.d.ts", "tools/connery.d.cts", "tools/dadjokeapi.cjs", "tools/dadjokeapi.js", "tools/dadjokeapi.d.ts", "tools/dadjokeapi.d.cts", "tools/discord.cjs", "tools/discord.js", "tools/discord.d.ts", "tools/discord.d.cts", "tools/dynamic.cjs", "tools/dynamic.js", "tools/dynamic.d.ts", "tools/dynamic.d.cts", "tools/dataforseo_api_search.cjs", "tools/dataforseo_api_search.js", "tools/dataforseo_api_search.d.ts", "tools/dataforseo_api_search.d.cts", "tools/gmail.cjs", "tools/gmail.js", "tools/gmail.d.ts", "tools/gmail.d.cts", "tools/google_calendar.cjs", "tools/google_calendar.js", "tools/google_calendar.d.ts", "tools/google_calendar.d.cts", "tools/google_custom_search.cjs", "tools/google_custom_search.js", "tools/google_custom_search.d.ts", 
"tools/google_custom_search.d.cts", "tools/google_places.cjs", "tools/google_places.js", "tools/google_places.d.ts", "tools/google_places.d.cts", "tools/google_routes.cjs", "tools/google_routes.js", "tools/google_routes.d.ts", "tools/google_routes.d.cts", "tools/ifttt.cjs", "tools/ifttt.js", "tools/ifttt.d.ts", "tools/ifttt.d.cts", "tools/searchapi.cjs", "tools/searchapi.js", "tools/searchapi.d.ts", "tools/searchapi.d.cts", "tools/searxng_search.cjs", "tools/searxng_search.js", "tools/searxng_search.d.ts", "tools/searxng_search.d.cts", "tools/serpapi.cjs", "tools/serpapi.js", "tools/serpapi.d.ts", "tools/serpapi.d.cts", "tools/serper.cjs", "tools/serper.js", "tools/serper.d.ts", "tools/serper.d.cts", "tools/stackexchange.cjs", "tools/stackexchange.js", "tools/stackexchange.d.ts", "tools/stackexchange.d.cts", "tools/tavily_search.cjs", "tools/tavily_search.js", "tools/tavily_search.d.ts", "tools/tavily_search.d.cts", "tools/wikipedia_query_run.cjs", "tools/wikipedia_query_run.js", "tools/wikipedia_query_run.d.ts", "tools/wikipedia_query_run.d.cts", "tools/wolframalpha.cjs", "tools/wolframalpha.js", "tools/wolframalpha.d.ts", "tools/wolframalpha.d.cts", "agents/toolkits/aws_sfn.cjs", "agents/toolkits/aws_sfn.js", "agents/toolkits/aws_sfn.d.ts", "agents/toolkits/aws_sfn.d.cts", "agents/toolkits/base.cjs", "agents/toolkits/base.js", "agents/toolkits/base.d.ts", "agents/toolkits/base.d.cts", "agents/toolkits/connery.cjs", "agents/toolkits/connery.js", "agents/toolkits/connery.d.ts", "agents/toolkits/connery.d.cts", "agents/toolkits/stagehand.cjs", "agents/toolkits/stagehand.js", "agents/toolkits/stagehand.d.ts", "agents/toolkits/stagehand.d.cts", "embeddings/alibaba_tongyi.cjs", "embeddings/alibaba_tongyi.js", "embeddings/alibaba_tongyi.d.ts", "embeddings/alibaba_tongyi.d.cts", "embeddings/baidu_qianfan.cjs", "embeddings/baidu_qianfan.js", "embeddings/baidu_qianfan.d.ts", "embeddings/baidu_qianfan.d.cts", "embeddings/bedrock.cjs", "embeddings/bedrock.js", 
"embeddings/bedrock.d.ts", "embeddings/bedrock.d.cts", "embeddings/cloudflare_workersai.cjs", "embeddings/cloudflare_workersai.js", "embeddings/cloudflare_workersai.d.ts", "embeddings/cloudflare_workersai.d.cts", "embeddings/cohere.cjs", "embeddings/cohere.js", "embeddings/cohere.d.ts", "embeddings/cohere.d.cts", "embeddings/deepinfra.cjs", "embeddings/deepinfra.js", "embeddings/deepinfra.d.ts", "embeddings/deepinfra.d.cts", "embeddings/fireworks.cjs", "embeddings/fireworks.js", "embeddings/fireworks.d.ts", "embeddings/fireworks.d.cts", "embeddings/gradient_ai.cjs", "embeddings/gradient_ai.js", "embeddings/gradient_ai.d.ts", "embeddings/gradient_ai.d.cts", "embeddings/hf.cjs", "embeddings/hf.js", "embeddings/hf.d.ts", "embeddings/hf.d.cts", "embeddings/hf_transformers.cjs", "embeddings/hf_transformers.js", "embeddings/hf_transformers.d.ts", "embeddings/hf_transformers.d.cts", "embeddings/ibm.cjs", "embeddings/ibm.js", "embeddings/ibm.d.ts", "embeddings/ibm.d.cts", "embeddings/jina.cjs", "embeddings/jina.js", "embeddings/jina.d.ts", "embeddings/jina.d.cts", "embeddings/llama_cpp.cjs", "embeddings/llama_cpp.js", "embeddings/llama_cpp.d.ts", "embeddings/llama_cpp.d.cts", "embeddings/minimax.cjs", "embeddings/minimax.js", "embeddings/minimax.d.ts", "embeddings/minimax.d.cts", "embeddings/ollama.cjs", "embeddings/ollama.js", "embeddings/ollama.d.ts", "embeddings/ollama.d.cts", "embeddings/premai.cjs", "embeddings/premai.js", "embeddings/premai.d.ts", "embeddings/premai.d.cts", "embeddings/tensorflow.cjs", "embeddings/tensorflow.js", "embeddings/tensorflow.d.ts", "embeddings/tensorflow.d.cts", "embeddings/tencent_hunyuan.cjs", "embeddings/tencent_hunyuan.js", "embeddings/tencent_hunyuan.d.ts", "embeddings/tencent_hunyuan.d.cts", "embeddings/tencent_hunyuan/web.cjs", "embeddings/tencent_hunyuan/web.js", "embeddings/tencent_hunyuan/web.d.ts", "embeddings/tencent_hunyuan/web.d.cts", "embeddings/togetherai.cjs", "embeddings/togetherai.js", "embeddings/togetherai.d.ts", 
"embeddings/togetherai.d.cts", "embeddings/voyage.cjs", "embeddings/voyage.js", "embeddings/voyage.d.ts", "embeddings/voyage.d.cts", "embeddings/zhipuai.cjs", "embeddings/zhipuai.js", "embeddings/zhipuai.d.ts", "embeddings/zhipuai.d.cts", "llms/ai21.cjs", "llms/ai21.js", "llms/ai21.d.ts", "llms/ai21.d.cts", "llms/aleph_alpha.cjs", "llms/aleph_alpha.js", "llms/aleph_alpha.d.ts", "llms/aleph_alpha.d.cts", "llms/arcjet.cjs", "llms/arcjet.js", "llms/arcjet.d.ts", "llms/arcjet.d.cts", "llms/bedrock.cjs", "llms/bedrock.js", "llms/bedrock.d.ts", "llms/bedrock.d.cts", "llms/bedrock/web.cjs", "llms/bedrock/web.js", "llms/bedrock/web.d.ts", "llms/bedrock/web.d.cts", "llms/cloudflare_workersai.cjs", "llms/cloudflare_workersai.js", "llms/cloudflare_workersai.d.ts", "llms/cloudflare_workersai.d.cts", "llms/cohere.cjs", "llms/cohere.js", "llms/cohere.d.ts", "llms/cohere.d.cts", "llms/deepinfra.cjs", "llms/deepinfra.js", "llms/deepinfra.d.ts", "llms/deepinfra.d.cts", "llms/fireworks.cjs", "llms/fireworks.js", "llms/fireworks.d.ts", "llms/fireworks.d.cts", "llms/friendli.cjs", "llms/friendli.js", "llms/friendli.d.ts", "llms/friendli.d.cts", "llms/gradient_ai.cjs", "llms/gradient_ai.js", "llms/gradient_ai.d.ts", "llms/gradient_ai.d.cts", "llms/hf.cjs", "llms/hf.js", "llms/hf.d.ts", "llms/hf.d.cts", "llms/ibm.cjs", "llms/ibm.js", "llms/ibm.d.ts", "llms/ibm.d.cts", "llms/llama_cpp.cjs", "llms/llama_cpp.js", "llms/llama_cpp.d.ts", "llms/llama_cpp.d.cts", "llms/ollama.cjs", "llms/ollama.js", "llms/ollama.d.ts", "llms/ollama.d.cts", "llms/portkey.cjs", "llms/portkey.js", "llms/portkey.d.ts", "llms/portkey.d.cts", "llms/raycast.cjs", "llms/raycast.js", "llms/raycast.d.ts", "llms/raycast.d.cts", "llms/replicate.cjs", "llms/replicate.js", "llms/replicate.d.ts", "llms/replicate.d.cts", "llms/sagemaker_endpoint.cjs", "llms/sagemaker_endpoint.js", "llms/sagemaker_endpoint.d.ts", "llms/sagemaker_endpoint.d.cts", "llms/togetherai.cjs", "llms/togetherai.js", "llms/togetherai.d.ts", 
"llms/togetherai.d.cts", "llms/watsonx_ai.cjs", "llms/watsonx_ai.js", "llms/watsonx_ai.d.ts", "llms/watsonx_ai.d.cts", "llms/writer.cjs", "llms/writer.js", "llms/writer.d.ts", "llms/writer.d.cts", "llms/yandex.cjs", "llms/yandex.js", "llms/yandex.d.ts", "llms/yandex.d.cts", "llms/layerup_security.cjs", "llms/layerup_security.js", "llms/layerup_security.d.ts", "llms/layerup_security.d.cts", "vectorstores/analyticdb.cjs", "vectorstores/analyticdb.js", "vectorstores/analyticdb.d.ts", "vectorstores/analyticdb.d.cts", "vectorstores/astradb.cjs", "vectorstores/astradb.js", "vectorstores/astradb.d.ts", "vectorstores/astradb.d.cts", "vectorstores/azure_aisearch.cjs", "vectorstores/azure_aisearch.js", "vectorstores/azure_aisearch.d.ts", "vectorstores/azure_aisearch.d.cts", "vectorstores/azure_cosmosdb.cjs", "vectorstores/azure_cosmosdb.js", "vectorstores/azure_cosmosdb.d.ts", "vectorstores/azure_cosmosdb.d.cts", "vectorstores/cassandra.cjs", "vectorstores/cassandra.js", "vectorstores/cassandra.d.ts", "vectorstores/cassandra.d.cts", "vectorstores/chroma.cjs", "vectorstores/chroma.js", "vectorstores/chroma.d.ts", "vectorstores/chroma.d.cts", "vectorstores/clickhouse.cjs", "vectorstores/clickhouse.js", "vectorstores/clickhouse.d.ts", "vectorstores/clickhouse.d.cts", "vectorstores/closevector/node.cjs", "vectorstores/closevector/node.js", "vectorstores/closevector/node.d.ts", "vectorstores/closevector/node.d.cts", "vectorstores/closevector/web.cjs", "vectorstores/closevector/web.js", "vectorstores/closevector/web.d.ts", "vectorstores/closevector/web.d.cts", "vectorstores/cloudflare_vectorize.cjs", "vectorstores/cloudflare_vectorize.js", "vectorstores/cloudflare_vectorize.d.ts", "vectorstores/cloudflare_vectorize.d.cts", "vectorstores/convex.cjs", "vectorstores/convex.js", "vectorstores/convex.d.ts", "vectorstores/convex.d.cts", "vectorstores/couchbase.cjs", "vectorstores/couchbase.js", "vectorstores/couchbase.d.ts", "vectorstores/couchbase.d.cts", 
"vectorstores/elasticsearch.cjs", "vectorstores/elasticsearch.js", "vectorstores/elasticsearch.d.ts", "vectorstores/elasticsearch.d.cts", "vectorstores/faiss.cjs", "vectorstores/faiss.js", "vectorstores/faiss.d.ts", "vectorstores/faiss.d.cts", "vectorstores/googlevertexai.cjs", "vectorstores/googlevertexai.js", "vectorstores/googlevertexai.d.ts", "vectorstores/googlevertexai.d.cts", "vectorstores/hnswlib.cjs", "vectorstores/hnswlib.js", "vectorstores/hnswlib.d.ts", "vectorstores/hnswlib.d.cts", "vectorstores/hanavector.cjs", "vectorstores/hanavector.js", "vectorstores/hanavector.d.ts", "vectorstores/hanavector.d.cts", "vectorstores/lancedb.cjs", "vectorstores/lancedb.js", "vectorstores/lancedb.d.ts", "vectorstores/lancedb.d.cts", "vectorstores/libsql.cjs", "vectorstores/libsql.js", "vectorstores/libsql.d.ts", "vectorstores/libsql.d.cts", "vectorstores/milvus.cjs", "vectorstores/milvus.js", "vectorstores/milvus.d.ts", "vectorstores/milvus.d.cts", "vectorstores/momento_vector_index.cjs", "vectorstores/momento_vector_index.js", "vectorstores/momento_vector_index.d.ts", "vectorstores/momento_vector_index.d.cts", "vectorstores/mongodb_atlas.cjs", "vectorstores/mongodb_atlas.js", "vectorstores/mongodb_atlas.d.ts", "vectorstores/mongodb_atlas.d.cts", "vectorstores/myscale.cjs", "vectorstores/myscale.js", "vectorstores/myscale.d.ts", "vectorstores/myscale.d.cts", "vectorstores/neo4j_vector.cjs", "vectorstores/neo4j_vector.js", "vectorstores/neo4j_vector.d.ts", "vectorstores/neo4j_vector.d.cts", "vectorstores/neon.cjs", "vectorstores/neon.js", "vectorstores/neon.d.ts", "vectorstores/neon.d.cts", "vectorstores/opensearch.cjs", "vectorstores/opensearch.js", "vectorstores/opensearch.d.ts", "vectorstores/opensearch.d.cts", "vectorstores/pgvector.cjs", "vectorstores/pgvector.js", "vectorstores/pgvector.d.ts", "vectorstores/pgvector.d.cts", "vectorstores/pinecone.cjs", "vectorstores/pinecone.js", "vectorstores/pinecone.d.ts", "vectorstores/pinecone.d.cts", 
"vectorstores/prisma.cjs", "vectorstores/prisma.js", "vectorstores/prisma.d.ts", "vectorstores/prisma.d.cts", "vectorstores/qdrant.cjs", "vectorstores/qdrant.js", "vectorstores/qdrant.d.ts", "vectorstores/qdrant.d.cts", "vectorstores/redis.cjs", "vectorstores/redis.js", "vectorstores/redis.d.ts", "vectorstores/redis.d.cts", "vectorstores/rockset.cjs", "vectorstores/rockset.js", "vectorstores/rockset.d.ts", "vectorstores/rockset.d.cts", "vectorstores/singlestore.cjs", "vectorstores/singlestore.js", "vectorstores/singlestore.d.ts", "vectorstores/singlestore.d.cts", "vectorstores/supabase.cjs", "vectorstores/supabase.js", "vectorstores/supabase.d.ts", "vectorstores/supabase.d.cts", "vectorstores/tigris.cjs", "vectorstores/tigris.js", "vectorstores/tigris.d.ts", "vectorstores/tigris.d.cts", "vectorstores/turbopuffer.cjs", "vectorstores/turbopuffer.js", "vectorstores/turbopuffer.d.ts", "vectorstores/turbopuffer.d.cts", "vectorstores/typeorm.cjs", "vectorstores/typeorm.js", "vectorstores/typeorm.d.ts", "vectorstores/typeorm.d.cts", "vectorstores/typesense.cjs", "vectorstores/typesense.js", "vectorstores/typesense.d.ts", "vectorstores/typesense.d.cts", "vectorstores/upstash.cjs", "vectorstores/upstash.js", "vectorstores/upstash.d.ts", "vectorstores/upstash.d.cts", "vectorstores/usearch.cjs", "vectorstores/usearch.js", "vectorstores/usearch.d.ts", "vectorstores/usearch.d.cts", "vectorstores/vectara.cjs", "vectorstores/vectara.js", "vectorstores/vectara.d.ts", "vectorstores/vectara.d.cts", "vectorstores/vercel_postgres.cjs", "vectorstores/vercel_postgres.js", "vectorstores/vercel_postgres.d.ts", "vectorstores/vercel_postgres.d.cts", "vectorstores/voy.cjs", "vectorstores/voy.js", "vectorstores/voy.d.ts", "vectorstores/voy.d.cts", "vectorstores/weaviate.cjs", "vectorstores/weaviate.js", "vectorstores/weaviate.d.ts", "vectorstores/weaviate.d.cts", "vectorstores/xata.cjs", "vectorstores/xata.js", "vectorstores/xata.d.ts", "vectorstores/xata.d.cts", "vectorstores/zep.cjs", 
"vectorstores/zep.js", "vectorstores/zep.d.ts", "vectorstores/zep.d.cts", "vectorstores/zep_cloud.cjs", "vectorstores/zep_cloud.js", "vectorstores/zep_cloud.d.ts", "vectorstores/zep_cloud.d.cts", "chat_models/alibaba_tongyi.cjs", "chat_models/alibaba_tongyi.js", "chat_models/alibaba_tongyi.d.ts", "chat_models/alibaba_tongyi.d.cts", "chat_models/arcjet.cjs", "chat_models/arcjet.js", "chat_models/arcjet.d.ts", "chat_models/arcjet.d.cts", "chat_models/baiduwenxin.cjs", "chat_models/baiduwenxin.js", "chat_models/baiduwenxin.d.ts", "chat_models/baiduwenxin.d.cts", "chat_models/bedrock.cjs", "chat_models/bedrock.js", "chat_models/bedrock.d.ts", "chat_models/bedrock.d.cts", "chat_models/bedrock/web.cjs", "chat_models/bedrock/web.js", "chat_models/bedrock/web.d.ts", "chat_models/bedrock/web.d.cts", "chat_models/cloudflare_workersai.cjs", "chat_models/cloudflare_workersai.js", "chat_models/cloudflare_workersai.d.ts", "chat_models/cloudflare_workersai.d.cts", "chat_models/deepinfra.cjs", "chat_models/deepinfra.js", "chat_models/deepinfra.d.ts", "chat_models/deepinfra.d.cts", "chat_models/fireworks.cjs", "chat_models/fireworks.js", "chat_models/fireworks.d.ts", "chat_models/fireworks.d.cts", "chat_models/friendli.cjs", "chat_models/friendli.js", "chat_models/friendli.d.ts", "chat_models/friendli.d.cts", "chat_models/ibm.cjs", "chat_models/ibm.js", "chat_models/ibm.d.ts", "chat_models/ibm.d.cts", "chat_models/iflytek_xinghuo.cjs", "chat_models/iflytek_xinghuo.js", "chat_models/iflytek_xinghuo.d.ts", "chat_models/iflytek_xinghuo.d.cts", "chat_models/iflytek_xinghuo/web.cjs", "chat_models/iflytek_xinghuo/web.js", "chat_models/iflytek_xinghuo/web.d.ts", "chat_models/iflytek_xinghuo/web.d.cts", "chat_models/llama_cpp.cjs", "chat_models/llama_cpp.js", "chat_models/llama_cpp.d.ts", "chat_models/llama_cpp.d.cts", "chat_models/minimax.cjs", "chat_models/minimax.js", "chat_models/minimax.d.ts", "chat_models/minimax.d.cts", "chat_models/moonshot.cjs", "chat_models/moonshot.js", 
"chat_models/moonshot.d.ts", "chat_models/moonshot.d.cts", "chat_models/novita.cjs", "chat_models/novita.js", "chat_models/novita.d.ts", "chat_models/novita.d.cts", "chat_models/ollama.cjs", "chat_models/ollama.js", "chat_models/ollama.d.ts", "chat_models/ollama.d.cts", "chat_models/portkey.cjs", "chat_models/portkey.js", "chat_models/portkey.d.ts", "chat_models/portkey.d.cts", "chat_models/premai.cjs", "chat_models/premai.js", "chat_models/premai.d.ts", "chat_models/premai.d.cts", "chat_models/tencent_hunyuan.cjs", "chat_models/tencent_hunyuan.js", "chat_models/tencent_hunyuan.d.ts", "chat_models/tencent_hunyuan.d.cts", "chat_models/tencent_hunyuan/web.cjs", "chat_models/tencent_hunyuan/web.js", "chat_models/tencent_hunyuan/web.d.ts", "chat_models/tencent_hunyuan/web.d.cts", "chat_models/togetherai.cjs", "chat_models/togetherai.js", "chat_models/togetherai.d.ts", "chat_models/togetherai.d.cts", "chat_models/webllm.cjs", "chat_models/webllm.js", "chat_models/webllm.d.ts", "chat_models/webllm.d.cts", "chat_models/yandex.cjs", "chat_models/yandex.js", "chat_models/yandex.d.ts", "chat_models/yandex.d.cts", "chat_models/zhipuai.cjs", "chat_models/zhipuai.js", "chat_models/zhipuai.d.ts", "chat_models/zhipuai.d.cts", "callbacks/handlers/llmonitor.cjs", "callbacks/handlers/llmonitor.js", "callbacks/handlers/llmonitor.d.ts", "callbacks/handlers/llmonitor.d.cts", "callbacks/handlers/lunary.cjs", "callbacks/handlers/lunary.js", "callbacks/handlers/lunary.d.ts", "callbacks/handlers/lunary.d.cts", "callbacks/handlers/upstash_ratelimit.cjs", "callbacks/handlers/upstash_ratelimit.js", "callbacks/handlers/upstash_ratelimit.d.ts", "callbacks/handlers/upstash_ratelimit.d.cts", "retrievers/amazon_kendra.cjs", "retrievers/amazon_kendra.js", "retrievers/amazon_kendra.d.ts", "retrievers/amazon_kendra.d.cts", "retrievers/amazon_knowledge_base.cjs", "retrievers/amazon_knowledge_base.js", "retrievers/amazon_knowledge_base.d.ts", "retrievers/amazon_knowledge_base.d.cts", 
"retrievers/bm25.cjs", "retrievers/bm25.js", "retrievers/bm25.d.ts", "retrievers/bm25.d.cts", "retrievers/chaindesk.cjs", "retrievers/chaindesk.js", "retrievers/chaindesk.d.ts", "retrievers/chaindesk.d.cts", "retrievers/databerry.cjs", "retrievers/databerry.js", "retrievers/databerry.d.ts", "retrievers/databerry.d.cts", "retrievers/dria.cjs", "retrievers/dria.js", "retrievers/dria.d.ts", "retrievers/dria.d.cts", "retrievers/metal.cjs", "retrievers/metal.js", "retrievers/metal.d.ts", "retrievers/metal.d.cts", "retrievers/remote.cjs", "retrievers/remote.js", "retrievers/remote.d.ts", "retrievers/remote.d.cts", "retrievers/supabase.cjs", "retrievers/supabase.js", "retrievers/supabase.d.ts", "retrievers/supabase.d.cts", "retrievers/tavily_search_api.cjs", "retrievers/tavily_search_api.js", "retrievers/tavily_search_api.d.ts", "retrievers/tavily_search_api.d.cts", "retrievers/vectara_summary.cjs", "retrievers/vectara_summary.js", "retrievers/vectara_summary.d.ts", "retrievers/vectara_summary.d.cts", "retrievers/vespa.cjs", "retrievers/vespa.js", "retrievers/vespa.d.ts", "retrievers/vespa.d.cts", "retrievers/zep.cjs", "retrievers/zep.js", "retrievers/zep.d.ts", "retrievers/zep.d.cts", "structured_query/chroma.cjs", "structured_query/chroma.js", "structured_query/chroma.d.ts", "structured_query/chroma.d.cts", "structured_query/qdrant.cjs", "structured_query/qdrant.js", "structured_query/qdrant.d.ts", "structured_query/qdrant.d.cts", "structured_query/supabase.cjs", "structured_query/supabase.js", "structured_query/supabase.d.ts", "structured_query/supabase.d.cts", "structured_query/vectara.cjs", "structured_query/vectara.js", "structured_query/vectara.d.ts", "structured_query/vectara.d.cts", "retrievers/zep_cloud.cjs", "retrievers/zep_cloud.js", "retrievers/zep_cloud.d.ts", "retrievers/zep_cloud.d.cts", "caches/cloudflare_kv.cjs", "caches/cloudflare_kv.js", "caches/cloudflare_kv.d.ts", "caches/cloudflare_kv.d.cts", "caches/ioredis.cjs", "caches/ioredis.js", 
"caches/ioredis.d.ts", "caches/ioredis.d.cts", "caches/momento.cjs", "caches/momento.js", "caches/momento.d.ts", "caches/momento.d.cts", "caches/upstash_redis.cjs", "caches/upstash_redis.js", "caches/upstash_redis.d.ts", "caches/upstash_redis.d.cts", "graphs/neo4j_graph.cjs", "graphs/neo4j_graph.js", "graphs/neo4j_graph.d.ts", "graphs/neo4j_graph.d.cts", "graphs/memgraph_graph.cjs", "graphs/memgraph_graph.js", "graphs/memgraph_graph.d.ts", "graphs/memgraph_graph.d.cts", "document_compressors/ibm.cjs", "document_compressors/ibm.js", "document_compressors/ibm.d.ts", "document_compressors/ibm.d.cts", "document_transformers/html_to_text.cjs", "document_transformers/html_to_text.js", "document_transformers/html_to_text.d.ts", "document_transformers/html_to_text.d.cts", "document_transformers/mozilla_readability.cjs", "document_transformers/mozilla_readability.js", "document_transformers/mozilla_readability.d.ts", "document_transformers/mozilla_readability.d.cts", "storage/cassandra.cjs", "storage/cassandra.js", "storage/cassandra.d.ts", "storage/cassandra.d.cts", "storage/convex.cjs", "storage/convex.js", "storage/convex.d.ts", "storage/convex.d.cts", "storage/ioredis.cjs", "storage/ioredis.js", "storage/ioredis.d.ts", "storage/ioredis.d.cts", "storage/upstash_redis.cjs", "storage/upstash_redis.js", "storage/upstash_redis.d.ts", "storage/upstash_redis.d.cts", "storage/vercel_kv.cjs", "storage/vercel_kv.js", "storage/vercel_kv.d.ts", "storage/vercel_kv.d.cts", "stores/doc/base.cjs", "stores/doc/base.js", "stores/doc/base.d.ts", "stores/doc/base.d.cts", "stores/doc/gcs.cjs", "stores/doc/gcs.js", "stores/doc/gcs.d.ts", "stores/doc/gcs.d.cts", "stores/doc/in_memory.cjs", "stores/doc/in_memory.js", "stores/doc/in_memory.d.ts", "stores/doc/in_memory.d.cts", "stores/message/astradb.cjs", "stores/message/astradb.js", "stores/message/astradb.d.ts", "stores/message/astradb.d.cts", "stores/message/cassandra.cjs", "stores/message/cassandra.js", "stores/message/cassandra.d.ts", 
"stores/message/cassandra.d.cts", "stores/message/cloudflare_d1.cjs", "stores/message/cloudflare_d1.js", "stores/message/cloudflare_d1.d.ts", "stores/message/cloudflare_d1.d.cts", "stores/message/convex.cjs", "stores/message/convex.js", "stores/message/convex.d.ts", "stores/message/convex.d.cts", "stores/message/dynamodb.cjs", "stores/message/dynamodb.js", "stores/message/dynamodb.d.ts", "stores/message/dynamodb.d.cts", "stores/message/firestore.cjs", "stores/message/firestore.js", "stores/message/firestore.d.ts", "stores/message/firestore.d.cts", "stores/message/file_system.cjs", "stores/message/file_system.js", "stores/message/file_system.d.ts", "stores/message/file_system.d.cts", "stores/message/in_memory.cjs", "stores/message/in_memory.js", "stores/message/in_memory.d.ts", "stores/message/in_memory.d.cts", "stores/message/ipfs_datastore.cjs", "stores/message/ipfs_datastore.js", "stores/message/ipfs_datastore.d.ts", "stores/message/ipfs_datastore.d.cts", "stores/message/ioredis.cjs", "stores/message/ioredis.js", "stores/message/ioredis.d.ts", "stores/message/ioredis.d.cts", "stores/message/momento.cjs", "stores/message/momento.js", "stores/message/momento.d.ts", "stores/message/momento.d.cts", "stores/message/mongodb.cjs", "stores/message/mongodb.js", "stores/message/mongodb.d.ts", "stores/message/mongodb.d.cts", "stores/message/planetscale.cjs", "stores/message/planetscale.js", "stores/message/planetscale.d.ts", "stores/message/planetscale.d.cts", "stores/message/postgres.cjs", "stores/message/postgres.js", "stores/message/postgres.d.ts", "stores/message/postgres.d.cts", "stores/message/redis.cjs", "stores/message/redis.js", "stores/message/redis.d.ts", "stores/message/redis.d.cts", "stores/message/upstash_redis.cjs", "stores/message/upstash_redis.js", "stores/message/upstash_redis.d.ts", "stores/message/upstash_redis.d.cts", "stores/message/xata.cjs", "stores/message/xata.js", "stores/message/xata.d.ts", "stores/message/xata.d.cts", 
"stores/message/zep_cloud.cjs", "stores/message/zep_cloud.js", "stores/message/zep_cloud.d.ts", "stores/message/zep_cloud.d.cts", "memory/chat_memory.cjs", "memory/chat_memory.js", "memory/chat_memory.d.ts", "memory/chat_memory.d.cts", "memory/motorhead_memory.cjs", "memory/motorhead_memory.js", "memory/motorhead_memory.d.ts", "memory/motorhead_memory.d.cts", "memory/zep.cjs", "memory/zep.js", "memory/zep.d.ts", "memory/zep.d.cts", "memory/zep_cloud.cjs", "memory/zep_cloud.js", "memory/zep_cloud.d.ts", "memory/zep_cloud.d.cts", "indexes/base.cjs", "indexes/base.js", "indexes/base.d.ts", "indexes/base.d.cts", "indexes/postgres.cjs", "indexes/postgres.js", "indexes/postgres.d.ts", "indexes/postgres.d.cts", "indexes/memory.cjs", "indexes/memory.js", "indexes/memory.d.ts", "indexes/memory.d.cts", "indexes/sqlite.cjs", "indexes/sqlite.js", "indexes/sqlite.d.ts", "indexes/sqlite.d.cts", "document_loaders/web/airtable.cjs", "document_loaders/web/airtable.js", "document_loaders/web/airtable.d.ts", "document_loaders/web/airtable.d.cts", "document_loaders/web/apify_dataset.cjs", "document_loaders/web/apify_dataset.js", "document_loaders/web/apify_dataset.d.ts", "document_loaders/web/apify_dataset.d.cts", "document_loaders/web/assemblyai.cjs", "document_loaders/web/assemblyai.js", "document_loaders/web/assemblyai.d.ts", "document_loaders/web/assemblyai.d.cts", "document_loaders/web/azure_blob_storage_container.cjs", "document_loaders/web/azure_blob_storage_container.js", "document_loaders/web/azure_blob_storage_container.d.ts", "document_loaders/web/azure_blob_storage_container.d.cts", "document_loaders/web/azure_blob_storage_file.cjs", "document_loaders/web/azure_blob_storage_file.js", "document_loaders/web/azure_blob_storage_file.d.ts", "document_loaders/web/azure_blob_storage_file.d.cts", "document_loaders/web/browserbase.cjs", "document_loaders/web/browserbase.js", "document_loaders/web/browserbase.d.ts", "document_loaders/web/browserbase.d.cts", 
"document_loaders/web/cheerio.cjs", "document_loaders/web/cheerio.js", "document_loaders/web/cheerio.d.ts", "document_loaders/web/cheerio.d.cts", "document_loaders/web/html.cjs", "document_loaders/web/html.js", "document_loaders/web/html.d.ts", "document_loaders/web/html.d.cts", "document_loaders/web/puppeteer.cjs", "document_loaders/web/puppeteer.js", "document_loaders/web/puppeteer.d.ts", "document_loaders/web/puppeteer.d.cts", "document_loaders/web/playwright.cjs", "document_loaders/web/playwright.js", "document_loaders/web/playwright.d.ts", "document_loaders/web/playwright.d.cts", "document_loaders/web/college_confidential.cjs", "document_loaders/web/college_confidential.js", "document_loaders/web/college_confidential.d.ts", "document_loaders/web/college_confidential.d.cts", "document_loaders/web/gitbook.cjs", "document_loaders/web/gitbook.js", "document_loaders/web/gitbook.d.ts", "document_loaders/web/gitbook.d.cts", "document_loaders/web/hn.cjs", "document_loaders/web/hn.js", "document_loaders/web/hn.d.ts", "document_loaders/web/hn.d.cts", "document_loaders/web/imsdb.cjs", "document_loaders/web/imsdb.js", "document_loaders/web/imsdb.d.ts", "document_loaders/web/imsdb.d.cts", "document_loaders/web/figma.cjs", "document_loaders/web/figma.js", "document_loaders/web/figma.d.ts", "document_loaders/web/figma.d.cts", "document_loaders/web/firecrawl.cjs", "document_loaders/web/firecrawl.js", "document_loaders/web/firecrawl.d.ts", "document_loaders/web/firecrawl.d.cts", "document_loaders/web/github.cjs", "document_loaders/web/github.js", "document_loaders/web/github.d.ts", "document_loaders/web/github.d.cts", "document_loaders/web/taskade.cjs", "document_loaders/web/taskade.js", "document_loaders/web/taskade.d.ts", "document_loaders/web/taskade.d.cts", "document_loaders/web/notionapi.cjs", "document_loaders/web/notionapi.js", "document_loaders/web/notionapi.d.ts", "document_loaders/web/notionapi.d.cts", "document_loaders/web/pdf.cjs", "document_loaders/web/pdf.js", 
"document_loaders/web/pdf.d.ts", "document_loaders/web/pdf.d.cts", "document_loaders/web/recursive_url.cjs", "document_loaders/web/recursive_url.js", "document_loaders/web/recursive_url.d.ts", "document_loaders/web/recursive_url.d.cts", "document_loaders/web/s3.cjs", "document_loaders/web/s3.js", "document_loaders/web/s3.d.ts", "document_loaders/web/s3.d.cts", "document_loaders/web/sitemap.cjs", "document_loaders/web/sitemap.js", "document_loaders/web/sitemap.d.ts", "document_loaders/web/sitemap.d.cts", "document_loaders/web/sonix_audio.cjs", "document_loaders/web/sonix_audio.js", "document_loaders/web/sonix_audio.d.ts", "document_loaders/web/sonix_audio.d.cts", "document_loaders/web/confluence.cjs", "document_loaders/web/confluence.js", "document_loaders/web/confluence.d.ts", "document_loaders/web/confluence.d.cts", "document_loaders/web/couchbase.cjs", "document_loaders/web/couchbase.js", "document_loaders/web/couchbase.d.ts", "document_loaders/web/couchbase.d.cts", "document_loaders/web/searchapi.cjs", "document_loaders/web/searchapi.js", "document_loaders/web/searchapi.d.ts", "document_loaders/web/searchapi.d.cts", "document_loaders/web/serpapi.cjs", "document_loaders/web/serpapi.js", "document_loaders/web/serpapi.d.ts", "document_loaders/web/serpapi.d.cts", "document_loaders/web/sort_xyz_blockchain.cjs", "document_loaders/web/sort_xyz_blockchain.js", "document_loaders/web/sort_xyz_blockchain.d.ts", "document_loaders/web/sort_xyz_blockchain.d.cts", "document_loaders/web/spider.cjs", "document_loaders/web/spider.js", "document_loaders/web/spider.d.ts", "document_loaders/web/spider.d.cts", "document_loaders/web/youtube.cjs", "document_loaders/web/youtube.js", "document_loaders/web/youtube.d.ts", "document_loaders/web/youtube.d.cts", "document_loaders/fs/chatgpt.cjs", "document_loaders/fs/chatgpt.js", "document_loaders/fs/chatgpt.d.ts", "document_loaders/fs/chatgpt.d.cts", "document_loaders/fs/srt.cjs", "document_loaders/fs/srt.js", "document_loaders/fs/srt.d.ts", 
"document_loaders/fs/srt.d.cts", "document_loaders/fs/pdf.cjs", "document_loaders/fs/pdf.js", "document_loaders/fs/pdf.d.ts", "document_loaders/fs/pdf.d.cts", "document_loaders/fs/docx.cjs", "document_loaders/fs/docx.js", "document_loaders/fs/docx.d.ts", "document_loaders/fs/docx.d.cts", "document_loaders/fs/epub.cjs", "document_loaders/fs/epub.js", "document_loaders/fs/epub.d.ts", "document_loaders/fs/epub.d.cts", "document_loaders/fs/csv.cjs", "document_loaders/fs/csv.js", "document_loaders/fs/csv.d.ts", "document_loaders/fs/csv.d.cts", "document_loaders/fs/notion.cjs", "document_loaders/fs/notion.js", "document_loaders/fs/notion.d.ts", "document_loaders/fs/notion.d.cts", "document_loaders/fs/obsidian.cjs", "document_loaders/fs/obsidian.js", "document_loaders/fs/obsidian.d.ts", "document_loaders/fs/obsidian.d.cts", "document_loaders/fs/unstructured.cjs", "document_loaders/fs/unstructured.js", "document_loaders/fs/unstructured.d.ts", "document_loaders/fs/unstructured.d.cts", "document_loaders/fs/openai_whisper_audio.cjs", "document_loaders/fs/openai_whisper_audio.js", "document_loaders/fs/openai_whisper_audio.d.ts", "document_loaders/fs/openai_whisper_audio.d.cts", "document_loaders/fs/pptx.cjs", "document_loaders/fs/pptx.js", "document_loaders/fs/pptx.d.ts", "document_loaders/fs/pptx.d.cts", "utils/convex.cjs", "utils/convex.js", "utils/convex.d.ts", "utils/convex.d.cts", "utils/event_source_parse.cjs", "utils/event_source_parse.js", "utils/event_source_parse.d.ts", "utils/event_source_parse.d.cts", "utils/cassandra.cjs", "utils/cassandra.js", "utils/cassandra.d.ts", "utils/cassandra.d.cts", "experimental/callbacks/handlers/datadog.cjs", "experimental/callbacks/handlers/datadog.js", "experimental/callbacks/handlers/datadog.d.ts", "experimental/callbacks/handlers/datadog.d.cts", "experimental/graph_transformers/llm.cjs", "experimental/graph_transformers/llm.js", "experimental/graph_transformers/llm.d.ts", "experimental/graph_transformers/llm.d.cts", 
"experimental/multimodal_embeddings/googlevertexai.cjs", "experimental/multimodal_embeddings/googlevertexai.js", "experimental/multimodal_embeddings/googlevertexai.d.ts", "experimental/multimodal_embeddings/googlevertexai.d.cts", "experimental/hubs/makersuite/googlemakersuitehub.cjs", "experimental/hubs/makersuite/googlemakersuitehub.js", "experimental/hubs/makersuite/googlemakersuitehub.d.ts", "experimental/hubs/makersuite/googlemakersuitehub.d.cts", "experimental/chat_models/ollama_functions.cjs", "experimental/chat_models/ollama_functions.js", "experimental/chat_models/ollama_functions.d.ts", "experimental/chat_models/ollama_functions.d.cts", "experimental/llms/chrome_ai.cjs", "experimental/llms/chrome_ai.js", "experimental/llms/chrome_ai.d.ts", "experimental/llms/chrome_ai.d.cts", "experimental/tools/pyinterpreter.cjs", "experimental/tools/pyinterpreter.js", "experimental/tools/pyinterpreter.d.ts", "experimental/tools/pyinterpreter.d.cts", "chains/graph_qa/cypher.cjs", "chains/graph_qa/cypher.js", "chains/graph_qa/cypher.d.ts", "chains/graph_qa/cypher.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-community/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-community
lc_public_repos/langchainjs/libs/langchain-community/src/index.ts
// Empty
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/graphs/graph_document.ts
import { Serializable } from "@langchain/core/load/serializable"; import { Document } from "@langchain/core/documents"; export class Node extends Serializable { id: string | number; type: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any properties: Record<string, any>; lc_namespace = ["langchain", "graph", "document_node"]; constructor({ id, type = "Node", properties = {}, }: { id: string | number; type: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any properties?: Record<string, any>; }) { super(); this.id = id; this.type = type; this.properties = properties; } } export class Relationship extends Serializable { source: Node; target: Node; type: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any properties: Record<string, any>; lc_namespace = ["langchain", "graph", "document_relationship"]; constructor({ source, target, type, properties = {}, }: { source: Node; target: Node; type: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any properties?: Record<string, any>; }) { super(); this.source = source; this.target = target; this.type = type; this.properties = properties; } } export class GraphDocument extends Serializable { nodes: Node[]; relationships: Relationship[]; source: Document; lc_namespace = ["langchain", "graph", "graph_document"]; constructor({ nodes, relationships, source, }: { nodes: Node[]; relationships: Relationship[]; source: Document; }) { super({ nodes, relationships, source, }); this.nodes = nodes; this.relationships = relationships; this.source = source; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/graphs/memgraph_graph.ts
import { Neo4jGraph } from "./neo4j_graph.js"; interface MemgraphGraphConfig { url: string; username: string; password: string; database?: string; } const rawSchemaQuery = ` CALL llm_util.schema("raw") YIELD * RETURN * `; /** * @security *Security note*: Make sure that the database connection uses credentials * that are narrowly-scoped to only include necessary permissions. * Failure to do so may result in data corruption or loss, since the calling * code may attempt commands that would result in deletion, mutation * of data if appropriately prompted or reading sensitive data if such * data is present in the database. * The best way to guard against such negative outcomes is to (as appropriate) * limit the permissions granted to the credentials used with this tool. * For example, creating read only users for the database is a good way to * ensure that the calling code cannot mutate or delete data. * * @link See https://js.langchain.com/docs/security for more information. */ class MemgraphGraph extends Neo4jGraph { constructor({ url, username, password, database = "memgraph", }: MemgraphGraphConfig) { super({ url, username, password, database }); } static async initialize(config: MemgraphGraphConfig): Promise<MemgraphGraph> { const graph = new MemgraphGraph(config); try { await graph.verifyConnectivity(); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (error: any) { console.error("Failed to verify connection."); } try { await graph.refreshSchema(); console.debug("Schema refreshed successfully."); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (error: any) { throw new Error(error.message); } return graph; } async refreshSchema() { const rawSchemaQueryResult = await this.query(rawSchemaQuery); if (rawSchemaQueryResult?.[0]?.schema) { const rawSchema = rawSchemaQueryResult?.[0]?.schema; this.structuredSchema = { nodeProps: rawSchema.node_props, relProps: rawSchema.rel_props, relationships: rawSchema.relationships, }; // 
Format node properties const formattedNodeProps = Object.entries(rawSchema.node_props) .map(([nodeName, properties]) => { const propertiesStr = JSON.stringify(properties); return `Node name: '${nodeName}', Node properties: ${propertiesStr}`; }) .join("\n"); // Format relationship properties const formattedRelProps = Object.entries(rawSchema.rel_props) .map(([relationshipName, properties]) => { const propertiesStr = JSON.stringify(properties); return `Relationship name: '${relationshipName}', Relationship properties: ${propertiesStr}`; }) .join("\n"); // Format relationships const formattedRels = rawSchema.relationships ?.map( (el: { end: string; start: string; type: string }) => `(:${el.start})-[:${el.type}]->(:${el.end})` ) .join("\n"); // Combine all formatted elements into a single string this.schema = [ "Node properties are the following:", formattedNodeProps, "Relationship properties are the following:", formattedRelProps, "The relationships are the following:", formattedRels, ].join("\n"); } } } export { MemgraphGraph };
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/graphs/neo4j_graph.ts
import neo4j, { RoutingControl } from "neo4j-driver";
import { insecureHash } from "@langchain/core/utils/hash";
import { GraphDocument } from "./graph_document.js";

// eslint-disable-next-line @typescript-eslint/no-explicit-any
type Any = any;

/** Connection and behavior options for a Neo4j instance. */
interface Neo4jGraphConfig {
  url: string;
  username: string;
  password: string;
  /** Target database name; defaults to "neo4j". */
  database?: string;
  /** Per-query transaction timeout, in milliseconds. */
  timeoutMs?: number;
  /** When true, refreshSchema also samples value statistics per property. */
  enhancedSchema?: boolean;
}

/** Machine-readable schema built by refreshSchema(). */
interface StructuredSchema {
  nodeProps: { [key: NodeType["labels"]]: NodeType["properties"] };
  relProps: { [key: RelType["type"]]: RelType["properties"] };
  relationships: PathType[];
  metadata?: {
    // Raw rows from "SHOW CONSTRAINTS" / "SHOW INDEXES YIELD *".
    constraint: Record<string, Any>;
    index: Record<string, Any>;
  };
}

/** Options for addGraphDocuments(). */
export interface AddGraphDocumentsConfig {
  /** Also tag every imported node with the shared __Entity__ label. */
  baseEntityLabel?: boolean;
  /** Also create a Document node linked via MENTIONS to imported nodes. */
  includeSource?: boolean;
}

export type NodeType = {
  labels: string;
  properties: { property: string; type: string }[];
};
export type RelType = {
  type: string;
  properties: { property: string; type: string }[];
};
export type PathType = { start: string; type: string; end: string };

export const BASE_ENTITY_LABEL = "__Entity__";
// Max distinct string values to list verbatim in the enhanced schema.
const DISTINCT_VALUE_LIMIT = 10;
// Lists larger than this are omitted from the enhanced schema output.
const LIST_LIMIT = 128;
// Below this node/rel count, property stats are computed exhaustively;
// above it, only a 5-row sample is inspected.
const EXHAUSTIVE_SEARCH_LIMIT = 10000;

// Labels/relationships created by Neo4j Bloom that should never appear
// in the reported schema.
const EXCLUDED_LABELS = ["Bloom_Perspective", "Bloom_Scene"];
const EXCLUDED_RELS = ["Bloom_HAS_SCENE"];

// Prefix fragment merged in front of the node-import query when
// includeSource is set; upserts the source Document node as `d`.
const INCLUDE_DOCS_QUERY = `
MERGE (d:Document {id:$document.metadata.id})
SET d.text = $document.pageContent
SET d += $document.metadata
WITH d
`;

// APOC-based introspection: node properties per label.
const NODE_PROPERTIES_QUERY = `
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "node"
  AND NOT label IN $EXCLUDED_LABELS
WITH label AS nodeLabels, collect({property:property, type:type}) AS properties
RETURN {labels: nodeLabels, properties: properties} AS output
`;

// APOC-based introspection: relationship properties per type.
const REL_PROPERTIES_QUERY = `
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship"
  AND NOT label in $EXCLUDED_LABELS
WITH label AS nodeLabels, collect({property:property, type:type}) AS properties
RETURN {type: nodeLabels, properties: properties} AS output
`;

// APOC-based introspection: (start)-[type]->(end) triples.
const REL_QUERY = `
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP" AND elementType = "node"
UNWIND other AS other_node
WITH * WHERE NOT label IN $EXCLUDED_LABELS
  AND NOT other_node IN $EXCLUDED_LABELS
RETURN {start: label, type: property, end: toString(other_node)} AS output
`;

// True when a property has more distinct values than `limit`.
// NOTE(review): the default of 11 means a missing distinct_count is treated
// as "more than the limit", and the `!== undefined` check is then always
// true because the default kicks in — confirm this is intentional.
function isDistinctMoreThanLimit(
  distinct_count = 11,
  limit = DISTINCT_VALUE_LIMIT
): boolean {
  return distinct_count !== undefined && distinct_count > limit;
}

// Flattens newlines/carriage returns out of sampled string values.
// NOTE(review): String.prototype.replace with a string pattern replaces only
// the FIRST occurrence; values containing multiple newlines keep the rest.
function cleanStringValues(text: string) {
  return text.replace("\n", " ").replace("\r", " ");
}

/**
 * Renders the structured schema as a single human/LLM-readable string.
 *
 * When `isEnhanced` is true, per-property value statistics (examples,
 * min/max, available options) collected by enhancedSchemaCypher() are
 * included; otherwise only property names and types are listed.
 */
function formatSchema(
  schema: Record<string, Any>,
  isEnhanced: boolean
): string {
  let formattedNodeProps: string[] = [];
  let formattedRelProps: string[] = [];

  if (isEnhanced) {
    // Enhanced formatting for nodes: one markdown-ish bullet per label,
    // sub-bullets per property with sampled statistics.
    for (const [nodeType, properties] of Object.entries(schema.nodeProps)) {
      formattedNodeProps.push(`- **${nodeType}**`);
      for (const prop of properties as Array<Record<string, Any>>) {
        let example = "";
        if (prop.type === "STRING") {
          if (prop.values.length > 0) {
            if (isDistinctMoreThanLimit(prop.distinct_count)) {
              // Too many distinct values — show just one example.
              example = `Example: ${cleanStringValues(prop.values[0])}`;
            } else {
              // Few enough distinct values — enumerate them all.
              example = `Available options: ${prop.values
                .map(cleanStringValues)
                .join(", ")}`;
            }
          }
        } else if (
          ["INTEGER", "FLOAT", "DATE", "DATE_TIME", "LOCAL_DATE_TIME"].includes(
            prop.type
          )
        ) {
          if (prop.min !== undefined) {
            example = `Min: ${prop.min}, Max: ${prop.max}`;
          } else {
            if (prop.values.length > 0) {
              example = `Example: ${prop.values[0]}`;
            }
          }
        } else if (prop.type === "LIST") {
          // Skip lists with no size info or that are too large to be useful.
          if (!prop.min_size || prop.min_size > LIST_LIMIT) {
            continue;
          }
          example = `Min Size: ${prop.min_size}, Max Size: ${prop.max_size}`;
        }
        formattedNodeProps.push(
          `  - \`${prop.property}\`: ${prop.type} ${example}`
        );
      }
    }

    // Enhanced formatting for relationships (mirrors the node loop).
    // NOTE(review): this branch uses `prop.min` / `prop.values` truthiness
    // and skips the `!prop.min_size` guard, unlike the node branch above —
    // confirm the asymmetry is intentional.
    for (const [relType, properties] of Object.entries(schema.relProps)) {
      formattedRelProps.push(`- **${relType}**`);
      for (const prop of properties as Array<Record<string, Any>>) {
        let example = "";
        if (prop.type === "STRING") {
          if (prop.values.length > 0) {
            if (isDistinctMoreThanLimit(prop.distinct_count)) {
              example = `Example: ${cleanStringValues(prop.values[0])}`;
            } else {
              example = `Available options: ${prop.values
                .map(cleanStringValues)
                .join(", ")}`;
            }
          }
        } else if (
          ["INTEGER", "FLOAT", "DATE", "DATE_TIME", "LOCAL_DATE_TIME"].includes(
            prop.type
          )
        ) {
          if (prop.min) {
            example = `Min: ${prop.min}, Max: ${prop.max}`;
          } else {
            if (prop.values) {
              example = `Example: ${prop.values[0]}`;
            }
          }
        } else if (prop.type === "LIST") {
          if (prop.min_size > LIST_LIMIT) {
            continue;
          }
          example = `Min Size: ${prop.min_size}, Max Size: ${prop.max_size}`;
        }
        formattedRelProps.push(
          `  - \`${prop.property}\`: ${prop.type} ${example}`
        );
      }
    }
  } else {
    // Plain formatting: "Label {prop: TYPE, ...}" per node label.
    formattedNodeProps = Object.entries(schema.nodeProps).map(
      ([key, value]: [string, Any]) => {
        const propsStr = value
          .map((prop: Record<string, Any>) => `${prop.property}: ${prop.type}`)
          .join(", ");
        return `${key} {${propsStr}}`;
      }
    );

    // Plain formatting per relationship type.
    formattedRelProps = Object.entries(schema.relProps).map(
      ([key, value]: [string, Any]) => {
        const propsStr = value
          .map((prop: Record<string, Any>) => `${prop.property}: ${prop.type} `)
          .join(", ");
        return `${key} {${propsStr} } `;
      }
    );
  }

  // Render relationship paths.
  const formattedRels = schema.relationships.map(
    (el: Record<string, string>) =>
      `(: ${el.start}) - [: ${el.type}] -> (:${el.end})`
  );

  return [
    "Node properties are the following:",
    formattedNodeProps?.join(", "),
    "Relationship properties are the following:",
    formattedRelProps?.join(", "),
    "The relationships are the following:",
    formattedRels?.join(", "),
  ].join("\n");
}

/**
 * LangChain graph wrapper around the official Neo4j JavaScript driver.
 * Provides querying, APOC-based schema introspection and graph-document
 * import.
 *
 * @security *Security note*: Make sure that the database connection uses credentials
 * that are narrowly-scoped to only include necessary permissions.
 * Failure to do so may result in data corruption or loss, since the calling
 * code may attempt commands that would result in deletion, mutation
 * of data if appropriately prompted or reading sensitive data if such
 * data is present in the database.
 * The best way to guard against such negative outcomes is to (as appropriate)
 * limit the permissions granted to the credentials used with this tool.
 * For example, creating read only users for the database is a good way to
 * ensure that the calling code cannot mutate or delete data.
 *
 * @link See https://js.langchain.com/docs/security for more information.
 */
export class Neo4jGraph {
  private driver: neo4j.Driver;

  private database: string;

  private timeoutMs: number | undefined;

  private enhancedSchema: boolean;

  // Human-readable schema string, rebuilt by refreshSchema().
  protected schema = "";

  // Machine-readable schema, rebuilt by refreshSchema().
  protected structuredSchema: StructuredSchema = {
    nodeProps: {},
    relProps: {},
    relationships: [],
    metadata: {
      constraint: {},
      index: {},
    },
  };

  constructor({
    url,
    username,
    password,
    database = "neo4j",
    timeoutMs,
    enhancedSchema = false,
  }: Neo4jGraphConfig) {
    try {
      this.driver = neo4j.driver(url, neo4j.auth.basic(username, password));
      this.database = database;
      this.timeoutMs = timeoutMs;
      this.enhancedSchema = enhancedSchema;
    } catch (error) {
      // Driver construction failed (e.g. malformed URL); surface a
      // friendlier message. The original error is intentionally dropped.
      throw new Error(
        "Could not create a Neo4j driver instance. Please check the connection details."
      );
    }
  }

  /**
   * Creates a graph instance, verifies connectivity and loads the schema.
   *
   * @throws A descriptive error when the APOC plugin is missing; otherwise
   *   rethrows the original refresh failure.
   */
  static async initialize(config: Neo4jGraphConfig): Promise<Neo4jGraph> {
    const graph = new Neo4jGraph(config);
    await graph.verifyConnectivity();
    try {
      await graph.refreshSchema();
    } catch (error: Any) {
      if (error.code === "Neo.ClientError.Procedure.ProcedureNotFound") {
        throw new Error(
          "Could not use APOC procedures. Please ensure the APOC plugin is installed in Neo4j and that 'apoc.meta.data()' is allowed in Neo4j configuration."
        );
      }
      throw error;
    } finally {
      // NOTE(review): a `finally` block also runs when refreshSchema threw,
      // so this success message is printed even on failure — probably
      // intended to be at the end of the `try` block instead.
      console.log("Schema refreshed successfully.");
    }
    return graph;
  }

  /** Returns the human-readable schema string built by refreshSchema(). */
  getSchema(): string {
    return this.schema;
  }

  /** Returns the machine-readable schema built by refreshSchema(). */
  getStructuredSchema() {
    return this.structuredSchema;
  }

  /**
   * Runs a Cypher query and returns the records as plain objects, with all
   * Neo4j integers converted to strings (see toObjects/itemIntToString).
   *
   * @param query - The Cypher statement.
   * @param params - Named query parameters.
   * @param routing - WRITE (default) or READ routing for the cluster.
   */
  async query<RecordShape extends Record<string, Any> = Record<string, Any>>(
    query: string,
    params: Record<string, Any> = {},
    routing: RoutingControl = neo4j.routing.WRITE
  ): Promise<RecordShape[]> {
    const result = await this.driver.executeQuery<RecordShape>(query, params, {
      database: this.database,
      routing,
      transactionConfig: { timeout: this.timeoutMs },
    });
    return toObjects<RecordShape>(result.records);
  }

  /** Throws if the server cannot be reached. */
  async verifyConnectivity() {
    await this.driver.getServerInfo();
  }

  /**
   * Rebuilds both schema representations via APOC introspection queries.
   * With enhancedSchema enabled, additionally samples per-property value
   * statistics (exhaustively below EXHAUSTIVE_SEARCH_LIMIT entities, from a
   * 5-row sample above it).
   */
  async refreshSchema() {
    // Node properties per label (Bloom + __Entity__ labels excluded).
    const nodeProperties = (
      await this.query<{ output: NodeType }>(NODE_PROPERTIES_QUERY, {
        EXCLUDED_LABELS: EXCLUDED_LABELS.concat([BASE_ENTITY_LABEL]),
      })
    )?.map((el) => el.output);

    // Relationship properties per type (Bloom relationships excluded).
    const relationshipsProperties = (
      await this.query<{ output: RelType }>(REL_PROPERTIES_QUERY, {
        EXCLUDED_LABELS: EXCLUDED_RELS,
      })
    )?.map((el) => el.output);

    // (start)-[type]->(end) triples.
    const relationships: PathType[] = (
      await this.query<{ output: PathType }>(REL_QUERY, {
        EXCLUDED_LABELS: EXCLUDED_LABELS.concat([BASE_ENTITY_LABEL]),
      })
    )?.map((el) => el.output);

    const constraint = await this.query("SHOW CONSTRAINTS");
    const index = await this.query("SHOW INDEXES YIELD *");

    // Index the raw lists by label/type for quick lookup.
    this.structuredSchema = {
      nodeProps: Object.fromEntries(
        nodeProperties?.map((el) => [el.labels, el.properties]) || []
      ),
      relProps: Object.fromEntries(
        relationshipsProperties?.map((el) => [el.type, el.properties]) || []
      ),
      relationships: relationships || [],
      metadata: {
        constraint,
        index,
      },
    };

    if (this.enhancedSchema) {
      // Entity counts drive the exhaustive-vs-sampled decision below.
      const schemaCounts = await this.query(
        `CALL apoc.meta.graphSample() YIELD nodes, relationships ` +
          `RETURN nodes, [rel in relationships | {name: apoc.any.property(rel, 'type'), count: apoc.any.property(rel, 'count')}] AS relationships`
      );
      // Merge sampled value statistics into each node property entry.
      for (const node of schemaCounts[0].nodes) {
        // Skip bloom labels
        if (EXCLUDED_LABELS.includes(node.name)) {
          continue;
        }
        const nodeProps = this.structuredSchema.nodeProps[node.name];
        if (!nodeProps) {
          // The node has no properties
          continue;
        }
        const enhancedCypher = await this.enhancedSchemaCypher(
          node.name,
          nodeProps,
          node.count < EXHAUSTIVE_SEARCH_LIMIT
        );
        const enhancedInfoPromise = await this.query(enhancedCypher);
        const enhancedInfo = enhancedInfoPromise[0].output;
        for (const prop of nodeProps) {
          if (enhancedInfo[prop.property]) {
            Object.assign(prop, enhancedInfo[prop.property]);
          }
        }
      }
      // Merge sampled value statistics into each relationship property entry.
      for (const rel of schemaCounts[0].relationships) {
        // Skip bloom labels
        if (EXCLUDED_RELS.includes(rel.name)) {
          continue;
        }
        const relProps = this.structuredSchema.relProps[rel.name];
        if (!relProps) {
          // The rel has no properties
          continue;
        }
        const enhancedCypher = await this.enhancedSchemaCypher(
          rel.name,
          relProps,
          rel.count < EXHAUSTIVE_SEARCH_LIMIT,
          true
        );
        const enhancedInfoPromise = await this.query(enhancedCypher);
        const enhancedInfo = enhancedInfoPromise[0].output;
        for (const prop of relProps) {
          if (prop.property in enhancedInfo) {
            Object.assign(prop, enhancedInfo[prop.property]);
          }
        }
      }
    }
    // Combine all formatted elements into a single string
    this.schema = formatSchema(this.structuredSchema, this.enhancedSchema);
  }

  /**
   * Builds (and, in one sampled-string case, partially executes) the Cypher
   * that gathers per-property statistics for one label or relationship type.
   *
   * @param labelOrType - Node label or relationship type to inspect.
   * @param properties - Known properties (name + type) of that entity.
   * @param exhaustive - Scan all entities (true) or only a 5-row sample.
   * @param isRelationship - Match `()-[n:X]->()` instead of `(n:X)`.
   * @returns The Cypher query string producing an `output` map keyed by
   *   property name.
   */
  async enhancedSchemaCypher(
    labelOrType: string,
    properties: { property: string; type: string }[],
    exhaustive: boolean,
    isRelationship = false
  ) {
    let matchClause = isRelationship
      ? `MATCH ()-[n:\`${labelOrType}\`]->()`
      : `MATCH (n:\`${labelOrType}\`)`;

    const withClauses: string[] = [];
    const returnClauses: string[] = [];
    const outputDict: { [key: string]: string } = {};

    if (exhaustive) {
      for (const prop of properties) {
        const propName = prop.property;
        const propType = prop.type;
        if (propType === "STRING") {
          // Collect (truncated) distinct values and their count.
          withClauses.push(
            `collect(distinct substring(n.\`${propName}\`, 0, 50)) AS \`${propName}_values\``
          );
          returnClauses.push(
            `values: \`${propName}_values\`[..${DISTINCT_VALUE_LIMIT}], distinct_count: size(\`${propName}_values\`)`
          );
        } else if (
          ["INTEGER", "FLOAT", "DATE", "DATE_TIME", "LOCAL_DATE_TIME"].includes(
            propType
          )
        ) {
          // Min / max / distinct count for ordered scalar types.
          withClauses.push(`min(n.\`${propName}\`) AS \`${propName}_min\``);
          withClauses.push(`max(n.\`${propName}\`) AS \`${propName}_max\``);
          withClauses.push(
            `count(distinct n.\`${propName}\`) AS \`${propName}_distinct\``
          );
          returnClauses.push(
            `min: toString(\`${propName}_min\`), max: toString(\`${propName}_max\`), distinct_count: \`${propName}_distinct\``
          );
        } else if (propType === "LIST") {
          withClauses.push(
            `min(size(n.\`${propName}\`)) AS \`${propName}_size_min\`, max(size(n.\`${propName}\`)) AS \`${propName}_size_max\``
          );
          returnClauses.push(
            `min_size: \`${propName}_size_min\`, max_size: \`${propName}_size_max\``
          );
        } else if (["BOOLEAN", "POINT", "DURATION"].includes(propType)) {
          // No useful statistics for these types.
          continue;
        }
        // The clause pushed for this property becomes its output fragment.
        outputDict[propName] = `{${returnClauses.pop()}}`;
      }
    } else {
      // Large entity set: inspect only a 5-row sample.
      matchClause += ` WITH n LIMIT 5`;
      for (const prop of properties) {
        const propName = prop.property;
        const propType = prop.type;
        // Look for a RANGE index on (labelOrType, propName); its metadata
        // can provide exact distinct values without scanning.
        // NOTE(review): `.filter()` always returns an array, so `propIndex`
        // is only ever falsy when `metadata` itself is undefined — the
        // `!propIndex` check below likely meant `propIndex.length === 0`;
        // also `propIndex.length` would throw if metadata is undefined.
        // Confirm against the Python implementation.
        const propIndex = this.structuredSchema?.metadata?.index.filter(
          (el: Any) =>
            el.label === labelOrType &&
            el.properties[0] === propName &&
            el.type === "RANGE"
        );
        if (propType === "STRING") {
          if (
            propIndex.length > 0 &&
            propIndex[0].size > 0 &&
            propIndex[0].distinctValues <= DISTINCT_VALUE_LIMIT
          ) {
            // Few distinct values and an index exists: fetch them directly.
            const distinctValuesPromise = await this.query(
              `CALL apoc.schema.properties.distinct('${labelOrType}', '${propName}') YIELD value`
            );
            const distinctValues = distinctValuesPromise[0].value;
            returnClauses.push(
              `values: ${distinctValues}, distinct_count: ${distinctValues.length}`
            );
          } else {
            withClauses.push(
              `collect(distinct substring(n.\`${propName}\`, 0, 50)) AS \`${propName}_values\``
            );
            returnClauses.push(`values: ${propName}_values`);
          }
        } else if (
          ["INTEGER", "FLOAT", "DATE", "DATE_TIME", "LOCAL_DATE_TIME"].includes(
            propType
          )
        ) {
          if (!propIndex) {
            // No index metadata: just sample a few stringified values.
            withClauses.push(
              `collect(distinct toString(n.\`${propName}\`)) AS \`${propName}_values\``
            );
            returnClauses.push(`values: ${propName}_values`);
          } else {
            withClauses.push(`min(n.\`${propName}\`) AS \`${propName}_min\``);
            withClauses.push(`max(n.\`${propName}\`) AS \`${propName}_max\``);
            withClauses.push(
              `count(distinct n.\`${propName}\`) AS \`${propName}_distinct\``
            );
            returnClauses.push(
              `min: toString(\`${propName}_min\`), max: toString(\`${propName}_max\`), distinct_count: \`${propName}_distinct\``
            );
          }
        } else if (propType === "LIST") {
          withClauses.push(
            `min(size(n.\`${propName}\`)) AS \`${propName}_size_min\`, max(size(n.\`${propName}\`)) AS \`${propName}_size_max\``
          );
          returnClauses.push(
            `min_size: \`${propName}_size_min\`, max_size: \`${propName}_size_max\``
          );
        } else if (["BOOLEAN", "POINT", "DURATION"].includes(propType)) {
          continue;
        }
        outputDict[propName] = `{${returnClauses.pop()}}`;
      }
    }
    // Assemble MATCH / WITH / RETURN into the final query.
    const withClause = `WITH ${withClauses.join(", ")}`;
    const returnClause = `RETURN {${Object.entries(outputDict)
      .map(([k, v]) => `\`${k}\`: ${v}`)
      .join(", ")}} AS output`;
    const cypherQuery = [matchClause, withClause, returnClause].join("\n");
    return cypherQuery;
  }

  /**
   * Imports LangChain GraphDocuments into the database via APOC merge
   * procedures.
   *
   * @param graphDocuments - Documents whose nodes/relationships to upsert.
   * @param config - See AddGraphDocumentsConfig. With baseEntityLabel set,
   *   a uniqueness constraint on __Entity__.id is created on first use.
   */
  async addGraphDocuments(
    graphDocuments: GraphDocument[],
    config: AddGraphDocumentsConfig = {}
  ): Promise<void> {
    const { baseEntityLabel } = config;
    if (baseEntityLabel) {
      // Check the cached constraint metadata for an existing
      // __Entity__(id) uniqueness constraint before creating one.
      const constraintExists =
        this.structuredSchema?.metadata?.constraint?.some(
          (el: Any) =>
            JSON.stringify(el.labelsOrTypes) ===
              JSON.stringify([BASE_ENTITY_LABEL]) &&
            JSON.stringify(el.properties) === JSON.stringify(["id"])
        ) ?? false;
      if (!constraintExists) {
        await this.query(`
          CREATE CONSTRAINT IF NOT EXISTS FOR (b:${BASE_ENTITY_LABEL})
          REQUIRE b.id IS UNIQUE;
        `);
        // Pick up the new constraint in the cached metadata.
        await this.refreshSchema();
      }
    }
    const nodeImportQuery = getNodeImportQuery(config);
    const relImportQuery = getRelImportQuery(config);
    for (const document of graphDocuments) {
      if (!document.source.metadata.id) {
        // Derive a stable id from the page content when none is provided.
        document.source.metadata.id = insecureHash(document.source.pageContent);
      }
      // Import nodes
      await this.query(nodeImportQuery, {
        data: document.nodes.map((el: Any) => ({ ...el })),
        document: { ...document.source },
      });
      // Import relationships
      await this.query(relImportQuery, {
        data: document.relationships.map((el: Any) => ({
          source: el.source.id,
          source_label: el.source.type,
          target: el.target.id,
          target_label: el.target.type,
          // Normalize relationship types to UPPER_SNAKE_CASE.
          type: el.type.replace(/ /g, "_").toUpperCase(),
          properties: el.properties,
        })),
      });
    }
  }

  /** Closes the underlying driver and all its connections. */
  async close() {
    await this.driver.close();
  }
}

// Builds the node-import Cypher, optionally prefixed with the Document
// upsert and MENTIONS link (includeSource) and/or anchored on the shared
// __Entity__ label (baseEntityLabel).
function getNodeImportQuery({
  baseEntityLabel,
  includeSource,
}: AddGraphDocumentsConfig): string {
  if (baseEntityLabel) {
    return `
      ${includeSource ? INCLUDE_DOCS_QUERY : ""}
      UNWIND $data AS row
      MERGE(source: \`${BASE_ENTITY_LABEL}\` {id: row.id})
      SET source += row.properties
      ${includeSource ? "MERGE (d)-[:MENTIONS]->(source)" : ""}
      WITH source, row
      CALL apoc.create.addLabels(source, [row.type]) YIELD node
      RETURN distinct 'done' AS result
    `;
  } else {
    return `
      ${includeSource ? INCLUDE_DOCS_QUERY : ""}
      UNWIND $data AS row
      CALL apoc.merge.node([row.type], {id: row.id}, row.properties, {})
      YIELD node
      ${includeSource ? "MERGE (d)-[:MENTIONS]->(node)" : ""}
      RETURN distinct 'done' AS result
    `;
  }
}

// Builds the relationship-import Cypher; with baseEntityLabel the endpoints
// are matched via __Entity__ ids, otherwise nodes are merged by their own
// label + id first.
function getRelImportQuery({
  baseEntityLabel,
}: AddGraphDocumentsConfig): string {
  if (baseEntityLabel) {
    return `
      UNWIND $data AS row
      MERGE (source:\`${BASE_ENTITY_LABEL}\` {id: row.source})
      MERGE (target:\`${BASE_ENTITY_LABEL}\` {id: row.target})
      WITH source, target, row
      CALL apoc.merge.relationship(source, row.type, {}, row.properties, target)
      YIELD rel
      RETURN distinct 'done'
    `;
  } else {
    return `
      UNWIND $data AS row
      CALL apoc.merge.node([row.source_label], {id: row.source}, {}, {})
      YIELD node as source
      CALL apoc.merge.node([row.target_label], {id: row.target}, {}, {})
      YIELD node as target
      CALL apoc.merge.relationship(source, row.type, {}, row.properties, target)
      YIELD rel
      RETURN distinct 'done'
    `;
  }
}

// Converts driver Records to plain objects, recursively stringifying
// Neo4j Integer values (they may exceed JS number precision).
function toObjects<
  RecordShape extends Record<string, Any> = Record<string, Any>
>(records: neo4j.Record<RecordShape>): RecordShape[] {
  return records.map((record: Any) => {
    const rObj = record.toObject();
    const out: Partial<RecordShape> = {};
    Object.keys(rObj).forEach((key: keyof RecordShape) => {
      out[key] = itemIntToString(rObj[key]);
    });
    return out as RecordShape;
  });
}

// Recursively converts a single value: Neo4j Integers become strings,
// arrays and objects are walked, primitives and null pass through.
// NOTE(review): values of unhandled types (e.g. functions) fall off the end
// and yield undefined — confirm that is acceptable.
function itemIntToString(item: Any): Any {
  if (neo4j.isInt(item)) return item.toString();
  if (Array.isArray(item)) return item.map((ii) => itemIntToString(ii));
  if (["number", "string", "boolean"].indexOf(typeof item) !== -1) return item;
  if (item === null) return item;
  if (typeof item === "object") return objIntToString(item);
}

// Converts an object-like value after unwrapping driver graph types.
function objIntToString(obj: Any) {
  const entry = extractFromNeoObjects(obj);
  let newObj: Any = null;
  if (Array.isArray(entry)) {
    newObj = entry.map((item) => itemIntToString(item));
  } else if (entry !== null && typeof entry === "object") {
    newObj = {};
    Object.keys(entry).forEach((key) => {
      newObj[key] = itemIntToString(entry[key]);
    });
  }
  return newObj;
}

// Unwraps driver graph types: Node/Relationship -> their properties,
// Path -> flattened rows; anything else is returned unchanged.
function extractFromNeoObjects(obj: Any) {
  if (
    // eslint-disable-next-line
    obj instanceof (neo4j.types.Node as any) ||
    // eslint-disable-next-line
    obj instanceof (neo4j.types.Relationship as any)
  ) {
    return obj.properties;
    // eslint-disable-next-line
  } else if (obj instanceof (neo4j.types.Path as any)) {
    // eslint-disable-next-line
    return [].concat.apply<any[], any[], any[]>([], extractPathForRows(obj));
  }
  return obj;
}

// Expands a Path into [start, relationship, end] triples per segment,
// dropping nulls (a zero-length path is modeled as a single segment with
// a null end).
const extractPathForRows = (path: neo4j.Path) => {
  let { segments } = path;
  // Zero length path. No relationship, end === start
  if (!Array.isArray(path.segments) || path.segments.length < 1) {
    segments = [{ ...path, end: null } as Any];
  }
  return segments.map((segment: Any) =>
    [
      objIntToString(segment.start),
      objIntToString(segment.relationship),
      objIntToString(segment.end),
    ].filter((part) => part !== null)
  );
};
0
lc_public_repos/langchainjs/libs/langchain-community/src/graphs
lc_public_repos/langchainjs/libs/langchain-community/src/graphs/tests/neo4j_graph.int.test.ts
/* eslint-disable no-process-env */
import { test } from "@jest/globals";
import { Document } from "@langchain/core/documents";

import { BASE_ENTITY_LABEL, Neo4jGraph } from "../neo4j_graph.js";
import { GraphDocument, Relationship, Node } from "../graph_document.js";

// Minimal graph document: two nodes joined by a single REL relationship,
// sourced from one plain-text document. Reused by all import tests.
const TEST_DATA = [
  new GraphDocument({
    nodes: [
      new Node({ id: "foo", type: "foo" }),
      new Node({ id: "bar", type: "bar" }),
    ],
    relationships: [
      new Relationship({
        source: new Node({ id: "foo", type: "foo" }),
        target: new Node({ id: "bar", type: "bar" }),
        type: "REL",
      }),
    ],
    source: new Document({ pageContent: "source document" }),
  }),
];

// Integration suite against a live Neo4j instance (skipped by default).
// Requires NEO4J_URI / NEO4J_USERNAME / NEO4J_PASSWORD in the environment.
describe.skip("Neo4j Graph Tests", () => {
  const url = process.env.NEO4J_URI as string;
  const username = process.env.NEO4J_USERNAME as string;
  const password = process.env.NEO4J_PASSWORD as string;

  let graph: Neo4jGraph;

  // Fresh connection per test; closed again in afterEach.
  beforeEach(async () => {
    graph = await Neo4jGraph.initialize({ url, username, password });
  });
  afterEach(async () => {
    await graph.close();
  });

  test("Schema generation works correctly", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    // Clear the database
    await graph.query("MATCH (n) DETACH DELETE n");

    await graph.query(
      "CREATE (a:Actor {name:'Bruce Willis'})" +
        "-[:ACTED_IN {roles: ['Butch Coolidge']}]->(:Movie {title: 'Pulp Fiction'})"
    );

    await graph.refreshSchema();
    // console.log(graph.getSchema());

    // expect(graph.getSchema()).toMatchInlineSnapshot(`
    //   "Node properties are the following:
    //   Actor {name: STRING}, Movie {title: STRING}
    //   Relationship properties are the following:
    //   ACTED_IN {roles: LIST}
    //   The relationships are the following:
    //   (:Actor)-[:ACTED_IN]->(:Movie)"
    // `);
  });

  test("Test that Neo4j database is correctly instantiated and connected", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    // Integers are casted to strings in the output
    const expectedOutput = [{ output: { str: "test", int: "1" } }];
    const res = await graph.query('RETURN {str: "test", int: 1} AS output');
    await graph.close();
    expect(res).toEqual(expectedOutput);
  });

  test("Test that neo4j correctly import graph document.", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    // Reset data and any constraints/indexes left by earlier tests.
    await graph.query("MATCH (n) DETACH DELETE n");
    await graph.query("CALL apoc.schema.assert({}, {})");
    await graph.refreshSchema();
    await graph.addGraphDocuments(TEST_DATA);

    const output = await graph.query(
      "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY label"
    );
    expect(output).toEqual([
      { label: ["bar"], count: "1" },
      { label: ["foo"], count: "1" },
    ]);
    // No constraints expected without baseEntityLabel.
    expect(graph.getStructuredSchema().metadata?.constraint).toEqual([]);
  });

  test("Test that neo4j correctly import graph document with source.", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    await graph.query("MATCH (n) DETACH DELETE n");
    await graph.query("CALL apoc.schema.assert({}, {})");
    await graph.refreshSchema();
    await graph.addGraphDocuments(TEST_DATA, { includeSource: true });

    // includeSource adds a Document node alongside the entity nodes.
    const output = await graph.query(
      "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY label"
    );
    expect(output).toEqual([
      { label: ["Document"], count: "1" },
      { label: ["bar"], count: "1" },
      { label: ["foo"], count: "1" },
    ]);
    expect(graph.getStructuredSchema().metadata?.constraint).toEqual([]);
  });

  test("Test that neo4j correctly import graph document with base_entity.", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    await graph.query("MATCH (n) DETACH DELETE n");
    await graph.query("CALL apoc.schema.assert({}, {})");
    await graph.refreshSchema();
    await graph.addGraphDocuments(TEST_DATA, { baseEntityLabel: true });

    // Every entity node gets the shared __Entity__ label in addition to
    // its own type label.
    const output = await graph.query(
      "MATCH (n) RETURN apoc.coll.sort(labels(n)) AS label, count(*) AS count ORDER BY label"
    );
    expect(output).toEqual([
      { label: [BASE_ENTITY_LABEL, "bar"], count: "1" },
      { label: [BASE_ENTITY_LABEL, "foo"], count: "1" },
    ]);
    // baseEntityLabel triggers creation of the __Entity__(id) constraint.
    expect(graph.getStructuredSchema().metadata?.constraint).not.toEqual([]);
  });

  test("Test that neo4j correctly import graph document with base_entity and source.", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    await graph.query("MATCH (n) DETACH DELETE n");
    await graph.query("CALL apoc.schema.assert({}, {})");
    await graph.refreshSchema();
    await graph.addGraphDocuments(TEST_DATA, {
      baseEntityLabel: true,
      includeSource: true,
    });

    const output = await graph.query(
      "MATCH (n) RETURN apoc.coll.sort(labels(n)) AS label, count(*) AS count ORDER BY label"
    );
    expect(output).toEqual([
      { label: ["Document"], count: "1" },
      { label: [BASE_ENTITY_LABEL, "bar"], count: "1" },
      { label: [BASE_ENTITY_LABEL, "foo"], count: "1" },
    ]);
    expect(graph.getStructuredSchema().metadata?.constraint).not.toEqual([]);
  });
});

// Second suite: non-default Neo4jGraph configuration options.
describe.skip("Neo4j Graph with custom config", () => {
  const url = process.env.NEO4J_URI as string;
  const username = process.env.NEO4J_USERNAME as string;
  const password = process.env.NEO4J_PASSWORD as string;

  // Public read-only demo server; each database uses its own name as
  // username and password.
  const DEMO_URL = "neo4j+s://demo.neo4jlabs.com";
  const DEMO_DATABASES = [
    "recommendations",
    "buzzoverflow",
    "bluesky",
    "companies",
    "fincen",
    "gameofthrones",
    "grandstack",
    "movies",
    "neoflix",
    "network",
    "northwind",
    "offshoreleaks",
    "stackoverflow2",
    "twitch",
    "twitter",
  ];

  test("Test database timeout", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    const graphWithTimeout = await Neo4jGraph.initialize({
      url,
      username,
      password,
      timeoutMs: 100,
    });

    // NOTE(review): `query` as defined in ../neo4j_graph.js has no internal
    // catch, so a transaction timeout would be expected to reject rather
    // than resolve to undefined — confirm this expectation against the
    // current implementation.
    const res = await graphWithTimeout.query(
      "UNWIND range(0,10000,1) AS i MERGE (f:Foo {id:i}) RETURN collect(i)[..5]"
    );
    expect(res).toEqual(undefined);
    await graphWithTimeout.close();
  });

  test("Test enhancedSchema option", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    const graphWithEnhancedSchema = await Neo4jGraph.initialize({
      url,
      username,
      password,
      enhancedSchema: true,
    });

    await graphWithEnhancedSchema.query("MATCH (n) DETACH DELETE n");
    await graphWithEnhancedSchema.addGraphDocuments(TEST_DATA);
    // call refresh again
    await graphWithEnhancedSchema.refreshSchema();
    const output = graphWithEnhancedSchema.getStructuredSchema();
    // Constraint/index metadata varies by server; compare without it.
    delete output.metadata;
    // console.log(output);
    expect(output).toEqual({
      nodeProps: {
        foo: [
          {
            property: "id",
            type: "STRING",
            values: ["foo"],
            distinct_count: "1",
          },
        ],
        bar: [
          {
            property: "id",
            type: "STRING",
            values: ["bar"],
            distinct_count: "1",
          },
        ],
      },
      relProps: {},
      relationships: [{ start: "foo", type: "REL", end: "bar" }],
    });
    await graphWithEnhancedSchema.close();
  });

  test("Test running on multiple demo databases", async () => {
    // Smoke test: initialize (which refreshes the schema) against every
    // public demo database. Generous timeout because schema introspection
    // on the large datasets is slow.
    for (const database of DEMO_DATABASES) {
      // console.log("Connecting demo database:", database);
      const graphDemo = await Neo4jGraph.initialize({
        url: DEMO_URL,
        database,
        username: database,
        password: database,
        enhancedSchema: true,
      });
      await graphDemo.close();
    }
    // console.log("All database tests completed.");
  }, 10000000);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/graphs
lc_public_repos/langchainjs/libs/langchain-community/src/graphs/tests/memgraph_graph.int.test.ts
/* eslint-disable no-process-env */
import { test } from "@jest/globals";
import { MemgraphGraph } from "../memgraph_graph.js";

// Integration suite against a live Memgraph instance (skipped by default).
// Requires MEMGRAPH_URI / MEMGRAPH_USERNAME / MEMGRAPH_PASSWORD in the
// environment.
describe.skip("Memgraph Graph Tests", () => {
  const url = process.env.MEMGRAPH_URI as string;
  const username = process.env.MEMGRAPH_USERNAME as string;
  const password = process.env.MEMGRAPH_PASSWORD as string;

  let graph: MemgraphGraph;

  // Fresh connection per test; closed again in afterEach.
  beforeEach(async () => {
    graph = await MemgraphGraph.initialize({ url, username, password });
  });
  afterEach(async () => {
    await graph.close();
  });

  test("Schema generation works correctly", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    // Clear the database
    await graph.query("MATCH (n) DETACH DELETE n");

    await graph.query(
      "CREATE (a:Actor {name:'Bruce Willis'})" +
        "-[:ACTED_IN {roles: ['Butch Coolidge']}]->(:Movie {title: 'Pulp Fiction'})"
    );

    await graph.refreshSchema();
    // console.log(graph.getSchema());

    // Memgraph's llm_util reports Python-style type names ("str", "tuple").
    expect(graph.getSchema()).toMatchInlineSnapshot(`
      "Node properties are the following:
      Node name: 'Actor', Node properties: [{"property":"name","type":"str"}]
      Node name: 'Movie', Node properties: [{"property":"title","type":"str"}]
      Relationship properties are the following:
      Relationship name: 'ACTED_IN', Relationship properties: [{"property":"roles","type":"tuple"}]
      The relationships are the following:
      (:Actor)-[:ACTED_IN]->(:Movie)"
    `);
  });

  test("Test that Memgraph database is correctly instantiated and connected", async () => {
    expect(url).toBeDefined();
    expect(username).toBeDefined();
    expect(password).toBeDefined();

    // Integers are casted to strings in the output
    const expectedOutput = [{ output: { str: "test", int: "1" } }];
    const res = await graph.query('RETURN {str: "test", int: 1} AS output');
    await graph.close();
    expect(res).toEqual(expectedOutput);
  });
});
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/qdrant.ts
import { isFilterEmpty, castValue, isInt, isFloat, BaseTranslator, Comparator, Comparators, Comparison, Operation, Operator, Operators, StructuredQuery, Visitor, } from "@langchain/core/structured_query"; import { QdrantVectorStore, QdrantFilter, QdrantCondition, } from "../vectorstores/qdrant.js"; /** * A class that translates or converts `StructuredQuery` to equivalent Qdrant filters. * @example * ```typescript * const selfQueryRetriever = new SelfQueryRetriever({ * llm: new ChatOpenAI(), * vectorStore: new QdrantVectorStore(...), * documentContents: "Brief summary of a movie", * attributeInfo: [], * structuredQueryTranslator: new QdrantTranslator(), * }); * * const relevantDocuments = await selfQueryRetriever.getRelevantDocuments( * "Which movies are rated higher than 8.5?", * ); * ``` */ export class QdrantTranslator< T extends QdrantVectorStore > extends BaseTranslator<T> { declare VisitOperationOutput: QdrantFilter; declare VisitComparisonOutput: QdrantCondition; allowedOperators: Operator[] = [Operators.and, Operators.or, Operators.not]; allowedComparators: Comparator[] = [ Comparators.eq, Comparators.ne, Comparators.lt, Comparators.lte, Comparators.gt, Comparators.gte, ]; /** * Visits an operation and returns a QdrantFilter. * @param operation The operation to visit. * @returns A QdrantFilter. */ visitOperation(operation: Operation): this["VisitOperationOutput"] { const args = operation.args?.map((arg) => arg.accept(this as Visitor)); const operator = { [Operators.and]: "must", [Operators.or]: "should", [Operators.not]: "must_not", }[operation.operator]; return { [operator]: args, }; } /** * Visits a comparison and returns a QdrantCondition. * The value is casted to the correct type. * The attribute is prefixed with "metadata.", * since metadata is nested in the Qdrant payload. * @param comparison The comparison to visit. * @returns A QdrantCondition. 
*/ visitComparison(comparison: Comparison): this["VisitComparisonOutput"] { const attribute = `metadata.${comparison.attribute}`; const value = castValue(comparison.value); if (comparison.comparator === "eq") { return { key: attribute, match: { value, }, }; } else if (comparison.comparator === "ne") { return { key: attribute, match: { except: [value], }, }; } if (!isInt(value) && !isFloat(value)) { throw new Error("Value for gt, gte, lt, lte must be a number"); } // For gt, gte, lt, lte, we need to use the range filter return { key: attribute, range: { [comparison.comparator]: value, }, }; } /** * Visits a structured query and returns a VisitStructuredQueryOutput. * If the query has a filter, it is visited. * @param query The structured query to visit. * @returns An instance of VisitStructuredQueryOutput. */ visitStructuredQuery( query: StructuredQuery ): this["VisitStructuredQueryOutput"] { let nextArg = {}; if (query.filter) { nextArg = { filter: { must: [query.filter.accept(this as Visitor)] }, }; } return nextArg; } /** * Merges two filters into one. If both filters are empty, returns * undefined. If one filter is empty or the merge type is 'replace', * returns the other filter. If the merge type is 'and' or 'or', returns a * new filter with the merged results. Throws an error for unknown merge * types. * @param defaultFilter The default filter to merge. * @param generatedFilter The generated filter to merge. * @param mergeType The type of merge to perform. Can be 'and', 'or', or 'replace'. Defaults to 'and'. * @param forceDefaultFilter If true, the default filter is always returned if the generated filter is empty. Defaults to false. * @returns A merged QdrantFilter, or undefined if both filters are empty. 
*/ mergeFilters( defaultFilter: QdrantFilter | undefined, generatedFilter: QdrantFilter | undefined, mergeType = "and", forceDefaultFilter = false ): QdrantFilter | undefined { if (isFilterEmpty(defaultFilter) && isFilterEmpty(generatedFilter)) { return undefined; } if (isFilterEmpty(defaultFilter) || mergeType === "replace") { if (isFilterEmpty(generatedFilter)) { return undefined; } return generatedFilter; } if (isFilterEmpty(generatedFilter)) { if (forceDefaultFilter) { return defaultFilter; } if (mergeType === "and") { return undefined; } return defaultFilter; } if (mergeType === "and") { return { must: [defaultFilter, generatedFilter], }; } else if (mergeType === "or") { return { should: [defaultFilter, generatedFilter], }; } else { throw new Error("Unknown merge type"); } } formatFunction(): string { throw new Error("Not implemented"); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/vectara.ts
import {
  BaseTranslator,
  isFilterEmpty,
  Comparator,
  Comparators,
  Comparison,
  NOT,
  Operation,
  Operator,
  Operators,
  StructuredQuery,
  Visitor,
} from "@langchain/core/structured_query";
import { VectaraFilter, VectaraStore } from "../vectorstores/vectara.js";

// Operators Vectara's filter syntax supports (NOT is excluded below).
type AllowedOperator = Exclude<Operator, NOT>;

export type VectaraVisitorResult =
  | VectaraOperationResult
  | VectaraComparisonResult
  | VectaraVisitorStructuredQueryResult;
// eslint-disable-next-line @typescript-eslint/ban-types
export type VectaraOperationResult = String;
// eslint-disable-next-line @typescript-eslint/ban-types
export type VectaraComparisonResult = String;
export type VectaraVisitorStructuredQueryResult = {
  filter?: { filter?: VectaraOperationResult | VectaraComparisonResult };
};

type Value = number | string;

function processValue(value: Value): string {
  /** Convert a value to a string and add single quotes if it is a string. */
  if (typeof value === "string") {
    return `'${value}'`;
  } else {
    return String(value);
  }
}

/**
 * Translates `StructuredQuery` objects into Vectara's string-based
 * metadata filter expressions, e.g. `( doc.year >= 1990 )`.
 */
export class VectaraTranslator<
  T extends VectaraStore
> extends BaseTranslator<T> {
  declare VisitOperationOutput: VectaraOperationResult;

  declare VisitComparisonOutput: VectaraComparisonResult;

  allowedOperators: Operator[] = [Operators.and, Operators.or];

  allowedComparators: Comparator[] = [
    Comparators.eq,
    Comparators.ne,
    Comparators.lt,
    Comparators.lte,
    Comparators.gt,
    Comparators.gte,
  ];

  /**
   * Validates that `func` is an allowed comparator/operator and maps it
   * onto its Vectara filter-syntax token (e.g. `eq` -> `=`, `and` -> " and ").
   * @param func The operator or comparator to format.
   * @returns The Vectara token for the operator/comparator.
   * @throws If `func` is unknown or not in the allowed lists.
   */
  formatFunction(func: Operator | Comparator): string {
    if (func in Comparators) {
      if (
        this.allowedComparators.length > 0 &&
        this.allowedComparators.indexOf(func as Comparator) === -1
      ) {
        throw new Error(
          `Comparator ${func} not allowed. Allowed operators: ${this.allowedComparators.join(
            ", "
          )}`
        );
      }
    } else if (func in Operators) {
      if (
        this.allowedOperators.length > 0 &&
        this.allowedOperators.indexOf(func as Operator) === -1
      ) {
        throw new Error(
          `Operator ${func} not allowed. 
Allowed operators: ${this.allowedOperators.join(
            ", "
          )}`
        );
      }
    } else {
      throw new Error("Unknown comparator or operator");
    }
    // Token table: logical operators become infix keywords with padding,
    // comparators become SQL-like symbols.
    const mapDict = {
      and: " and ",
      or: " or ",
      eq: "=",
      ne: "!=",
      lt: "<",
      lte: "<=",
      gt: ">",
      gte: ">=",
    };
    return mapDict[func as Comparator | AllowedOperator];
  }

  /**
   * Visits an operation and returns a VectaraOperationResult. The
   * operation's arguments are visited and the operator is formatted.
   * @param operation The operation to visit.
   * @returns A VectaraOperationResult.
   */
  visitOperation(operation: Operation): this["VisitOperationOutput"] {
    const args = operation.args?.map((arg) =>
      arg.accept(this as Visitor)
    ) as VectaraVisitorResult[];
    const operator = this.formatFunction(operation.operator);
    // Parenthesize so nested and/or expressions keep their precedence.
    return `( ${args.join(operator)} )`;
  }

  /**
   * Visits a comparison and returns a VectaraComparisonResult. The
   * comparison's value is checked for type and the comparator is formatted.
   * Throws an error if the value type is not supported.
   * @param comparison The comparison to visit.
   * @returns A VectaraComparisonResult.
   */
  visitComparison(comparison: Comparison): this["VisitComparisonOutput"] {
    const comparator = this.formatFunction(comparison.comparator);
    // Metadata attributes are addressed via the `doc.` prefix in Vectara.
    return `( doc.${comparison.attribute} ${comparator} ${processValue(
      comparison.value
    )} )`;
  }

  /**
   * Visits a structured query and returns a VectaraStructuredQueryResult.
   * If the query has a filter, it is visited.
   * @param query The structured query to visit.
   * @returns A VectaraStructuredQueryResult.
   */
  visitStructuredQuery(
    query: StructuredQuery
  ): this["VisitStructuredQueryOutput"] {
    let nextArg = {};
    if (query.filter) {
      nextArg = {
        filter: { filter: query.filter.accept(this as Visitor) },
      };
    }
    return nextArg;
  }

  /**
   * Merges a caller-supplied default filter with the generated filter.
   * @param defaultFilter The default filter to merge.
   * @param generatedFilter The generated filter to merge.
   * @param mergeType "and", "or", or "replace"; defaults to "and".
   * @param forceDefaultFilter If true, the default filter wins whenever the generated filter is empty.
   * @returns The merged VectaraFilter, or undefined when both inputs are empty.
   */
  mergeFilters(
    defaultFilter: VectaraFilter | undefined,
    generatedFilter: VectaraFilter | undefined,
    mergeType = "and",
    forceDefaultFilter = false
  ): VectaraFilter | undefined {
    if (isFilterEmpty(defaultFilter) && isFilterEmpty(generatedFilter)) {
      return undefined;
    }
    if (isFilterEmpty(defaultFilter) || mergeType === "replace") {
      if (isFilterEmpty(generatedFilter)) {
        return undefined;
      }
      return generatedFilter;
    }
    if (isFilterEmpty(generatedFilter)) {
      if (forceDefaultFilter) {
        return defaultFilter;
      }
      if (mergeType === "and") {
        return undefined;
      }
      return defaultFilter;
    }
    // NOTE(review): the template interpolation below stringifies the filter
    // operands. If VectaraFilter is an object (rather than a string-like
    // value) at runtime this would produce "[object Object]" — confirm the
    // runtime type callers actually pass here.
    if (mergeType === "and") {
      return {
        filter: `${defaultFilter} and ${generatedFilter}`,
      } as VectaraFilter;
    } else if (mergeType === "or") {
      return {
        filter: `${defaultFilter} or ${generatedFilter}`,
      };
    } else {
      throw new Error("Unknown merge type");
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/supabase.ts
import {
  isFilterEmpty,
  isFloat,
  isInt,
  isObject,
  isString,
  BaseTranslator,
  Comparator,
  Comparators,
  Comparison,
  Operation,
  Operator,
  Operators,
  StructuredQuery,
} from "@langchain/core/structured_query";
import type {
  SupabaseFilterRPCCall,
  SupabaseMetadata,
  SupabaseVectorStore,
} from "../vectorstores/supabase.js";
import {
  ProxyParamsDuplicator,
  convertObjectFilterToStructuredQuery,
} from "./supabase_utils.js";

/**
 * Represents the possible values that can be used in a comparison in a
 * structured query. It can be a string or a number.
 */
type ValueType = {
  eq: string | number;
  ne: string | number;
  lt: string | number;
  lte: string | number;
  gt: string | number;
  gte: string | number;
};

/**
 * A specialized translator designed to work with Supabase, extending the
 * BaseTranslator class. It translates structured queries into a format
 * that can be understood by the Supabase database.
 * @example
 * ```typescript
 * const selfQueryRetriever = new SelfQueryRetriever({
 *   llm: new ChatOpenAI(),
 *   vectorStore: new SupabaseVectorStore(),
 *   documentContents: "Brief summary of a movie",
 *   attributeInfo: [],
 *   structuredQueryTranslator: new SupabaseTranslator(),
 * });
 *
 * const queryResult = await selfQueryRetriever.getRelevantDocuments(
 *   "Which movies are directed by Greta Gerwig?",
 * );
 * ```
 */
export class SupabaseTranslator<
  T extends SupabaseVectorStore
> extends BaseTranslator<T> {
  declare VisitOperationOutput: SupabaseFilterRPCCall;

  declare VisitComparisonOutput: SupabaseFilterRPCCall;

  allowedOperators: Operator[] = [Operators.and, Operators.or];

  allowedComparators: Comparator[] = [
    Comparators.eq,
    Comparators.ne,
    Comparators.gt,
    Comparators.gte,
    Comparators.lt,
    Comparators.lte,
  ];

  // Not used by this translator; comparisons are formatted either as RPC
  // builder calls (visitComparison) or PostgREST strings
  // (visitComparisonAsString).
  formatFunction(): string {
    throw new Error("Not implemented");
  }

  /**
   * Returns a function that applies the appropriate comparator operation on
   * the attribute and value provided. The function returned is used to
   * filter data in a Supabase database.
   * @param comparator The comparator to be used in the operation.
   * @returns A function that applies the comparator operation on the attribute and value provided.
   */
  getComparatorFunction<C extends Comparator>(
    comparator: Comparator
  ): (attr: string, value: ValueType[C]) => SupabaseFilterRPCCall {
    // Each case returns a curried builder: (attr, value) -> (rpc) -> rpc.<op>(...).
    // The column name is derived from the value's type (see buildColumnName).
    switch (comparator) {
      case Comparators.eq: {
        return (attr: string, value: ValueType[C]) => (rpc) =>
          rpc.eq(this.buildColumnName(attr, value), value);
      }
      case Comparators.ne: {
        return (attr: string, value: ValueType[C]) => (rpc) =>
          rpc.neq(this.buildColumnName(attr, value), value);
      }
      case Comparators.gt: {
        return (attr: string, value: ValueType[C]) => (rpc) =>
          rpc.gt(this.buildColumnName(attr, value), value);
      }
      case Comparators.gte: {
        return (attr: string, value: ValueType[C]) => (rpc) =>
          rpc.gte(this.buildColumnName(attr, value), value);
      }
      case Comparators.lt: {
        return (attr: string, value: ValueType[C]) => (rpc) =>
          rpc.lt(this.buildColumnName(attr, value), value);
      }
      case Comparators.lte: {
        return (attr: string, value: ValueType[C]) => (rpc) =>
          rpc.lte(this.buildColumnName(attr, value), value);
      }
      default: {
        throw new Error("Unknown comparator");
      }
    }
  }

  /**
   * Builds a column name based on the attribute and value provided. The
   * column name is used in filtering data in a Supabase database.
   * @param attr The attribute to be used in the column name.
   * @param value The value to be used in the column name.
   * @param includeType Whether to include the data type in the column name.
   * @returns The built column name.
   */
  buildColumnName(attr: string, value: string | number, includeType = true) {
    let column = "";
    if (isString(value)) {
      // `->>` extracts the JSON field as text, matching string comparisons.
      column = `metadata->>${attr}`;
    } else if (isInt(value)) {
      // `->` keeps JSON; the ::int cast makes numeric comparison valid.
      column = `metadata->${attr}${includeType ? "::int" : ""}`;
    } else if (isFloat(value)) {
      column = `metadata->${attr}${includeType ? "::float" : ""}`;
    } else {
      throw new Error("Data type not supported");
    }
    return column;
  }

  /**
   * Visits an operation and returns a string representation of it. This is
   * used in translating a structured query into a format that can be
   * understood by Supabase.
   * @param operation The operation to be visited.
   * @returns A string representation of the operation.
   */
  visitOperationAsString(operation: Operation): string {
    const { args } = operation;
    if (!args) {
      return "";
    }
    // Recursively stringify children; nested operations are wrapped as
    // `operator(...)` per PostgREST logical-filter syntax.
    return args
      ?.reduce((acc, arg) => {
        if (arg.exprName === "Comparison") {
          acc.push(this.visitComparisonAsString(arg as Comparison));
        } else if (arg.exprName === "Operation") {
          const { operator: innerOperator } = arg as Operation;
          acc.push(
            `${innerOperator}(${this.visitOperationAsString(arg as Operation)})`
          );
        }
        return acc;
      }, [] as string[])
      .join(",");
  }

  /**
   * Visits an operation and returns a function that applies the operation
   * on a Supabase database. This is used in translating a structured query
   * into a format that can be understood by Supabase.
   * @param operation The operation to be visited.
   * @returns A function that applies the operation on a Supabase database.
   */
  visitOperation(operation: Operation): this["VisitOperationOutput"] {
    const { operator, args } = operation;
    if (this.allowedOperators.includes(operator)) {
      if (operator === Operators.and) {
        // AND: chain each child filter onto the same rpc builder.
        if (!args) {
          return (rpc) => rpc;
        }
        const filter: SupabaseFilterRPCCall = (rpc) =>
          args.reduce((acc, arg) => {
            const filter = arg.accept(this) as SupabaseFilterRPCCall;
            return filter(acc);
          }, rpc);
        return filter;
      } else if (operator === Operators.or) {
        // OR: PostgREST has no builder-level OR chaining, so serialize the
        // whole subtree to a string and pass it to rpc.or(...).
        return (rpc) => rpc.or(this.visitOperationAsString(operation));
      } else {
        throw new Error("Unknown operator");
      }
    } else {
      throw new Error("Operator not allowed");
    }
  }

  /**
   * Visits a comparison and returns a string representation of it. This is
   * used in translating a structured query into a format that can be
   * understood by Supabase.
   * @param comparison The comparison to be visited.
   * @returns A string representation of the comparison.
   */
  visitComparisonAsString(comparison: Comparison): string {
    let { value } = comparison;
    const { comparator: _comparator, attribute } = comparison;
    let comparator = _comparator as string;
    // PostgREST spells "not equal" as `neq`.
    if (comparator === Comparators.ne) {
      comparator = "neq";
    }
    if (Array.isArray(value)) {
      // List values: quote any element containing PostgREST reserved
      // characters (comma/parens) so the serialized list stays parseable.
      value = `(${value
        .map((v) => {
          if (typeof v === "string" && /[,()]/.test(v)) return `"${v}"`;
          return v;
        })
        .join(",")})`;
    }
    // Type suffix is omitted (includeType=false) — string filters don't
    // accept `::int`/`::float` casts in column position.
    return `${this.buildColumnName(
      attribute,
      value,
      false
    )}.${comparator}.${value}`;
  }

  /**
   * Visits a comparison and returns a function that applies the comparison
   * on a Supabase database. This is used in translating a structured query
   * into a format that can be understood by Supabase.
   * @param comparison The comparison to be visited.
   * @returns A function that applies the comparison on a Supabase database.
   */
  visitComparison(comparison: Comparison): this["VisitComparisonOutput"] {
    const { comparator, attribute, value } = comparison;
    if (this.allowedComparators.includes(comparator)) {
      const comparatorFunction = this.getComparatorFunction(
        comparator as Comparator
      );
      return comparatorFunction(attribute, value);
    } else {
      throw new Error("Comparator not allowed");
    }
  }

  /**
   * Visits a structured query and returns a function that applies the query
   * on a Supabase database. This is used in translating a structured query
   * into a format that can be understood by Supabase.
   * @param query The structured query to be visited.
   * @returns A function that applies the query on a Supabase database.
   */
  visitStructuredQuery(
    query: StructuredQuery
  ): this["VisitStructuredQueryOutput"] {
    if (!query.filter) {
      return {};
    }
    const filterFunction = query.filter?.accept(this);
    return { filter: (filterFunction as SupabaseFilterRPCCall) ?? {} };
  }

  /**
   * Merges two filters into one. The merged filter can be used to filter
   * data in a Supabase database.
   * @param defaultFilter The default filter to be merged (may be a plain metadata object or an RPC-call function).
   * @param generatedFilter The generated filter to be merged.
   * @param mergeType The type of merge to be performed. It can be 'and', 'or', or 'replace'.
   * @returns The merged filter.
   */
  mergeFilters(
    defaultFilter: SupabaseFilterRPCCall | SupabaseMetadata | undefined,
    generatedFilter: SupabaseFilterRPCCall | undefined,
    mergeType = "and"
  ): SupabaseFilterRPCCall | SupabaseMetadata | undefined {
    if (isFilterEmpty(defaultFilter) && isFilterEmpty(generatedFilter)) {
      return undefined;
    }
    if (isFilterEmpty(defaultFilter) || mergeType === "replace") {
      if (isFilterEmpty(generatedFilter)) {
        return undefined;
      }
      return generatedFilter;
    }
    if (isFilterEmpty(generatedFilter)) {
      if (mergeType === "and") {
        return undefined;
      }
      return defaultFilter;
    }
    let myDefaultFilter = defaultFilter;
    if (isObject(defaultFilter)) {
      // A plain metadata object is first converted into an equivalent
      // structured query (all-keys-equal, ANDed) and then into an RPC-call
      // filter so it can be composed with the generated filter.
      const { filter } = this.visitStructuredQuery(
        convertObjectFilterToStructuredQuery(defaultFilter)
      );
      // just in case the built filter is empty somehow
      if (isFilterEmpty(filter)) {
        if (isFilterEmpty(generatedFilter)) {
          return undefined;
        }
        return generatedFilter;
      }
      myDefaultFilter = filter;
    }
    // After this point, myDefaultFilter will always be SupabaseFilterRPCCall
    if (mergeType === "or") {
      // OR across two RPC-call filters: replay each against a recording
      // proxy to obtain its flattened PostgREST string, then combine via
      // rpc.or("<default>,<generated>").
      return (rpc) => {
        const defaultFlattenedParams = ProxyParamsDuplicator.getFlattenedParams(
          rpc,
          myDefaultFilter as SupabaseFilterRPCCall
        );
        const generatedFlattenedParams =
          ProxyParamsDuplicator.getFlattenedParams(rpc, generatedFilter);
        return rpc.or(`${defaultFlattenedParams},${generatedFlattenedParams}`);
      };
    } else if (mergeType === "and") {
      // AND: function composition — apply the default filter first, then
      // the generated one on the resulting builder.
      return (rpc) =>
        generatedFilter((myDefaultFilter as SupabaseFilterRPCCall)(rpc));
    } else {
      throw new Error("Unknown merge type");
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/supabase_utils.ts
import {
  Comparators,
  Comparison,
  Operation,
  Operators,
  StructuredQuery,
} from "@langchain/core/structured_query";
import type {
  SupabaseFilter,
  SupabaseFilterRPCCall,
  SupabaseMetadata,
} from "../vectorstores/supabase.js";

type SupabaseFilterProps = keyof SupabaseFilter;

/**
 * Utility class used to duplicate parameters for a proxy object,
 * specifically designed to work with `SupabaseFilter` objects. It
 * contains methods to handle different types of operations such as "or",
 * "filter", "in", "contains", "textSearch", "match", "not", and default
 * operations.
 *
 * How it works: a `SupabaseFilterRPCCall` is replayed against a Proxy of
 * the real filter builder. Each intercepted method call is recorded as a
 * `[column, "op.value"]` pair in `values` instead of being executed, so
 * the whole filter can later be re-serialized into a single PostgREST
 * filter string (see `flattenedParams`). This is what makes `or`-merging
 * of two function-style filters possible.
 */
export class ProxyParamsDuplicator {
  // Builder methods that can be faithfully recorded; anything else throws.
  duplicationAllowedOps: string[] = [
    "eq",
    "neq",
    "lt",
    "lte",
    "gt",
    "gte",
    "like",
    "ilike",
    "or",
    "in",
    "contains",
    "match",
    "not",
    "textSearch",
    "filter",
  ];

  // Recorded calls as [column, "operator.value"] pairs, in call order.
  values: [string, string][] = [];

  /**
   * Creates a proxy handler for a `SupabaseFilter` object. The handler
   * intercepts get operations and applies specific logic based on the
   * property being accessed.
   * @returns A proxy handler for a `SupabaseFilter` object.
   */
  buildProxyHandler() {
    const proxyHandler: ProxyHandler<SupabaseFilter> = {
      get: (target, prop, receiver) => {
        if (typeof target[prop as SupabaseFilterProps] === "function") {
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          return (...args: any[]) => {
            if (this.duplicationAllowedOps.includes(String(prop))) {
              switch (String(prop)) {
                case "or":
                  // args[0]: filters, args[1]: { foreignTable }
                  this.addOrClause(args[0], args[1]);
                  break;
                case "filter":
                  // args[0]: column, args[1]: operator, args[2]: value
                  this.addFilterClause(args[0], args[1], args[2]);
                  break;
                case "in":
                  // args[0]: column, args[1]: values
                  this.addInClause(args[0], args[1]);
                  break;
                case "contains":
                  // args[0]: column, args[1]: value
                  this.addContainsClause(args[0], args[1]);
                  break;
                case "textSearch":
                  // args[0]: column, args[1]: query, args[2]: { config, type }
                  this.addTextSearchClause(args[0], args[1], args[2]);
                  break;
                case "match":
                  // args[0]: query
                  this.addMatchClause(args[0]);
                  break;
                case "not":
                  // args[0]: column, args[1]: operator, args[2]: value
                  this.addNotClause(args[0], args[1], args[2]);
                  break;
                default:
                  // args[0]: column, args[1]: value
                  this.addDefaultOpClause(prop as string, args[0], args[1]);
              }
              // Return a new proxy over the same target so chained calls
              // (rpc.eq(...).lt(...)) keep being recorded.
              return new Proxy(target, proxyHandler);
            } else {
              throw new Error(
                "Filter operation not supported for 'or' mergeFiltersOperator"
              );
            }
          };
        } else {
          // Non-function properties pass through to the real builder.
          return Reflect.get(target, prop, receiver);
        }
      },
    };
    return proxyHandler;
  }

  /**
   * Removes type annotations from a value string.
   * PostgREST filter strings cannot carry the `::int`/`::float` casts that
   * `buildColumnName` adds for RPC calls, so they are stripped here.
   * @param value The value string to clean.
   * @returns The cleaned value string.
   */
  removeType(value: string) {
    let cleanedValue = value;
    if (cleanedValue.includes("::float")) {
      cleanedValue = cleanedValue.replace("::float", "");
    }
    if (cleanedValue.includes("::int")) {
      cleanedValue = cleanedValue.replace("::int", "");
    }
    return cleanedValue;
  }

  /**
   * Adds a default operation clause to the values array.
   * @param prop The operation property.
   * @param column The column to apply the operation to.
   * @param value The value for the operation.
   */
  addDefaultOpClause(prop: string, column: string, value: unknown) {
    this.values.push([this.removeType(column), `${String(prop)}.${value}`]);
  }

  /**
   * Adds an 'or' clause to the values array.
   * @param filters The filters for the 'or' clause.
   * @param foreignTable Optional foreign table for the 'or' clause.
   */
  addOrClause(
    filters: string,
    { foreignTable }: { foreignTable?: string } = {}
  ) {
    const key = foreignTable ? `${foreignTable}.or` : "or";
    this.values.push([this.removeType(key), `(${filters})`]);
  }

  /**
   * Adds a 'filter' clause to the values array.
   * @param column The column to apply the filter to.
   * @param operator The operator for the filter.
   * @param value The value for the filter.
   */
  addFilterClause(column: string, operator: string, value: unknown) {
    this.values.push([this.removeType(column), `${operator}.${value}`]);
  }

  /**
   * Adds an 'in' clause to the values array.
   * @param column The column to apply the 'in' clause to.
   * @param values The values for the 'in' clause.
   */
  addInClause(column: string, values: unknown[]) {
    // Quote elements containing PostgREST reserved characters so the
    // serialized list remains parseable.
    const cleanedValues = values
      .map((s) => {
        if (typeof s === "string" && /[,()]/.test(s)) return `"${s}"`;
        else return `${s}`;
      })
      .join(",");
    this.values.push([this.removeType(column), `in.(${cleanedValues})`]);
  }

  /**
   * Adds a 'contains' clause to the values array.
   * @param column The column to apply the 'contains' clause to.
   * @param value The value for the 'contains' clause.
   */
  addContainsClause(column: string, value: unknown) {
    if (typeof value === "string") {
      this.values.push([this.removeType(column), `cs.${value}`]);
    } else if (Array.isArray(value)) {
      this.values.push([this.removeType(column), `cs.{${value.join(",")}}`]);
    } else {
      this.values.push([
        this.removeType(column),
        `cs.${JSON.stringify(value)}`,
      ]);
    }
  }

  /**
   * Adds a 'textSearch' clause to the values array.
   * @param column The column to apply the 'textSearch' clause to.
   * @param query The query for the 'textSearch' clause.
   * @param config Optional configuration for the 'textSearch' clause.
   * @param type Optional type for the 'textSearch' clause.
   */
  addTextSearchClause(
    column: string,
    query: string[],
    {
      config,
      type,
    }: { config?: string; type?: "plain" | "phrase" | "websearch" } = {}
  ) {
    // Prefix maps to PostgREST full-text operators: plfts / phfts / wfts.
    let typePart = "";
    if (type === "plain") {
      typePart = "pl";
    } else if (type === "phrase") {
      typePart = "ph";
    } else if (type === "websearch") {
      typePart = "w";
    }
    const configPart = config === undefined ? "" : `(${config})`;
    // NOTE(review): `query` is typed string[] but interpolated directly,
    // so an array serializes with comma separators — confirm this matches
    // what callers pass (a single query string vs. a list).
    this.values.push([
      this.removeType(column),
      `${typePart}fts${configPart}.${query}`,
    ]);
  }

  /**
   * Adds a 'not' clause to the values array.
   * Note: the column is used verbatim here (no type-suffix stripping).
   * @param column The column to apply the 'not' clause to.
   * @param operator The operator for the 'not' clause.
   * @param value The value for the 'not' clause.
   */
  addNotClause(column: string, operator: string, value: unknown) {
    this.values.push([column, `not.${operator}.${value}`]);
  }

  /**
   * Adds a 'match' clause to the values array.
   * Each key/value pair of the query becomes an equality clause.
   * @param query The query for the 'match' clause.
   */
  addMatchClause(query: Record<string, unknown>) {
    Object.entries(query).forEach(([column, value]) => {
      this.values.push([column, `eq.${value}`]);
    });
  }

  /**
   * Returns the flattened parameters as a string.
   * A single clause is returned bare; multiple clauses are wrapped in
   * `and(...)` per PostgREST logical-filter syntax.
   * @returns The flattened parameters as a string.
   */
  flattenedParams() {
    const mapped = this.values.map(([k, v]) => `${k}.${v}`);
    if (mapped.length === 1) return mapped[0];
    return `and(${mapped.join(",")})`;
  }

  /**
   * Gets flattened parameters from a `SupabaseFilter` and a
   * `SupabaseFilterRPCCall`.
   * Replays `filter` against a recording proxy of `rpc` and serializes
   * the recorded calls; the real builder is never mutated.
   * @param rpc The `SupabaseFilter` object.
   * @param filter The `SupabaseFilterRPCCall` object.
   * @returns The flattened parameters as a string.
   */
  static getFlattenedParams(
    rpc: SupabaseFilter,
    filter: SupabaseFilterRPCCall
  ) {
    const proxiedDuplicator = new ProxyParamsDuplicator();
    const proxiedRpc = new Proxy(rpc, proxiedDuplicator.buildProxyHandler());
    void filter(proxiedRpc);
    return proxiedDuplicator.flattenedParams();
  }
}

/**
 * Converts a `SupabaseMetadata` object into a `StructuredQuery` object.
 * The function creates a new `StructuredQuery` object and uses the
 * `Operation` and `Comparison` classes to build the query.
 * Every key/value pair becomes an equality comparison, all ANDed together.
 */
export function convertObjectFilterToStructuredQuery(
  objFilter: SupabaseMetadata
): StructuredQuery {
  return new StructuredQuery(
    "",
    new Operation(
      Operators.and,
      Object.entries(objFilter).map(
        ([column, value]) => new Comparison(Comparators.eq, column, value)
      )
    )
  );
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/chroma.ts
import {
  BasicTranslator,
  Comparators,
  Operators,
} from "@langchain/core/structured_query";
import { Chroma } from "../vectorstores/chroma.js";

/**
 * Specialized translator for the Chroma vector database. It extends the
 * BasicTranslator class and translates internal query language elements
 * to valid filters. The class defines a subset of allowed logical
 * operators and comparators that can be used in the translation process.
 * @example
 * ```typescript
 * const chromaTranslator = new ChromaTranslator();
 * const selfQueryRetriever = new SelfQueryRetriever({
 *   llm: new ChatOpenAI(),
 *   vectorStore: new Chroma(),
 *   documentContents: "Brief summary of a movie",
 *   attributeInfo: [],
 *   structuredQueryTranslator: chromaTranslator,
 * });
 *
 * const relevantDocuments = await selfQueryRetriever.getRelevantDocuments(
 *   "Which movies are directed by Greta Gerwig?",
 * );
 * ```
 */
export class ChromaTranslator<T extends Chroma> extends BasicTranslator<T> {
  constructor() {
    // Restrict the base translator to the operator/comparator subset that
    // Chroma's metadata filtering supports.
    const logicalOperators = [Operators.and, Operators.or];
    const comparisonOperators = [
      Comparators.eq,
      Comparators.ne,
      Comparators.gt,
      Comparators.gte,
      Comparators.lt,
      Comparators.lte,
    ];
    super({
      allowedOperators: logicalOperators,
      allowedComparators: comparisonOperators,
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/tests/chroma_self_query.int.test.ts
import { test } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { AttributeInfo } from "langchain/chains/query_constructor";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { Chroma } from "../../vectorstores/chroma.js";
import { ChromaTranslator } from "../chroma.js";

// Integration test for the self-query retriever backed by a Chroma store.
// Skipped by default: it requires a reachable Chroma server and OpenAI
// credentials, and it makes no assertions — it only exercises the pipeline.
test.skip("Chroma Store Self Query Retriever Test", async () => {
  // Movie corpus; metadata fields correspond to `attributeInfo` below.
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
  ];
  // Attribute descriptions the LLM uses to construct structured filters.
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  const embeddings = new OpenAIEmbeddings();
  const llm = new OpenAI();
  const documentContents = "Brief summary of a movie";
  const vectorStore = await Chroma.fromDocuments(docs, embeddings, {
    collectionName: "a-movie-collection",
  });
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new ChromaTranslator(),
  });
  // The query results are intentionally unused; the ts-expect-error
  // directives below suppress the unused-variable diagnostics.
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are less than 90 minutes?"
  );
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?"
  );
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const query4 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are either comedy or drama and are less than 90 minutes?"
  );
  // console.log(query1, query2, query3, query4);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/tests/vectara_self_query.int.test.ts
/* eslint-disable no-process-env */
import { test } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { OpenAI } from "@langchain/openai";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { AttributeInfo } from "langchain/chains/query_constructor";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { VectaraStore } from "../../vectorstores/vectara.js";
import { VectaraTranslator } from "../vectara.js";

// Integration test for the self-query retriever backed by Vectara.
// Skipped by default: it requires VECTARA_* environment credentials
// and an OpenAI API key.
test.skip("Vectara Self Query Retriever Test", async () => {
  // Movie corpus; metadata fields correspond to `attributeInfo` below.
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        rating: 9.9,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
      },
    }),
  ];
  // Attribute descriptions the LLM uses to construct structured filters.
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
  ];
  // Vectara connection settings come from the environment.
  const config = {
    customerId: Number(process.env.VECTARA_CUSTOMER_ID),
    corpusId: Number(process.env.VECTARA_CORPUS_ID),
    apiKey: String(process.env.VECTARA_API_KEY),
    verbose: true,
  };
  // NOTE(review): FakeEmbeddings is used here — presumably Vectara computes
  // embeddings server-side and local vectors are ignored; confirm.
  const vectorStore = await VectaraStore.fromDocuments(
    docs,
    new FakeEmbeddings(),
    config
  );
  const llm = new OpenAI();
  const documentContents = "Brief summary of a movie";
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new VectaraTranslator(),
  });
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "I want to watch a movie rated higher than 8.5"
  );
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are either comedy or science fiction and are rated higher than 8.5?"
  );
  // Nonsense query: expected to match nothing.
  const query4 = await selfQueryRetriever.getRelevantDocuments(
    "Wau wau wau wau hello gello hello?"
  );
  // console.log(query1, query2, query3, query4);
  expect(query1.length).toBe(2);
  expect(query2.length).toBe(1);
  expect(query3.length).toBe(1);
  expect(query4.length).toBe(0);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/tests/supabase_self_query.int.test.ts
/* eslint-disable no-process-env */
import { test } from "@jest/globals";
import { createClient } from "@supabase/supabase-js";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { AttributeInfo } from "langchain/chains/query_constructor";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import {
  SupabaseFilter,
  SupabaseVectorStore,
} from "../../vectorstores/supabase.js";
import { SupabaseTranslator } from "../supabase.js";

// Integration tests for the self-query retriever backed by Supabase.
// They require SUPABASE_VECTOR_STORE_URL / SUPABASE_VECTOR_STORE_PRIVATE_KEY
// plus an OpenAI API key.
test("Supabase Store Self Query Retriever Test", async () => {
  // Movie corpus plus one non-movie (album) document.
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: {
        year: 2019,
        director: "Greta Gerwig",
        rating: 8.3,
        genre: "drama",
      },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
    new Document({
      pageContent: "10x the previous gecs",
      metadata: {
        year: 2023,
        title: "10000 gecs",
        artist: "100 gecs",
        rating: 9.9,
        type: "album",
      },
    }),
  ];
  // Attribute descriptions the LLM uses to construct structured filters.
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  if (
    !process.env.SUPABASE_VECTOR_STORE_URL ||
    !process.env.SUPABASE_VECTOR_STORE_PRIVATE_KEY
  ) {
    throw new Error(
      "Supabase URL or private key not set. Please set it in the .env file"
    );
  }
  const embeddings = new OpenAIEmbeddings();
  const llm = new OpenAI();
  const documentContents = "Brief summary of a movie";
  const client = createClient(
    process.env.SUPABASE_VECTOR_STORE_URL,
    process.env.SUPABASE_VECTOR_STORE_PRIVATE_KEY
  );
  const vectorStore = new SupabaseVectorStore(embeddings, { client });
  // idempotency: fixed ids so re-running the test upserts the same rows
  const opts = { ids: docs.map((_, idx) => idx) };
  await vectorStore.addDocuments(docs, opts);
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new SupabaseTranslator(),
  });
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are less than 90 minutes?"
  );
  // console.log(query1);
  expect(query1.length).toEqual(0);
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?"
  );
  // console.log(query2);
  expect(query2.length).toEqual(3);
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  // console.log(query3);
  expect(query3.length).toEqual(1);
  const query4 = await selfQueryRetriever.getRelevantDocuments("What is what");
  // this should return empty since it'll create an empty filter
  // console.log(query4);
  expect(query4.length).toEqual(0);
});

// Same corpus, but with a caller-supplied default filter merged into the
// LLM-generated filter via "and" — only type:"movie" rows should match.
test("Supabase Store Self Query Retriever Test With Default Filter And Merge Operator", async () => {
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: {
        type: "movie",
        year: 1993,
        rating: 7.7,
        genre: "science fiction",
      },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: {
        type: "movie",
        year: 2010,
        director: "Christopher Nolan",
        rating: 8.2,
      },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: {
        type: "movie",
        year: 2006,
        director: "Satoshi Kon",
        rating: 8.6,
      },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: {
        type: "movie",
        year: 2019,
        director: "Greta Gerwig",
        rating: 8.3,
        genre: "drama",
      },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { type: "movie", year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        type: "movie",
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
    new Document({
      pageContent: "10x the previous gecs",
      metadata: {
        year: 2023,
        title: "10000 gecs",
        artist: "100 gecs",
        rating: 9.9,
        type: "album",
      },
    }),
  ];
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  if (
    !process.env.SUPABASE_VECTOR_STORE_URL ||
    !process.env.SUPABASE_VECTOR_STORE_PRIVATE_KEY
  ) {
    throw new Error(
      "Supabase URL or private key not set. Please set it in the .env file"
    );
  }
  const embeddings = new OpenAIEmbeddings();
  const llm = new OpenAI();
  const documentContents = "Brief summary of a movie";
  const client = createClient(
    process.env.SUPABASE_VECTOR_STORE_URL,
    process.env.SUPABASE_VECTOR_STORE_PRIVATE_KEY
  );
  const vectorStore = new SupabaseVectorStore(embeddings, { client });
  // idempotency: fixed ids so re-running the test upserts the same rows
  const opts = { ids: docs.map((_, idx) => idx) };
  await vectorStore.addDocuments(docs, opts);
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new SupabaseTranslator(),
    searchParams: {
      filter: (rpc: SupabaseFilter) =>
        rpc.filter("metadata->>type", "eq", "movie"),
      mergeFiltersOperator: "and", // Supabase self-query filter does not support "or" operator for merging two filters
      k: docs.length,
    },
  });
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are less than 90 minutes?"
  );
  // console.log(query1);
  expect(query1.length).toEqual(0);
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?"
  );
  // console.log(query2);
  expect(query2.length).toEqual(2);
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  // console.log(query3);
  expect(query3.length).toEqual(1);
  const query4 = await selfQueryRetriever.getRelevantDocuments("What is what");
  // query4 should return no documents, since we can't use the "or" operator
  // console.log(query4);
  expect(query4.length).toEqual(0);
});

// Same corpus, but the default filter is merged via "or", so the default
// filter alone can satisfy queries the generated filter excludes.
test("Supabase Store Self Query Retriever Test With Default Filter Or Merge Operator", async () => {
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: {
        type: "movie",
        year: 1993,
        rating: 7.7,
        genre: "science fiction",
      },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: {
        type: "movie",
        year: 2010,
        director: "Christopher Nolan",
        rating: 8.2,
      },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: {
        type: "movie",
        year: 2006,
        director: "Satoshi Kon",
        rating: 8.6,
      },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: {
        type: "movie",
        year: 2019,
        director: "Greta Gerwig",
        rating: 8.3,
        genre: "drama",
      },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { type: "movie", year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        type: "movie",
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
    new Document({
      pageContent: "10x the previous gecs",
      metadata: {
        year: 2023,
        title: "10000 gecs",
        artist: "100 gecs",
        rating: 9.9,
        type: "album",
      },
    }),
  ];
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  if (
    !process.env.SUPABASE_VECTOR_STORE_URL ||
    !process.env.SUPABASE_VECTOR_STORE_PRIVATE_KEY
  ) {
    throw new Error(
      "Supabase URL or private key not set. Please set it in the .env file"
    );
  }
  const embeddings = new OpenAIEmbeddings();
  const llm = new OpenAI();
  const documentContents = "Brief summary of a movie";
  const client = createClient(
    process.env.SUPABASE_VECTOR_STORE_URL,
    process.env.SUPABASE_VECTOR_STORE_PRIVATE_KEY
  );
  const vectorStore = new SupabaseVectorStore(embeddings, { client });
  // idempotency: fixed ids so re-running the test upserts the same rows
  const opts = { ids: docs.map((_, idx) => idx) };
  await vectorStore.addDocuments(docs, opts);
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new SupabaseTranslator(),
    searchParams: {
      filter: (rpc: SupabaseFilter) =>
        rpc
          .filter("metadata->>type", "eq", "movie")
          .filter("metadata->rating", "gt", 0.01),
      mergeFiltersOperator: "or",
      k: docs.length,
    },
  });
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are less than 90 minutes?"
  );
  // console.log(query1);
  expect(query1.length).toEqual(5);
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?"
  );
  // console.log(query2);
  expect(query2.length).toEqual(6);
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  // console.log(query3);
  expect(query3.length).toEqual(5);
  const query4 = await selfQueryRetriever.getRelevantDocuments("What is what");
  // console.log(query4);
  expect(query4.length).toEqual(5);
});

// Same as the "and" merge test, but the default filter is expressed as a
// plain metadata object rather than an RPC callback.
test("Supabase Store Self Query Retriever Test With Default Filter And Merge Operator, Object default filter", async () => {
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: {
        type: "movie",
        year: 1993,
        rating: 7.7,
        genre: "science fiction",
      },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: {
        type: "movie",
        year: 2010,
        director: "Christopher Nolan",
        rating: 8.2,
      },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: {
        type: "movie",
        year: 2006,
        director: "Satoshi Kon",
        rating: 8.6,
      },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: {
        type: "movie",
        year: 2019,
        director: "Greta Gerwig",
        rating: 8.3,
        genre: "drama",
      },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { type: "movie", year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        type: "movie",
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
    new Document({
      pageContent: "10x the previous gecs",
      metadata: {
        year: 2023,
        title: "10000 gecs",
        artist: "100 gecs",
        rating: 9.9,
        type: "album",
      },
    }),
  ];
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  if (
    !process.env.SUPABASE_VECTOR_STORE_URL ||
    !process.env.SUPABASE_VECTOR_STORE_PRIVATE_KEY
  ) {
    throw new Error(
      "Supabase URL or private key not set. Please set it in the .env file"
    );
  }
  const embeddings = new OpenAIEmbeddings();
  const llm = new OpenAI();
  const documentContents = "Brief summary of a movie";
  const client = createClient(
    process.env.SUPABASE_VECTOR_STORE_URL,
    process.env.SUPABASE_VECTOR_STORE_PRIVATE_KEY
  );
  const vectorStore = new SupabaseVectorStore(embeddings, { client });
  // idempotency: fixed ids so re-running the test upserts the same rows
  const opts = { ids: docs.map((_, idx) => idx) };
  await vectorStore.addDocuments(docs, opts);
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new SupabaseTranslator(),
    searchParams: {
      filter: {
        type: "movie",
      },
      mergeFiltersOperator: "and", // Supabase self-query filter does not support "or" operator for merging two filters
      k: docs.length,
    },
  });
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are less than 90 minutes?"
  );
  // console.log(query1);
  expect(query1.length).toEqual(0);
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?"
  );
  // console.log(query2);
  expect(query2.length).toEqual(2);
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  // console.log(query3);
  expect(query3.length).toEqual(1);
  const query4 = await selfQueryRetriever.getRelevantDocuments("What is what");
  // query4 should return no documents, since we can't use the "or" operator
  // console.log(query4);
  expect(query4.length).toEqual(0);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/tests/hnswlib_self_query.int.test.ts
import { test } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { AttributeInfo } from "langchain/chains/query_constructor";
import {
  FunctionalTranslator,
  SelfQueryRetriever,
} from "langchain/retrievers/self_query";
import { HNSWLib } from "../../vectorstores/hnswlib.js";

// Integration test for the self-query retriever over an in-memory HNSWLib
// store, using the FunctionalTranslator. Requires an OpenAI API key.
test("HNSWLib Store Self Query Retriever Test", async () => {
  // Movie corpus; metadata fields correspond to `attributeInfo` below.
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
  ];
  // Attribute descriptions the LLM uses to construct structured filters.
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  const embeddings = new OpenAIEmbeddings();
  // Low temperature keeps the generated structured queries deterministic.
  const llm = new OpenAI({
    modelName: "gpt-3.5-turbo",
    temperature: 0.01,
  });
  const documentContents = "Brief summary of a movie";
  const vectorStore = await HNSWLib.fromDocuments(docs, embeddings);
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new FunctionalTranslator(),
  });
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are less than 90 minutes?"
  );
  // console.log(query1);
  expect(query1.length).toEqual(0);
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?"
  );
  // console.log(query2);
  expect(query2.length).toEqual(2);
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  // console.log(query3);
  expect(query3.length).toEqual(1);
});

// The attribute schema here does not match the movie metadata at all, so no
// usable filter can be generated; the retriever should return no documents
// rather than throwing.
test("HNSWLib shouldn't throw an error if a filter can't be generated, but should return no items", async () => {
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
  ];
  // Deliberately unrelated attribute schema (building-code rules, not movies).
  const attributeInfo = [
    {
      name: "sectionNumber",
      description: "The section number of the rule",
      type: "number",
    },
    {
      name: "sectionTitle",
      description: "The section title of the rule",
      type: "string",
    },
    {
      name: "sectionScope",
      description: "The section scope of the rule",
      type: "string",
    },
    {
      name: "codeRule",
      description: "The code rule of the rule",
      type: "string",
    },
  ];
  const embeddings = new OpenAIEmbeddings();
  // Low temperature keeps the generated structured queries deterministic.
  const llm = new OpenAI({
    modelName: "gpt-3.5-turbo",
    temperature: 0.01,
  });
  const documentContents = "Brief summary of a movie";
  const vectorStore = await HNSWLib.fromDocuments(docs, embeddings);
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new FunctionalTranslator(),
  });
  const query1 = await selfQueryRetriever.getRelevantDocuments(
    "Which sectionTitle talks about pools?"
  );
  // console.log(query1);
  expect(query1.length).toEqual(0);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query
lc_public_repos/langchainjs/libs/langchain-community/src/structured_query/tests/qdrant_self_query.int.test.ts
import { test } from "@jest/globals"; import { Document } from "@langchain/core/documents"; import { OpenAIEmbeddings, OpenAI } from "@langchain/openai"; import { AttributeInfo } from "langchain/chains/query_constructor"; import { SelfQueryRetriever } from "langchain/retrievers/self_query"; import { QdrantClient } from "@qdrant/js-client-rest"; import { QdrantVectorStore } from "../../vectorstores/qdrant.js"; import { QdrantTranslator } from "../qdrant.js"; test("Qdrant Vector Store Self Query Retriever Test", async () => { const docs = [ new Document({ pageContent: "A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata: { year: 1993, rating: 7.7, genre: "science fiction" }, }), new Document({ pageContent: "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 }, }), new Document({ pageContent: "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 }, }), new Document({ pageContent: "A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 }, }), new Document({ pageContent: "Toys come alive and have a blast doing so", metadata: { year: 1995, genre: "animated" }, }), new Document({ pageContent: "Three men walk into the Zone, three men walk out of the Zone", metadata: { year: 1979, director: "Andrei Tarkovsky", genre: "science fiction", rating: 9.9, }, }), ]; const attributeInfo: AttributeInfo[] = [ { name: "genre", description: "The genre of the movie", type: "string or array of strings", }, { name: "year", description: "The year the movie was released", type: "number", }, { name: "director", description: "The director of the movie", type: "string", }, { name: "rating", description: "The rating of the movie (1-10)", type: "number", }, { 
name: "length", description: "The length of the movie in minutes", type: "number", }, ]; const embeddings = new OpenAIEmbeddings(); const llm = new OpenAI({ modelName: "gpt-3.5-turbo", temperature: 0, }); const documentContents = "Brief summary of a movie"; const client = new QdrantClient({ url: "http://127.0.0.1:6333" }); const vectorStore = await QdrantVectorStore.fromDocuments(docs, embeddings, { client, collectionName: crypto.randomUUID(), }); const selfQueryRetriever = SelfQueryRetriever.fromLLM({ llm, vectorStore, documentContents, attributeInfo, structuredQueryTranslator: new QdrantTranslator(), }); const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); expect(query1.length).toEqual(0); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); expect(query2.length).toEqual(2); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which cool movies are directed by Greta Gerwig?" 
); expect(query3.length).toEqual(1); }); test("Qdrant Vector Store Self Query Retriever Test With Default Filter Or Merge Operator", async () => { const docs = [ new Document({ pageContent: "A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata: { year: 1993, rating: 7.7, genre: "science fiction", type: "movie", }, }), new Document({ pageContent: "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2, type: "movie", }, }), new Document({ pageContent: "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6, type: "movie", }, }), new Document({ pageContent: "A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3, type: "movie", }, }), new Document({ pageContent: "Toys come alive and have a blast doing so", metadata: { year: 1995, genre: "animated", type: "movie" }, }), new Document({ pageContent: "Three men walk into the Zone, three men walk out of the Zone", metadata: { year: 1979, director: "Andrei Tarkovsky", genre: "science fiction", rating: 9.9, type: "movie", }, }), new Document({ pageContent: "10x the previous gecs", metadata: { year: 2023, title: "10000 gecs", artist: "100 gecs", rating: 9.9, type: "album", }, }), ]; const attributeInfo: AttributeInfo[] = [ { name: "genre", description: "The genre of the movie", type: "string or array of strings", }, { name: "year", description: "The year the movie was released", type: "number", }, { name: "director", description: "The director of the movie", type: "string", }, { name: "rating", description: "The rating of the movie (1-10)", type: "number", }, { name: "length", description: "The length of the movie in minutes", type: "number", }, ]; const embeddings = new OpenAIEmbeddings(); const 
llm = new OpenAI({ modelName: "gpt-3.5-turbo", }); const documentContents = "Brief summary of a movie"; const client = new QdrantClient({ url: "http://127.0.0.1:6333" }); const vectorStore = await QdrantVectorStore.fromDocuments(docs, embeddings, { client, collectionName: crypto.randomUUID(), }); const selfQueryRetriever = SelfQueryRetriever.fromLLM({ llm, vectorStore, documentContents, attributeInfo, structuredQueryTranslator: new QdrantTranslator(), searchParams: { filter: { must: [{ key: "metadata.type", match: { value: "movie" } }], }, mergeFiltersOperator: "or", k: docs.length, }, }); const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); expect(query1.length).toEqual(6); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); expect(query2.length).toEqual(7); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); expect(query3.length).toEqual(6); const query4 = await selfQueryRetriever.getRelevantDocuments( "Awawawa au au au wawawawa hello?" 
); expect(query4.length).toEqual(6); // this one should return documents since default filter takes over }); test("Qdrant Vector Store Self Query Retriever Test With Default Filter And Merge Operator", async () => { const docs = [ new Document({ pageContent: "A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata: { year: 1993, rating: 7.7, genre: "science fiction", type: "movie", }, }), new Document({ pageContent: "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2, type: "movie", }, }), new Document({ pageContent: "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6, type: "movie", }, }), new Document({ pageContent: "A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3, type: "movie", }, }), new Document({ pageContent: "Toys come alive and have a blast doing so", metadata: { year: 1995, genre: "animated", type: "movie" }, }), new Document({ pageContent: "Three men walk into the Zone, three men walk out of the Zone", metadata: { year: 1979, director: "Andrei Tarkovsky", genre: "science fiction", rating: 9.9, type: "movie", }, }), new Document({ pageContent: "10x the previous gecs", metadata: { year: 2023, title: "10000 gecs", artist: "100 gecs", rating: 9.9, type: "album", }, }), ]; const attributeInfo: AttributeInfo[] = [ { name: "genre", description: "The genre of the movie", type: "string or array of strings", }, { name: "year", description: "The year the movie was released", type: "number", }, { name: "director", description: "The director of the movie", type: "string", }, { name: "rating", description: "The rating of the movie (1-10)", type: "number", }, { name: "length", description: "The length of the movie in minutes", 
type: "number", }, ]; const embeddings = new OpenAIEmbeddings(); const llm = new OpenAI({ modelName: "gpt-3.5-turbo", }); const documentContents = "Brief summary of a movie"; const client = new QdrantClient({ url: "http://127.0.0.1:6333" }); const vectorStore = await QdrantVectorStore.fromDocuments(docs, embeddings, { client, collectionName: crypto.randomUUID(), }); const selfQueryRetriever = SelfQueryRetriever.fromLLM({ llm, vectorStore, documentContents, attributeInfo, structuredQueryTranslator: new QdrantTranslator(), searchParams: { filter: { must: [{ key: "metadata.type", match: { value: "movie" } }], }, mergeFiltersOperator: "and", k: docs.length, }, }); const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); expect(query1.length).toEqual(0); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); expect(query2.length).toEqual(2); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which cool movies are directed by Greta Gerwig?" ); expect(query3.length).toEqual(1); const query4 = await selfQueryRetriever.getRelevantDocuments( "Awawawa au au au wawawawa hello?" ); expect(query4.length).toBeGreaterThan(0); // this one should return documents since default filter takes over });
0
lc_public_repos/langchainjs/libs/langchain-community/src/agents
lc_public_repos/langchainjs/libs/langchain-community/src/agents/toolkits/aws_sfn.ts
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { ToolInterface } from "@langchain/core/tools";
import { renderTemplate } from "@langchain/core/prompts";
import { LLMChain } from "langchain/chains";
import {
  AgentExecutor,
  ZeroShotAgent,
  ZeroShotCreatePromptArgs,
} from "langchain/agents";
import {
  SfnConfig,
  StartExecutionAWSSfnTool,
  DescribeExecutionAWSSfnTool,
  SendTaskSuccessAWSSfnTool,
} from "../../tools/aws_sfn.js";
import { Toolkit } from "./base.js";

/**
 * Interface for the arguments required to create an AWS Step Functions
 * toolkit.
 */
export interface AWSSfnToolkitArgs {
  name: string;
  description: string;
  stateMachineArn: string;
  asl?: string;
  llm?: BaseLanguageModelInterface;
}

/**
 * Builds the client configuration shared by the Describe/SendTaskSuccess
 * tools: include `region` when given, and the static credential pair only
 * when BOTH `accessKeyId` and `secretAccessKey` are present. Previously this
 * object literal was duplicated verbatim for each tool; a fresh object is
 * returned per call so the tools never share (and cannot mutate) one config.
 */
function toSfnClientConfig(args: SfnConfig) {
  return {
    ...(args.region ? { region: args.region } : {}),
    ...(args.accessKeyId && args.secretAccessKey
      ? {
          accessKeyId: args.accessKeyId,
          secretAccessKey: args.secretAccessKey,
        }
      : {}),
  };
}

/**
 * Class representing a toolkit for interacting with AWS Step Functions.
 * It initializes the AWS Step Functions tools and provides them as tools
 * for the agent.
 * @example
 * ```typescript
 *
 * const toolkit = new AWSSfnToolkit({
 *   name: "onboard-new-client-workflow",
 *   description:
 *     "Onboard new client workflow. Can also be used to get status of any executing workflow or state machine.",
 *   stateMachineArn:
 *     "arn:aws:states:us-east-1:1234567890:stateMachine:my-state-machine",
 *   region: "<your Sfn's region>",
 *   accessKeyId: "<your access key id>",
 *   secretAccessKey: "<your secret access key>",
 * });
 *
 *
 * const result = await toolkit.invoke({
 *   input: "Onboard john doe (john@example.com) as a new client.",
 * });
 *
 * ```
 */
export class AWSSfnToolkit extends Toolkit {
  tools: ToolInterface[];

  stateMachineArn: string;

  // NOTE(review): only assigned when `args.asl` is provided; although typed
  // `string`, callers should treat it as possibly uninitialized
  // (`strictPropertyInitialization` is off in this package).
  asl: string;

  constructor(args: AWSSfnToolkitArgs & SfnConfig) {
    super();
    this.stateMachineArn = args.stateMachineArn;
    if (args.asl) {
      this.asl = args.asl;
    }
    this.tools = [
      new StartExecutionAWSSfnTool({
        name: args.name,
        description: StartExecutionAWSSfnTool.formatDescription(
          args.name,
          args.description
        ),
        stateMachineArn: args.stateMachineArn,
      }),
      // Each tool gets its own region/credentials config object.
      new DescribeExecutionAWSSfnTool(toSfnClientConfig(args)),
      new SendTaskSuccessAWSSfnTool(toSfnClientConfig(args)),
    ];
  }
}

export const SFN_PREFIX = `You are an agent designed to interact with AWS Step Functions state machines to execute and coordinate asynchronous workflows and tasks.
Given an input question, command, or task use the appropriate tool to execute a command to interact with AWS Step Functions and return the result.
You have access to tools for interacting with AWS Step Functions.
Given an input question, command, or task use the correct tool to complete the task.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.

If the question does not seem related to AWS Step Functions or an existing state machine, just return "I don't know" as the answer.`;

export const SFN_SUFFIX = `Begin!

Question: {input}
Thought: I should look at state machines within AWS Step Functions to see what actions I can perform.
{agent_scratchpad}`;

export interface AWSSfnCreatePromptArgs extends ZeroShotCreatePromptArgs {}

/**
 * Creates a ZeroShot agent wired to the toolkit's Step Functions tools.
 *
 * @param llm - Language model used to drive the agent.
 * @param toolkit - Toolkit whose tools the agent may call.
 * @param args - Optional prompt overrides (prefix/suffix/input variables).
 * @returns An AgentExecutor that returns intermediate steps.
 */
export function createAWSSfnAgent(
  llm: BaseLanguageModelInterface,
  toolkit: AWSSfnToolkit,
  args?: AWSSfnCreatePromptArgs
) {
  const {
    prefix = SFN_PREFIX,
    suffix = SFN_SUFFIX,
    inputVariables = ["input", "agent_scratchpad"],
  } = args ?? {};
  const { tools } = toolkit;
  // Render the prefix eagerly so any literal braces in it are resolved
  // before the prompt template is built.
  const formattedPrefix = renderTemplate(prefix, "f-string", {});
  const prompt = ZeroShotAgent.createPrompt(tools, {
    prefix: formattedPrefix,
    suffix,
    inputVariables,
  });
  const chain = new LLMChain({ prompt, llm });
  const agent = new ZeroShotAgent({
    llmChain: chain,
    allowedTools: tools.map((t) => t.name),
  });
  return AgentExecutor.fromAgentAndTools({
    agent,
    tools,
    returnIntermediateSteps: true,
  });
}
0
lc_public_repos/langchainjs/libs/langchain-community/src/agents
lc_public_repos/langchainjs/libs/langchain-community/src/agents/toolkits/base.ts
// Re-export the core toolkit base class under its legacy local name so
// existing `import { Toolkit } from ".../agents/toolkits/base.js"` call
// sites keep working after the class moved into @langchain/core.
export { BaseToolkit as Toolkit } from "@langchain/core/tools";
0
lc_public_repos/langchainjs/libs/langchain-community/src/agents
lc_public_repos/langchainjs/libs/langchain-community/src/agents/toolkits/stagehand.ts
import { Tool, BaseToolkit as Toolkit, ToolInterface, StructuredTool, } from "@langchain/core/tools"; import { Stagehand } from "@browserbasehq/stagehand"; import { AnyZodObject, z } from "zod"; // Documentation is here: // https://js.langchain.com/docs/integrations/tools/stagehand abstract class StagehandToolBase extends Tool { protected stagehand?: Stagehand; private localStagehand?: Stagehand; constructor(stagehandInstance?: Stagehand) { super(); this.stagehand = stagehandInstance; } protected async getStagehand(): Promise<Stagehand> { if (this.stagehand) return this.stagehand; if (!this.localStagehand) { this.localStagehand = new Stagehand({ env: "LOCAL", enableCaching: true, }); await this.localStagehand.init(); } return this.localStagehand; } } function isErrorWithMessage(error: unknown): error is { message: string } { return ( typeof error === "object" && error !== null && "message" in error && typeof (error as { message: unknown }).message === "string" ); } export class StagehandNavigateTool extends StagehandToolBase { name = "stagehand_navigate"; description = "Use this tool to navigate to a specific URL using Stagehand. The input should be a valid URL as a string."; async _call(input: string): Promise<string> { const stagehand = await this.getStagehand(); try { await stagehand.page.goto(input); return `Successfully navigated to ${input}.`; } catch (error: unknown) { const message = isErrorWithMessage(error) ? error.message : String(error); return `Failed to navigate: ${message}`; } } } export class StagehandActTool extends StagehandToolBase { name = "stagehand_act"; description = "Use this tool to perform an action on the current web page using Stagehand. 
The input should be a string describing the action to perform."; async _call(input: string): Promise<string> { const stagehand = await this.getStagehand(); const result = await stagehand.act({ action: input }); if (result.success) { return `Action performed successfully: ${result.message}`; } else { return `Failed to perform action: ${result.message}`; } } } export class StagehandExtractTool extends StructuredTool { name = "stagehand_extract"; description = "Use this tool to extract structured information from the current web page using Stagehand. The input should include an 'instruction' string and a 'schema' object representing the extraction schema in JSON Schema format."; // Define the input schema for the tool schema = z.object({ instruction: z.string().describe("Instruction on what to extract"), schema: z .record(z.any()) .describe("Extraction schema in JSON Schema format"), }); private stagehand?: Stagehand; constructor(stagehandInstance?: Stagehand) { super(); this.stagehand = stagehandInstance; } async _call(input: { instruction: string; schema: AnyZodObject; }): Promise<string> { const stagehand = await this.getStagehand(); const { instruction, schema } = input; try { const result = await stagehand.extract({ instruction, schema, // Assuming Stagehand accepts the schema in JSON Schema format }); return JSON.stringify(result); } catch (error: unknown) { const message = isErrorWithMessage(error) ? error.message : String(error); return `Failed to extract information: ${message}`; } } protected async getStagehand(): Promise<Stagehand> { if (this.stagehand) return this.stagehand; // Initialize local Stagehand instance if not provided this.stagehand = new Stagehand({ env: "LOCAL", enableCaching: true, }); await this.stagehand.init(); return this.stagehand; } } export class StagehandObserveTool extends StagehandToolBase { name = "stagehand_observe"; description = "Use this tool to observe the current web page and retrieve possible actions using Stagehand. 
The input can be an optional instruction string."; async _call(input: string): Promise<string> { const stagehand = await this.getStagehand(); const instruction = input || undefined; try { const result = await stagehand.observe({ instruction }); return JSON.stringify(result); } catch (error: unknown) { const message = isErrorWithMessage(error) ? error.message : String(error); return `Failed to observe: ${message}`; } } } export class StagehandToolkit extends Toolkit { tools: ToolInterface[]; stagehand?: Stagehand; constructor(stagehand?: Stagehand) { super(); this.stagehand = stagehand; this.tools = this.initializeTools(); } private initializeTools(): ToolInterface[] { return [ new StagehandNavigateTool(this.stagehand), new StagehandActTool(this.stagehand), new StagehandExtractTool(this.stagehand), new StagehandObserveTool(this.stagehand), ]; } static async fromStagehand(stagehand: Stagehand): Promise<StagehandToolkit> { return new StagehandToolkit(stagehand); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/agents/toolkits
lc_public_repos/langchainjs/libs/langchain-community/src/agents/toolkits/connery/index.ts
import { Tool, ToolInterface } from "@langchain/core/tools"; import { Toolkit } from "../base.js"; import { ConneryService } from "../../../tools/connery.js"; /** * ConneryToolkit provides access to all the available actions from the Connery Runner. * @extends Toolkit */ export class ConneryToolkit extends Toolkit { tools: ToolInterface[]; /** * Creates a ConneryToolkit instance based on the provided ConneryService instance. * It populates the tools property of the ConneryToolkit instance with the list of * available tools from the Connery Runner. * @param conneryService The ConneryService instance. * @returns A Promise that resolves to a ConneryToolkit instance. */ static async createInstance( conneryService: ConneryService ): Promise<ConneryToolkit> { const toolkit = new ConneryToolkit(); toolkit.tools = []; const actions = await conneryService.listActions(); toolkit.tools.push(...(actions as unknown as Tool[])); // This is a hack to make TypeScript happy, as TypeScript doesn't know that ConneryAction (StructuredTool) extends Tool. return toolkit; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src/agents/toolkits
lc_public_repos/langchainjs/libs/langchain-community/src/agents/toolkits/tests/stagehand.int.test.ts
import { expect, describe, test, beforeEach, afterEach } from "@jest/globals"; import { Stagehand } from "@browserbasehq/stagehand"; import { z } from "zod"; import { ChatOpenAI } from "@langchain/openai"; // import { createReactAgent } from "@langchain/langgraph/prebuilt"; import { StagehandToolkit } from "../stagehand.js"; describe("StagehandToolkit Integration Tests", () => { let stagehand: Stagehand; let toolkit: StagehandToolkit; beforeEach(async () => { stagehand = new Stagehand({ env: "LOCAL", headless: false, verbose: 2, debugDom: true, enableCaching: false, }); await stagehand.init({ modelName: "gpt-4o-mini" }); toolkit = await StagehandToolkit.fromStagehand(stagehand); }); afterEach(async () => { await stagehand.context.close().catch(() => {}); }); test("should perform basic navigation and search", async () => { const navigateTool = toolkit.tools.find( (t) => t.name === "stagehand_navigate" ); if (!navigateTool) { throw new Error("Navigate tool not found"); } await navigateTool.invoke("https://www.google.com"); const actionTool = toolkit.tools.find((t) => t.name === "stagehand_act"); if (!actionTool) { throw new Error("Action tool not found"); } await actionTool.invoke('Search for "OpenAI"'); const currentUrl = stagehand.page.url(); expect(currentUrl).toContain("google.com/search?q=OpenAI"); }); test("should extract structured data from webpage", async () => { const navigateTool = toolkit.tools.find( (t) => t.name === "stagehand_navigate" ); if (!navigateTool) { throw new Error("Navigate tool not found"); } await navigateTool.invoke( "https://github.com/facebook/react/graphs/contributors" ); const extractTool = toolkit.tools.find( (t) => t.name === "stagehand_extract" ); if (!extractTool) { throw new Error("Extract tool not found"); } const input = { instruction: "extract the top contributor", schema: z.object({ username: z.string(), url: z.string(), }), }; const result = await extractTool.invoke(input); const parsedResult = JSON.parse(result); const { 
username, url } = parsedResult; expect(username).toBeDefined(); expect(url).toBeDefined(); }); test("should handle tab navigation", async () => { const navigateTool = toolkit.tools.find( (t) => t.name === "stagehand_navigate" ); if (!navigateTool) { throw new Error("Navigate tool not found"); } await navigateTool.invoke("https://www.google.com/"); const actionTool = toolkit.tools.find((t) => t.name === "stagehand_act"); if (!actionTool) { throw new Error("Action tool not found"); } await actionTool.invoke("click on the about page"); const currentUrl = stagehand.page.url(); expect(currentUrl).toContain("about"); }); test("should use observe tool to get page information", async () => { await stagehand.page.goto("https://github.com/browserbase/stagehand"); const observeTool = toolkit.tools.find( (t) => t.name === "stagehand_observe" ); if (!observeTool) { throw new Error("Observe tool not found"); } const result = await observeTool.invoke( "What actions can be performed on the repository page?" 
); const observations = JSON.parse(result); expect(Array.isArray(observations)).toBe(true); expect(observations.length).toBeGreaterThan(0); expect(observations[0]).toHaveProperty("description"); expect(observations[0]).toHaveProperty("selector"); expect(typeof observations[0].description).toBe("string"); expect(typeof observations[0].selector).toBe("string"); }); test("should perform navigation and search using llm with tools", async () => { const llm = new ChatOpenAI({ temperature: 0 }); if (!llm.bindTools) { throw new Error("Language model does not support tools."); } // Bind tools to the LLM const llmWithTools = llm.bindTools(toolkit.tools); // Execute queries atomically const result = await llmWithTools.invoke( "Navigate to https://www.google.com" ); expect(result.tool_calls).toBeDefined(); expect(result.tool_calls?.length).toBe(1); const toolCall = result.tool_calls?.[0]; expect(toolCall?.name).toBe("stagehand_navigate"); const navigateTool = toolkit.tools.find( (t) => t.name === "stagehand_navigate" ); if (!navigateTool) { throw new Error("Navigate tool not found"); } const navigateResult = await navigateTool?.invoke(toolCall?.args?.input); expect(navigateResult).toContain("Successfully navigated"); const result2 = await llmWithTools.invoke('Search for "OpenAI"'); expect(result2.tool_calls).toBeDefined(); expect(result2.tool_calls?.length).toBe(1); const actionToolCall = result2.tool_calls?.[0]; expect(actionToolCall?.name).toBe("stagehand_act"); expect(actionToolCall?.args?.input).toBe("search for OpenAI"); const actionTool = toolkit.tools.find((t) => t.name === "stagehand_act"); if (!actionTool) { throw new Error("Action tool not found"); } const actionResult = await actionTool.invoke(actionToolCall?.args?.input); expect(actionResult).toContain("successfully"); // Verify the current URL const currentUrl = stagehand.page.url(); expect(currentUrl).toContain("google.com/search?q=OpenAI"); }); // test("should work with langgraph", async () => { // const actTool 
= toolkit.tools.find((t) => t.name === "stagehand_act"); // const navigateTool = toolkit.tools.find( // (t) => t.name === "stagehand_navigate" // ); // if (!actTool || !navigateTool) { // throw new Error("Required tools not found"); // } // const tools = [actTool, navigateTool]; // const model = new ChatOpenAI({ // modelName: "gpt-4", // temperature: 0, // }); // const agent = createReactAgent({ // llm: model, // tools, // }); // // Navigate to Google // const inputs1 = { // messages: [ // { // role: "user", // content: "Navigate to https://www.google.com", // }, // ], // }; // const stream1 = await agent.stream(inputs1, { // streamMode: "values", // }); // for await (const { messages } of stream1) { // const msg = // messages && messages.length > 0 // ? messages[messages.length - 1] // : undefined; // if (msg?.content) { // console.log(msg.content); // } else if (msg?.tool_calls && msg.tool_calls.length > 0) { // console.log(msg.tool_calls); // } else { // console.log(msg); // } // } // // Click through to careers page and search // const inputs2 = { // messages: [ // { // role: "user", // content: "Click on the About page", // }, // ], // }; // const stream2 = await agent.stream(inputs2, { // streamMode: "values", // }); // for await (const { messages } of stream2) { // const msg = messages ? messages[messages.length - 1] : undefined; // if (msg?.content) { // console.log(msg.content); // } else if (msg?.tool_calls && msg.tool_calls.length > 0) { // console.log(msg.tool_calls); // } else { // console.log(msg); // } // } // const currentUrl = stagehand.page.url(); // expect(currentUrl).toContain("about"); // }); });
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/pinecone.ts
/* eslint-disable no-process-env */ import * as uuid from "uuid"; import flatten from "flat"; import { RecordMetadata, PineconeRecord, Index as PineconeIndex, } from "@pinecone-database/pinecone"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { AsyncCaller, AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; import { chunkArray } from "@langchain/core/utils/chunk_array"; /** @deprecated Install and import from @langchain/pinecone instead. */ // eslint-disable-next-line @typescript-eslint/ban-types, @typescript-eslint/no-explicit-any type PineconeMetadata = Record<string, any>; /** @deprecated Install and import from @langchain/pinecone instead. */ export interface PineconeLibArgs extends AsyncCallerParams { pineconeIndex: PineconeIndex; textKey?: string; namespace?: string; filter?: PineconeMetadata; } /** * @deprecated Install and import from @langchain/pinecone instead. * Type that defines the parameters for the delete operation in the * PineconeStore class. It includes ids, filter, deleteAll flag, and namespace. */ export type PineconeDeleteParams = { ids?: string[]; deleteAll?: boolean; filter?: object; namespace?: string; }; /** * @deprecated Install and import from @langchain/pinecone instead. * Class that extends the VectorStore class and provides methods to * interact with the Pinecone vector database. 
*/ export class PineconeStore extends VectorStore { declare FilterType: PineconeMetadata; textKey: string; namespace?: string; pineconeIndex: PineconeIndex; filter?: PineconeMetadata; caller: AsyncCaller; _vectorstoreType(): string { return "pinecone"; } constructor(embeddings: EmbeddingsInterface, args: PineconeLibArgs) { super(embeddings, args); this.embeddings = embeddings; const { namespace, pineconeIndex, textKey, filter, ...asyncCallerArgs } = args; this.namespace = namespace; this.pineconeIndex = pineconeIndex; this.textKey = textKey ?? "text"; this.filter = filter; this.caller = new AsyncCaller(asyncCallerArgs); } /** * Method that adds documents to the Pinecone database. * @param documents Array of documents to add to the Pinecone database. * @param options Optional ids for the documents. * @returns Promise that resolves with the ids of the added documents. */ async addDocuments( documents: Document[], options?: { ids?: string[] } | string[] ) { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } /** * Method that adds vectors to the Pinecone database. * @param vectors Array of vectors to add to the Pinecone database. * @param documents Array of documents associated with the vectors. * @param options Optional ids for the vectors. * @returns Promise that resolves with the ids of the added vectors. */ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } | string[] ) { const ids = Array.isArray(options) ? options : options?.ids; const documentIds = ids == null ? 
documents.map(() => uuid.v4()) : ids; const pineconeVectors = vectors.map((values, idx) => { // Pinecone doesn't support nested objects, so we flatten them const documentMetadata = { ...documents[idx].metadata }; // preserve string arrays which are allowed const stringArrays: Record<string, string[]> = {}; for (const key of Object.keys(documentMetadata)) { if ( Array.isArray(documentMetadata[key]) && // eslint-disable-next-line @typescript-eslint/ban-types, @typescript-eslint/no-explicit-any documentMetadata[key].every((el: any) => typeof el === "string") ) { stringArrays[key] = documentMetadata[key]; delete documentMetadata[key]; } } const metadata: { [key: string]: string | number | boolean | string[] | null; } = { ...flatten(documentMetadata), ...stringArrays, [this.textKey]: documents[idx].pageContent, }; // Pinecone doesn't support null values, so we remove them for (const key of Object.keys(metadata)) { if (metadata[key] == null) { delete metadata[key]; } else if ( typeof metadata[key] === "object" && Object.keys(metadata[key] as unknown as object).length === 0 ) { delete metadata[key]; } } return { id: documentIds[idx], metadata, values, } as PineconeRecord<RecordMetadata>; }); const namespace = this.pineconeIndex.namespace(this.namespace ?? ""); // Pinecone recommends a limit of 100 vectors per upsert request const chunkSize = 100; const chunkedVectors = chunkArray(pineconeVectors, chunkSize); const batchRequests = chunkedVectors.map((chunk) => this.caller.call(async () => namespace.upsert(chunk)) ); await Promise.all(batchRequests); return documentIds; } /** * Method that deletes vectors from the Pinecone database. * @param params Parameters for the delete operation. * @returns Promise that resolves when the delete operation is complete. */ async delete(params: PineconeDeleteParams): Promise<void> { const { deleteAll, ids, filter } = params; const namespace = this.pineconeIndex.namespace(this.namespace ?? 
""); if (deleteAll) { await namespace.deleteAll(); } else if (ids) { const batchSize = 1000; for (let i = 0; i < ids.length; i += batchSize) { const batchIds = ids.slice(i, i + batchSize); await namespace.deleteMany(batchIds); } } else if (filter) { await namespace.deleteMany(filter); } else { throw new Error("Either ids or delete_all must be provided."); } } protected async _runPineconeQuery( query: number[], k: number, filter?: PineconeMetadata, options?: { includeValues: boolean } ) { if (filter && this.filter) { throw new Error("cannot provide both `filter` and `this.filter`"); } const _filter = filter ?? this.filter; const namespace = this.pineconeIndex.namespace(this.namespace ?? ""); const results = await namespace.query({ includeMetadata: true, topK: k, vector: query, filter: _filter, ...options, }); return results; } /** * Method that performs a similarity search in the Pinecone database and * returns the results along with their scores. * @param query Query vector for the similarity search. * @param k Number of top results to return. * @param filter Optional filter to apply to the search. * @returns Promise that resolves with an array of documents and their scores. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: PineconeMetadata ): Promise<[Document, number][]> { const results = await this._runPineconeQuery(query, k, filter); const result: [Document, number][] = []; if (results.matches) { for (const res of results.matches) { const { [this.textKey]: pageContent, ...metadata } = (res.metadata ?? {}) as PineconeMetadata; if (res.score) { result.push([new Document({ metadata, pageContent }), res.score]); } } } return result; } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. * * @param {string} query - Text to look up documents similar to. * @param {number} options.k - Number of documents to return. 
* @param {number} options.fetchK=20 - Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda=0.5 - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. * @param {PineconeMetadata} options.filter - Optional filter to apply to the search. * * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance. */ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { const queryEmbedding = await this.embeddings.embedQuery(query); const results = await this._runPineconeQuery( queryEmbedding, options.fetchK ?? 20, options.filter, { includeValues: true } ); const matches = results?.matches ?? []; const embeddingList = matches.map((match) => match.values); const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, options.lambda, options.k ); const topMmrMatches = mmrIndexes.map((idx) => matches[idx]); const finalResult: Document[] = []; for (const res of topMmrMatches) { const { [this.textKey]: pageContent, ...metadata } = (res.metadata ?? {}) as PineconeMetadata; if (res.score) { finalResult.push(new Document({ metadata, pageContent })); } } return finalResult; } /** * Static method that creates a new instance of the PineconeStore class * from texts. * @param texts Array of texts to add to the Pinecone database. * @param metadatas Metadata associated with the texts. * @param embeddings Embeddings to use for the texts. * @param dbConfig Configuration for the Pinecone database. * @returns Promise that resolves with a new instance of the PineconeStore class. 
*/ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: | { pineconeIndex: PineconeIndex; textKey?: string; namespace?: string | undefined; } | PineconeLibArgs ): Promise<PineconeStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } const args: PineconeLibArgs = { pineconeIndex: dbConfig.pineconeIndex, textKey: dbConfig.textKey, namespace: dbConfig.namespace, }; return PineconeStore.fromDocuments(docs, embeddings, args); } /** * Static method that creates a new instance of the PineconeStore class * from documents. * @param docs Array of documents to add to the Pinecone database. * @param embeddings Embeddings to use for the documents. * @param dbConfig Configuration for the Pinecone database. * @returns Promise that resolves with a new instance of the PineconeStore class. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: PineconeLibArgs ): Promise<PineconeStore> { const args = dbConfig; args.textKey = dbConfig.textKey ?? "text"; const instance = new this(embeddings, args); await instance.addDocuments(docs); return instance; } /** * Static method that creates a new instance of the PineconeStore class * from an existing index. * @param embeddings Embeddings to use for the documents. * @param dbConfig Configuration for the Pinecone database. * @returns Promise that resolves with a new instance of the PineconeStore class. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig: PineconeLibArgs ): Promise<PineconeStore> { const instance = new this(embeddings, dbConfig); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/voy.ts
import type { Voy as VoyOriginClient, SearchResult } from "voy-search"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; export type VoyClient = Omit< VoyOriginClient, "remove" | "size" | "serialize" | "free" >; /** * Internal interface for storing documents mappings. */ interface InternalDoc { embeddings: number[]; document: Document; } /** * Class that extends `VectorStore`. It allows to perform similarity search using * Voi similarity search engine. The class requires passing Voy Client as an input parameter. */ export class VoyVectorStore extends VectorStore { client: VoyClient; numDimensions: number | null = null; docstore: InternalDoc[] = []; _vectorstoreType(): string { return "voi"; } constructor(client: VoyClient, embeddings: EmbeddingsInterface) { super(embeddings, {}); this.client = client; this.embeddings = embeddings; } /** * Adds documents to the Voy database. The documents are embedded using embeddings provided while instantiating the class. * @param documents An array of `Document` instances associated with the vectors. */ async addDocuments(documents: Document[]): Promise<void> { const texts = documents.map(({ pageContent }) => pageContent); if (documents.length === 0) { return; } const firstVector = ( await this.embeddings.embedDocuments(texts.slice(0, 1)) )[0]; if (this.numDimensions === null) { this.numDimensions = firstVector.length; } else if (this.numDimensions !== firstVector.length) { throw new Error( `Vectors must have the same length as the number of dimensions (${this.numDimensions})` ); } const restResults = await this.embeddings.embedDocuments(texts.slice(1)); await this.addVectors([firstVector, ...restResults], documents); } /** * Adds vectors to the Voy database. The vectors are associated with * the provided documents. * @param vectors An array of vectors to be added to the database. 
* @param documents An array of `Document` instances associated with the vectors. */ async addVectors(vectors: number[][], documents: Document[]): Promise<void> { if (vectors.length === 0) { return; } if (this.numDimensions === null) { this.numDimensions = vectors[0].length; } if (vectors.length !== documents.length) { throw new Error(`Vectors and metadata must have the same length`); } if (!vectors.every((v) => v.length === this.numDimensions)) { throw new Error( `Vectors must have the same length as the number of dimensions (${this.numDimensions})` ); } vectors.forEach((item, idx) => { const doc = documents[idx]; this.docstore.push({ embeddings: item, document: doc }); }); const embeddings = this.docstore.map((item, idx) => ({ id: String(idx), embeddings: item.embeddings, title: "", url: "", })); this.client.index({ embeddings }); } /** * Searches for vectors in the Voy database that are similar to the * provided query vector. * @param query The query vector. * @param k The number of similar vectors to return. * @returns A promise that resolves with an array of tuples, each containing a `Document` instance and a similarity score. */ async similaritySearchVectorWithScore(query: number[], k: number) { if (this.numDimensions === null) { throw new Error("There aren't any elements in the index yet."); } if (query.length !== this.numDimensions) { throw new Error( `Query vector must have the same length as the number of dimensions (${this.numDimensions})` ); } const itemsToQuery = Math.min(this.docstore.length, k); if (itemsToQuery > this.docstore.length) { console.warn( `k (${k}) is greater than the number of elements in the index (${this.docstore.length}), setting k to ${itemsToQuery}` ); } const results: SearchResult = this.client.search( new Float32Array(query), itemsToQuery ); return results.neighbors.map( ({ id }, idx) => [this.docstore[parseInt(id, 10)].document, idx] as [Document, number] ); } /** * Method to delete data from the Voy index. 
It can delete data based * on specific IDs or a filter. * @param params Object that includes either an array of IDs or a filter for the data to be deleted. * @returns Promise that resolves when the deletion is complete. */ async delete(params: { deleteAll?: boolean }): Promise<void> { if (params.deleteAll === true) { await this.client.clear(); } else { throw new Error(`You must provide a "deleteAll" parameter.`); } } /** * Creates a new `VoyVectorStore` instance from an array of text strings. The text * strings are converted to `Document` instances and added to the Voy * database. * @param texts An array of text strings. * @param metadatas An array of metadata objects or a single metadata object. If an array is provided, it must have the same length as the `texts` array. * @param embeddings An `Embeddings` instance used to generate embeddings for the documents. * @param client An instance of Voy client to use in the underlying operations. * @returns A promise that resolves with a new `VoyVectorStore` instance. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, client: VoyClient ): Promise<VoyVectorStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return VoyVectorStore.fromDocuments(docs, embeddings, client); } /** * Creates a new `VoyVectorStore` instance from an array of `Document` instances. * The documents are added to the Voy database. * @param docs An array of `Document` instances. * @param embeddings An `Embeddings` instance used to generate embeddings for the documents. * @param client An instance of Voy client to use in the underlying operations. * @returns A promise that resolves with a new `VoyVectorStore` instance. 
*/ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, client: VoyClient ): Promise<VoyVectorStore> { const instance = new VoyVectorStore(client, embeddings); await instance.addDocuments(docs); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/zep.ts
import { DocumentCollection, IDocument, NotFoundError, ZepClient, } from "@getzep/zep-js"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { Callbacks } from "@langchain/core/callbacks/manager"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; import { FakeEmbeddings } from "@langchain/core/utils/testing"; /** * Interface for the arguments required to initialize a ZepVectorStore * instance. */ export interface IZepArgs { collection: DocumentCollection; } /** * Interface for the configuration options for a ZepVectorStore instance. */ export interface IZepConfig { apiUrl: string; apiKey?: string; collectionName: string; description?: string; metadata?: Record<string, never>; embeddingDimensions?: number; isAutoEmbedded?: boolean; } /** * Interface for the parameters required to delete documents from a * ZepVectorStore instance. */ export interface IZepDeleteParams { uuids: string[]; } /** * ZepVectorStore is a VectorStore implementation that uses the Zep long-term memory store as a backend. * * If the collection does not exist, it will be created automatically. * * Requires `zep-js` to be installed: * ```bash * npm install @getzep/zep-js * ``` * * @property {ZepClient} client - The ZepClient instance used to interact with Zep's API. * @property {Promise<void>} initPromise - A promise that resolves when the collection is initialized. * @property {DocumentCollection} collection - The Zep document collection. 
*/ export class ZepVectorStore extends VectorStore { public client: ZepClient; public collection: DocumentCollection; private initPromise: Promise<void>; private autoEmbed = false; constructor(embeddings: EmbeddingsInterface, args: IZepConfig) { super(embeddings, args); this.embeddings = embeddings; // eslint-disable-next-line no-instanceof/no-instanceof if (this.embeddings instanceof FakeEmbeddings) { this.autoEmbed = true; } this.initPromise = this.initCollection(args).catch((err) => { console.error("Error initializing collection:", err); throw err; }); } /** * Initializes the document collection. If the collection does not exist, it creates a new one. * * @param {IZepConfig} args - The configuration object for the Zep API. */ private async initCollection(args: IZepConfig) { this.client = await ZepClient.init(args.apiUrl, args.apiKey); try { this.collection = await this.client.document.getCollection( args.collectionName ); // If the Embedding passed in is fake, but the collection is not auto embedded, throw an error // eslint-disable-next-line no-instanceof/no-instanceof if (!this.collection.is_auto_embedded && this.autoEmbed) { throw new Error(`You can't pass in FakeEmbeddings when collection ${args.collectionName} is not set to auto-embed.`); } } catch (err) { // eslint-disable-next-line no-instanceof/no-instanceof if (err instanceof Error) { // eslint-disable-next-line no-instanceof/no-instanceof if (err instanceof NotFoundError || err.name === "NotFoundError") { await this.createCollection(args); } else { throw err; } } } } /** * Creates a new document collection. * * @param {IZepConfig} args - The configuration object for the Zep API. */ private async createCollection(args: IZepConfig) { if (!args.embeddingDimensions) { throw new Error(`Collection ${args.collectionName} not found. 
You can create a new Collection by providing embeddingDimensions.`); } this.collection = await this.client.document.addCollection({ name: args.collectionName, description: args.description, metadata: args.metadata, embeddingDimensions: args.embeddingDimensions, isAutoEmbedded: this.autoEmbed, }); console.info("Created new collection:", args.collectionName); } /** * Adds vectors and corresponding documents to the collection. * * @param {number[][]} vectors - The vectors to add. * @param {Document[]} documents - The corresponding documents to add. * @returns {Promise<string[]>} - A promise that resolves with the UUIDs of the added documents. */ async addVectors( vectors: number[][], documents: Document[] ): Promise<string[]> { if (!this.autoEmbed && vectors.length === 0) { throw new Error(`Vectors must be provided if autoEmbed is false`); } if (!this.autoEmbed && vectors.length !== documents.length) { throw new Error(`Vectors and documents must have the same length`); } const docs: Array<IDocument> = []; for (let i = 0; i < documents.length; i += 1) { const doc: IDocument = { content: documents[i].pageContent, metadata: documents[i].metadata, embedding: vectors.length > 0 ? vectors[i] : undefined, }; docs.push(doc); } // Wait for collection to be initialized await this.initPromise; return await this.collection.addDocuments(docs); } /** * Adds documents to the collection. The documents are first embedded into vectors * using the provided embedding model. * * @param {Document[]} documents - The documents to add. * @returns {Promise<string[]>} - A promise that resolves with the UUIDs of the added documents. 
*/ async addDocuments(documents: Document[]): Promise<string[]> { const texts = documents.map(({ pageContent }) => pageContent); let vectors: number[][] = []; if (!this.autoEmbed) { vectors = await this.embeddings.embedDocuments(texts); } return this.addVectors(vectors, documents); } _vectorstoreType(): string { return "zep"; } /** * Deletes documents from the collection. * * @param {IZepDeleteParams} params - The list of Zep document UUIDs to delete. * @returns {Promise<void>} */ async delete(params: IZepDeleteParams): Promise<void> { // Wait for collection to be initialized await this.initPromise; for (const uuid of params.uuids) { await this.collection.deleteDocument(uuid); } } /** * Performs a similarity search in the collection and returns the results with their scores. * * @param {number[]} query - The query vector. * @param {number} k - The number of results to return. * @param {Record<string, unknown>} filter - The filter to apply to the search. Zep only supports Record<string, unknown> as filter. * @returns {Promise<[Document, number][]>} - A promise that resolves with the search results and their scores. 
*/ async similaritySearchVectorWithScore( query: number[], k: number, filter?: Record<string, unknown> | undefined ): Promise<[Document, number][]> { await this.initPromise; const results = await this.collection.search( { embedding: new Float32Array(query), metadata: assignMetadata(filter), }, k ); return zepDocsToDocumentsAndScore(results); } async _similaritySearchWithScore( query: string, k: number, filter?: Record<string, unknown> | undefined ): Promise<[Document, number][]> { await this.initPromise; const results = await this.collection.search( { text: query, metadata: assignMetadata(filter), }, k ); return zepDocsToDocumentsAndScore(results); } async similaritySearchWithScore( query: string, k = 4, filter: Record<string, unknown> | undefined = undefined, _callbacks = undefined // implement passing to embedQuery later ): Promise<[Document, number][]> { if (this.autoEmbed) { return this._similaritySearchWithScore(query, k, filter); } else { return this.similaritySearchVectorWithScore( await this.embeddings.embedQuery(query), k, filter ); } } /** * Performs a similarity search on the Zep collection. * * @param {string} query - The query string to search for. * @param {number} [k=4] - The number of results to return. Defaults to 4. * @param {this["FilterType"] | undefined} [filter=undefined] - An optional set of JSONPath filters to apply to the search. * @param {Callbacks | undefined} [_callbacks=undefined] - Optional callbacks. Currently not implemented. * @returns {Promise<Document[]>} - A promise that resolves to an array of Documents that are similar to the query. 
* * @async */ async similaritySearch( query: string, k = 4, filter: this["FilterType"] | undefined = undefined, _callbacks: Callbacks | undefined = undefined // implement passing to embedQuery later ): Promise<Document[]> { await this.initPromise; let results: [Document, number][]; if (this.autoEmbed) { const zepResults = await this.collection.search( { text: query, metadata: assignMetadata(filter) }, k ); results = zepDocsToDocumentsAndScore(zepResults); } else { results = await this.similaritySearchVectorWithScore( await this.embeddings.embedQuery(query), k, assignMetadata(filter) ); } return results.map((result) => result[0]); } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. * * @param {string} query - Text to look up documents similar to. * @param options * @param {number} options.k - Number of documents to return. * @param {number} options.fetchK=20- Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda=0.5 - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. * @param {Record<string, any>} options.filter - Optional Zep JSONPath query to pre-filter on document metadata field * * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance. 
*/ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { const { k, fetchK = 20, lambda = 0.5, filter } = options; let queryEmbedding: number[]; let zepResults: IDocument[]; if (!this.autoEmbed) { queryEmbedding = await this.embeddings.embedQuery(query); zepResults = await this.collection.search( { embedding: new Float32Array(queryEmbedding), metadata: assignMetadata(filter), }, fetchK ); } else { let queryEmbeddingArray: Float32Array; [zepResults, queryEmbeddingArray] = await this.collection.searchReturnQueryVector( { text: query, metadata: assignMetadata(filter) }, fetchK ); queryEmbedding = Array.from(queryEmbeddingArray); } const results = zepDocsToDocumentsAndScore(zepResults); const embeddingList = zepResults.map((doc) => Array.from(doc.embedding ? doc.embedding : []) ); const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, lambda, k ); return mmrIndexes.filter((idx) => idx !== -1).map((idx) => results[idx][0]); } /** * Creates a new ZepVectorStore instance from an array of texts. Each text is converted into a Document and added to the collection. * * @param {string[]} texts - The texts to convert into Documents. * @param {object[] | object} metadatas - The metadata to associate with each Document. If an array is provided, each element is associated with the corresponding Document. If an object is provided, it is associated with all Documents. * @param {Embeddings} embeddings - The embeddings to use for vectorizing the texts. * @param {IZepConfig} zepConfig - The configuration object for the Zep API. * @returns {Promise<ZepVectorStore>} - A promise that resolves with the new ZepVectorStore instance. 
*/ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, zepConfig: IZepConfig ): Promise<ZepVectorStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return ZepVectorStore.fromDocuments(docs, embeddings, zepConfig); } /** * Creates a new ZepVectorStore instance from an array of Documents. Each Document is added to a Zep collection. * * @param {Document[]} docs - The Documents to add. * @param {Embeddings} embeddings - The embeddings to use for vectorizing the Document contents. * @param {IZepConfig} zepConfig - The configuration object for the Zep API. * @returns {Promise<ZepVectorStore>} - A promise that resolves with the new ZepVectorStore instance. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, zepConfig: IZepConfig ): Promise<ZepVectorStore> { const instance = new this(embeddings, zepConfig); // Wait for collection to be initialized await instance.initPromise; await instance.addDocuments(docs); return instance; } } function zepDocsToDocumentsAndScore( results: IDocument[] ): [Document, number][] { return results.map((d) => [ new Document({ pageContent: d.content, metadata: d.metadata, }), d.score ? d.score : 0, ]); } function assignMetadata( value: string | Record<string, unknown> | object | undefined ): Record<string, unknown> | undefined { if (typeof value === "object" && value !== null) { return value as Record<string, unknown>; } if (value !== undefined) { console.warn("Metadata filters must be an object, Record, or undefined."); } return undefined; }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/qdrant.ts
import { QdrantClient } from "@qdrant/js-client-rest"; import type { Schemas as QdrantSchemas } from "@qdrant/js-client-rest"; import { v4 as uuid } from "uuid"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; const CONTENT_KEY = "content"; const METADATA_KEY = "metadata"; /** * @deprecated Install and import from @langchain/qdrant instead. * * Interface for the arguments that can be passed to the * `QdrantVectorStore` constructor. It includes options for specifying a * `QdrantClient` instance, the URL and API key for a Qdrant database, and * the name and configuration for a collection. */ export interface QdrantLibArgs { client?: QdrantClient; url?: string; apiKey?: string; collectionName?: string; collectionConfig?: QdrantSchemas["CreateCollection"]; // eslint-disable-next-line @typescript-eslint/no-explicit-any customPayload?: Record<string, any>[]; contentPayloadKey?: string; metadataPayloadKey?: string; } /** @deprecated Install and import from @langchain/qdrant instead. */ export type QdrantAddDocumentOptions = { // eslint-disable-next-line @typescript-eslint/no-explicit-any customPayload: Record<string, any>[]; }; /** @deprecated Install and import from @langchain/qdrant instead. */ export type QdrantFilter = QdrantSchemas["Filter"]; /** @deprecated Install and import from @langchain/qdrant instead. */ export type QdrantCondition = QdrantSchemas["FieldCondition"]; /** * @deprecated Install and import from @langchain/qdrant instead. * * Type for the response returned by a search operation in the Qdrant * database. It includes the score and payload (metadata and content) for * each point (document) in the search results. 
*/ type QdrantSearchResponse = QdrantSchemas["ScoredPoint"] & { payload: { metadata: object; content: string; }; }; /** * @deprecated Install and import from @langchain/qdrant instead. * * Class that extends the `VectorStore` base class to interact with a * Qdrant database. It includes methods for adding documents and vectors * to the Qdrant database, searching for similar vectors, and ensuring the * existence of a collection in the database. */ export class QdrantVectorStore extends VectorStore { declare FilterType: QdrantFilter; get lc_secrets(): { [key: string]: string } { return { apiKey: "QDRANT_API_KEY", url: "QDRANT_URL", }; } client: QdrantClient; collectionName: string; collectionConfig?: QdrantSchemas["CreateCollection"]; contentPayloadKey: string; metadataPayloadKey: string; _vectorstoreType(): string { return "qdrant"; } constructor(embeddings: EmbeddingsInterface, args: QdrantLibArgs) { super(embeddings, args); const url = args.url ?? getEnvironmentVariable("QDRANT_URL"); const apiKey = args.apiKey ?? getEnvironmentVariable("QDRANT_API_KEY"); if (!args.client && !url) { throw new Error("Qdrant client or url address must be set."); } this.client = args.client || new QdrantClient({ url, apiKey, }); this.collectionName = args.collectionName ?? "documents"; this.collectionConfig = args.collectionConfig; this.contentPayloadKey = args.contentPayloadKey ?? CONTENT_KEY; this.metadataPayloadKey = args.metadataPayloadKey ?? METADATA_KEY; } /** * Method to add documents to the Qdrant database. It generates vectors * from the documents using the `Embeddings` instance and then adds the * vectors to the database. * @param documents Array of `Document` instances to be added to the Qdrant database. * @param documentOptions Optional `QdrantAddDocumentOptions` which has a list of JSON objects for extra querying * @returns Promise that resolves when the documents have been added to the database. 
*/ async addDocuments( documents: Document[], documentOptions?: QdrantAddDocumentOptions ): Promise<void> { const texts = documents.map(({ pageContent }) => pageContent); await this.addVectors( await this.embeddings.embedDocuments(texts), documents, documentOptions ); } /** * Method to add vectors to the Qdrant database. Each vector is associated * with a document, which is stored as the payload for a point in the * database. * @param vectors Array of vectors to be added to the Qdrant database. * @param documents Array of `Document` instances associated with the vectors. * @param documentOptions Optional `QdrantAddDocumentOptions` which has a list of JSON objects for extra querying * @returns Promise that resolves when the vectors have been added to the database. */ async addVectors( vectors: number[][], documents: Document[], documentOptions?: QdrantAddDocumentOptions ): Promise<void> { if (vectors.length === 0) { return; } await this.ensureCollection(); const points = vectors.map((embedding, idx) => ({ id: uuid(), vector: embedding, payload: { [this.contentPayloadKey]: documents[idx].pageContent, [this.metadataPayloadKey]: documents[idx].metadata, customPayload: documentOptions?.customPayload[idx], }, })); try { await this.client.upsert(this.collectionName, { wait: true, points, }); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { const error = new Error( `${e?.status ?? "Undefined error code"} ${e?.message}: ${ e?.data?.status?.error }` ); throw error; } } /** * Method to search for vectors in the Qdrant database that are similar to * a given query vector. The search results include the score and payload * (metadata and content) for each similar vector. * @param query Query vector to search for similar vectors in the Qdrant database. * @param k Optional number of similar vectors to return. If not specified, all similar vectors are returned. * @param filter Optional filter to apply to the search results. 
* @returns Promise that resolves with an array of tuples, where each tuple includes a `Document` instance and a score for a similar vector. */ async similaritySearchVectorWithScore( query: number[], k?: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { if (!query) { return []; } await this.ensureCollection(); const results = await this.client.search(this.collectionName, { vector: query, limit: k, filter, }); const result: [Document, number][] = ( results as QdrantSearchResponse[] ).map((res) => [ new Document({ // eslint-disable-next-line @typescript-eslint/no-explicit-any metadata: res.payload[this.metadataPayloadKey] as Record<string, any>, pageContent: res.payload[this.contentPayloadKey] as string, }), res.score, ]); return result; } /** * Method to ensure the existence of a collection in the Qdrant database. * If the collection does not exist, it is created. * @returns Promise that resolves when the existence of the collection has been ensured. */ async ensureCollection() { const response = await this.client.getCollections(); const collectionNames = response.collections.map( (collection) => collection.name ); if (!collectionNames.includes(this.collectionName)) { const collectionConfig = this.collectionConfig ?? { vectors: { size: (await this.embeddings.embedQuery("test")).length, distance: "Cosine", }, }; await this.client.createCollection(this.collectionName, collectionConfig); } } /** * Static method to create a `QdrantVectorStore` instance from texts. Each * text is associated with metadata and converted to a `Document` * instance, which is then added to the Qdrant database. * @param texts Array of texts to be converted to `Document` instances and added to the Qdrant database. * @param metadatas Array or single object of metadata to be associated with the texts. * @param embeddings `Embeddings` instance used to generate vectors from the texts. 
* @param dbConfig `QdrantLibArgs` instance specifying the configuration for the Qdrant database. * @returns Promise that resolves with a new `QdrantVectorStore` instance. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: QdrantLibArgs ): Promise<QdrantVectorStore> { const docs = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return QdrantVectorStore.fromDocuments(docs, embeddings, dbConfig); } /** * Static method to create a `QdrantVectorStore` instance from `Document` * instances. The documents are added to the Qdrant database. * @param docs Array of `Document` instances to be added to the Qdrant database. * @param embeddings `Embeddings` instance used to generate vectors from the documents. * @param dbConfig `QdrantLibArgs` instance specifying the configuration for the Qdrant database. * @returns Promise that resolves with a new `QdrantVectorStore` instance. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: QdrantLibArgs ): Promise<QdrantVectorStore> { const instance = new this(embeddings, dbConfig); if (dbConfig.customPayload) { const documentOptions = { customPayload: dbConfig?.customPayload, }; await instance.addDocuments(docs, documentOptions); } else { await instance.addDocuments(docs); } return instance; } /** * Static method to create a `QdrantVectorStore` instance from an existing * collection in the Qdrant database. * @param embeddings `Embeddings` instance used to generate vectors from the documents in the collection. * @param dbConfig `QdrantLibArgs` instance specifying the configuration for the Qdrant database. * @returns Promise that resolves with a new `QdrantVectorStore` instance. 
*/ static async fromExistingCollection( embeddings: EmbeddingsInterface, dbConfig: QdrantLibArgs ): Promise<QdrantVectorStore> { const instance = new this(embeddings, dbConfig); await instance.ensureCollection(); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/azure_cosmosdb.ts
import { ObjectId, Collection, Document as MongoDBDocument, MongoClient, Db, Filter, } from "mongodb"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import { Document, DocumentInterface } from "@langchain/core/documents"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; /** * @deprecated Install and import from "@langchain/azure-cosmosdb" instead. * Cosmos DB Similarity type. */ export const AzureCosmosDBSimilarityType = { /** CosineSimilarity */ COS: "COS", /** Inner - product */ IP: "IP", /** Euclidian distance */ L2: "L2", } as const; /** * @deprecated Install and import from "@langchain/azure-cosmosdb" instead. * Cosmos DB Similarity type. */ export type AzureCosmosDBSimilarityType = (typeof AzureCosmosDBSimilarityType)[keyof typeof AzureCosmosDBSimilarityType]; /** * @deprecated Install and import from "@langchain/azure-cosmosdb" instead. * Azure Cosmos DB Index Options. */ export type AzureCosmosDBIndexOptions = { /** Skips automatic index creation. */ readonly skipCreate?: boolean; /** Number of clusters that the inverted file (IVF) index uses to group the vector data. */ readonly numLists?: number; /** Number of dimensions for vector similarity. */ readonly dimensions?: number; /** Similarity metric to use with the IVF index. */ readonly similarity?: AzureCosmosDBSimilarityType; }; /** * @deprecated Install and import from "@langchain/azure-cosmosdb" instead. * Azure Cosmos DB Delete Parameters. */ export type AzureCosmosDBDeleteParams = { /** List of IDs for the documents to be removed. */ readonly ids?: string | string[]; /** MongoDB filter object or list of IDs for the documents to be removed. */ readonly filter?: Filter<MongoDBDocument>; }; /** * @deprecated Install and import from "@langchain/azure-cosmosdb" instead. 
* Configuration options for the `AzureCosmosDBVectorStore` constructor. */ export interface AzureCosmosDBConfig { readonly client?: MongoClient; readonly connectionString?: string; readonly databaseName?: string; readonly collectionName?: string; readonly indexName?: string; readonly textKey?: string; readonly embeddingKey?: string; readonly indexOptions?: AzureCosmosDBIndexOptions; } /** * @deprecated Install and import from "@langchain/azure-cosmosdb" instead. * Azure Cosmos DB for MongoDB vCore vector store. * To use this, you should have both: * - the `mongodb` NPM package installed * - a connection string associated with a MongoDB VCore Cluster * * You do not need to create a database or collection, it will be created * automatically. * * Though you do need to create an index on the collection, which can be done * using the `createIndex` method. */ export class AzureCosmosDBVectorStore extends VectorStore { get lc_secrets(): { [key: string]: string } { return { endpoint: "AZURE_COSMOSDB_CONNECTION_STRING", }; } private connectPromise: Promise<void>; private readonly initPromise: Promise<void>; private readonly client: MongoClient | undefined; private database: Db; private collection: Collection<MongoDBDocument>; readonly indexName: string; readonly textKey: string; readonly embeddingKey: string; private readonly indexOptions: AzureCosmosDBIndexOptions; _vectorstoreType(): string { return "azure_cosmosdb"; } constructor(embeddings: EmbeddingsInterface, dbConfig: AzureCosmosDBConfig) { super(embeddings, dbConfig); const connectionString = dbConfig.connectionString ?? getEnvironmentVariable("AZURE_COSMOSDB_CONNECTION_STRING"); if (!dbConfig.client && !connectionString) { throw new Error( "Azure Cosmos DB client or connection string must be set." 
); } if (!dbConfig.client) { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion this.client = new MongoClient(connectionString!, { appName: "langchainjs", }); } // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const client = dbConfig.client || this.client!; const databaseName = dbConfig.databaseName ?? "documentsDB"; const collectionName = dbConfig.collectionName ?? "documents"; this.indexName = dbConfig.indexName ?? "vectorSearchIndex"; this.textKey = dbConfig.textKey ?? "textContent"; this.embeddingKey = dbConfig.embeddingKey ?? "vectorContent"; this.indexOptions = dbConfig.indexOptions ?? {}; // Start initialization, but don't wait for it to finish here this.initPromise = this.init(client, databaseName, collectionName).catch( (error) => { console.error("Error during Azure Cosmos DB initialization:", error); } ); } /** * Checks if the specified index name during instance construction exists * on the collection. * @returns A promise that resolves to a boolean indicating if the index exists. */ async checkIndexExists(): Promise<boolean> { await this.initPromise; const indexes = await this.collection.listIndexes().toArray(); return indexes.some((index) => index.name === this.indexName); } /** * Deletes the index specified during instance construction if it exists. * @returns A promise that resolves when the index has been deleted. */ async deleteIndex(): Promise<void> { await this.initPromise; if (await this.checkIndexExists()) { await this.collection.dropIndex(this.indexName); } } /** * Creates an index on the collection with the specified index name during * instance construction. * * Setting the numLists parameter correctly is important for achieving good * accuracy and performance. * Since the vector store uses IVF as the indexing strategy, you should * create the index only after you have loaded a large enough sample * documents to ensure that the centroids for the respective buckets are * faily distributed. 
* * We recommend that numLists is set to documentCount/1000 for up to * 1 million documents and to sqrt(documentCount) for more than 1 million * documents. * As the number of items in your database grows, you should tune numLists * to be larger in order to achieve good latency performance for vector * search. * * If you're experimenting with a new scenario or creating a small demo, * you can start with numLists set to 1 to perform a brute-force search * across all vectors. * This should provide you with the most accurate results from the vector * search, however be aware that the search speed and latency will be slow. * After your initial setup, you should go ahead and tune the numLists * parameter using the above guidance. * @param numLists This integer is the number of clusters that the inverted * file (IVF) index uses to group the vector data. * We recommend that numLists is set to documentCount/1000 for up to * 1 million documents and to sqrt(documentCount) for more than 1 million * documents. * Using a numLists value of 1 is akin to performing brute-force search, * which has limited performance * @param dimensions Number of dimensions for vector similarity. * The maximum number of supported dimensions is 2000. * If no number is provided, it will be determined automatically by * embedding a short text. * @param similarity Similarity metric to use with the IVF index. * Possible options are: * - CosmosDBSimilarityType.COS (cosine distance) * - CosmosDBSimilarityType.L2 (Euclidean distance) * - CosmosDBSimilarityType.IP (inner product) * @returns A promise that resolves when the index has been created. 
*/ async createIndex( numLists = 100, dimensions: number | undefined = undefined, similarity: AzureCosmosDBSimilarityType = AzureCosmosDBSimilarityType.COS ): Promise<void> { await this.connectPromise; let vectorLength = dimensions; if (vectorLength === undefined) { const queryEmbedding = await this.embeddings.embedQuery("test"); vectorLength = queryEmbedding.length; } const createIndexCommands = { createIndexes: this.collection.collectionName, indexes: [ { name: this.indexName, key: { [this.embeddingKey]: "cosmosSearch" }, cosmosSearchOptions: { kind: "vector-ivf", numLists, similarity, dimensions: vectorLength, }, }, ], }; await this.database.command(createIndexCommands); } /** * Removes specified documents from the AzureCosmosDBVectorStore. * If no IDs or filter are specified, all documents will be removed. * @param params Parameters for the delete operation. * @returns A promise that resolves when the documents have been removed. */ async delete( params: AzureCosmosDBDeleteParams | string[] = {} ): Promise<void> { await this.initPromise; let ids: string | string[] | undefined; let filter: AzureCosmosDBDeleteParams["filter"]; if (Array.isArray(params)) { ids = params; } else { ids = params.ids; filter = params.filter; } const idsArray = Array.isArray(ids) ? ids : [ids]; const deleteIds = ids && idsArray.length > 0 ? idsArray : undefined; let deleteFilter = filter ?? {}; if (deleteIds) { const objectIds = deleteIds.map((id) => new ObjectId(id)); deleteFilter = { _id: { $in: objectIds }, ...deleteFilter }; } await this.collection.deleteMany(deleteFilter); } /** * Closes any newly instanciated Azure Cosmos DB client. * If the client was passed in the constructor, it will not be closed. * @returns A promise that resolves when any newly instanciated Azure * Cosmos DB client been closed. */ async close(): Promise<void> { if (this.client) { await this.client.close(); } } /** * Method for adding vectors to the AzureCosmosDBVectorStore. 
* @param vectors Vectors to be added. * @param documents Corresponding documents to be added. * @returns A promise that resolves to the added documents IDs. */ async addVectors( vectors: number[][], documents: DocumentInterface[] ): Promise<string[]> { const docs = vectors.map((embedding, idx) => ({ [this.textKey]: documents[idx].pageContent, [this.embeddingKey]: embedding, ...documents[idx].metadata, })); await this.initPromise; const result = await this.collection.insertMany(docs); return Object.values(result.insertedIds).map((id) => String(id)); } /** * Method for adding documents to the AzureCosmosDBVectorStore. It first converts * the documents to texts and then adds them as vectors. * @param documents The documents to add. * @returns A promise that resolves to the added documents IDs. */ async addDocuments(documents: DocumentInterface[]): Promise<string[]> { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents ); } /** * Method that performs a similarity search on the vectors stored in the * collection. It returns a list of documents and their corresponding * similarity scores. * @param queryVector Query vector for the similarity search. * @param k=4 Number of nearest neighbors to return. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. 
*/ async similaritySearchVectorWithScore( queryVector: number[], k = 4 ): Promise<[Document, number][]> { await this.initPromise; const pipeline = [ { $search: { cosmosSearch: { vector: queryVector, path: this.embeddingKey, k, }, returnStoredSource: true, }, }, { $project: { similarityScore: { $meta: "searchScore" }, document: "$$ROOT", }, }, ]; const results = await this.collection .aggregate(pipeline) .map<[Document, number]>((result) => { const { similarityScore: score, document } = result; const text = document[this.textKey]; return [new Document({ pageContent: text, metadata: document }), score]; }); return results.toArray(); } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND * diversity among selected documents. * @param query Text to look up documents similar to. * @param options.k Number of documents to return. * @param options.fetchK=20 Number of documents to fetch before passing to * the MMR algorithm. * @param options.lambda=0.5 Number between 0 and 1 that determines the * degree of diversity among the results, where 0 corresponds to maximum * diversity and 1 to minimum diversity. * @returns List of documents selected by maximal marginal relevance. */ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { const { k, fetchK = 20, lambda = 0.5 } = options; const queryEmbedding = await this.embeddings.embedQuery(query); const docs = await this.similaritySearchVectorWithScore( queryEmbedding, fetchK ); const embeddingList = docs.map((doc) => doc[0].metadata[this.embeddingKey]); // Re-rank the results using MMR const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, lambda, k ); const mmrDocs = mmrIndexes.map((index) => docs[index][0]); return mmrDocs; } /** * Initializes the AzureCosmosDBVectorStore by connecting to the database. 
* @param client The MongoClient to use for connecting to the database. * @param databaseName The name of the database to use. * @param collectionName The name of the collection to use. * @returns A promise that resolves when the AzureCosmosDBVectorStore has been initialized. */ private async init( client: MongoClient, databaseName: string, collectionName: string ): Promise<void> { this.connectPromise = (async () => { await client.connect(); this.database = client.db(databaseName); this.collection = this.database.collection(collectionName); })(); // Unless skipCreate is set, create the index // This operation is no-op if the index already exists if (!this.indexOptions.skipCreate) { await this.createIndex( this.indexOptions.numLists, this.indexOptions.dimensions, this.indexOptions.similarity ); } } /** * Static method to create an instance of AzureCosmosDBVectorStore from a * list of texts. It first converts the texts to vectors and then adds * them to the collection. * @param texts List of texts to be converted to vectors. * @param metadatas Metadata for the texts. * @param embeddings Embeddings to be used for conversion. * @param dbConfig Database configuration for Azure Cosmos DB for MongoDB vCore. * @returns Promise that resolves to a new instance of AzureCosmosDBVectorStore. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: AzureCosmosDBConfig ): Promise<AzureCosmosDBVectorStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return AzureCosmosDBVectorStore.fromDocuments(docs, embeddings, dbConfig); } /** * Static method to create an instance of AzureCosmosDBVectorStore from a * list of documents. It first converts the documents to vectors and then * adds them to the collection. 
* @param docs List of documents to be converted to vectors. * @param embeddings Embeddings to be used for conversion. * @param dbConfig Database configuration for Azure Cosmos DB for MongoDB vCore. * @returns Promise that resolves to a new instance of AzureCosmosDBVectorStore. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: AzureCosmosDBConfig ): Promise<AzureCosmosDBVectorStore> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/convex.ts
// eslint-disable-next-line import/no-extraneous-dependencies
import {
  DocumentByInfo,
  FieldPaths,
  FilterExpression,
  FunctionReference,
  GenericActionCtx,
  GenericDataModel,
  GenericTableInfo,
  NamedTableInfo,
  NamedVectorIndex,
  TableNamesInDataModel,
  VectorFilterBuilder,
  VectorIndexNames,
  makeFunctionReference,
} from "convex/server";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";

/**
 * Type that defines the config required to initialize the
 * ConvexVectorStore class. It includes the table name,
 * index name, text field name, and embedding field name.
 */
export type ConvexVectorStoreConfig<
  DataModel extends GenericDataModel,
  TableName extends TableNamesInDataModel<DataModel>,
  IndexName extends VectorIndexNames<NamedTableInfo<DataModel, TableName>>,
  TextFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
  EmbeddingFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
  MetadataFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
  InsertMutation extends FunctionReference<
    "mutation",
    "internal",
    { table: string; document: object }
  >,
  GetQuery extends FunctionReference<
    "query",
    "internal",
    { id: string },
    object | null
  >
> = {
  readonly ctx: GenericActionCtx<DataModel>;
  /**
   * Defaults to "documents"
   */
  readonly table?: TableName;
  /**
   * Defaults to "byEmbedding"
   */
  readonly index?: IndexName;
  /**
   * Defaults to "text"
   */
  readonly textField?: TextFieldName;
  /**
   * Defaults to "embedding"
   */
  readonly embeddingField?: EmbeddingFieldName;
  /**
   * Defaults to "metadata"
   */
  readonly metadataField?: MetadataFieldName;
  /**
   * Defaults to `internal.langchain.db.insert`
   */
  readonly insert?: InsertMutation;
  /**
   * Defaults to `internal.langchain.db.get`
   */
  readonly get?: GetQuery;
};

/**
 * Class that is a wrapper around Convex storage and vector search. It is used
 * to insert embeddings in Convex documents with a vector search index,
 * and perform a vector search on them.
 *
 * ConvexVectorStore does NOT implement maxMarginalRelevanceSearch.
 */
export class ConvexVectorStore<
  DataModel extends GenericDataModel,
  TableName extends TableNamesInDataModel<DataModel>,
  IndexName extends VectorIndexNames<NamedTableInfo<DataModel, TableName>>,
  TextFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
  EmbeddingFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
  MetadataFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
  InsertMutation extends FunctionReference<
    "mutation",
    "internal",
    { table: string; document: object }
  >,
  GetQuery extends FunctionReference<
    "query",
    "internal",
    { id: string },
    object | null
  >
> extends VectorStore {
  /**
   * Type that defines the filter used in the
   * similaritySearchVectorWithScore and maxMarginalRelevanceSearch methods.
   * It includes limit, filter and a flag to include embeddings.
   */
  declare FilterType: {
    filter?: (
      q: VectorFilterBuilder<
        DocumentByInfo<GenericTableInfo>,
        NamedVectorIndex<NamedTableInfo<DataModel, TableName>, IndexName>
      >
    ) => FilterExpression<boolean>;
    includeEmbeddings?: boolean;
  };

  private readonly ctx: GenericActionCtx<DataModel>;

  private readonly table: TableName;

  private readonly index: IndexName;

  private readonly textField: TextFieldName;

  private readonly embeddingField: EmbeddingFieldName;

  private readonly metadataField: MetadataFieldName;

  private readonly insert: InsertMutation;

  private readonly get: GetQuery;

  _vectorstoreType(): string {
    return "convex";
  }

  constructor(
    embeddings: EmbeddingsInterface,
    config: ConvexVectorStoreConfig<
      DataModel,
      TableName,
      IndexName,
      TextFieldName,
      EmbeddingFieldName,
      MetadataFieldName,
      InsertMutation,
      GetQuery
    >
  ) {
    super(embeddings, config);
    this.ctx = config.ctx;
    // Each field falls back to the documented default when not configured.
    this.table = config.table ?? ("documents" as TableName);
    this.index = config.index ?? ("byEmbedding" as IndexName);
    this.textField = config.textField ?? ("text" as TextFieldName);
    this.embeddingField =
      config.embeddingField ?? ("embedding" as EmbeddingFieldName);
    this.metadataField =
      config.metadataField ?? ("metadata" as MetadataFieldName);
    this.insert =
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      config.insert ?? (makeFunctionReference("langchain/db:insert") as any);
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    this.get = config.get ?? (makeFunctionReference("langchain/db:get") as any);
  }

  /**
   * Add vectors and their corresponding documents to the Convex table.
   * @param vectors Vectors to be added.
   * @param documents Corresponding documents to be added.
   * @returns Promise that resolves when the vectors and documents have been added.
   */
  async addVectors(vectors: number[][], documents: Document[]): Promise<void> {
    const convexDocuments = vectors.map((embedding, idx) => ({
      [this.textField]: documents[idx].pageContent,
      [this.embeddingField]: embedding,
      [this.metadataField]: documents[idx].metadata,
    }));
    // TODO: Remove chunking when Convex handles the concurrent requests correctly
    // Mutations are issued in batches of PAGE_SIZE; each batch runs in
    // parallel, batches run sequentially.
    const PAGE_SIZE = 16;
    for (let i = 0; i < convexDocuments.length; i += PAGE_SIZE) {
      await Promise.all(
        convexDocuments.slice(i, i + PAGE_SIZE).map((document) =>
          this.ctx.runMutation(this.insert, {
            table: this.table,
            document,
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
          } as any)
        )
      );
    }
  }

  /**
   * Add documents to the Convex table. It first converts
   * the documents to vectors using the embeddings and then calls the
   * addVectors method.
   * @param documents Documents to be added.
   * @returns Promise that resolves when the documents have been added.
   */
  async addDocuments(documents: Document[]): Promise<void> {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents
    );
  }

  /**
   * Similarity search on the vectors stored in the
   * Convex table. It returns a list of documents and their
   * corresponding similarity scores.
   * @param query Query vector for the similarity search.
   * @param k Number of nearest neighbors to return.
   * @param filter Optional filter to be applied.
   * @returns Promise that resolves to a list of documents and their corresponding similarity scores.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: this["FilterType"]
  ): Promise<[Document, number][]> {
    const idsAndScores = await this.ctx.vectorSearch(this.table, this.index, {
      vector: query,
      limit: k,
      filter: filter?.filter,
    });

    // NOTE(review): the `get` query is typed as returning `object | null`;
    // if a document is deleted between the vector search and this fetch,
    // the destructuring below would throw on `null` — confirm whether
    // Convex guarantees the ids remain resolvable here.
    const documents = await Promise.all(
      idsAndScores.map(({ _id }) =>
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        this.ctx.runQuery(this.get, { id: _id } as any)
      )
    );

    return documents.map(
      (
        {
          [this.textField]: text,
          [this.embeddingField]: embedding,
          [this.metadataField]: metadata,
        },
        idx
      ) => [
        new Document({
          pageContent: text as string,
          metadata: {
            ...metadata,
            // The stored embedding is only surfaced when explicitly requested.
            ...(filter?.includeEmbeddings ? { embedding } : null),
          },
        }),
        idsAndScores[idx]._score,
      ]
    );
  }

  /**
   * Static method to create an instance of ConvexVectorStore from a
   * list of texts. It first converts the texts to vectors and then adds
   * them to the Convex table.
   * @param texts List of texts to be converted to vectors.
   * @param metadatas Metadata for the texts.
   * @param embeddings Embeddings to be used for conversion.
   * @param dbConfig Database configuration for Convex.
   * @returns Promise that resolves to a new instance of ConvexVectorStore.
   */
  static async fromTexts<
    DataModel extends GenericDataModel,
    TableName extends TableNamesInDataModel<DataModel>,
    IndexName extends VectorIndexNames<NamedTableInfo<DataModel, TableName>>,
    TextFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
    EmbeddingFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
    MetadataFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
    InsertMutation extends FunctionReference<
      "mutation",
      "internal",
      { table: string; document: object }
    >,
    GetQuery extends FunctionReference<
      "query",
      "internal",
      { id: string },
      object | null
    >
  >(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig: ConvexVectorStoreConfig<
      DataModel,
      TableName,
      IndexName,
      TextFieldName,
      EmbeddingFieldName,
      MetadataFieldName,
      InsertMutation,
      GetQuery
    >
  ): Promise<
    ConvexVectorStore<
      DataModel,
      TableName,
      IndexName,
      TextFieldName,
      EmbeddingFieldName,
      MetadataFieldName,
      InsertMutation,
      GetQuery
    >
  > {
    const docs = texts.map(
      (text, i) =>
        new Document({
          pageContent: text,
          metadata: Array.isArray(metadatas) ? metadatas[i] : metadatas,
        })
    );
    return ConvexVectorStore.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * Static method to create an instance of ConvexVectorStore from a
   * list of documents. It first converts the documents to vectors and then
   * adds them to the Convex table.
   * @param docs List of documents to be converted to vectors.
   * @param embeddings Embeddings to be used for conversion.
   * @param dbConfig Database configuration for Convex.
   * @returns Promise that resolves to a new instance of ConvexVectorStore.
   */
  static async fromDocuments<
    DataModel extends GenericDataModel,
    TableName extends TableNamesInDataModel<DataModel>,
    IndexName extends VectorIndexNames<NamedTableInfo<DataModel, TableName>>,
    TextFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
    EmbeddingFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
    MetadataFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>>,
    InsertMutation extends FunctionReference<
      "mutation",
      "internal",
      { table: string; document: object }
    >,
    GetQuery extends FunctionReference<
      "query",
      "internal",
      { id: string },
      object | null
    >
  >(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig: ConvexVectorStoreConfig<
      DataModel,
      TableName,
      IndexName,
      TextFieldName,
      EmbeddingFieldName,
      MetadataFieldName,
      InsertMutation,
      GetQuery
    >
  ): Promise<
    ConvexVectorStore<
      DataModel,
      TableName,
      IndexName,
      TextFieldName,
      EmbeddingFieldName,
      MetadataFieldName,
      InsertMutation,
      GetQuery
    >
  > {
    const instance = new this(embeddings, dbConfig);
    await instance.addDocuments(docs);
    return instance;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/hanavector.ts
import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore, MaxMarginalRelevanceSearchOptions, } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; export type DistanceStrategy = "euclidean" | "cosine"; const COMPARISONS_TO_SQL: Record<string, string> = { $eq: "=", $ne: "<>", $lt: "<", $lte: "<=", $gt: ">", $gte: ">=", }; // Base value types that can be used in comparisons type ComparisonRValue = | string | number | boolean | Date | Array<ComparisonRValue>; // Available comparison operators for filtering type Comparator = | "$eq" | "$ne" | "$lt" | "$lte" | "$gt" | "$gte" | "$in" | "$nin" | "$between" | "$like"; // Filter using comparison operators // Defines the relationship between a comparison operator and its value type ComparatorFilter = { [K in Comparator]?: ComparisonRValue; }; type LogicalOperator = "$and" | "$or"; type LogicalFilter = { [K in LogicalOperator]?: Filter[]; }; type PropertyFilter = { [property: string]: string | number | boolean | Date | ComparatorFilter; }; type Filter = PropertyFilter | LogicalFilter; interface DateValue { type: "date"; date: string | Date; } const IN_OPERATORS_TO_SQL: Record<string, string> = { $in: "IN", $nin: "NOT IN", }; const BETWEEN_OPERATOR_TO_SQL: Record<string, string> = { $between: "BETWEEN", }; const LIKE_OPERATOR_TO_SQL: Record<string, string> = { $like: "LIKE", }; const LOGICAL_OPERATORS_TO_SQL: Record<string, string> = { $and: "AND", $or: "OR", }; const HANA_DISTANCE_FUNCTION: Record<DistanceStrategy, [string, string]> = { cosine: ["COSINE_SIMILARITY", "DESC"], euclidean: ["L2DISTANCE", "ASC"], }; const defaultDistanceStrategy = "cosine"; const defaultTableName = "EMBEDDINGS"; const defaultContentColumn = "VEC_TEXT"; const defaultMetadataColumn = "VEC_META"; const defaultVectorColumn = "VEC_VECTOR"; const defaultVectorColumnLength = -1; // -1 means dynamic length /** * 
Interface defining the arguments required to create an instance of * `HanaDB`. */ export interface HanaDBArgs { // eslint-disable-next-line @typescript-eslint/no-explicit-any connection: any; distanceStrategy?: DistanceStrategy; tableName?: string; contentColumn?: string; metadataColumn?: string; vectorColumn?: string; vectorColumnLength?: number; specificMetadataColumns?: string[]; } export class HanaDB extends VectorStore { // eslint-disable-next-line @typescript-eslint/no-explicit-any private connection: any; private distanceStrategy: DistanceStrategy; // Compile pattern only once, for better performance private static compiledPattern = /^[a-zA-Z_][a-zA-Z0-9_]*$/; private tableName: string; private contentColumn: string; private metadataColumn: string; private vectorColumn: string; private vectorColumnLength: number; declare FilterType: Filter; private specificMetadataColumns: string[]; _vectorstoreType(): string { return "hanadb"; } constructor(embeddings: EmbeddingsInterface, args: HanaDBArgs) { super(embeddings, args); this.distanceStrategy = args.distanceStrategy || defaultDistanceStrategy; this.tableName = HanaDB.sanitizeName(args.tableName || defaultTableName); this.contentColumn = HanaDB.sanitizeName( args.contentColumn || defaultContentColumn ); this.metadataColumn = HanaDB.sanitizeName( args.metadataColumn || defaultMetadataColumn ); this.vectorColumn = HanaDB.sanitizeName( args.vectorColumn || defaultVectorColumn ); this.vectorColumnLength = HanaDB.sanitizeInt( args.vectorColumnLength || defaultVectorColumnLength, -1 ); this.specificMetadataColumns = HanaDB.sanitizeSpecificMetadataColumns( args.specificMetadataColumns || [] ); this.connection = args.connection; } // eslint-disable-next-line @typescript-eslint/no-explicit-any private executeQuery(client: any, query: string): Promise<any> { return new Promise((resolve, reject) => { // eslint-disable-next-line @typescript-eslint/no-explicit-any client.exec(query, (err: Error, result: any) => { if (err) { 
reject(err); } else { resolve(result); } }); }); } // eslint-disable-next-line @typescript-eslint/no-explicit-any private prepareQuery(client: any, query: string): Promise<any> { return new Promise((resolve, reject) => { // eslint-disable-next-line @typescript-eslint/no-explicit-any client.prepare(query, (err: Error, statement: any) => { if (err) { reject(err); } else { resolve(statement); } }); }); } // eslint-disable-next-line @typescript-eslint/no-explicit-any private executeStatement(statement: any, params: any): Promise<any> { return new Promise((resolve, reject) => { // eslint-disable-next-line @typescript-eslint/no-explicit-any statement.exec(params, (err: Error, res: any) => { if (err) { reject(err); } else { resolve(res); } }); }); } public async initialize() { let valid_distance = false; for (const key in HANA_DISTANCE_FUNCTION) { if (key === this.distanceStrategy) { valid_distance = true; break; // Added to exit loop once a match is found } } if (!valid_distance) { throw new Error( `Unsupported distance_strategy: ${this.distanceStrategy}` ); } await this.createTableIfNotExists(); await this.checkColumn(this.tableName, this.contentColumn, [ "NCLOB", "NVARCHAR", ]); await this.checkColumn(this.tableName, this.metadataColumn, [ "NCLOB", "NVARCHAR", ]); await this.checkColumn( this.tableName, this.vectorColumn, ["REAL_VECTOR"], this.vectorColumnLength ); } /** * Sanitizes the input string by removing characters that are not alphanumeric or underscores. * @param inputStr The string to be sanitized. * @returns The sanitized string. */ public static sanitizeName(inputStr: string): string { return inputStr.replace(/[^a-zA-Z0-9_]/g, ""); } /** * Sanitizes the input to integer. Throws an error if the value is less than lower bound. * @param inputInt The input to be sanitized. * @returns The sanitized integer. 
*/ public static sanitizeInt(inputInt: number | string, lowerBound = 0): number { const value = parseInt(inputInt.toString(), 10); if (Number.isNaN(value) || value < lowerBound) { throw new Error( `Value (${value}) must not be smaller than ${lowerBound}` ); } return value; } /** * Sanitizes a list to ensure all elements are floats (numbers in TypeScript). * Throws an error if any element is not a number. * * @param {number[]} embedding - The array of numbers (floats) to be sanitized. * @returns {number[]} The sanitized array of numbers (floats). * @throws {Error} Throws an error if any element is not a number. */ public static sanitizeListFloat(embedding: number[]): number[] { if (!Array.isArray(embedding)) { throw new Error( `Expected 'embedding' to be an array, but received ${typeof embedding}` ); } embedding.forEach((value) => { if (typeof value !== "number") { throw new Error(`Value (${value}) does not have type number`); } }); return embedding; } /** * Sanitizes the keys of the metadata object to ensure they match the required pattern. * Throws an error if any key does not match the pattern. * * @param {Record<string, any>} metadata - The metadata object with keys to be validated. * @returns {object[] | object} The original metadata object if all keys are valid. * @throws {Error} Throws an error if any metadata key is invalid. */ private sanitizeMetadataKeys(metadata: object[] | object): object[] | object { if (!metadata) { return {}; } Object.keys(metadata).forEach((key) => { if (!HanaDB.compiledPattern.test(key)) { throw new Error(`Invalid metadata key ${key}`); } }); return metadata; } static sanitizeSpecificMetadataColumns(columns: string[]): string[] { return columns.map((column) => this.sanitizeName(column)); } /** * Parses a string representation of a float array and returns an array of numbers. * @param {string} arrayAsString - The string representation of the array. * @returns {number[]} An array of floats parsed from the string. 
*/ public static parseFloatArrayFromString(arrayAsString: string): number[] { const arrayWithoutBrackets = arrayAsString.slice(1, -1); return arrayWithoutBrackets.split(",").map((x) => parseFloat(x)); } /** * Checks if the specified column exists in the table and validates its data type and length. * @param tableName The name of the table. * @param columnName The name of the column to check. * @param columnType The expected data type(s) of the column. * @param columnLength The expected length of the column. Optional. */ public async checkColumn( tableName: string, columnName: string, columnType: string | string[], columnLength?: number ): Promise<void> { const sqlStr = ` SELECT DATA_TYPE_NAME, LENGTH FROM SYS.TABLE_COLUMNS WHERE SCHEMA_NAME = CURRENT_SCHEMA AND TABLE_NAME = ? AND COLUMN_NAME = ?`; const client = this.connection; // Get the connection object // Prepare the statement with parameter placeholders const stm = await this.prepareQuery(client, sqlStr); // Execute the query with actual parameters to avoid SQL injection const resultSet = await this.executeStatement(stm, [tableName, columnName]); if (resultSet.length === 0) { throw new Error(`Column ${columnName} does not exist`); } else { const dataType: string = resultSet[0].DATA_TYPE_NAME; const length: number = resultSet[0].LENGTH; // Check if dataType is within columnType const isValidType = Array.isArray(columnType) ? columnType.includes(dataType) : columnType === dataType; if (!isValidType) { throw new Error(`Column ${columnName} has the wrong type: ${dataType}`); } // Length can either be -1 (QRC01+02-24) or 0 (QRC03-24 onwards) // to indicate no length constraint being present. 
// Check length, if parameter was provided if (columnLength !== undefined && length !== columnLength && length > 0) { throw new Error(`Column ${columnName} has the wrong length: ${length}`); } } } private async createTableIfNotExists() { const tableExists = await this.tableExists(this.tableName); if (!tableExists) { let sqlStr = `CREATE TABLE "${this.tableName}" (` + `"${this.contentColumn}" NCLOB, ` + `"${this.metadataColumn}" NCLOB, ` + `"${this.vectorColumn}" REAL_VECTOR`; // Length can either be -1 (QRC01+02-24) or 0 (QRC03-24 onwards) if (this.vectorColumnLength === -1 || this.vectorColumnLength === 0) { sqlStr += ");"; } else { sqlStr += `(${this.vectorColumnLength}));`; } const client = this.connection; await this.executeQuery(client, sqlStr); } } public async tableExists(tableName: string): Promise<boolean> { const tableExistsSQL = `SELECT COUNT(*) AS COUNT FROM SYS.TABLES WHERE SCHEMA_NAME = CURRENT_SCHEMA AND TABLE_NAME = ?`; const client = this.connection; // Get the connection object const stm = await this.prepareQuery(client, tableExistsSQL); const resultSet = await this.executeStatement(stm, [tableName]); if (resultSet[0].COUNT === 1) { // Table does exist return true; } return false; } /** * Creates a WHERE clause based on the provided filter object. * @param filter - A filter object with keys as metadata fields and values as filter values. * @returns A tuple containing the WHERE clause string and an array of query parameters. */ private createWhereByFilter( filter?: this["FilterType"] ): [string, Array<ComparisonRValue>] { let whereStr = ""; let queryTuple: Array<ComparisonRValue> = []; if (filter && Object.keys(filter).length > 0) { const [where, params] = this.processFilterObject(filter); whereStr = ` WHERE ${where}`; queryTuple = params; } return [whereStr, queryTuple]; } /** * Processes a filter object to generate SQL WHERE clause components. * @param filter - A filter object with keys as metadata fields and values as filter values. 
* @returns A tuple containing the WHERE clause string and an array of query parameters. */ private processFilterObject( filter: this["FilterType"] ): [string, Array<ComparisonRValue>] { let whereStr = ""; const queryTuple: Array<ComparisonRValue> = []; Object.keys(filter).forEach((key, i) => { const filterValue = filter[key as keyof Filter] as | ComparisonRValue | ComparatorFilter | Filter[]; if (i !== 0) { whereStr += " AND "; } // Handling logical operators ($and, $or) if (key in LOGICAL_OPERATORS_TO_SQL) { const logicalOperator = LOGICAL_OPERATORS_TO_SQL[key]; const logicalOperands = filterValue as Filter[]; logicalOperands.forEach((operand: Filter, j: number) => { if (j !== 0) { whereStr += ` ${logicalOperator} `; } const [whereLogical, paramsLogical] = this.processFilterObject(operand); whereStr += "(" + whereLogical + ")"; queryTuple.push(...paramsLogical); }); return; } // Handle special comparison operators and simple types let operator = "="; let sqlParam = "?"; if (typeof filterValue === "number") { if (Number.isInteger(filterValue)) { // hdb requires string while sap/hana-client doesn't queryTuple.push(filterValue.toString()); } else { throw new Error( `Unsupported filter data-type: wrong number type for key ${key}` ); } } else if (typeof filterValue === "string") { queryTuple.push(filterValue); } else if (typeof filterValue === "boolean") { queryTuple.push(filterValue.toString()); } else if (typeof filterValue === "object" && filterValue !== null) { // Get the special operator key, like $eq, $ne, $in, $between, etc. 
const specialOp = Object.keys(filterValue)[0] as Comparator; const specialVal = (filterValue as ComparatorFilter)[specialOp]; // Handling of 'special' operators starting with "$" if (specialOp in COMPARISONS_TO_SQL) { operator = COMPARISONS_TO_SQL[specialOp]; if (specialVal === undefined) { throw new Error( `Operator '${specialOp}' expects a non-undefined value.` ); } if (typeof specialVal === "boolean") { queryTuple.push(specialVal.toString()); } else if (typeof specialVal === "number") { sqlParam = "CAST(? as float)"; queryTuple.push(specialVal); } else if ( typeof specialVal === "object" && specialVal !== null && "type" in specialVal && specialVal.type === "date" && "date" in specialVal ) { sqlParam = "CAST(? as DATE)"; queryTuple.push((specialVal as DateValue).date); } else { queryTuple.push(specialVal); } } else if (specialOp in BETWEEN_OPERATOR_TO_SQL) { // ensure the value is an array with exact length of 2 if (!Array.isArray(specialVal) || specialVal.length !== 2) { throw new Error(`Operator '${specialOp}' expects two values.`); } const [betweenFrom, betweenTo] = specialVal as [ ComparisonRValue, ComparisonRValue ]; operator = BETWEEN_OPERATOR_TO_SQL[specialOp]; sqlParam = "? 
AND ?"; queryTuple.push(betweenFrom.toString(), betweenTo.toString()); } else if (specialOp in LIKE_OPERATOR_TO_SQL) { operator = LIKE_OPERATOR_TO_SQL[specialOp]; if (specialVal !== undefined) { queryTuple.push(specialVal.toString()); } else { throw new Error( `Operator '${specialOp}' expects a non-undefined value.` ); } } else if (specialOp in IN_OPERATORS_TO_SQL) { operator = IN_OPERATORS_TO_SQL[specialOp]; if (Array.isArray(specialVal)) { const placeholders = Array(specialVal.length).fill("?").join(","); sqlParam = `(${placeholders})`; queryTuple.push( ...specialVal.map((listEntry) => listEntry.toString()) ); } else { throw new Error(`Unsupported value for ${operator}: ${specialVal}`); } } else { throw new Error(`Unsupported operator: ${specialOp}`); } } else { throw new Error(`Unsupported filter data-type: ${typeof filterValue}`); } // Metadata column handling const selector = this.specificMetadataColumns.includes(key) ? `"${key}"` : `JSON_VALUE(${this.metadataColumn}, '$.${key}')`; whereStr += `${selector} ${operator} ${sqlParam}`; }); return [whereStr, queryTuple]; } /** * Creates an HNSW vector index on a specified table and vector column with * optional build and search configurations. If no configurations are provided, * default parameters from the database are used. If provided values exceed the * valid ranges, an error will be raised. * The index is always created in ONLINE mode. * * @param {object} options Object containing configuration options for the index * @param {number} [options.m] (Optional) Maximum number of neighbors per graph node (Valid Range: [4, 1000]) * @param {number} [options.efConstruction] (Optional) Maximal candidates to consider when building the graph * (Valid Range: [1, 100000]) * @param {number} [options.efSearch] (Optional) Minimum candidates for top-k-nearest neighbor queries * (Valid Range: [1, 100000]) * @param {string} [options.indexName] (Optional) Custom index name. 
Defaults to <table_name>_<distance_strategy>_idx * @returns {Promise<void>} Promise that resolves when index is added. */ public async createHnswIndex( options: { m?: number; efConstruction?: number; efSearch?: number; indexName?: string; } = {} ): Promise<void> { const { m, efConstruction, efSearch, indexName } = options; // Determine the distance function based on the configured strategy const distanceFuncName = HANA_DISTANCE_FUNCTION[this.distanceStrategy][0]; const defaultIndexName = `${this.tableName}_${distanceFuncName}_idx`; // Use provided indexName or fallback to default const finalIndexName = HanaDB.sanitizeName(indexName || defaultIndexName); // Initialize buildConfig and searchConfig objects const buildConfig: Record<string, number> = {}; const searchConfig: Record<string, number> = {}; // Validate and add m parameter to buildConfig if provided if (m !== undefined) { const minimumHnswM = 4; const maximumHnswM = 1000; const sanitizedM = HanaDB.sanitizeInt(m, minimumHnswM); if (sanitizedM < minimumHnswM || sanitizedM > maximumHnswM) { throw new Error("M must be in the range [4, 1000]"); } buildConfig.M = sanitizedM; } // Validate and add efConstruction to buildConfig if provided if (efConstruction !== undefined) { const minimumEfConstruction = 1; const maximumEfConstruction = 100000; const sanitizedEfConstruction = HanaDB.sanitizeInt( efConstruction, minimumEfConstruction ); if ( sanitizedEfConstruction < minimumEfConstruction || sanitizedEfConstruction > maximumEfConstruction ) { throw new Error("efConstruction must be in the range [1, 100000]"); } buildConfig.efConstruction = sanitizedEfConstruction; } // Validate and add efSearch to searchConfig if provided if (efSearch !== undefined) { const minimumEfSearch = 1; const maximumEfSearch = 100000; const sanitizedEfSearch = HanaDB.sanitizeInt(efSearch, minimumEfSearch); if ( sanitizedEfSearch < minimumEfSearch || sanitizedEfSearch > maximumEfSearch ) { throw new Error("efSearch must be in the range [1, 
100000]"); } searchConfig.efSearch = sanitizedEfSearch; } // Convert buildConfig and searchConfig to JSON strings if they contain values const buildConfigStr = Object.keys(buildConfig).length ? JSON.stringify(buildConfig) : ""; const searchConfigStr = Object.keys(searchConfig).length ? JSON.stringify(searchConfig) : ""; // Create the base SQL string for index creation let sqlStr = `CREATE HNSW VECTOR INDEX ${finalIndexName} ON "${this.tableName}" ("${this.vectorColumn}") SIMILARITY FUNCTION ${distanceFuncName} `; // Append buildConfig to the SQL string if provided if (buildConfigStr) { sqlStr += `BUILD CONFIGURATION '${buildConfigStr}' `; } // Append searchConfig to the SQL string if provided if (searchConfigStr) { sqlStr += `SEARCH CONFIGURATION '${searchConfigStr}' `; } // Add the ONLINE option sqlStr += "ONLINE;"; const client = this.connection; await this.executeQuery(client, sqlStr); } /** * Deletes entries from the table based on the provided filter. * @param ids - Optional. Deletion by ids is not supported and will throw an error. * @param filter - Optional. A filter object to specify which entries to delete. * @throws Error if 'ids' parameter is provided, as deletion by ids is not supported. * @throws Error if 'filter' parameter is not provided, as it is required for deletion. * to do: adjust the call signature */ public async delete(options: { ids?: string[]; filter?: Filter; }): Promise<void> { const { ids, filter } = options; if (ids) { throw new Error("Deletion via IDs is not supported"); } if (!filter) { throw new Error("Parameter 'filter' is required when calling 'delete'"); } const [whereStr, queryTuple] = this.createWhereByFilter(filter); const sqlStr = `DELETE FROM "${this.tableName}" ${whereStr}`; const client = this.connection; const stm = await this.prepareQuery(client, sqlStr); await this.executeStatement(stm, queryTuple); } /** * Static method to create a HanaDB instance from raw texts. 
This method embeds the documents, * creates a table if it does not exist, and adds the documents to the table. * @param texts Array of text documents to add. * @param metadatas metadata for each text document. * @param embedding EmbeddingsInterface instance for document embedding. * @param dbConfig Configuration for the HanaDB. * @returns A Promise that resolves to an instance of HanaDB. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: HanaDBArgs ): Promise<HanaDB> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return HanaDB.fromDocuments(docs, embeddings, dbConfig); } /** * Creates an instance of `HanaDB` from an array of * Document instances. The documents are added to the database. * @param docs List of documents to be converted to vectors. * @param embeddings Embeddings instance used to convert the documents to vectors. * @param dbConfig Configuration for the HanaDB. * @returns Promise that resolves to an instance of `HanaDB`. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: HanaDBArgs ): Promise<HanaDB> { const instance = new HanaDB(embeddings, dbConfig); await instance.initialize(); await instance.addDocuments(docs); return instance; } /** * Adds an array of documents to the table. The documents are first * converted to vectors using the `embedDocuments` method of the * `embeddings` instance. * @param documents Array of Document instances to be added to the table. * @returns Promise that resolves when the documents are added. 
*/ async addDocuments(documents: Document[]): Promise<void> { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents ); } /** * Adds an array of vectors and corresponding documents to the database. * The vectors and documents are batch inserted into the database. * @param vectors Array of vectors to be added to the table. * @param documents Array of Document instances corresponding to the vectors. * @returns Promise that resolves when the vectors and documents are added. */ async addVectors(vectors: number[][], documents: Document[]): Promise<void> { if (vectors.length !== documents.length) { throw new Error(`Vectors and metadatas must have the same length`); } const texts = documents.map((doc) => doc.pageContent); const metadatas = documents.map((doc) => doc.metadata); const client = this.connection; const sqlParams: [string, string, string][] = texts.map((text, i) => { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; // Ensure embedding is generated or provided const embeddingString = `[${vectors[i].join(", ")}]`; // Prepare the SQL parameters return [ text, JSON.stringify(this.sanitizeMetadataKeys(metadata)), embeddingString, ]; }); // Insert data into the table, bulk insert. const sqlStr = `INSERT INTO "${this.tableName}" ("${this.contentColumn}", "${this.metadataColumn}", "${this.vectorColumn}") VALUES (?, ?, TO_REAL_VECTOR(?));`; const stm = await this.prepareQuery(client, sqlStr); await this.executeStatement(stm, sqlParams); // stm.execBatch(sqlParams); } /** * Return docs most similar to query. * @param query Query text for the similarity search. * @param k Number of Documents to return. Defaults to 4. * @param filter A dictionary of metadata fields and values to filter by. Defaults to None. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. 
*/ async similaritySearch( query: string, k: number, filter?: this["FilterType"] ): Promise<Document[]> { const results = await this.similaritySearchWithScore(query, k, filter); return results.map((result) => result[0]); } /** * Return documents and score values most similar to query. * @param query Query text for the similarity search. * @param k Number of Documents to return. Defaults to 4. * @param filter A dictionary of metadata fields and values to filter by. Defaults to None. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. */ async similaritySearchWithScore( query: string, k: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { const queryEmbedding = await this.embeddings.embedQuery(query); return this.similaritySearchVectorWithScore(queryEmbedding, k, filter); } /** * Return docs most similar to the given embedding. * @param query Query embedding for the similarity search. * @param k Number of Documents to return. Defaults to 4. * @param filter A dictionary of metadata fields and values to filter by. Defaults to None. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. */ async similaritySearchVectorWithScore( queryEmbedding: number[], k: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { const wholeResult = await this.similaritySearchWithScoreAndVectorByVector( queryEmbedding, k, filter ); // Return documents and scores, discarding the vectors return wholeResult.map(([doc, score]) => [doc, score]); } /** * Performs a similarity search based on vector comparison and returns documents along with their similarity scores and vectors. * @param embedding The vector representation of the query for similarity comparison. * @param k The number of top similar documents to return. * @param filter Optional filter criteria to apply to the search query. 
* @returns A promise that resolves to an array of tuples, each containing a Document, its similarity score, and its vector. */ async similaritySearchWithScoreAndVectorByVector( embedding: number[], k: number, filter?: this["FilterType"] ): Promise<Array<[Document, number, number[]]>> { // Sanitize inputs const sanitizedK = HanaDB.sanitizeInt(k); const sanitizedEmbedding = HanaDB.sanitizeListFloat(embedding); // Determine the distance function based on the configured strategy const distanceFuncName = HANA_DISTANCE_FUNCTION[this.distanceStrategy][0]; // Convert the embedding vector to a string for SQL query const embeddingAsString = sanitizedEmbedding.join(","); let sqlStr = `SELECT TOP ${sanitizedK} "${this.contentColumn}", "${this.metadataColumn}", TO_NVARCHAR("${this.vectorColumn}") AS VECTOR, ${distanceFuncName}("${this.vectorColumn}", TO_REAL_VECTOR('[${embeddingAsString}]')) AS CS FROM "${this.tableName}"`; // Add order by clause to sort by similarity const orderStr = ` ORDER BY CS ${ HANA_DISTANCE_FUNCTION[this.distanceStrategy][1] }`; // Prepare and execute the SQL query const [whereStr, queryTuple] = this.createWhereByFilter(filter); sqlStr += whereStr + orderStr; const client = this.connection; const stm = await this.prepareQuery(client, sqlStr); const resultSet = await this.executeStatement(stm, queryTuple); const result: Array<[Document, number, number[]]> = resultSet.map( // eslint-disable-next-line @typescript-eslint/no-explicit-any (row: any) => { const metadata = JSON.parse(row[this.metadataColumn].toString("utf8")); const doc: Document = { pageContent: row[this.contentColumn].toString("utf8"), metadata, }; const resultVector = HanaDB.parseFloatArrayFromString(row.VECTOR); const score = row.CS; return [doc, score, resultVector]; } ); return result; } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND * diversity among selected documents. 
* @param query Text to look up documents similar to. * @param options.k Number of documents to return. * @param options.fetchK=20 Number of documents to fetch before passing to * the MMR algorithm. * @param options.lambda=0.5 Number between 0 and 1 that determines the * degree of diversity among the results, where 0 corresponds to maximum * diversity and 1 to minimum diversity. * @returns List of documents selected by maximal marginal relevance. */ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { const { k, fetchK = 20, lambda = 0.5 } = options; const queryEmbedding = await this.embeddings.embedQuery(query); const docs = await this.similaritySearchWithScoreAndVectorByVector( queryEmbedding, fetchK ); // docs is an Array of tuples: [Document, number, number[]] const embeddingList = docs.map((doc) => doc[2]); // Extracts the embedding from each tuple // Re-rank the results using MMR const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, lambda, k ); const mmrDocs = mmrIndexes.map((index) => docs[index][0]); return mmrDocs; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/zep_cloud.ts
/* eslint-disable @typescript-eslint/no-non-null-assertion */ import { ZepClient } from "@getzep/zep-cloud"; import { CreateDocumentRequest, DocumentSearchResult, NotFoundError, } from "@getzep/zep-cloud/api"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { Callbacks } from "@langchain/core/callbacks/manager"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; import { FakeEmbeddings } from "@langchain/core/utils/testing"; function zepDocsToDocumentsAndScore( results: DocumentSearchResult[] ): [Document, number][] { return results.map((d) => [ new Document({ pageContent: d.content ?? "", metadata: d.metadata, }), d.score ? d.score : 0, ]); } function assignMetadata( value: string | Record<string, unknown> | object | undefined ): Record<string, unknown> | undefined { if (typeof value === "object" && value !== null) { return value as Record<string, unknown>; } if (value !== undefined) { console.warn("Metadata filters must be an object, Record, or undefined."); } return undefined; } /** * Interface for the configuration options for a ZepCloudVectorStore instance. */ export interface IZepCloudConfig { apiKey?: string; client?: ZepClient; collectionName: string; description?: string; metadata?: Record<string, never>; } /** * Interface for the parameters required to delete documents from a * ZepCloudVectorStore instance. */ export interface IZepCloudDeleteParams { uuids: string[]; } /** * ZepCloudVectorStore is a VectorStore implementation * that uses the Zep long-term memory store as a backend. * * If the collection does not exist, it will be created automatically. * * Requires `@getzep/zep-cloud` to be installed: * * * @property {ZepClient} client - The ZepClient instance used to interact with Zep's API. 
* @property {Promise<void>} initPromise - A promise that resolves * when the collection is initialized. */ export class ZepCloudVectorStore extends VectorStore { public client: ZepClient; public collectionName: string; private readonly initPromise: Promise<void>; constructor(embeddings: EmbeddingsInterface, args: IZepCloudConfig) { super(embeddings, args); this.initPromise = this.initCollection(args).catch((err) => { console.error("Error initializing collection:", err); throw err; }); } /** * Initializes the document collection. If the collection does not exist, it creates a new one. * * @param {IZepConfig} args - The configuration object for the Zep API. */ private async initCollection(args: IZepCloudConfig) { if (args.client) { this.client = args.client; } else { this.client = new ZepClient({ apiKey: args.apiKey, }); } try { this.collectionName = args.collectionName; await this.client.document.getCollection(this.collectionName); } catch (err) { // eslint-disable-next-line no-instanceof/no-instanceof if (err instanceof Error) { // eslint-disable-next-line no-instanceof/no-instanceof if (err instanceof NotFoundError || err.name === "NotFoundError") { await this.createCollection(args); } else { throw err; } } } } /** * Creates a new document collection. * * @param {IZepConfig} args - The configuration object for the Zep API. */ private async createCollection(args: IZepCloudConfig) { await this.client.document.addCollection(args.collectionName, { description: args.description, metadata: args.metadata, }); } async addVectors(): Promise<string[]> { throw new Error("Adding vectors is not supported in Zep Cloud."); } /** * Adds documents to the collection. The documents are first embedded into vectors * using the provided embedding model. * * @param {Document[]} documents - The documents to add. * @returns {Promise<string[]>} - A promise that resolves with the UUIDs of the added documents. 
*/ async addDocuments(documents: Document[]): Promise<string[]> { const docs: Array<CreateDocumentRequest> = []; for (let i = 0; i < documents.length; i += 1) { const doc: CreateDocumentRequest = { content: documents[i].pageContent, metadata: documents[i].metadata, }; docs.push(doc); } // Wait for collection to be initialized await this.initPromise; return this.client.document.addDocuments(this.collectionName, docs); } // eslint-disable-next-line class-methods-use-this,no-underscore-dangle _vectorstoreType(): string { return "zep"; } /** * Deletes documents from the collection. * * @param {IZepDeleteParams} params - The list of Zep document UUIDs to delete. * @returns {Promise<void>} */ async delete(params: IZepCloudDeleteParams): Promise<void> { // Wait for collection to be initialized await this.initPromise; // eslint-disable-next-line no-restricted-syntax for await (const uuid of params.uuids) { await this.client.document.deleteDocument(this.collectionName, uuid); } } async similaritySearchVectorWithScore(): Promise<[Document, number][]> { throw new Error("Unsupported in Zep Cloud."); } // eslint-disable-next-line no-underscore-dangle async _similaritySearchWithScore( query: string, k: number, filter?: Record<string, unknown> | undefined ): Promise<[Document, number][]> { await this.initPromise; const { results } = await this.client.document.search(this.collectionName, { text: query, metadata: assignMetadata(filter), limit: k, }); return zepDocsToDocumentsAndScore(results!); } async similaritySearchWithScore( query: string, k = 4, filter: Record<string, unknown> | undefined = undefined, // eslint-disable-next-line @typescript-eslint/no-unused-vars _callbacks = undefined // implement passing to embedQuery later ): Promise<[Document, number][]> { return this._similaritySearchWithScore(query, k, filter); } /** * Performs a similarity search on the Zep collection. * * @param {string} query - The query string to search for. 
* @param {number} [k=4] - The number of results to return. Defaults to 4. * @param {this["FilterType"] | undefined} [filter=undefined] - An optional set of JSONPath filters to apply to the search. * @param {Callbacks | undefined} [_callbacks=undefined] - Optional callbacks. Currently not implemented. * @returns {Promise<Document[]>} - A promise that resolves to an array of Documents that are similar to the query. * * @async */ async similaritySearch( query: string, k = 4, filter: this["FilterType"] | undefined = undefined, // eslint-disable-next-line @typescript-eslint/no-unused-vars _callbacks: Callbacks | undefined = undefined // implement passing to embedQuery later ): Promise<Document[]> { await this.initPromise; const { results: zepResults } = await this.client.document.search( this.collectionName, { text: query, metadata: assignMetadata(filter), limit: k, } ); const results = zepDocsToDocumentsAndScore(zepResults!); return results.map((result) => result[0]); } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. * * @param {string} query - Text to look up documents similar to. * @param options * @param {number} options.k - Number of documents to return. * @param {number} options.fetchK=20- Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda=0.5 - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. * @param {Record<string, any>} options.filter - Optional Zep JSONPath query to pre-filter on document metadata field * * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance. 
*/ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { const { k, fetchK = 20, lambda = 0.5, filter } = options; const r = await this.client.document.search(this.collectionName, { text: query, metadata: assignMetadata(filter), limit: fetchK, }); const queryEmbedding = Array.from(r.queryVector!); const results = zepDocsToDocumentsAndScore(r.results!); const embeddingList = r.results!.map((doc) => Array.from(doc.embedding ? doc.embedding : []) ); const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, lambda, k ); return mmrIndexes.filter((idx) => idx !== -1).map((idx) => results[idx][0]); } static async init(zepConfig: IZepCloudConfig) { const instance = new this(new FakeEmbeddings(), zepConfig); // Wait for collection to be initialized await instance.initPromise; return instance; } /** * Creates a new ZepVectorStore instance from an array of texts. Each text is converted into a Document and added to the collection. * * @param {string[]} texts - The texts to convert into Documents. * @param {object[] | object} metadatas - The metadata to associate with each Document. * If an array is provided, each element is associated with the corresponding Document. * If an object is provided, it is associated with all Documents. * @param {Embeddings} embeddings - Pass FakeEmbeddings, Zep Cloud will handle text embedding for you. * @param {IZepConfig} zepConfig - The configuration object for the Zep API. * @returns {Promise<ZepVectorStore>} - A promise that resolves with the new ZepVectorStore instance. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, zepConfig: IZepCloudConfig ): Promise<ZepCloudVectorStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? 
metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return ZepCloudVectorStore.fromDocuments(docs, embeddings, zepConfig); } /** * Creates a new ZepVectorStore instance from an array of Documents. Each Document is added to a Zep collection. * * @param {Document[]} docs - The Documents to add. * @param {Embeddings} embeddings - Pass FakeEmbeddings, Zep Cloud will handle text embedding for you. * @param {IZepConfig} zepConfig - The configuration object for the Zep API. * @returns {Promise<ZepVectorStore>} - A promise that resolves with the new ZepVectorStore instance. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, zepConfig: IZepCloudConfig ): Promise<ZepCloudVectorStore> { const instance = new this(embeddings, zepConfig); // Wait for collection to be initialized await instance.initPromise; await instance.addDocuments(docs); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/lancedb.ts
import { connect, Table, Connection } from "@lancedb/lancedb";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";

/**
 * Constructor arguments for {@link LanceDB}. An already-open `table` may be
 * supplied; otherwise a connection is opened lazily at `uri` and a table
 * named `tableName` is created on the first insert.
 */
export type LanceDBArgs = {
  table?: Table;
  textKey?: string;
  uri?: string;
  tableName?: string;
  mode?: "create" | "overwrite";
};

/**
 * A wrapper for an open-source database for vector-search with persistent
 * storage. It simplifies retrieval, filtering, and management of
 * embeddings.
 */
export class LanceDB extends VectorStore {
  private table?: Table;

  private textKey: string;

  private uri: string;

  private tableName: string;

  private mode?: "create" | "overwrite";

  constructor(embeddings: EmbeddingsInterface, args?: LanceDBArgs) {
    super(embeddings, args || {});
    const { table, textKey, uri, tableName, mode } = args ?? {};
    this.table = table;
    this.embeddings = embeddings;
    this.textKey = textKey || "text";
    this.uri = uri || "~/lancedb";
    this.tableName = tableName || "langchain";
    this.mode = mode || "overwrite";
  }

  /**
   * Embeds the page content of each document and stores the resulting
   * vectors together with the documents.
   * @param documents The documents to be added.
   * @returns A Promise that resolves when the documents have been added.
   */
  async addDocuments(documents: Document[]): Promise<void> {
    const embedded = await this.embeddings.embedDocuments(
      documents.map((doc) => doc.pageContent)
    );
    return this.addVectors(embedded, documents);
  }

  _vectorstoreType(): string {
    return "lancedb";
  }

  /**
   * Adds vectors and their corresponding documents to the database,
   * creating the table on first use when none was supplied up front.
   * @param vectors The vectors to be added.
   * @param documents The corresponding documents to be added.
   * @returns A Promise that resolves when the vectors and documents have been added.
   */
  async addVectors(vectors: number[][], documents: Document[]): Promise<void> {
    if (!vectors.length) {
      return;
    }
    if (vectors.length !== documents.length) {
      throw new Error(`Vectors and documents must have the same length`);
    }

    // Flatten each document into one row. Metadata keys are spread last so
    // they take precedence over the vector/text columns, preserving the
    // original record-building order.
    const rows: Array<Record<string, unknown>> = documents.map((doc, i) => ({
      vector: vectors[i],
      [this.textKey]: doc.pageContent,
      ...doc.metadata,
    }));

    if (this.table) {
      await this.table.add(rows);
      return;
    }

    // No table yet: open the connection lazily and create the table from
    // this first batch of rows.
    const db: Connection = await connect(this.uri);
    this.table = await db.createTable(this.tableName, rows, {
      mode: this.mode,
    });
  }

  /**
   * Performs a similarity search on the vectors in the database and returns
   * the documents and their scores.
   * @param query The query vector.
   * @param k The number of results to return.
   * @returns A Promise that resolves with an array of tuples, each containing a Document and its score.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number
  ): Promise<[Document, number][]> {
    if (!this.table) {
      throw new Error(
        "Table not found. Please add vectors to the table first."
      );
    }

    const rows = await this.table.query().nearestTo(query).limit(k).toArray();

    return rows.map((row) => {
      // Everything except the vector, the score and the text column is
      // treated as user metadata.
      const metadata = Object.fromEntries(
        Object.entries(row).filter(
          ([key]) =>
            key !== "vector" && key !== "score" && key !== this.textKey
        )
      );
      return [
        new Document({
          pageContent: row[this.textKey] as string,
          metadata,
        }),
        row.score as number,
      ] as [Document, number];
    });
  }

  /**
   * Creates a new instance of LanceDB from texts.
   * @param texts The texts to be converted into documents.
   * @param metadatas The metadata for the texts (one object per text, or a single shared object).
   * @param embeddings The embeddings to be managed.
   * @param dbConfig The configuration for the LanceDB instance.
   * @returns A Promise that resolves with a new instance of LanceDB.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig?: LanceDBArgs
  ): Promise<LanceDB> {
    const docs = texts.map(
      (pageContent, i) =>
        new Document({
          pageContent,
          metadata: Array.isArray(metadatas) ? metadatas[i] : metadatas,
        })
    );
    return LanceDB.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * Creates a new instance of LanceDB from documents.
   * @param docs The documents to be added to the database.
   * @param embeddings The embeddings to be managed.
   * @param dbConfig The configuration for the LanceDB instance.
   * @returns A Promise that resolves with a new instance of LanceDB.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig?: LanceDBArgs
  ): Promise<LanceDB> {
    const store = new this(embeddings, dbConfig);
    await store.addDocuments(docs);
    return store;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/myscale.ts
import * as uuid from "uuid";
import { ClickHouseClient, createClient } from "@clickhouse/client";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";

/**
 * Arguments for the MyScaleStore class, which include the host, port,
 * protocol, username, password, index type, index parameters, column map,
 * database, table, and metric.
 */
export interface MyScaleLibArgs {
  host: string;
  port: string | number;
  protocol?: string;
  username: string;
  password: string;
  indexType?: string;
  indexParam?: Record<string, string>;
  columnMap?: ColumnMap;
  database?: string;
  table?: string;
  metric?: metric;
}

/**
 * Mapping of columns in the MyScale database.
 */
export interface ColumnMap {
  id: string;
  text: string;
  vector: string;
  metadata: string;
}

/**
 * Type of metric used in the MyScale database.
 */
export type metric = "L2" | "Cosine" | "IP";

/**
 * Type for filtering search results in the MyScale database.
 * NOTE: `whereStr` is interpolated into SQL verbatim — it must come from
 * trusted code, never from end-user input.
 */
export interface MyScaleFilter {
  whereStr: string;
}

/**
 * Class for interacting with the MyScale database. It extends the
 * VectorStore class and provides methods for adding vectors and
 * documents, searching for similar vectors, and creating instances from
 * texts or documents.
 */
export class MyScaleStore extends VectorStore {
  declare FilterType: MyScaleFilter;

  private client: ClickHouseClient;

  private indexType: string;

  private indexParam: Record<string, string>;

  private columnMap: ColumnMap;

  private database: string;

  private table: string;

  private metric: metric;

  // Set once the backing table has been created (lazily, on first use).
  private isInitialized = false;

  _vectorstoreType(): string {
    return "myscale";
  }

  constructor(embeddings: EmbeddingsInterface, args: MyScaleLibArgs) {
    super(embeddings, args);

    this.indexType = args.indexType || "MSTG";
    this.indexParam = args.indexParam || {};
    this.columnMap = args.columnMap || {
      id: "id",
      text: "text",
      vector: "vector",
      metadata: "metadata",
    };
    this.database = args.database || "default";
    this.table = args.table || "vector_table";
    this.metric = args.metric || "Cosine";

    this.client = createClient({
      host: `${args.protocol ?? "https://"}${args.host}:${args.port}`,
      username: args.username,
      password: args.password,
      session_id: uuid.v4(),
    });
  }

  /**
   * Method to add vectors to the MyScale database.
   * @param vectors The vectors to add.
   * @param documents The documents associated with the vectors.
   * @returns Promise that resolves when the vectors have been added.
   */
  async addVectors(vectors: number[][], documents: Document[]): Promise<void> {
    if (vectors.length === 0) {
      return;
    }

    if (!this.isInitialized) {
      await this.initialize(vectors[0].length);
    }

    const queryStr = this.buildInsertQuery(vectors, documents);
    await this.client.exec({ query: queryStr });
  }

  /**
   * Method to add documents to the MyScale database.
   * @param documents The documents to add.
   * @returns Promise that resolves when the documents have been added.
   */
  async addDocuments(documents: Document[]): Promise<void> {
    return this.addVectors(
      await this.embeddings.embedDocuments(documents.map((d) => d.pageContent)),
      documents
    );
  }

  /**
   * Method to search for vectors that are similar to a given query vector.
   * @param query The query vector.
   * @param k The number of similar vectors to return.
   * @param filter Optional filter for the search results.
   * @returns Promise that resolves with an array of tuples, each containing a Document and a score.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: this["FilterType"]
  ): Promise<[Document, number][]> {
    if (!this.isInitialized) {
      await this.initialize(query.length);
    }

    const queryStr = this.buildSearchQuery(query, k, filter);

    const queryResultSet = await this.client.query({ query: queryStr });

    const queryResult: {
      data: { text: string; metadata: object; dist: number }[];
    } = await queryResultSet.json();

    const result: [Document, number][] = queryResult.data.map((item) => [
      new Document({ pageContent: item.text, metadata: item.metadata }),
      item.dist,
    ]);

    return result;
  }

  /**
   * Static method to create an instance of MyScaleStore from texts.
   * @param texts The texts to use.
   * @param metadatas The metadata associated with the texts.
   * @param embeddings The embeddings to use.
   * @param args The arguments for the MyScaleStore.
   * @returns Promise that resolves with a new instance of MyScaleStore.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object | object[],
    embeddings: EmbeddingsInterface,
    args: MyScaleLibArgs
  ): Promise<MyScaleStore> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return MyScaleStore.fromDocuments(docs, embeddings, args);
  }

  /**
   * Static method to create an instance of MyScaleStore from documents.
   * @param docs The documents to use.
   * @param embeddings The embeddings to use.
   * @param args The arguments for the MyScaleStore.
   * @returns Promise that resolves with a new instance of MyScaleStore.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    args: MyScaleLibArgs
  ): Promise<MyScaleStore> {
    const instance = new this(embeddings, args);
    await instance.addDocuments(docs);
    return instance;
  }

  /**
   * Static method to create an instance of MyScaleStore from an existing
   * index.
   * @param embeddings The embeddings to use.
   * @param args The arguments for the MyScaleStore.
   * @returns Promise that resolves with a new instance of MyScaleStore.
   */
  static async fromExistingIndex(
    embeddings: EmbeddingsInterface,
    args: MyScaleLibArgs
  ): Promise<MyScaleStore> {
    const instance = new this(embeddings, args);

    await instance.initialize();
    return instance;
  }

  /**
   * Method to initialize the MyScale database: creates the table (if it
   * does not exist) with a vector index and length constraint.
   * @param dimension Optional dimension of the vectors; when omitted, a test
   * embedding is computed to discover it.
   * @returns Promise that resolves when the database has been initialized.
   */
  private async initialize(dimension?: number): Promise<void> {
    const dim = dimension ?? (await this.embeddings.embedQuery("test")).length;

    let indexParamStr = "";
    for (const [key, value] of Object.entries(this.indexParam)) {
      indexParamStr += `, '${key}=${value}'`;
    }

    const query = `
      CREATE TABLE IF NOT EXISTS ${this.database}.${this.table}(
        ${this.columnMap.id} String,
        ${this.columnMap.text} String,
        ${this.columnMap.vector} Array(Float32),
        ${this.columnMap.metadata} JSON,
        CONSTRAINT cons_vec_len CHECK length(${this.columnMap.vector}) = ${dim},
        VECTOR INDEX vidx ${this.columnMap.vector} TYPE ${this.indexType}('metric_type=${this.metric}'${indexParamStr})
      ) ENGINE = MergeTree ORDER BY ${this.columnMap.id}
    `;

    // JSON column type and named-tuple JSON output are experimental features
    // that must be enabled per session.
    await this.client.exec({ query: "SET allow_experimental_object_type=1" });
    await this.client.exec({
      query: "SET output_format_json_named_tuples_as_objects = 1",
    });
    await this.client.exec({ query });
    this.isInitialized = true;
  }

  /**
   * Method to build an SQL query for inserting vectors and documents into
   * the MyScale database.
   * @param vectors The vectors to insert.
   * @param documents The documents to insert.
   * @returns The SQL query string.
   */
  private buildInsertQuery(vectors: number[][], documents: Document[]): string {
    const columnsStr = Object.values(this.columnMap).join(", ");

    const data: string[] = [];
    for (let i = 0; i < vectors.length; i += 1) {
      const vector = vectors[i];
      const document = documents[i];
      const item = [
        `'${uuid.v4()}'`,
        `'${this.escapeString(document.pageContent)}'`,
        `[${vector}]`,
        // BUGFIX: the serialized metadata must be escaped like the page
        // content — JSON text containing single quotes or backslashes would
        // otherwise break out of (or inject into) the string literal.
        `'${this.escapeString(JSON.stringify(document.metadata))}'`,
      ].join(", ");
      data.push(`(${item})`);
    }
    const dataStr = data.join(", ");

    return `
      INSERT INTO TABLE
        ${this.database}.${this.table}(${columnsStr})
      VALUES
        ${dataStr}
    `;
  }

  // Escapes backslashes and single quotes for safe embedding in a
  // ClickHouse single-quoted string literal.
  private escapeString(str: string): string {
    return str.replace(/\\/g, "\\\\").replace(/'/g, "\\'");
  }

  /**
   * Method to build an SQL query for searching for similar vectors in the
   * MyScale database.
   * @param query The query vector.
   * @param k The number of similar vectors to return.
   * @param filter Optional filter for the search results.
   * @returns The SQL query string.
   */
  private buildSearchQuery(
    query: number[],
    k: number,
    filter?: MyScaleFilter
  ): string {
    // Inner-product similarity grows with relevance, so sort descending;
    // L2 / Cosine are distances, so sort ascending.
    const order = this.metric === "IP" ? "DESC" : "ASC";

    const whereStr = filter ? `PREWHERE ${filter.whereStr}` : "";
    return `
      SELECT ${this.columnMap.text} AS text, ${this.columnMap.metadata} AS metadata, dist
      FROM ${this.database}.${this.table}
      ${whereStr}
      ORDER BY distance(${this.columnMap.vector}, [${query}]) AS dist ${order}
      LIMIT ${k}
    `;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/vercel_postgres.ts
import { type VercelPool, type VercelPoolClient, type VercelPostgresPoolConfig, createPool, } from "@vercel/postgres"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; type Metadata = Record<string, string | number | Record<"in", string[]>>; /** * Interface that defines the arguments required to create a * `VercelPostgres` instance. It includes Postgres connection options, * table name, filter, and verbosity level. */ export interface VercelPostgresFields { pool: VercelPool; client: VercelPoolClient; tableName?: string; columns?: { idColumnName?: string; vectorColumnName?: string; contentColumnName?: string; metadataColumnName?: string; }; filter?: Metadata; verbose?: boolean; } /** * Class that provides an interface to a Vercel Postgres vector database. It * extends the `VectorStore` base class and implements methods for adding * documents and vectors and performing similarity searches. */ export class VercelPostgres extends VectorStore { declare FilterType: Metadata; tableName: string; idColumnName: string; vectorColumnName: string; contentColumnName: string; metadataColumnName: string; filter?: Metadata; _verbose?: boolean; pool: VercelPool; client: VercelPoolClient; _vectorstoreType(): string { return "vercel"; } constructor(embeddings: EmbeddingsInterface, config: VercelPostgresFields) { super(embeddings, config); this.tableName = config.tableName ?? "langchain_vectors"; this.filter = config.filter; this.vectorColumnName = config.columns?.vectorColumnName ?? "embedding"; this.contentColumnName = config.columns?.contentColumnName ?? "text"; this.idColumnName = config.columns?.idColumnName ?? "id"; this.metadataColumnName = config.columns?.metadataColumnName ?? 
"metadata"; this.pool = config.pool; this.client = config.client; this._verbose = getEnvironmentVariable("LANGCHAIN_VERBOSE") === "true" ?? !!config.verbose; } /** * Static method to create a new `VercelPostgres` instance from a * connection. It creates a table if one does not exist, and calls * `connect` to return a new instance of `VercelPostgres`. * * @param embeddings - Embeddings instance. * @param fields - `VercelPostgres` configuration options. * @returns A new instance of `VercelPostgres`. */ static async initialize( embeddings: EmbeddingsInterface, config?: Partial<VercelPostgresFields> & { postgresConnectionOptions?: VercelPostgresPoolConfig; } ): Promise<VercelPostgres> { // Default maxUses to 1 for edge environments: // https://github.com/vercel/storage/tree/main/packages/postgres#a-note-on-edge-environments const pool = config?.pool ?? createPool({ maxUses: 1, ...config?.postgresConnectionOptions }); const client = config?.client ?? (await pool.connect()); const postgresqlVectorStore = new VercelPostgres(embeddings, { ...config, pool, client, }); await postgresqlVectorStore.ensureTableInDatabase(); return postgresqlVectorStore; } /** * Method to add documents to the vector store. It converts the documents into * vectors, and adds them to the store. * * @param documents - Array of `Document` instances. * @returns Promise that resolves when the documents have been added. */ async addDocuments( documents: Document[], options?: { ids?: string[] } ): Promise<string[]> { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } /** * Generates the SQL placeholders for a specific row at the provided index. * * @param index - The index of the row for which placeholders need to be generated. * @returns The SQL placeholders for the row values. 
*/ protected generatePlaceholderForRowAt( // eslint-disable-next-line @typescript-eslint/no-explicit-any row: (string | Record<string, any>)[], index: number ): string { const base = index * row.length; return `(${row.map((_, j) => `$${base + 1 + j}`)})`; } /** * Constructs the SQL query for inserting rows into the specified table. * * @param rows - The rows of data to be inserted, consisting of values and records. * @param chunkIndex - The starting index for generating query placeholders based on chunk positioning. * @returns The complete SQL INSERT INTO query string. */ protected async runInsertQuery( // eslint-disable-next-line @typescript-eslint/no-explicit-any rows: (string | Record<string, any>)[][], useIdColumn: boolean ) { const values = rows.map((row, j) => this.generatePlaceholderForRowAt(row, j) ); const flatValues = rows.flat(); return this.client.query( ` INSERT INTO ${this.tableName} ( ${useIdColumn ? `${this.idColumnName},` : ""} ${this.contentColumnName}, ${this.vectorColumnName}, ${this.metadataColumnName} ) VALUES ${values.join(", ")} ON CONFLICT (${this.idColumnName}) DO UPDATE SET ${this.contentColumnName} = EXCLUDED.${this.contentColumnName}, ${this.vectorColumnName} = EXCLUDED.${this.vectorColumnName}, ${this.metadataColumnName} = EXCLUDED.${this.metadataColumnName} RETURNING ${this.idColumnName}`, flatValues ); } /** * Method to add vectors to the vector store. It converts the vectors into * rows and inserts them into the database. * * @param vectors - Array of vectors. * @param documents - Array of `Document` instances. * @returns Promise that resolves when the vectors have been added. 
*/ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } ): Promise<string[]> { if (options?.ids !== undefined && options?.ids.length !== vectors.length) { throw new Error( `If provided, the length of "ids" must be the same as the number of vectors.` ); } const rows = vectors.map((embedding, idx) => { const embeddingString = `[${embedding.join(",")}]`; const row = [ documents[idx].pageContent, embeddingString, documents[idx].metadata, ]; if (options?.ids) { return [options.ids[idx], ...row]; } return row; }); const chunkSize = 500; const ids = []; for (let i = 0; i < rows.length; i += chunkSize) { const chunk = rows.slice(i, i + chunkSize); try { const result = await this.runInsertQuery( chunk, options?.ids !== undefined ); ids.push(...result.rows.map((row) => row[this.idColumnName])); } catch (e) { console.error(e); throw new Error(`Error inserting: ${(e as Error).message}`); } } return ids; } /** * Method to perform a similarity search in the vector store. It returns * the `k` most similar documents to the query vector, along with their * similarity scores. * * @param query - Query vector. * @param k - Number of most similar documents to return. * @param filter - Optional filter to apply to the search. * @returns Promise that resolves with an array of tuples, each containing a `Document` and its similarity score. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { const embeddingString = `[${query.join(",")}]`; const _filter: this["FilterType"] = filter ?? 
{}; const whereClauses = []; const values = [embeddingString, k]; let paramCount = values.length; for (const [key, value] of Object.entries(_filter)) { if (typeof value === "object" && value !== null) { const currentParamCount = paramCount; const placeholders = value.in .map((_, index) => `$${currentParamCount + index + 1}`) .join(","); whereClauses.push( `${this.metadataColumnName}->>'${key}' IN (${placeholders})` ); values.push(...value.in); paramCount += value.in.length; } else { paramCount += 1; whereClauses.push( `${this.metadataColumnName}->>'${key}' = $${paramCount}` ); values.push(value); } } const whereClause = whereClauses.length ? `WHERE ${whereClauses.join(" AND ")}` : ""; const queryString = ` SELECT *, ${this.vectorColumnName} <=> $1 as "_distance" FROM ${this.tableName} ${whereClause} ORDER BY "_distance" ASC LIMIT $2;`; const documents = (await this.client.query(queryString, values)).rows; const results = [] as [Document, number][]; for (const doc of documents) { if (doc._distance != null && doc[this.contentColumnName] != null) { const document = new Document({ pageContent: doc[this.contentColumnName], metadata: doc[this.metadataColumnName], }); results.push([document, doc._distance]); } } return results; } async delete(params: { ids?: string[]; deleteAll?: boolean }): Promise<void> { if (params.ids !== undefined) { await this.client.query( `DELETE FROM ${this.tableName} WHERE ${ this.idColumnName } IN (${params.ids.map((_, idx) => `$${idx + 1}`)})`, params.ids ); } else if (params.deleteAll) { await this.client.query(`TRUNCATE TABLE ${this.tableName}`); } } /** * Method to ensure the existence of the table in the database. It creates * the table if it does not already exist. * * @returns Promise that resolves when the table has been ensured. 
*/ async ensureTableInDatabase(): Promise<void> { await this.client.query(`CREATE EXTENSION IF NOT EXISTS vector;`); await this.client.query(`CREATE EXTENSION IF NOT EXISTS "uuid-ossp";`); await this.client.query(`CREATE TABLE IF NOT EXISTS "${this.tableName}" ( "${this.idColumnName}" uuid NOT NULL DEFAULT uuid_generate_v4() PRIMARY KEY, "${this.contentColumnName}" text, "${this.metadataColumnName}" jsonb, "${this.vectorColumnName}" vector );`); } /** * Static method to create a new `VercelPostgres` instance from an * array of texts and their metadata. It converts the texts into * `Document` instances and adds them to the store. * * @param texts - Array of texts. * @param metadatas - Array of metadata objects or a single metadata object. * @param embeddings - Embeddings instance. * @param fields - `VercelPostgres` configuration options. * @returns Promise that resolves with a new instance of `VercelPostgres`. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig?: Partial<VercelPostgresFields> & { postgresConnectionOptions?: VercelPostgresPoolConfig; } ): Promise<VercelPostgres> { const docs = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return this.fromDocuments(docs, embeddings, dbConfig); } /** * Static method to create a new `VercelPostgres` instance from an * array of `Document` instances. It adds the documents to the store. * * @param docs - Array of `Document` instances. * @param embeddings - Embeddings instance. * @param fields - `VercelPostgres` configuration options. * @returns Promise that resolves with a new instance of `VercelPostgres`. 
*/ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig?: Partial<VercelPostgresFields> & { postgresConnectionOptions?: VercelPostgresPoolConfig; } ): Promise<VercelPostgres> { const instance = await this.initialize(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } /** * Closes all the clients in the pool and terminates the pool. * * @returns Promise that resolves when all clients are closed and the pool is terminated. */ async end(): Promise<void> { await this.client?.release(); return this.pool.end(); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/astradb.ts
/* eslint-disable @typescript-eslint/no-explicit-any */ import * as uuid from "uuid"; import { Collection, DataAPIClient, CreateCollectionOptions, Db, InsertManyError, } from "@datastax/astra-db-ts"; import { AsyncCaller, AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import { Document } from "@langchain/core/documents"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; export type CollectionFilter = Record<string, unknown>; export interface AstraLibArgs extends AsyncCallerParams { token: string; endpoint: string; collection: string; namespace?: string; idKey?: string; contentKey?: string; skipCollectionProvisioning?: boolean; collectionOptions?: CreateCollectionOptions<any>; batchSize?: number; } export type AstraDeleteParams = { ids: string[]; }; export class AstraDBVectorStore extends VectorStore { declare FilterType: CollectionFilter; private astraDBClient: Db; private collectionName: string; private collection: Collection | undefined; private collectionOptions: CreateCollectionOptions<any> | undefined; private readonly idKey: string; private readonly contentKey: string; // if undefined the entirety of the content aside from the id and embedding will be stored as content caller: AsyncCaller; private readonly skipCollectionProvisioning: boolean; _vectorstoreType(): string { return "astradb"; } constructor(embeddings: EmbeddingsInterface, args: AstraLibArgs) { super(embeddings, args); const { token, endpoint, collection, collectionOptions, namespace, idKey, contentKey, skipCollectionProvisioning, ...callerArgs } = args; const dataAPIClient = new DataAPIClient(token, { caller: ["langchainjs"] }); this.astraDBClient = dataAPIClient.db(endpoint, { namespace }); this.skipCollectionProvisioning = skipCollectionProvisioning ?? 
false; if (this.skipCollectionProvisioning && collectionOptions) { throw new Error( "If 'skipCollectionProvisioning' has been set to true, 'collectionOptions' must not be defined" ); } this.collectionName = collection; this.collectionOptions = AstraDBVectorStore.applyCollectionOptionsDefaults(collectionOptions); this.idKey = idKey ?? "_id"; this.contentKey = contentKey ?? "text"; this.caller = new AsyncCaller(callerArgs); if (args.batchSize) { console.warn( "[WARNING]: `batchSize` is deprecated, and no longer has any effect.\n`astra-db-ts` > 1.0.0 handles this internally." ); } } private static applyCollectionOptionsDefaults( fromUser?: CreateCollectionOptions<any> ): CreateCollectionOptions<any> { const copy: CreateCollectionOptions<any> = fromUser ? { ...fromUser } : {}; if (copy.checkExists === undefined) { copy.checkExists = false; } if (copy.indexing === undefined) { // same default as langchain python AstraDBVectorStore. // this enables to create the collection in python/ts and use it in ts/python with default options. copy.indexing = { allow: ["metadata"] }; } return copy; } /** * Create a new collection in your Astra DB vector database and then connects to it. * If the collection already exists, it will connect to it as well. * * @returns Promise that resolves if connected to the collection. */ async initialize(): Promise<void> { if (!this.skipCollectionProvisioning) { await this.astraDBClient.createCollection( this.collectionName, this.collectionOptions ); } this.collection = await this.astraDBClient.collection(this.collectionName); console.debug("Connected to Astra DB collection"); } /** * Method to save vectors to AstraDB. * * @param vectors Vectors to save. * @param documents The documents associated with the vectors. * @returns Promise that resolves when the vectors have been added. 
*/ async addVectors( vectors: number[][], documents: Document[], options?: string[] ) { if (!this.collection) { throw new Error("Must connect to a collection before adding vectors"); } const docs = vectors.map((embedding, idx) => ({ [this.idKey]: options?.[idx] ?? uuid.v4(), [this.contentKey]: documents[idx].pageContent, $vector: embedding, ...documents[idx].metadata, })); let insertResults; const isInsertManyError = (error: any): error is InsertManyError => error.name === "InsertManyError"; try { insertResults = await this.collection.insertMany(docs, { ordered: false, }); } catch (error) { if (isInsertManyError(error)) { insertResults = error.partialResult; } else { throw error; } } const insertedIds = insertResults.insertedIds as string[]; if (insertedIds.length !== docs.length) { const missingDocs = docs.filter( (doc) => !insertedIds.includes(doc[this.idKey]) ); for (let i = 0; i < missingDocs.length; i += 1) { await this.caller.call(async () => { await this.collection?.replaceOne( { [this.idKey]: missingDocs[i][this.idKey] }, missingDocs[i] ); }); } } } /** * Method that adds documents to AstraDB. * * @param documents Array of documents to add to AstraDB. * @param options Optional ids for the documents. * @returns Promise that resolves the documents have been added. */ async addDocuments(documents: Document[], options?: string[]) { if (!this.collection) { throw new Error("Must connect to a collection before adding vectors"); } return this.addVectors( await this.embeddings.embedDocuments(documents.map((d) => d.pageContent)), documents, options ); } /** * Method that deletes documents from AstraDB. * * @param params AstraDeleteParameters for the delete. * @returns Promise that resolves when the documents have been deleted. 
*/ async delete(params: AstraDeleteParams) { if (!this.collection) { throw new Error("Must connect to a collection before deleting"); } await this.collection.deleteMany({ [this.idKey]: { $in: params.ids } }); console.log(`Deleted ${params.ids.length} documents`); } /** * Method that performs a similarity search in AstraDB and returns and similarity scores. * * @param query Query vector for the similarity search. * @param k Number of top results to return. * @param filter Optional filter to apply to the search. * @returns Promise that resolves with an array of documents and their scores. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: CollectionFilter ): Promise<[Document, number][]> { if (!this.collection) { throw new Error("Must connect to a collection before adding vectors"); } const cursor = await this.collection.find(filter ?? {}, { sort: { $vector: query }, limit: k, includeSimilarity: true, }); const results: [Document, number][] = []; for await (const row of cursor) { const { $similarity: similarity, [this.contentKey]: content, ...metadata } = row; const doc = new Document({ pageContent: content as string, metadata, }); results.push([doc, similarity as number]); } return results; } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. * * @param {string} query - Text to look up documents similar to. * @param {number} options.k - Number of documents to return. * @param {number} options.fetchK - Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. * @param {CollectionFilter} options.filter - Optional filter * * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance. 
*/ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { if (!this.collection) { throw new Error("Must connect to a collection before adding vectors"); } const queryEmbedding = await this.embeddings.embedQuery(query); const cursor = await this.collection.find(options.filter ?? {}, { sort: { $vector: queryEmbedding }, limit: options.k, includeSimilarity: true, }); const results = (await cursor.toArray()) ?? []; const embeddingList: number[][] = results.map( (row) => row.$vector as number[] ); const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, options.lambda, options.k ); const topMmrMatches = mmrIndexes.map((idx) => results[idx]); const docs: Document[] = []; topMmrMatches.forEach((match) => { const { [this.contentKey]: content, ...metadata } = match; const doc: Document = { pageContent: content as string, metadata, }; docs.push(doc); }); return docs; } /** * Static method to create an instance of AstraDBVectorStore from texts. * * @param texts The texts to use. * @param metadatas The metadata associated with the texts. * @param embeddings The embeddings to use. * @param dbConfig The arguments for the AstraDBVectorStore. * @returns Promise that resolves with a new instance of AstraDBVectorStore. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: AstraLibArgs ): Promise<AstraDBVectorStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const doc = new Document({ pageContent: texts[i], metadata, }); docs.push(doc); } return AstraDBVectorStore.fromDocuments(docs, embeddings, dbConfig); } /** * Static method to create an instance of AstraDBVectorStore from documents. * * @param docs The Documents to use. * @param embeddings The embeddings to use. 
* @param dbConfig The arguments for the AstraDBVectorStore. * @returns Promise that resolves with a new instance of AstraDBVectorStore. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: AstraLibArgs ): Promise<AstraDBVectorStore> { const instance = new this(embeddings, dbConfig); await instance.initialize(); await instance.addDocuments(docs); return instance; } /** * Static method to create an instance of AstraDBVectorStore from an existing index. * * @param embeddings The embeddings to use. * @param dbConfig The arguments for the AstraDBVectorStore. * @returns Promise that resolves with a new instance of AstraDBVectorStore. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig: AstraLibArgs ): Promise<AstraDBVectorStore> { const instance = new this(embeddings, dbConfig); await instance.initialize(); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/upstash.ts
import * as uuid from "uuid";
import { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Index as UpstashIndex, type QueryResult } from "@upstash/vector";
import { Document, DocumentInterface } from "@langchain/core/documents";
import { chunkArray } from "@langchain/core/utils/chunk_array";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import {
  AsyncCaller,
  AsyncCallerParams,
} from "@langchain/core/utils/async_caller";

/**
 * This interface defines the arguments for the UpstashVectorStore class.
 */
export interface UpstashVectorLibArgs extends AsyncCallerParams {
  index: UpstashIndex;
  filter?: string;
  namespace?: string;
}

// Arbitrary user-supplied metadata attached to each stored vector.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type UpstashMetadata = Record<string, any>;

// Metadata shape as returned from a query: the user metadata plus the
// reserved key under which the document's pageContent is stored.
export type UpstashQueryMetadata = UpstashMetadata & {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _pageContentLC: any;
};

/**
 * Type that defines the parameters for the delete method.
 * It can either contain the target id(s) or the deleteAll config to reset all the vectors.
 */
export type UpstashDeleteParams =
  | {
      ids: string | string[];
      deleteAll?: never;
    }
  | { deleteAll: boolean; ids?: never };

// Maximum number of vectors sent per upsert request; larger payloads are
// chunked and the chunks upserted concurrently via the AsyncCaller.
const CONCURRENT_UPSERT_LIMIT = 1000;

/**
 * The main class that extends the 'VectorStore' class. It provides
 * methods for interacting with Upstash index, such as adding documents,
 * deleting documents, performing similarity search and more.
 */
export class UpstashVectorStore extends VectorStore {
  declare FilterType: string;

  index: UpstashIndex;

  caller: AsyncCaller;

  // When true, documents are sent as raw text (`data`) so Upstash embeds
  // them server-side instead of using the local embeddings instance.
  useUpstashEmbeddings?: boolean;

  filter?: this["FilterType"];

  namespace?: string;

  _vectorstoreType(): string {
    return "upstash";
  }

  constructor(embeddings: EmbeddingsInterface, args: UpstashVectorLibArgs) {
    super(embeddings, args);

    // Special case where the embeddings instance is a FakeEmbeddings instance. In this case, we need to disable "instanceof" rule.
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (embeddings instanceof FakeEmbeddings) {
      this.useUpstashEmbeddings = true;
    }

    const { index, namespace, ...asyncCallerArgs } = args;
    this.index = index;
    this.caller = new AsyncCaller(asyncCallerArgs);
    this.filter = args.filter;
    this.namespace = namespace;
  }

  /**
   * This method adds documents to Upstash database. Documents are first converted to vectors
   * using the provided embeddings instance, and then upserted to the database.
   * @param documents Array of Document objects to be added to the database.
   * @param options Optional object containing array of ids for the documents.
   * @returns Promise that resolves with the ids of the provided documents when the upsert operation is done.
   */
  async addDocuments(
    documents: DocumentInterface[],
    options?: { ids?: string[]; useUpstashEmbeddings?: boolean }
  ) {
    const texts = documents.map(({ pageContent }) => pageContent);

    // Server-side embedding path: skip local embedding entirely.
    if (this.useUpstashEmbeddings || options?.useUpstashEmbeddings) {
      return this._addData(documents, options);
    }

    const embeddings = await this.embeddings.embedDocuments(texts);

    return this.addVectors(embeddings, documents, options);
  }

  /**
   * This method adds the provided vectors to Upstash database.
   * @param vectors Array of vectors to be added to the Upstash database.
   * @param documents Array of Document objects, each associated with a vector.
   * @param options Optional object containing the array of ids for the vectors.
   * @returns Promise that resolves with the ids of the provided documents when the upsert operation is done.
   */
  async addVectors(
    vectors: number[][],
    documents: DocumentInterface[],
    options?: { ids?: string[] }
  ) {
    // Generate ids when the caller did not supply them.
    const documentIds =
      options?.ids ?? Array.from({ length: vectors.length }, () => uuid.v4());

    const upstashVectors = vectors.map((vector, index) => {
      // pageContent is stored under a reserved metadata key so it can be
      // reconstructed into a Document on query.
      const metadata = {
        _pageContentLC: documents[index].pageContent,
        ...documents[index].metadata,
      };

      const id = documentIds[index];

      return {
        id,
        vector,
        metadata,
      };
    });

    const namespace = this.index.namespace(this.namespace ?? "");

    // Chunk the payload and upsert the chunks concurrently.
    const vectorChunks = chunkArray(upstashVectors, CONCURRENT_UPSERT_LIMIT);

    const batchRequests = vectorChunks.map((chunk) =>
      this.caller.call(async () => namespace.upsert(chunk))
    );

    await Promise.all(batchRequests);

    return documentIds;
  }

  /**
   * This method adds the provided documents to Upstash database. The pageContent of the documents will be embedded by Upstash Embeddings.
   * @param documents Array of Document objects to be added to the Upstash database.
   * @param options Optional object containing the array of ids for the documents.
   * @returns Promise that resolves with the ids of the provided documents when the upsert operation is done.
   */
  protected async _addData(
    documents: DocumentInterface[],
    options?: { ids?: string[] }
  ) {
    const documentIds =
      options?.ids ?? Array.from({ length: documents.length }, () => uuid.v4());

    const upstashVectorsWithData = documents.map((document, index) => {
      const metadata = {
        _pageContentLC: documents[index].pageContent,
        ...documents[index].metadata,
      };

      const id = documentIds[index];

      // `data` (instead of `vector`) triggers Upstash's server-side embedding.
      return {
        id,
        data: document.pageContent,
        metadata,
      };
    });

    const namespace = this.index.namespace(this.namespace ?? "");

    const vectorChunks = chunkArray(
      upstashVectorsWithData,
      CONCURRENT_UPSERT_LIMIT
    );

    const batchRequests = vectorChunks.map((chunk) =>
      this.caller.call(async () => namespace.upsert(chunk))
    );

    await Promise.all(batchRequests);

    return documentIds;
  }

  /**
   * This method deletes documents from the Upstash database. You can either
   * provide the target ids, or delete all vectors in the database.
   * @param params Object containing either array of ids of the documents or boolean deleteAll.
   * @returns Promise that resolves when the specified documents have been deleted from the database.
   */
  async delete(params: UpstashDeleteParams): Promise<void> {
    const namespace = this.index.namespace(this.namespace ?? "");
    if (params.deleteAll) {
      await namespace.reset();
    } else if (params.ids) {
      await namespace.delete(params.ids);
    }
  }

  /**
   * Runs the underlying Upstash query. Accepts either raw text (routed to
   * Upstash's server-side embedding via `data`) or a precomputed vector.
   * @param query Query text or query vector.
   * @param k Number of results to request.
   * @param filter Optional metadata filter string.
   * @param options Optional flags forwarded to the query (e.g. includeVectors).
   * @returns Promise resolving with the raw Upstash query results.
   */
  protected async _runUpstashQuery(
    query: number[] | string,
    k: number,
    filter?: this["FilterType"],
    options?: { includeVectors: boolean }
  ) {
    let queryResult: QueryResult<UpstashQueryMetadata>[] = [];

    const namespace = this.index.namespace(this.namespace ?? "");

    if (typeof query === "string") {
      queryResult = await namespace.query<UpstashQueryMetadata>({
        data: query,
        topK: k,
        includeMetadata: true,
        filter,
        ...options,
      });
    } else {
      queryResult = await namespace.query<UpstashQueryMetadata>({
        vector: query,
        topK: k,
        includeMetadata: true,
        filter,
        ...options,
      });
    }

    return queryResult;
  }

  /**
   * This method performs a similarity search in the Upstash database
   * over the existing vectors.
   * @param query Query vector for the similarity search.
   * @param k The number of similar vectors to return as result.
   * @returns Promise that resolves with an array of tuples, each containing
   *  Document object and similarity score. The length of the result will be
   *  maximum of 'k' and vectors in the index.
   */
  async similaritySearchVectorWithScore(
    query: number[] | string,
    k: number,
    filter?: this["FilterType"]
  ): Promise<[DocumentInterface, number][]> {
    const results = await this._runUpstashQuery(query, k, filter);

    const searchResult: [DocumentInterface, number][] = results.map((res) => {
      // Pull the stored pageContent back out of the metadata blob.
      const { _pageContentLC, ...metadata } = (res.metadata ??
        {}) as UpstashQueryMetadata;
      return [
        new Document({
          metadata,
          pageContent: _pageContentLC,
        }),
        res.score,
      ];
    });

    return searchResult;
  }

  /**
   * This method creates a new UpstashVector instance from an array of texts.
   * The texts are initially converted to Document instances and added to Upstash
   * database.
   * @param texts The texts to create the documents from.
   * @param metadatas The metadata values associated with the texts.
   * @param embeddings Embedding interface of choice, to create the text embeddings.
   * @param dbConfig Object containing the Upstash database configs.
   * @returns Promise that resolves with a new UpstashVector instance.
   */
  static async fromTexts(
    texts: string[],
    metadatas: UpstashMetadata | UpstashMetadata[],
    embeddings: EmbeddingsInterface,
    dbConfig: UpstashVectorLibArgs
  ): Promise<UpstashVectorStore> {
    const docs: DocumentInterface[] = [];

    for (let i = 0; i < texts.length; i += 1) {
      // Accept either one shared metadata object or a per-text array.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDocument = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDocument);
    }

    return this.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * This method creates a new UpstashVector instance from an array of Document instances.
   * @param docs The docs to be added to Upstash database.
   * @param embeddings Embedding interface of choice, to create the embeddings.
   * @param dbConfig Object containing the Upstash database configs.
   * @returns Promise that resolves with a new UpstashVector instance
   */
  static async fromDocuments(
    docs: DocumentInterface[],
    embeddings: EmbeddingsInterface,
    dbConfig: UpstashVectorLibArgs
  ): Promise<UpstashVectorStore> {
    const instance = new this(embeddings, dbConfig);
    await instance.addDocuments(docs);

    return instance;
  }

  /**
   * This method creates a new UpstashVector instance from an existing index.
   * @param embeddings Embedding interface of the choice, to create the embeddings.
   * @param dbConfig Object containing the Upstash database configs.
   * @returns Promise that resolves with a new UpstashVector instance.
   */
  static async fromExistingIndex(
    embeddings: EmbeddingsInterface,
    dbConfig: UpstashVectorLibArgs
  ): Promise<UpstashVectorStore> {
    const instance = new this(embeddings, dbConfig);

    return instance;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/mongodb_atlas.ts
import type { Collection, Document as MongoDBDocument } from "mongodb";
import {
  MaxMarginalRelevanceSearchOptions,
  VectorStore,
} from "@langchain/core/vectorstores";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { Document } from "@langchain/core/documents";
import { maximalMarginalRelevance } from "@langchain/core/utils/math";
import {
  AsyncCaller,
  AsyncCallerParams,
} from "@langchain/core/utils/async_caller";

/**
 * @deprecated Install and import from the "@langchain/mongodb" integration package instead.
 * Type that defines the arguments required to initialize the
 * MongoDBAtlasVectorSearch class. It includes the MongoDB collection,
 * index name, text key, embedding key, primary key, and overwrite flag.
 *
 * @param collection MongoDB collection to store the vectors.
 * @param indexName A Collections Index Name.
 * @param textKey Corresponds to the plaintext of 'pageContent'.
 * @param embeddingKey Key to store the embedding under.
 * @param primaryKey The Key to use for upserting documents.
 */
export interface MongoDBAtlasVectorSearchLibArgs extends AsyncCallerParams {
  readonly collection: Collection<MongoDBDocument>;
  readonly indexName?: string;
  readonly textKey?: string;
  readonly embeddingKey?: string;
  readonly primaryKey?: string;
}

/**
 * @deprecated Install and import from the "@langchain/mongodb" integration package instead.
 * Type that defines the filter used in the
 * similaritySearchVectorWithScore and maxMarginalRelevanceSearch methods.
 * It includes pre-filter, post-filter pipeline, and a flag to include
 * embeddings.
 */
type MongoDBAtlasFilter = {
  preFilter?: MongoDBDocument;
  postFilterPipeline?: MongoDBDocument[];
  includeEmbeddings?: boolean;
} & MongoDBDocument;

/**
 * @deprecated Install and import from the "@langchain/mongodb" integration package instead.
 * Class that is a wrapper around MongoDB Atlas Vector Search. It is used
 * to store embeddings in MongoDB documents, create a vector search index,
 * and perform K-Nearest Neighbors (KNN) search with an approximate
 * nearest neighbor algorithm.
 */
export class MongoDBAtlasVectorSearch extends VectorStore {
  declare FilterType: MongoDBAtlasFilter;

  private readonly collection: Collection<MongoDBDocument>;

  private readonly indexName: string;

  private readonly textKey: string;

  private readonly embeddingKey: string;

  private readonly primaryKey: string;

  private caller: AsyncCaller;

  _vectorstoreType(): string {
    return "mongodb_atlas";
  }

  constructor(
    embeddings: EmbeddingsInterface,
    args: MongoDBAtlasVectorSearchLibArgs
  ) {
    super(embeddings, args);
    this.collection = args.collection;
    this.indexName = args.indexName ?? "default";
    this.textKey = args.textKey ?? "text";
    this.embeddingKey = args.embeddingKey ?? "embedding";
    this.primaryKey = args.primaryKey ?? "_id";
    this.caller = new AsyncCaller(args);
  }

  /**
   * Method to add vectors and their corresponding documents to the MongoDB
   * collection.
   * @param vectors Vectors to be added.
   * @param documents Corresponding documents to be added.
   * @param options Optional object containing ids; when provided, documents
   *  are upserted by primary key instead of inserted.
   * @returns Promise that resolves when the vectors and documents have been added.
   */
  async addVectors(
    vectors: number[][],
    documents: Document[],
    options?: { ids?: string[] }
  ) {
    const docs = vectors.map((embedding, idx) => ({
      [this.textKey]: documents[idx].pageContent,
      [this.embeddingKey]: embedding,
      ...documents[idx].metadata,
    }));
    if (options?.ids === undefined) {
      await this.collection.insertMany(docs);
    } else {
      if (options.ids.length !== vectors.length) {
        throw new Error(
          `If provided, "options.ids" must be an array with the same length as "vectors".`
        );
      }
      const { ids } = options;
      // Sequential per-document upserts keyed on the primary key; each call
      // goes through the AsyncCaller for retry/concurrency handling.
      for (let i = 0; i < docs.length; i += 1) {
        await this.caller.call(async () => {
          await this.collection.updateOne(
            { [this.primaryKey]: ids[i] },
            { $set: { [this.primaryKey]: ids[i], ...docs[i] } },
            { upsert: true }
          );
        });
      }
    }
    return options?.ids ?? docs.map((doc) => doc[this.primaryKey]);
  }

  /**
   * Method to add documents to the MongoDB collection. It first converts
   * the documents to vectors using the embeddings and then calls the
   * addVectors method.
   * @param documents Documents to be added.
   * @param options Optional object containing ids for upserting.
   * @returns Promise that resolves when the documents have been added.
   */
  async addDocuments(documents: Document[], options?: { ids?: string[] }) {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents,
      options
    );
  }

  /**
   * Method that performs a similarity search on the vectors stored in the
   * MongoDB collection. It returns a list of documents and their
   * corresponding similarity scores.
   * @param query Query vector for the similarity search.
   * @param k Number of nearest neighbors to return.
   * @param filter Optional filter to be applied.
   * @returns Promise that resolves to a list of documents and their corresponding similarity scores.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: MongoDBAtlasFilter
  ): Promise<[Document, number][]> {
    const postFilterPipeline = filter?.postFilterPipeline ?? [];
    // If any of the structured filter keys are present, only `preFilter` is
    // forwarded to $vectorSearch; otherwise the whole filter object is
    // treated as the pre-filter.
    const preFilter: MongoDBDocument | undefined =
      filter?.preFilter ||
      filter?.postFilterPipeline ||
      filter?.includeEmbeddings
        ? filter.preFilter
        : filter;
    // Strip the (large) embedding field from results unless explicitly requested.
    const removeEmbeddingsPipeline = !filter?.includeEmbeddings
      ? [
          {
            $project: {
              [this.embeddingKey]: 0,
            },
          },
        ]
      : [];

    const pipeline: MongoDBDocument[] = [
      {
        $vectorSearch: {
          queryVector: MongoDBAtlasVectorSearch.fixArrayPrecision(query),
          index: this.indexName,
          path: this.embeddingKey,
          limit: k,
          numCandidates: 10 * k,
          ...(preFilter && { filter: preFilter }),
        },
      },
      {
        $set: {
          score: { $meta: "vectorSearchScore" },
        },
      },
      ...removeEmbeddingsPipeline,
      ...postFilterPipeline,
    ];

    const results = this.collection
      .aggregate(pipeline)
      .map<[Document, number]>((result) => {
        const { score, [this.textKey]: text, ...metadata } = result;
        return [new Document({ pageContent: text, metadata }), score];
      });

    return results.toArray();
  }

  /**
   * Return documents selected using the maximal marginal relevance.
   * Maximal marginal relevance optimizes for similarity to the query AND diversity
   * among selected documents.
   *
   * @param {string} query - Text to look up documents similar to.
   * @param {number} options.k - Number of documents to return.
   * @param {number} options.fetchK=20 - Number of documents to fetch before passing to the MMR algorithm.
   * @param {number} options.lambda=0.5 - Number between 0 and 1 that determines the degree of diversity among the results,
   *    where 0 corresponds to maximum diversity and 1 to minimum diversity.
   * @param {MongoDBAtlasFilter} options.filter - Optional Atlas Search operator to pre-filter on document fields
   *    or post-filter following the knnBeta search.
   *
   * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance.
   */
  async maxMarginalRelevanceSearch(
    query: string,
    options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>
  ): Promise<Document[]> {
    const { k, fetchK = 20, lambda = 0.5, filter } = options;

    const queryEmbedding = await this.embeddings.embedQuery(query);

    // preserve the original value of includeEmbeddings
    const includeEmbeddingsFlag = options.filter?.includeEmbeddings || false;

    // update filter to include embeddings, as they will be used in MMR
    const includeEmbeddingsFilter = {
      ...filter,
      includeEmbeddings: true,
    };

    const resultDocs = await this.similaritySearchVectorWithScore(
      MongoDBAtlasVectorSearch.fixArrayPrecision(queryEmbedding),
      fetchK,
      includeEmbeddingsFilter
    );

    const embeddingList = resultDocs.map(
      (doc) => doc[0].metadata[this.embeddingKey]
    );

    const mmrIndexes = maximalMarginalRelevance(
      queryEmbedding,
      embeddingList,
      lambda,
      k
    );

    return mmrIndexes.map((idx) => {
      const doc = resultDocs[idx][0];

      // remove embeddings if they were not requested originally
      if (!includeEmbeddingsFlag) {
        delete doc.metadata[this.embeddingKey];
      }
      return doc;
    });
  }

  /**
   * Static method to create an instance of MongoDBAtlasVectorSearch from a
   * list of texts. It first converts the texts to vectors and then adds
   * them to the MongoDB collection.
   * @param texts List of texts to be converted to vectors.
   * @param metadatas Metadata for the texts.
   * @param embeddings Embeddings to be used for conversion.
   * @param dbConfig Database configuration for MongoDB Atlas.
   * @returns Promise that resolves to a new instance of MongoDBAtlasVectorSearch.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig: MongoDBAtlasVectorSearchLibArgs & { ids?: string[] }
  ): Promise<MongoDBAtlasVectorSearch> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // Accept either one shared metadata object or a per-text array.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return MongoDBAtlasVectorSearch.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * Static method to create an instance of MongoDBAtlasVectorSearch from a
   * list of documents. It first converts the documents to vectors and then
   * adds them to the MongoDB collection.
   * @param docs List of documents to be converted to vectors.
   * @param embeddings Embeddings to be used for conversion.
   * @param dbConfig Database configuration for MongoDB Atlas.
   * @returns Promise that resolves to a new instance of MongoDBAtlasVectorSearch.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig: MongoDBAtlasVectorSearchLibArgs & { ids?: string[] }
  ): Promise<MongoDBAtlasVectorSearch> {
    const instance = new this(embeddings, dbConfig);
    await instance.addDocuments(docs, { ids: dbConfig.ids });
    return instance;
  }

  /**
   * Static method to fix the precision of the array that ensures that
   * every number in this array is always float when casted to other types.
   * This is needed since MongoDB Atlas Vector Search does not cast integer
   * inside vector search to float automatically.
   * This method shall introduce a hint of error but should be safe to use
   * since introduced error is very small, only applies to integer numbers
   * returned by embeddings, and most embeddings shall not have precision
   * as high as 15 decimal places.
   * @param array Array of number to be fixed.
   * @returns The input array with every integer nudged to a non-integer float.
   */
  static fixArrayPrecision(array: number[]) {
    return array.map((value) => {
      if (Number.isInteger(value)) {
        return value + 0.000000000000001;
      }
      return value;
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/azure_aisearch.ts
import * as uuid from "uuid"; import { SearchClient, SearchIndexClient, AzureKeyCredential, IndexingResult, SearchIndex, SearchIndexingBufferedSender, VectorFilterMode, } from "@azure/search-documents"; import type { KeyCredential, TokenCredential } from "@azure/core-auth"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; /** * Azure AI Search query type. */ export const AzureAISearchQueryType = { /** Vector search. */ Similarity: "similarity", /** Hybrid full text and vector search. */ SimilarityHybrid: "similarity_hybrid", /** Hybrid full text and vector search with semantic ranking. */ SemanticHybrid: "semantic_hybrid", } as const; /** * Azure AI Search query type. */ export type AzureAISearchQueryType = (typeof AzureAISearchQueryType)[keyof typeof AzureAISearchQueryType]; /** * Azure AI Search settings. */ export interface AzureAISearchQueryOptions { readonly type?: AzureAISearchQueryType; readonly semanticConfigurationName?: string; } /** * Configuration options for the `AzureAISearchStore` constructor. */ export interface AzureAISearchConfig { readonly client?: SearchClient<AzureAISearchDocument>; readonly indexName?: string; readonly endpoint?: string; readonly key?: string; readonly credentials?: KeyCredential | TokenCredential; readonly search?: AzureAISearchQueryOptions; } /** * Azure AI Search options metadata schema. * If yout want to add custom data, use the attributes property. */ export type AzureAISearchDocumentMetadata = { source: string; attributes?: Array<{ key: string; value: string }>; embedding?: number[]; }; /** * Azure AI Search indexed document. 
*/ export type AzureAISearchDocument = { id: string; content: string; content_vector: number[]; metadata: AzureAISearchDocumentMetadata; }; /** * Azure AI Search options for adding documents. */ export type AzureAISearchAddDocumentsOptions = { ids?: string[]; }; /** * Azure AI Search filter type. */ export type AzureAISearchFilterType = { /** OData filter. */ filterExpression?: string; /** Determines whether or not filters are applied before or after the vector search is performed. */ vectorFilterMode?: VectorFilterMode; /** Determines whether or not to include the embeddings in the search results. */ includeEmbeddings?: boolean; }; const USER_AGENT_PREFIX = "langchainjs-azure-aisearch"; const DEFAULT_FIELD_ID = "id"; const DEFAULT_FIELD_CONTENT = "content"; const DEFAULT_FIELD_CONTENT_VECTOR = "content_vector"; const DEFAULT_FIELD_METADATA = "metadata"; const DEFAULT_FIELD_METADATA_SOURCE = "source"; const DEFAULT_FIELD_METADATA_ATTRS = "attributes"; /** * Azure AI Search vector store. * To use this, you should have: * - the `@azure/search-documents` NPM package installed * - an endpoint and key to the Azure AI Search instance * * If you directly provide a `SearchClient` instance, you need to ensure that * an index has been created. When using and endpoint and key, the index will * be created automatically if it does not exist. */ export class AzureAISearchVectorStore extends VectorStore { declare FilterType: AzureAISearchFilterType; get lc_secrets(): { [key: string]: string } { return { endpoint: "AZURE_AISEARCH_ENDPOINT", key: "AZURE_AISEARCH_KEY", }; } _vectorstoreType(): string { return "azure_aisearch"; } private readonly initPromise: Promise<void>; private readonly client: SearchClient<AzureAISearchDocument>; private readonly indexName: string; private readonly options: AzureAISearchQueryOptions; constructor(embeddings: EmbeddingsInterface, config: AzureAISearchConfig) { super(embeddings, config); const endpoint = config.endpoint ?? 
getEnvironmentVariable("AZURE_AISEARCH_ENDPOINT"); const key = config.key ?? getEnvironmentVariable("AZURE_AISEARCH_KEY"); let { credentials } = config; if (!config.client && (!endpoint || (!key && !credentials))) { throw new Error( "Azure AI Search client or endpoint and key/credentials must be set." ); } this.indexName = config.indexName ?? "vectorsearch"; if (!config.client) { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion credentials ??= new AzureKeyCredential(key!); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion this.client = new SearchClient(endpoint!, this.indexName, credentials, { userAgentOptions: { userAgentPrefix: USER_AGENT_PREFIX }, }); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const indexClient = new SearchIndexClient(endpoint!, credentials, { userAgentOptions: { userAgentPrefix: USER_AGENT_PREFIX }, }); // Start initialization, but don't wait for it to finish here this.initPromise = this.ensureIndexExists(indexClient).catch((error) => { console.error( "Error during Azure AI Search index initialization:", error ); }); } else { this.client = config.client; } this.options = config.search ?? {}; this.embeddings = embeddings; } /** * Removes specified documents from the AzureAISearchVectorStore using IDs or a filter. * @param params Object that includes either an array of IDs or a filter for the data to be deleted. * @returns A promise that resolves when the documents have been removed. */ async delete(params: { ids?: string | string[]; filter?: AzureAISearchFilterType; }) { if (!params.ids && !params.filter) { throw new Error( `Azure AI Search delete requires either "ids" or "filter" to be set in the params object` ); } await this.initPromise; if (params.ids) { await this.deleteById(params.ids); } if (params.filter) { await this.deleteMany(params.filter); } } /** * Removes specified documents from the AzureAISearchVectorStore using a filter. 
* @param filter Filter options to find documents to delete. * @returns A promise that resolves when the documents have been removed. */ private async deleteMany( filter: AzureAISearchFilterType ): Promise<IndexingResult[]> { if (!filter.filterExpression) { throw new Error( `Azure AI Search deleteMany requires "filterExpression" to be set in the filter object` ); } const { results } = await this.client.search("*", { filter: filter.filterExpression, }); const docs: AzureAISearchDocument[] = []; for await (const item of results) { docs.push(item.document); } const deleteResults: IndexingResult[] = []; const bufferedClient = new SearchIndexingBufferedSender<AzureAISearchDocument>( this.client, (entity) => entity.id ); bufferedClient.on("batchSucceeded", (response) => { deleteResults.push(...response.results); }); bufferedClient.on("batchFailed", (response) => { throw new Error( `Azure AI Search deleteDocuments batch failed: ${response}` ); }); await bufferedClient.deleteDocuments(docs); await bufferedClient.flush(); await bufferedClient.dispose(); return deleteResults; } /** * Removes specified documents from the AzureAISearchVectorStore. * @param ids IDs of the documents to be removed. * @returns A promise that resolves when the documents have been removed. */ private async deleteById(ids: string | string[]): Promise<IndexingResult[]> { const docsIds = Array.isArray(ids) ? 
ids : [ids]; const docs: { id: string }[] = docsIds.map((id) => ({ id })); const deleteResults: IndexingResult[] = []; const bufferedClient = new SearchIndexingBufferedSender<{ id: string }>( this.client, (entity) => entity.id ); bufferedClient.on("batchSucceeded", (response) => { deleteResults.push(...response.results); }); bufferedClient.on("batchFailed", (response) => { throw new Error( `Azure AI Search deleteDocuments batch failed: ${response}` ); }); await bufferedClient.deleteDocuments(docs); await bufferedClient.flush(); await bufferedClient.dispose(); return deleteResults; } /** * Adds documents to the AzureAISearchVectorStore. * @param documents The documents to add. * @param options Options for adding documents. * @returns A promise that resolves to the ids of the added documents. */ async addDocuments( documents: Document[], options?: AzureAISearchAddDocumentsOptions ) { const texts = documents.map(({ pageContent }) => pageContent); const embeddings: number[][] = await this.embeddings.embedDocuments(texts); const results = await this.addVectors(embeddings, documents, options); return results; } /** * Adds vectors to the AzureAISearchVectorStore. * @param vectors Vectors to be added. * @param documents Corresponding documents to be added. * @param options Options for adding documents. * @returns A promise that resolves to the ids of the added documents. */ async addVectors( vectors: number[][], documents: Document[], options?: AzureAISearchAddDocumentsOptions ): Promise<string[]> { const ids = options?.ids ?? documents.map(() => uuid.v4()); const entities: AzureAISearchDocument[] = documents.map((doc, idx) => ({ id: ids[idx], content: doc.pageContent, content_vector: vectors[idx], metadata: { source: doc.metadata?.source, attributes: doc.metadata?.attributes ?? 
[], }, })); await this.initPromise; const bufferedClient = new SearchIndexingBufferedSender<AzureAISearchDocument>( this.client, (entity) => entity.id ); bufferedClient.on("batchFailed", (response) => { throw new Error( `Azure AI Search uploadDocuments batch failed: ${response}` ); }); await bufferedClient.uploadDocuments(entities); await bufferedClient.flush(); await bufferedClient.dispose(); return ids; } /** * Performs a similarity search using query type specified in configuration. * If the query type is not specified, it defaults to similarity search. * @param query Query text for the similarity search. * @param k=4 Number of nearest neighbors to return. * @param filter Optional filter options for the documents. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. */ async similaritySearch( query: string, k = 4, filter: this["FilterType"] | undefined = undefined ): Promise<Document[]> { const results = await this.similaritySearchWithScore(query, k, filter); return results.map((result) => result[0]); } /** * Performs a similarity search using query type specified in configuration. * If the query type is not specified, it defaults to similarity hybrid search. * @param query Query text for the similarity search. * @param k=4 Number of nearest neighbors to return. * @param filter Optional filter options for the documents. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. */ async similaritySearchWithScore( query: string, k = 4, filter: this["FilterType"] | undefined = undefined ): Promise<[Document, number][]> { const searchType = this.options.type ?? 
AzureAISearchQueryType.SimilarityHybrid; if (searchType === AzureAISearchQueryType.Similarity) { return this.similaritySearchVectorWithScore( await this.embeddings.embedQuery(query), k, filter ); } else if (searchType === AzureAISearchQueryType.SimilarityHybrid) { return this.hybridSearchVectorWithScore( query, await this.embeddings.embedQuery(query), k, filter ); } else if (searchType === AzureAISearchQueryType.SemanticHybrid) { return this.semanticHybridSearchVectorWithScore( query, await this.embeddings.embedQuery(query), k, filter ); } throw new Error(`Unrecognized search type '${searchType}'`); } /** * Performs a hybrid search using query text. * @param query Query text for the similarity search. * @param queryVector Query vector for the similarity search. * If not provided, the query text will be embedded. * @param k=4 Number of nearest neighbors to return. * @param filter Optional filter options for the documents. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. */ async hybridSearchVectorWithScore( query: string, queryVector?: number[], k = 4, filter: this["FilterType"] | undefined = undefined ): Promise<[Document, number][]> { const vector = queryVector ?? 
(await this.embeddings.embedQuery(query)); await this.initPromise; const { results } = await this.client.search(query, { vectorSearchOptions: { queries: [ { kind: "vector", vector, kNearestNeighborsCount: k, fields: [DEFAULT_FIELD_CONTENT_VECTOR], }, ], filterMode: filter?.vectorFilterMode, }, filter: filter?.filterExpression, top: k, }); const docsWithScore: [Document, number][] = []; for await (const item of results) { const document = new Document<AzureAISearchDocumentMetadata>({ pageContent: item.document[DEFAULT_FIELD_CONTENT], metadata: { ...item.document[DEFAULT_FIELD_METADATA], }, }); if (filter?.includeEmbeddings) { document.metadata.embedding = item.document[DEFAULT_FIELD_CONTENT_VECTOR]; } docsWithScore.push([document, item.score]); } return docsWithScore; } /** * Performs a hybrid search with semantic reranker using query text. * @param query Query text for the similarity search. * @param queryVector Query vector for the similarity search. * If not provided, the query text will be embedded. * @param k=4 Number of nearest neighbors to return. * @param filter Optional filter options for the documents. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. */ async semanticHybridSearchVectorWithScore( query: string, queryVector?: number[], k = 4, filter: this["FilterType"] | undefined = undefined ): Promise<[Document, number][]> { const vector = queryVector ?? 
(await this.embeddings.embedQuery(query)); await this.initPromise; const { results } = await this.client.search(query, { vectorSearchOptions: { queries: [ { kind: "vector", vector, kNearestNeighborsCount: k, fields: [DEFAULT_FIELD_CONTENT_VECTOR], }, ], filterMode: filter?.vectorFilterMode, }, filter: filter?.filterExpression, top: k, queryType: "semantic", semanticSearchOptions: { configurationName: "semantic-search-config", }, }); const docsWithScore: [Document, number][] = []; for await (const item of results) { const document = new Document<AzureAISearchDocumentMetadata>({ pageContent: item.document[DEFAULT_FIELD_CONTENT], metadata: { ...item.document[DEFAULT_FIELD_METADATA], }, }); if (filter?.includeEmbeddings) { document.metadata.embedding = item.document[DEFAULT_FIELD_CONTENT_VECTOR]; } docsWithScore.push([document, item.score]); } return docsWithScore; } /** * Performs a similarity search on the vectors stored in the collection. * @param queryVector Query vector for the similarity search. * @param k=4 Number of nearest neighbors to return. * @param filter Optional filter options for the documents. * @returns Promise that resolves to a list of documents and their corresponding similarity scores. 
*/ async similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { await this.initPromise; const { results } = await this.client.search("*", { vectorSearchOptions: { queries: [ { kind: "vector", vector: query, kNearestNeighborsCount: k, fields: [DEFAULT_FIELD_CONTENT_VECTOR], }, ], filterMode: filter?.vectorFilterMode, }, filter: filter?.filterExpression, }); const docsWithScore: [Document, number][] = []; for await (const item of results) { const document = new Document<AzureAISearchDocumentMetadata>({ pageContent: item.document[DEFAULT_FIELD_CONTENT], metadata: { ...item.document[DEFAULT_FIELD_METADATA], }, }); if (filter?.includeEmbeddings) { document.metadata.embedding = item.document[DEFAULT_FIELD_CONTENT_VECTOR]; } docsWithScore.push([document, item.score]); } return docsWithScore; } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND * diversity among selected documents. * @param query Text to look up documents similar to. * @param options.k Number of documents to return. * @param options.fetchK=20 Number of documents to fetch before passing to * the MMR algorithm. * @param options.lambda=0.5 Number between 0 and 1 that determines the * degree of diversity among the results, where 0 corresponds to maximum * diversity and 1 to minimum diversity. * @returns List of documents selected by maximal marginal relevance. 
*/ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { const { k, fetchK = 20, lambda = 0.5 } = options; const includeEmbeddingsFlag = options.filter?.includeEmbeddings || false; const queryEmbedding = await this.embeddings.embedQuery(query); const docs = await this.similaritySearchVectorWithScore( queryEmbedding, fetchK, { ...options.filter, includeEmbeddings: true, } ); const embeddingList = docs.map((doc) => doc[0].metadata.embedding); // Re-rank the results using MMR const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, lambda, k ); return mmrIndexes.map((index) => { const doc = docs[index][0]; // Remove embeddings if they were not requested originally if (!includeEmbeddingsFlag) { delete doc.metadata.embedding; } return doc; }); } /** * Ensures that an index exists on the AzureAISearchVectorStore. * @param indexClient The Azure AI Search index client. * @returns A promise that resolves when the AzureAISearchVectorStore index has been initialized. * @protected */ protected async ensureIndexExists( indexClient: SearchIndexClient ): Promise<void> { try { await indexClient.getIndex(this.indexName); } catch (e) { // Index does not exists, create it const searchIndex = await this.createSearchIndexDefinition( this.indexName ); await indexClient.createIndex(searchIndex); } } /** * Prepares the search index definition for Azure AI Search. * @param indexName The name of the index. * @returns The SearchIndex object. 
* @protected */ protected async createSearchIndexDefinition( indexName: string ): Promise<SearchIndex> { // Embed a test query to get the embedding dimensions const testEmbedding = await this.embeddings.embedQuery("test"); const embeddingDimensions = testEmbedding.length; return { name: indexName, vectorSearch: { algorithms: [ { name: "vector-search-algorithm", kind: "hnsw", parameters: { m: 4, efSearch: 500, metric: "cosine", efConstruction: 400, }, }, ], profiles: [ { name: "vector-search-profile", algorithmConfigurationName: "vector-search-algorithm", }, ], }, semanticSearch: { defaultConfigurationName: "semantic-search-config", configurations: [ { name: "semantic-search-config", prioritizedFields: { contentFields: [ { name: DEFAULT_FIELD_CONTENT, }, ], keywordsFields: [ { name: DEFAULT_FIELD_CONTENT, }, ], }, }, ], }, fields: [ { name: DEFAULT_FIELD_ID, filterable: true, key: true, type: "Edm.String", }, { name: DEFAULT_FIELD_CONTENT, searchable: true, filterable: true, type: "Edm.String", }, { name: DEFAULT_FIELD_CONTENT_VECTOR, searchable: true, type: "Collection(Edm.Single)", vectorSearchDimensions: embeddingDimensions, vectorSearchProfileName: "vector-search-profile", }, { name: DEFAULT_FIELD_METADATA, type: "Edm.ComplexType", fields: [ { name: DEFAULT_FIELD_METADATA_SOURCE, type: "Edm.String", filterable: true, }, { name: DEFAULT_FIELD_METADATA_ATTRS, type: "Collection(Edm.ComplexType)", fields: [ { name: "key", type: "Edm.String", filterable: true, }, { name: "value", type: "Edm.String", filterable: true, }, ], }, ], }, ], }; } /** * Static method to create an instance of AzureAISearchVectorStore from a * list of texts. It first converts the texts to vectors and then adds * them to the collection. * @param texts List of texts to be converted to vectors. * @param metadatas Metadata for the texts. * @param embeddings Embeddings to be used for conversion. * @param config Database configuration for Azure AI Search. 
* @returns Promise that resolves to a new instance of AzureAISearchVectorStore. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, config: AzureAISearchConfig ): Promise<AzureAISearchVectorStore> { const docs: Document<AzureAISearchDocumentMetadata>[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return AzureAISearchVectorStore.fromDocuments(docs, embeddings, config); } /** * Static method to create an instance of AzureAISearchVectorStore from a * list of documents. It first converts the documents to vectors and then * adds them to the database. * @param docs List of documents to be converted to vectors. * @param embeddings Embeddings to be used for conversion. * @param config Database configuration for Azure AI Search. * @returns Promise that resolves to a new instance of AzureAISearchVectorStore. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, config: AzureAISearchConfig, options?: AzureAISearchAddDocumentsOptions ): Promise<AzureAISearchVectorStore> { const instance = new this(embeddings, config); await instance.addDocuments(docs, options); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/momento_vector_index.ts
/* eslint-disable no-instanceof/no-instanceof */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { ALL_VECTOR_METADATA, IVectorIndexClient, VectorIndexItem, CreateVectorIndex, VectorUpsertItemBatch, VectorDeleteItemBatch, VectorSearch, VectorSearchAndFetchVectors, } from "@gomomento/sdk-core"; import * as uuid from "uuid"; import { Document } from "@langchain/core/documents"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; export interface DocumentProps { ids: string[]; } export interface MomentoVectorIndexLibArgs { /** * The Momento Vector Index client. */ client: IVectorIndexClient; /** * The name of the index to use to store the data. * Defaults to "default". */ indexName?: string; /** * The name of the metadata field to use to store the text of the document. * Defaults to "text". */ textField?: string; /** * Whether to create the index if it does not already exist. * Defaults to true. */ ensureIndexExists?: boolean; } export interface DeleteProps { /** * The ids of the documents to delete. */ ids: string[]; } /** * A vector store that uses the Momento Vector Index. * * @remarks * To sign up for a free Momento account, visit https://console.gomomento.com. */ export class MomentoVectorIndex extends VectorStore { private client: IVectorIndexClient; private indexName: string; private textField: string; private _ensureIndexExists: boolean; _vectorstoreType(): string { return "momento"; } /** * Creates a new `MomentoVectorIndex` instance. * @param embeddings The embeddings instance to use to generate embeddings from documents. * @param args The arguments to use to configure the vector store. 
*/ constructor( embeddings: EmbeddingsInterface, args: MomentoVectorIndexLibArgs ) { super(embeddings, args); this.embeddings = embeddings; this.client = args.client; this.indexName = args.indexName ?? "default"; this.textField = args.textField ?? "text"; this._ensureIndexExists = args.ensureIndexExists ?? true; } /** * Returns the Momento Vector Index client. * @returns The Momento Vector Index client. */ public getClient(): IVectorIndexClient { return this.client; } /** * Creates the index if it does not already exist. * @param numDimensions The number of dimensions of the vectors to be stored in the index. * @returns Promise that resolves to true if the index was created, false if it already existed. */ private async ensureIndexExists(numDimensions: number): Promise<boolean> { const response = await this.client.createIndex( this.indexName, numDimensions ); if (response instanceof CreateVectorIndex.Success) { return true; } else if (response instanceof CreateVectorIndex.AlreadyExists) { return false; } else if (response instanceof CreateVectorIndex.Error) { throw new Error(response.toString()); } else { throw new Error(`Unknown response type: ${response.toString()}`); } } /** * Converts the documents to a format that can be stored in the index. * * This is necessary because the Momento Vector Index requires that the metadata * be a map of strings to strings. * @param vectors The vectors to convert. * @param documents The documents to convert. * @param ids The ids to convert. * @returns The converted documents. */ private prepareItemBatch( vectors: number[][], documents: Document<Record<string, any>>[], ids: string[] ): VectorIndexItem[] { return vectors.map((vector, idx) => ({ id: ids[idx], vector, metadata: { ...documents[idx].metadata, [this.textField]: documents[idx].pageContent, }, })); } /** * Adds vectors to the index. * * @remarks If the index does not already exist, it will be created if `ensureIndexExists` is true. 
* @param vectors The vectors to add to the index. * @param documents The documents to add to the index. * @param documentProps The properties of the documents to add to the index, specifically the ids. * @returns Promise that resolves when the vectors have been added to the index. Also returns the ids of the * documents that were added. */ public async addVectors( vectors: number[][], documents: Document<Record<string, any>>[], documentProps?: DocumentProps ): Promise<void | string[]> { if (vectors.length === 0) { return; } if (documents.length !== vectors.length) { throw new Error( `Number of vectors (${vectors.length}) does not equal number of documents (${documents.length})` ); } if (vectors.some((v) => v.length !== vectors[0].length)) { throw new Error("All vectors must have the same length"); } if ( documentProps?.ids !== undefined && documentProps.ids.length !== vectors.length ) { throw new Error( `Number of ids (${ documentProps?.ids?.length || "null" }) does not equal number of vectors (${vectors.length})` ); } if (this._ensureIndexExists) { await this.ensureIndexExists(vectors[0].length); } const documentIds = documentProps?.ids ?? 
documents.map(() => uuid.v4()); const batchSize = 128; const numBatches = Math.ceil(vectors.length / batchSize); // Add each batch of vectors to the index for (let i = 0; i < numBatches; i += 1) { const [startIndex, endIndex] = [ i * batchSize, Math.min((i + 1) * batchSize, vectors.length), ]; const batchVectors = vectors.slice(startIndex, endIndex); const batchDocuments = documents.slice(startIndex, endIndex); const batchDocumentIds = documentIds.slice(startIndex, endIndex); // Insert the items to the index const response = await this.client.upsertItemBatch( this.indexName, this.prepareItemBatch(batchVectors, batchDocuments, batchDocumentIds) ); if (response instanceof VectorUpsertItemBatch.Success) { // eslint-disable-next-line no-continue continue; } else if (response instanceof VectorUpsertItemBatch.Error) { throw new Error(response.toString()); } else { throw new Error(`Unknown response type: ${response.toString()}`); } } } /** * Adds vectors to the index. Generates embeddings from the documents * using the `Embeddings` instance passed to the constructor. * @param documents Array of `Document` instances to be added to the index. * @returns Promise that resolves when the documents have been added to the index. */ async addDocuments( documents: Document[], documentProps?: DocumentProps ): Promise<void> { const texts = documents.map(({ pageContent }) => pageContent); await this.addVectors( await this.embeddings.embedDocuments(texts), documents, documentProps ); } /** * Deletes vectors from the index by id. * @param params The parameters to use to delete the vectors, specifically the ids. 
*/ public async delete(params: DeleteProps): Promise<void> { const response = await this.client.deleteItemBatch( this.indexName, params.ids ); if (response instanceof VectorDeleteItemBatch.Success) { // pass } else if (response instanceof VectorDeleteItemBatch.Error) { throw new Error(response.toString()); } else { throw new Error(`Unknown response type: ${response.toString()}`); } } /** * Searches the index for the most similar vectors to the query vector. * @param query The query vector. * @param k The number of results to return. * @returns Promise that resolves to the documents of the most similar vectors * to the query vector. */ public async similaritySearchVectorWithScore( query: number[], k: number ): Promise<[Document<Record<string, any>>, number][]> { const response = await this.client.search(this.indexName, query, { topK: k, metadataFields: ALL_VECTOR_METADATA, }); if (response instanceof VectorSearch.Success) { if (response.hits === undefined) { return []; } return response.hits().map((hit) => [ new Document({ pageContent: hit.metadata[this.textField]?.toString() ?? "", metadata: Object.fromEntries( Object.entries(hit.metadata).filter( ([key]) => key !== this.textField ) ), }), hit.score, ]); } else if (response instanceof VectorSearch.Error) { throw new Error(response.toString()); } else { throw new Error(`Unknown response type: ${response.toString()}`); } } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. * * @param {string} query - Text to look up documents similar to. * @param {number} options.k - Number of documents to return. * @param {number} options.fetchK - Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. 
* @param {this["FilterType"]} options.filter - Optional filter * @param _callbacks * * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance. */ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { const queryEmbedding = await this.embeddings.embedQuery(query); const response = await this.client.searchAndFetchVectors( this.indexName, queryEmbedding, { topK: options.fetchK ?? 20, metadataFields: ALL_VECTOR_METADATA } ); if (response instanceof VectorSearchAndFetchVectors.Success) { const hits = response.hits(); // Gather the embeddings of the search results const embeddingList = hits.map((hit) => hit.vector); // Gather the ids of the most relevant results when applying MMR const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, options.lambda, options.k ); const finalResult = mmrIndexes.map((index) => { const hit = hits[index]; const { [this.textField]: pageContent, ...metadata } = hit.metadata; return new Document({ metadata, pageContent: pageContent as string }); }); return finalResult; } else if (response instanceof VectorSearchAndFetchVectors.Error) { throw new Error(response.toString()); } else { throw new Error(`Unknown response type: ${response.toString()}`); } } /** * Stores the documents in the index. * * Converts the documents to vectors using the `Embeddings` instance passed. * @param texts The texts to store in the index. * @param metadatas The metadata to store in the index. * @param embeddings The embeddings instance to use to generate embeddings from the documents. * @param dbConfig The configuration to use to instantiate the vector store. * @param documentProps The properties of the documents to add to the index, specifically the ids. * @returns Promise that resolves to the vector store. 
*/ public static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: MomentoVectorIndexLibArgs, documentProps?: DocumentProps ): Promise<MomentoVectorIndex> { if (Array.isArray(metadatas) && texts.length !== metadatas.length) { throw new Error( `Number of texts (${texts.length}) does not equal number of metadatas (${metadatas.length})` ); } const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment const metadata: object = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return await this.fromDocuments(docs, embeddings, dbConfig, documentProps); } /** * Stores the documents in the index. * @param docs The documents to store in the index. * @param embeddings The embeddings instance to use to generate embeddings from the documents. * @param dbConfig The configuration to use to instantiate the vector store. * @param documentProps The properties of the documents to add to the index, specifically the ids. * @returns Promise that resolves to the vector store. */ public static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: MomentoVectorIndexLibArgs, documentProps?: DocumentProps ): Promise<MomentoVectorIndex> { const vectorStore = new MomentoVectorIndex(embeddings, dbConfig); await vectorStore.addDocuments(docs, documentProps); return vectorStore; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/tigris.ts
import * as uuid from "uuid"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; /** * Type definition for the arguments required to initialize a * TigrisVectorStore instance. */ export type TigrisLibArgs = { // eslint-disable-next-line @typescript-eslint/no-explicit-any index: any; }; /** * Class for managing and operating vector search applications with * Tigris, an open-source Serverless NoSQL Database and Search Platform. */ export class TigrisVectorStore extends VectorStore { // eslint-disable-next-line @typescript-eslint/no-explicit-any index?: any; _vectorstoreType(): string { return "tigris"; } constructor(embeddings: EmbeddingsInterface, args: TigrisLibArgs) { super(embeddings, args); this.embeddings = embeddings; this.index = args.index; } /** * Method to add an array of documents to the Tigris database. * @param documents An array of Document instances to be added to the Tigris database. * @param options Optional parameter that can either be an array of string IDs or an object with a property 'ids' that is an array of string IDs. * @returns A Promise that resolves when the documents have been added to the Tigris database. */ async addDocuments( documents: Document[], options?: { ids?: string[] } | string[] ): Promise<void> { const texts = documents.map(({ pageContent }) => pageContent); await this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } /** * Method to add vectors to the Tigris database. * @param vectors An array of vectors to be added to the Tigris database. * @param documents An array of Document instances corresponding to the vectors. * @param options Optional parameter that can either be an array of string IDs or an object with a property 'ids' that is an array of string IDs. * @returns A Promise that resolves when the vectors have been added to the Tigris database. 
*/ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } | string[] ) { if (vectors.length === 0) { return; } if (vectors.length !== documents.length) { throw new Error(`Vectors and metadatas must have the same length`); } const ids = Array.isArray(options) ? options : options?.ids; const documentIds = ids == null ? documents.map(() => uuid.v4()) : ids; await this.index?.addDocumentsWithVectors({ ids: documentIds, embeddings: vectors, documents: documents.map(({ metadata, pageContent }) => ({ content: pageContent, metadata, })), }); } /** * Method to perform a similarity search in the Tigris database and return * the k most similar vectors along with their similarity scores. * @param query The query vector. * @param k The number of most similar vectors to return. * @param filter Optional filter object to apply during the search. * @returns A Promise that resolves to an array of tuples, each containing a Document and its similarity score. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: object ) { const result = await this.index?.similaritySearchVectorWithScore({ query, k, filter, }); if (!result) { return []; } // eslint-disable-next-line @typescript-eslint/no-explicit-any return result.map(([document, score]: [any, any]) => [ new Document({ pageContent: document.content, metadata: document.metadata, }), score, ]) as [Document, number][]; } /** * Static method to create a new instance of TigrisVectorStore from an * array of texts. * @param texts An array of texts to be converted into Document instances and added to the Tigris database. * @param metadatas Either an array of metadata objects or a single metadata object to be associated with the texts. * @param embeddings An instance of Embeddings to be used for embedding the texts. * @param dbConfig An instance of TigrisLibArgs to be used for configuring the Tigris database. * @returns A Promise that resolves to a new instance of TigrisVectorStore. 
*/ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: TigrisLibArgs ): Promise<TigrisVectorStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return TigrisVectorStore.fromDocuments(docs, embeddings, dbConfig); } /** * Static method to create a new instance of TigrisVectorStore from an * array of Document instances. * @param docs An array of Document instances to be added to the Tigris database. * @param embeddings An instance of Embeddings to be used for embedding the documents. * @param dbConfig An instance of TigrisLibArgs to be used for configuring the Tigris database. * @returns A Promise that resolves to a new instance of TigrisVectorStore. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: TigrisLibArgs ): Promise<TigrisVectorStore> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } /** * Static method to create a new instance of TigrisVectorStore from an * existing index. * @param embeddings An instance of Embeddings to be used for embedding the documents. * @param dbConfig An instance of TigrisLibArgs to be used for configuring the Tigris database. * @returns A Promise that resolves to a new instance of TigrisVectorStore. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig: TigrisLibArgs ): Promise<TigrisVectorStore> { const instance = new this(embeddings, dbConfig); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/faiss.ts
import type { IndexFlatL2 } from "faiss-node"; import type { NameRegistry, Parser } from "pickleparser"; import * as uuid from "uuid"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { SaveableVectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; import { SynchronousInMemoryDocstore } from "../stores/doc/in_memory.js"; /** * Interface for the arguments required to initialize a FaissStore * instance. */ export interface FaissLibArgs { docstore?: SynchronousInMemoryDocstore; index?: IndexFlatL2; mapping?: Record<number, string>; } /** * A class that wraps the FAISS (Facebook AI Similarity Search) vector * database for efficient similarity search and clustering of dense * vectors. */ export class FaissStore extends SaveableVectorStore { _index?: IndexFlatL2; _mapping: Record<number, string>; docstore: SynchronousInMemoryDocstore; args: FaissLibArgs; _vectorstoreType(): string { return "faiss"; } getMapping(): Record<number, string> { return this._mapping; } getDocstore(): SynchronousInMemoryDocstore { return this.docstore; } constructor(embeddings: EmbeddingsInterface, args: FaissLibArgs) { super(embeddings, args); this.args = args; this._index = args.index; this._mapping = args.mapping ?? {}; this.embeddings = embeddings; this.docstore = args?.docstore ?? new SynchronousInMemoryDocstore(); } /** * Adds an array of Document objects to the store. * @param documents An array of Document objects. * @returns A Promise that resolves when the documents have been added. */ async addDocuments(documents: Document[], options?: { ids?: string[] }) { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } public get index(): IndexFlatL2 { if (!this._index) { throw new Error( "Vector store not initialised yet. Try calling `fromTexts`, `fromDocuments` or `fromIndex` first." 
); } return this._index; } private set index(index: IndexFlatL2) { this._index = index; } /** * Adds an array of vectors and their corresponding Document objects to * the store. * @param vectors An array of vectors. * @param documents An array of Document objects corresponding to the vectors. * @returns A Promise that resolves with an array of document IDs when the vectors and documents have been added. */ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } ) { if (vectors.length === 0) { return []; } if (vectors.length !== documents.length) { throw new Error(`Vectors and documents must have the same length`); } const dv = vectors[0].length; if (!this._index) { const { IndexFlatL2 } = await FaissStore.importFaiss(); this._index = new IndexFlatL2(dv); } const d = this.index.getDimension(); if (dv !== d) { throw new Error( `Vectors must have the same length as the number of dimensions (${d})` ); } const docstoreSize = this.index.ntotal(); const documentIds = options?.ids ?? documents.map(() => uuid.v4()); for (let i = 0; i < vectors.length; i += 1) { const documentId = documentIds[i]; const id = docstoreSize + i; this.index.add(vectors[i]); this._mapping[id] = documentId; this.docstore.add({ [documentId]: documents[i] }); } return documentIds; } /** * Performs a similarity search in the vector store using a query vector * and returns the top k results along with their scores. * @param query A query vector. * @param k The number of top results to return. * @returns A Promise that resolves with an array of tuples, each containing a Document and its corresponding score. 
*/ async similaritySearchVectorWithScore(query: number[], k: number) { const d = this.index.getDimension(); if (query.length !== d) { throw new Error( `Query vector must have the same length as the number of dimensions (${d})` ); } if (k > this.index.ntotal()) { const total = this.index.ntotal(); console.warn( `k (${k}) is greater than the number of elements in the index (${total}), setting k to ${total}` ); // eslint-disable-next-line no-param-reassign k = total; } const result = this.index.search(query, k); return result.labels.map((id, index) => { const uuid = this._mapping[id]; return [this.docstore.search(uuid), result.distances[index]] as [ Document, number ]; }); } /** * Saves the current state of the FaissStore to a specified directory. * @param directory The directory to save the state to. * @returns A Promise that resolves when the state has been saved. */ async save(directory: string) { const fs = await import("node:fs/promises"); const path = await import("node:path"); await fs.mkdir(directory, { recursive: true }); await Promise.all([ this.index.write(path.join(directory, "faiss.index")), await fs.writeFile( path.join(directory, "docstore.json"), JSON.stringify([ Array.from(this.docstore._docs.entries()), this._mapping, ]) ), ]); } /** * Method to delete documents. * @param params Object containing the IDs of the documents to delete. * @returns A promise that resolves when the deletion is complete. */ async delete(params: { ids: string[] }) { const documentIds = params.ids; if (documentIds == null) { throw new Error("No documentIds provided to delete."); } const mappings = new Map( Object.entries(this._mapping).map(([key, value]) => [ parseInt(key, 10), value, ]) ); const reversedMappings = new Map( Array.from(mappings, (entry) => [entry[1], entry[0]]) ); const missingIds = new Set( documentIds.filter((id) => !reversedMappings.has(id)) ); if (missingIds.size > 0) { throw new Error( `Some specified documentIds do not exist in the current store. 
DocumentIds not found: ${Array.from( missingIds ).join(", ")}` ); } // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const indexIdToDelete = documentIds.map((id) => reversedMappings.get(id)!); // remove from index this.index.removeIds(indexIdToDelete); // remove from docstore documentIds.forEach((id) => { this.docstore._docs.delete(id); }); // remove from mappings indexIdToDelete.forEach((id) => { mappings.delete(id); }); this._mapping = { ...Array.from(mappings.values()) }; } /** * Merges the current FaissStore with another FaissStore. * @param targetIndex The FaissStore to merge with. * @returns A Promise that resolves with an array of document IDs when the merge is complete. */ async mergeFrom(targetIndex: FaissStore) { const targetIndexDimensions = targetIndex.index.getDimension(); if (!this._index) { const { IndexFlatL2 } = await FaissStore.importFaiss(); this._index = new IndexFlatL2(targetIndexDimensions); } const d = this.index.getDimension(); if (targetIndexDimensions !== d) { throw new Error("Cannot merge indexes with different dimensions."); } const targetMapping = targetIndex.getMapping(); const targetDocstore = targetIndex.getDocstore(); const targetSize = targetIndex.index.ntotal(); const documentIds = []; const currentDocstoreSize = this.index.ntotal(); for (let i = 0; i < targetSize; i += 1) { const targetId = targetMapping[i]; documentIds.push(targetId); const targetDocument = targetDocstore.search(targetId); const id = currentDocstoreSize + i; this._mapping[id] = targetId; this.docstore.add({ [targetId]: targetDocument }); } this.index.mergeFrom(targetIndex.index); return documentIds; } /** * Loads a FaissStore from a specified directory. * @param directory The directory to load the FaissStore from. * @param embeddings An Embeddings object. * @returns A Promise that resolves with a new FaissStore instance. 
*/ static async load(directory: string, embeddings: EmbeddingsInterface) { const fs = await import("node:fs/promises"); const path = await import("node:path"); const readStore = (directory: string) => fs .readFile(path.join(directory, "docstore.json"), "utf8") .then(JSON.parse) as Promise< [Map<string, Document>, Record<number, string>] >; const readIndex = async (directory: string) => { const { IndexFlatL2 } = await this.importFaiss(); return IndexFlatL2.read(path.join(directory, "faiss.index")); }; const [[docstoreFiles, mapping], index] = await Promise.all([ readStore(directory), readIndex(directory), ]); const docstore = new SynchronousInMemoryDocstore(new Map(docstoreFiles)); return new this(embeddings, { docstore, index, mapping }); } static async loadFromPython( directory: string, embeddings: EmbeddingsInterface ) { const fs = await import("node:fs/promises"); const path = await import("node:path"); const { Parser, NameRegistry } = await this.importPickleparser(); class PyDocument extends Map { toDocument(): Document { return new Document({ pageContent: this.get("page_content"), metadata: this.get("metadata"), }); } } class PyInMemoryDocstore { _dict: Map<string, PyDocument>; toInMemoryDocstore(): SynchronousInMemoryDocstore { const s = new SynchronousInMemoryDocstore(); for (const [key, value] of Object.entries(this._dict)) { s._docs.set(key, value.toDocument()); } return s; } } const readStore = async (directory: string) => { const pkl = await fs.readFile( path.join(directory, "index.pkl"), "binary" ); const buffer = Buffer.from(pkl, "binary"); const registry = new NameRegistry() .register( "langchain.docstore.in_memory", "InMemoryDocstore", PyInMemoryDocstore ) .register( "langchain_community.docstore.in_memory", "InMemoryDocstore", PyInMemoryDocstore ) .register("langchain.schema", "Document", PyDocument) .register("langchain.docstore.document", "Document", PyDocument) .register("langchain.schema.document", "Document", PyDocument) 
.register("langchain_core.documents.base", "Document", PyDocument) .register("pathlib", "WindowsPath", (...args) => args.join("\\")) .register("pathlib", "PosixPath", (...args) => args.join("/")); const pickleparser = new Parser({ nameResolver: registry, }); const [rawStore, mapping] = pickleparser.parse<[PyInMemoryDocstore, Record<number, string>]>( buffer ); const store = rawStore.toInMemoryDocstore(); return { store, mapping }; }; const readIndex = async (directory: string) => { const { IndexFlatL2 } = await this.importFaiss(); return IndexFlatL2.read(path.join(directory, "index.faiss")); }; const [store, index] = await Promise.all([ readStore(directory), readIndex(directory), ]); return new this(embeddings, { docstore: store.store, index, mapping: store.mapping, }); } /** * Creates a new FaissStore from an array of texts, their corresponding * metadata, and an Embeddings object. * @param texts An array of texts. * @param metadatas An array of metadata corresponding to the texts, or a single metadata object to be used for all texts. * @param embeddings An Embeddings object. * @param dbConfig An optional configuration object for the document store. * @returns A Promise that resolves with a new FaissStore instance. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig?: { docstore?: SynchronousInMemoryDocstore; } ): Promise<FaissStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return this.fromDocuments(docs, embeddings, dbConfig); } /** * Creates a new FaissStore from an array of Document objects and an * Embeddings object. * @param docs An array of Document objects. * @param embeddings An Embeddings object. * @param dbConfig An optional configuration object for the document store. 
* @returns A Promise that resolves with a new FaissStore instance. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig?: { docstore?: SynchronousInMemoryDocstore; } ): Promise<FaissStore> { const args: FaissLibArgs = { docstore: dbConfig?.docstore, }; const instance = new this(embeddings, args); await instance.addDocuments(docs); return instance; } /** * Creates a new FaissStore from an existing FaissStore and an Embeddings * object. * @param targetIndex An existing FaissStore. * @param embeddings An Embeddings object. * @param dbConfig An optional configuration object for the document store. * @returns A Promise that resolves with a new FaissStore instance. */ static async fromIndex( targetIndex: FaissStore, embeddings: EmbeddingsInterface, dbConfig?: { docstore?: SynchronousInMemoryDocstore; } ): Promise<FaissStore> { const args: FaissLibArgs = { docstore: dbConfig?.docstore, }; const instance = new this(embeddings, args); await instance.mergeFrom(targetIndex); return instance; } static async importFaiss(): Promise<{ IndexFlatL2: typeof IndexFlatL2 }> { try { const { default: { IndexFlatL2 }, } = await import("faiss-node"); return { IndexFlatL2 }; // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (err: any) { throw new Error( `Could not import faiss-node. Please install faiss-node as a dependency with, e.g. \`npm install -S faiss-node\`.\n\nError: ${err?.message}` ); } } static async importPickleparser(): Promise<{ Parser: typeof Parser; NameRegistry: typeof NameRegistry; }> { try { const { default: { Parser, NameRegistry }, } = await import("pickleparser"); return { Parser, NameRegistry }; // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (err: any) { throw new Error( `Could not import pickleparser. Please install pickleparser as a dependency with, e.g. \`npm install -S pickleparser\`.\n\nError: ${err?.message}` ); } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/opensearch.ts
import { Client, RequestParams, errors } from "@opensearch-project/opensearch"; import * as uuid from "uuid"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; type OpenSearchEngine = "nmslib" | "hnsw"; type OpenSearchSpaceType = "l2" | "cosinesimil" | "ip"; /** * Interface defining the options for vector search in OpenSearch. It * includes the engine type, space type, and parameters for the HNSW * algorithm. */ interface VectorSearchOptions { readonly engine?: OpenSearchEngine; readonly spaceType?: OpenSearchSpaceType; readonly m?: number; readonly efConstruction?: number; readonly efSearch?: number; readonly numberOfShards?: number; readonly numberOfReplicas?: number; } /** * Interface defining the arguments required to create an instance of the * OpenSearchVectorStore class. It includes the OpenSearch client, index * name, and vector search options. */ export interface OpenSearchClientArgs { readonly client: Client; readonly vectorFieldName?: string; readonly textFieldName?: string; readonly metadataFieldName?: string; readonly service?: "es" | "aoss"; readonly indexName?: string; readonly vectorSearchOptions?: VectorSearchOptions; } /** * Type alias for an object. It's used to define filters for OpenSearch * queries. */ type OpenSearchFilter = { [key: string]: FilterTypeValue | (string | number)[] | string | number; }; /** * FilterTypeValue for OpenSearch queries. */ interface FilterTypeValue { exists?: boolean; fuzzy?: string; ids?: string[]; prefix?: string; gte?: number; gt?: number; lte?: number; lt?: number; regexp?: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any terms_set?: Record<string, any>; wildcard?: string; } /** * Class that provides a wrapper around the OpenSearch service for vector * search. 
It provides methods for adding documents and vectors to the * OpenSearch index, searching for similar vectors, and managing the * OpenSearch index. */ export class OpenSearchVectorStore extends VectorStore { declare FilterType: OpenSearchFilter; private readonly client: Client; private readonly indexName: string; // if true, use the Amazon OpenSearch Serverless service instead of es private readonly isAoss: boolean; private readonly engine: OpenSearchEngine; private readonly spaceType: OpenSearchSpaceType; private readonly efConstruction: number; private readonly efSearch: number; private readonly numberOfShards: number; private readonly numberOfReplicas: number; private readonly m: number; private readonly vectorFieldName: string; private readonly textFieldName: string; private readonly metadataFieldName: string; _vectorstoreType(): string { return "opensearch"; } constructor(embeddings: EmbeddingsInterface, args: OpenSearchClientArgs) { super(embeddings, args); this.spaceType = args.vectorSearchOptions?.spaceType ?? "l2"; this.engine = args.vectorSearchOptions?.engine ?? "nmslib"; this.m = args.vectorSearchOptions?.m ?? 16; this.efConstruction = args.vectorSearchOptions?.efConstruction ?? 512; this.efSearch = args.vectorSearchOptions?.efSearch ?? 512; this.numberOfShards = args.vectorSearchOptions?.numberOfShards ?? 5; this.numberOfReplicas = args.vectorSearchOptions?.numberOfReplicas ?? 1; this.vectorFieldName = args.vectorFieldName ?? "embedding"; this.textFieldName = args.textFieldName ?? "text"; this.metadataFieldName = args.metadataFieldName ?? "metadata"; this.client = args.client; this.indexName = args.indexName ?? "documents"; this.isAoss = (args.service ?? "es") === "aoss"; } /** * Method to add documents to the OpenSearch index. It first converts the * documents to vectors using the embeddings, then adds the vectors to the * index. * @param documents The documents to be added to the OpenSearch index. * @returns Promise resolving to void. 
*/ async addDocuments(documents: Document[]): Promise<void> { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents ); } /** * Method to add vectors to the OpenSearch index. It ensures the index * exists, then adds the vectors and associated documents to the index. * @param vectors The vectors to be added to the OpenSearch index. * @param documents The documents associated with the vectors. * @param options Optional parameter that can contain the IDs for the documents. * @returns Promise resolving to void. */ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } ): Promise<void> { await this.ensureIndexExists( vectors[0].length, this.engine, this.spaceType, this.efSearch, this.efConstruction, this.numberOfShards, this.numberOfReplicas, this.m ); const documentIds = options?.ids ?? Array.from({ length: vectors.length }, () => uuid.v4()); const operations = vectors.flatMap((embedding, idx) => { // eslint-disable-next-line @typescript-eslint/no-explicit-any const document: Record<string, any> = [ { index: { _index: this.indexName, _id: documentIds[idx], }, }, { [this.vectorFieldName]: embedding, [this.textFieldName]: documents[idx].pageContent, [this.metadataFieldName]: documents[idx].metadata, }, ]; // aoss does not support document id if (this.isAoss) { delete document[0].index?._id; } return document; }); await this.client.bulk({ body: operations }); // aoss does not support refresh if (!this.isAoss) { await this.client.indices.refresh({ index: this.indexName }); } } /** * Method to perform a similarity search on the OpenSearch index using a * query vector. It returns the k most similar documents and their scores. * @param query The query vector. * @param k The number of similar documents to return. * @param filter Optional filter for the OpenSearch query. 
* @returns Promise resolving to an array of tuples, each containing a Document and its score. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: OpenSearchFilter | undefined ): Promise<[Document, number][]> { const search: RequestParams.Search = { index: this.indexName, body: { query: { bool: { filter: { bool: this.buildMetadataTerms(filter) }, must: [ { knn: { [this.vectorFieldName]: { vector: query, k }, }, }, ], }, }, size: k, }, }; const { body } = await this.client.search(search); // eslint-disable-next-line @typescript-eslint/no-explicit-any return body.hits.hits.map((hit: any) => [ new Document({ pageContent: hit._source[this.textFieldName], metadata: hit._source[this.metadataFieldName], }), hit._score, ]); } /** * Static method to create a new OpenSearchVectorStore from an array of * texts, their metadata, embeddings, and OpenSearch client arguments. * @param texts The texts to be converted into documents and added to the OpenSearch index. * @param metadatas The metadata associated with the texts. Can be an array of objects or a single object. * @param embeddings The embeddings used to convert the texts into vectors. * @param args The OpenSearch client arguments. * @returns Promise resolving to a new instance of OpenSearchVectorStore. */ static fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, args: OpenSearchClientArgs ): Promise<OpenSearchVectorStore> { const documents = texts.map((text, idx) => { const metadata = Array.isArray(metadatas) ? metadatas[idx] : metadatas; return new Document({ pageContent: text, metadata }); }); return OpenSearchVectorStore.fromDocuments(documents, embeddings, args); } /** * Static method to create a new OpenSearchVectorStore from an array of * Documents, embeddings, and OpenSearch client arguments. * @param docs The documents to be added to the OpenSearch index. * @param embeddings The embeddings used to convert the documents into vectors. 
* @param dbConfig The OpenSearch client arguments. * @returns Promise resolving to a new instance of OpenSearchVectorStore. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: OpenSearchClientArgs ): Promise<OpenSearchVectorStore> { const store = new OpenSearchVectorStore(embeddings, dbConfig); await store.addDocuments(docs).then(() => store); return store; } /** * Static method to create a new OpenSearchVectorStore from an existing * OpenSearch index, embeddings, and OpenSearch client arguments. * @param embeddings The embeddings used to convert the documents into vectors. * @param dbConfig The OpenSearch client arguments. * @returns Promise resolving to a new instance of OpenSearchVectorStore. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig: OpenSearchClientArgs ): Promise<OpenSearchVectorStore> { const store = new OpenSearchVectorStore(embeddings, dbConfig); await store.client.cat.indices({ index: store.indexName }); return store; } private async ensureIndexExists( dimension: number, engine = "nmslib", spaceType = "l2", efSearch = 512, efConstruction = 512, numberOfShards = 5, numberOfReplicas = 1, m = 16 ): Promise<void> { const body = { settings: { index: { number_of_shards: numberOfShards, number_of_replicas: numberOfReplicas, knn: true, "knn.algo_param.ef_search": efSearch, }, }, mappings: { dynamic_templates: [ { // map all metadata properties to be keyword [`${this.metadataFieldName}.*`]: { match_mapping_type: "string", mapping: { type: "keyword" }, }, }, { [`${this.metadataFieldName}.loc`]: { match_mapping_type: "object", mapping: { type: "object" }, }, }, ], properties: { [this.textFieldName]: { type: "text" }, [this.metadataFieldName]: { type: "object" }, [this.vectorFieldName]: { type: "knn_vector", dimension, method: { name: "hnsw", engine, space_type: spaceType, parameters: { ef_construction: efConstruction, m }, }, }, }, }, }; const indexExists = await this.doesIndexExist(); if 
(indexExists) return; await this.client.indices.create({ index: this.indexName, body }); } /** * Builds metadata terms for OpenSearch queries. * * This function takes a filter object and constructs an array of query terms * compatible with OpenSearch 2.x. It supports a variety of query types including * term, terms, terms_set, ids, range, prefix, exists, fuzzy, wildcard, and regexp. * Reference: https://opensearch.org/docs/latest/query-dsl/term/index/ * * @param {Filter | null} filter - The filter object used to construct query terms. * Each key represents a field, and the value specifies the type of query and its parameters. * * @returns {Array<Record<string, any>>} An array of OpenSearch query terms. * * @example * // Example filter: * const filter = { * status: { "exists": true }, * age: { "gte": 30, "lte": 40 }, * tags: ["tag1", "tag2"], * description: { "wildcard": "*test*" }, * * }; * * // Resulting query terms: * const queryTerms = buildMetadataTerms(filter); * // queryTerms would be an array of OpenSearch query objects. 
*/ buildMetadataTerms(filter: OpenSearchFilter | undefined): object { if (!filter) return {}; const must = []; const must_not = []; for (const [key, value] of Object.entries(filter)) { const metadataKey = `${this.metadataFieldName}.${key}`; if (value) { if (typeof value === "object" && !Array.isArray(value)) { if ("exists" in value) { if (value.exists) { must.push({ exists: { field: metadataKey } }); } else { must_not.push({ exists: { field: metadataKey } }); } } else if ("fuzzy" in value) { must.push({ fuzzy: { [metadataKey]: value.fuzzy } }); } else if ("ids" in value) { must.push({ ids: { values: value.ids } }); } else if ("prefix" in value) { must.push({ prefix: { [metadataKey]: value.prefix } }); } else if ( "gte" in value || "gt" in value || "lte" in value || "lt" in value ) { must.push({ range: { [metadataKey]: value } }); } else if ("regexp" in value) { must.push({ regexp: { [metadataKey]: value.regexp } }); } else if ("terms_set" in value) { must.push({ terms_set: { [metadataKey]: value.terms_set } }); } else if ("wildcard" in value) { must.push({ wildcard: { [metadataKey]: value.wildcard } }); } } else { const aggregatorKey = Array.isArray(value) ? "terms" : "term"; must.push({ [aggregatorKey]: { [metadataKey]: value } }); } } } return { must, must_not }; } /** * Method to check if the OpenSearch index exists. * @returns Promise resolving to a boolean indicating whether the index exists. */ async doesIndexExist(): Promise<boolean> { try { await this.client.cat.indices({ index: this.indexName }); return true; } catch (err: unknown) { // eslint-disable-next-line no-instanceof/no-instanceof if (err instanceof errors.ResponseError && err.statusCode === 404) { return false; } throw err; } } /** * Method to delete the OpenSearch index if it exists. * @returns Promise resolving to void. 
*/ async deleteIfExists(): Promise<void> { const indexExists = await this.doesIndexExist(); if (!indexExists) return; await this.client.indices.delete({ index: this.indexName }); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/weaviate.ts
import * as uuid from "uuid"; import type { WeaviateClient, WeaviateObject, WhereFilter, } from "weaviate-ts-client"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; // Note this function is not generic, it is designed specifically for Weaviate // https://weaviate.io/developers/weaviate/config-refs/datatypes#introduction /** * @deprecated Prefer the `@langchain/weaviate` package. */ export const flattenObjectForWeaviate = ( // eslint-disable-next-line @typescript-eslint/no-explicit-any obj: Record<string, any> ) => { // eslint-disable-next-line @typescript-eslint/no-explicit-any const flattenedObject: Record<string, any> = {}; for (const key in obj) { if (!Object.hasOwn(obj, key)) { continue; } const value = obj[key]; if (typeof obj[key] === "object" && !Array.isArray(value)) { const recursiveResult = flattenObjectForWeaviate(value); for (const deepKey in recursiveResult) { if (Object.hasOwn(obj, key)) { flattenedObject[`${key}_${deepKey}`] = recursiveResult[deepKey]; } } } else if (Array.isArray(value)) { if (value.length === 0) { flattenedObject[key] = value; } else if ( typeof value[0] !== "object" && // eslint-disable-next-line @typescript-eslint/no-explicit-any value.every((el: any) => typeof el === typeof value[0]) ) { // Weaviate only supports arrays of primitive types, // where all elements are of the same type flattenedObject[key] = value; } } else { flattenedObject[key] = value; } } return flattenedObject; }; /** * @deprecated Prefer the `@langchain/weaviate` package. * * Interface that defines the arguments required to create a new instance * of the `WeaviateStore` class. It includes the Weaviate client, the name * of the class in Weaviate, and optional keys for text and metadata. 
 */
export interface WeaviateLibArgs {
  client: WeaviateClient;
  /**
   * The name of the class in Weaviate. Must start with a capital letter.
   */
  indexName: string;
  // Property on each stored object that holds the document's page content.
  textKey?: string;
  // Metadata keys to request back from Weaviate on reads.
  metadataKeys?: string[];
  // Optional multi-tenancy tenant applied to every read/write operation.
  tenant?: string;
}

interface ResultRow {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  [key: string]: any;
}

/**
 * @deprecated Prefer the `@langchain/weaviate` package.
 *
 * Interface that defines a filter for querying data from Weaviate. It
 * includes a distance and a `WhereFilter`.
 */
export interface WeaviateFilter {
  distance?: number;
  where: WhereFilter;
}

/**
 * @deprecated Prefer the `@langchain/weaviate` package.
 *
 * Class that extends the `VectorStore` base class. It provides methods to
 * interact with a Weaviate index, including adding vectors and documents,
 * deleting data, and performing similarity searches.
 */
export class WeaviateStore extends VectorStore {
  declare FilterType: WeaviateFilter;

  private client: WeaviateClient;

  private indexName: string;

  private textKey: string;

  // GraphQL fields requested on reads: always textKey, plus any metadata
  // keys that survived the GraphQL-name validation in the constructor.
  private queryAttrs: string[];

  private tenant?: string;

  _vectorstoreType(): string {
    return "weaviate";
  }

  constructor(public embeddings: EmbeddingsInterface, args: WeaviateLibArgs) {
    super(embeddings, args);

    this.client = args.client;
    this.indexName = args.indexName;
    this.textKey = args.textKey || "text";
    this.queryAttrs = [this.textKey];
    this.tenant = args.tenant;

    if (args.metadataKeys) {
      // De-duplicate and keep only keys usable as GraphQL field names.
      this.queryAttrs = [
        ...new Set([
          ...this.queryAttrs,
          ...args.metadataKeys.filter((k) => {
            // https://spec.graphql.org/June2018/#sec-Names
            // queryAttrs need to be valid GraphQL Names
            const keyIsValid = /^[_A-Za-z][_0-9A-Za-z]*$/.test(k);
            if (!keyIsValid) {
              console.warn(
                `Skipping metadata key ${k} as it is not a valid GraphQL Name`
              );
            }
            return keyIsValid;
          }),
        ]),
      ];
    }
  }

  /**
   * Method to add vectors and corresponding documents to the Weaviate
   * index.
   * @param vectors Array of vectors to be added.
   * @param documents Array of documents corresponding to the vectors.
   * @param options Optional parameter that can include specific IDs for the documents.
   * @returns An array of document IDs.
   */
  async addVectors(
    vectors: number[][],
    documents: Document[],
    options?: { ids?: string[] }
  ) {
    // Use caller-provided IDs when given; otherwise mint a UUID v4 per document.
    const documentIds = options?.ids ?? documents.map((_) => uuid.v4());
    const batch: WeaviateObject[] = documents.map((document, index) => {
      // `id` is reserved: it would collide with the Weaviate object id.
      if (Object.hasOwn(document.metadata, "id"))
        throw new Error(
          "Document inserted to Weaviate vectorstore should not have `id` in their metadata."
        );

      const flattenedMetadata = flattenObjectForWeaviate(document.metadata);
      return {
        ...(this.tenant ? { tenant: this.tenant } : {}),
        class: this.indexName,
        id: documentIds[index],
        vector: vectors[index],
        properties: {
          [this.textKey]: document.pageContent,
          ...flattenedMetadata,
        },
      };
    });

    try {
      const responses = await this.client.batch
        .objectsBatcher()
        .withObjects(...batch)
        .do();
      // if storing vectors fails, we need to know why
      const errorMessages: string[] = [];
      responses.forEach((response) => {
        if (response?.result?.errors?.error) {
          errorMessages.push(
            ...response.result.errors.error.map(
              (err) =>
                err.message ??
                "!! Unfortunately no error message was presented in the API response !!"
            )
          );
        }
      });
      if (errorMessages.length > 0) {
        throw new Error(errorMessages.join("\n"));
      }
    } catch (e) {
      throw Error(`Error adding vectors: ${e}`);
    }
    return documentIds;
  }

  /**
   * Method to add documents to the Weaviate index. It first generates
   * vectors for the documents using the embeddings, then adds the vectors
   * and documents to the index.
   * @param documents Array of documents to be added.
   * @param options Optional parameter that can include specific IDs for the documents.
   * @returns An array of document IDs.
   */
  async addDocuments(documents: Document[], options?: { ids?: string[] }) {
    return this.addVectors(
      await this.embeddings.embedDocuments(documents.map((d) => d.pageContent)),
      documents,
      options
    );
  }

  /**
   * Method to delete data from the Weaviate index. It can delete data based
   * on specific IDs or a filter.
   * @param params Object that includes either an array of IDs or a filter for the data to be deleted.
   * @returns Promise that resolves when the deletion is complete.
   */
  async delete(params: {
    ids?: string[];
    filter?: WeaviateFilter;
  }): Promise<void> {
    const { ids, filter } = params;

    if (ids && ids.length > 0) {
      // IDs are deleted one at a time via the data deleter.
      for (const id of ids) {
        let deleter = this.client.data
          .deleter()
          .withClassName(this.indexName)
          .withId(id);

        if (this.tenant) {
          deleter = deleter.withTenant(this.tenant);
        }

        await deleter.do();
      }
    } else if (filter) {
      // Filter-based deletion uses the batch delete API; only the `where`
      // part of the filter applies (distance is a search-only concept).
      let batchDeleter = this.client.batch
        .objectsBatchDeleter()
        .withClassName(this.indexName)
        .withWhere(filter.where);

      if (this.tenant) {
        batchDeleter = batchDeleter.withTenant(this.tenant);
      }

      await batchDeleter.do();
    } else {
      throw new Error(
        `This method requires either "ids" or "filter" to be set in the input object`
      );
    }
  }

  /**
   * Method to perform a similarity search on the stored vectors in the
   * Weaviate index. It returns the top k most similar documents and their
   * similarity scores.
   * @param query The query vector.
   * @param k The number of most similar documents to return.
   * @param filter Optional filter to apply to the search.
   * @returns An array of tuples, where each tuple contains a document and its similarity score.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: WeaviateFilter
  ): Promise<[Document, number][]> {
    // Delegate to the embedding-returning variant and drop the vectors.
    const resultsWithEmbedding =
      await this.similaritySearchVectorWithScoreAndEmbedding(query, k, filter);
    return resultsWithEmbedding.map(([document, score, _embedding]) => [
      document,
      score,
    ]);
  }

  /**
   * Method to perform a similarity search on the stored vectors in the
   * Weaviate index. It returns the top k most similar documents, their
   * similarity scores and embedding vectors.
   * @param query The query vector.
   * @param k The number of most similar documents to return.
   * @param filter Optional filter to apply to the search.
   * @returns An array of tuples, where each tuple contains a document, its similarity score and its embedding vector.
   */
  async similaritySearchVectorWithScoreAndEmbedding(
    query: number[],
    k: number,
    filter?: WeaviateFilter
  ): Promise<[Document, number, number[]][]> {
    try {
      // `_additional { distance vector }` asks Weaviate to return the
      // similarity distance and stored vector alongside the data fields.
      let builder = this.client.graphql
        .get()
        .withClassName(this.indexName)
        .withFields(
          `${this.queryAttrs.join(" ")} _additional { distance vector }`
        )
        .withNearVector({
          vector: query,
          distance: filter?.distance,
        })
        .withLimit(k);

      if (this.tenant) {
        builder = builder.withTenant(this.tenant);
      }

      if (filter?.where) {
        builder = builder.withWhere(filter.where);
      }

      const result = await builder.do();
      const documents: [Document, number, number[]][] = [];
      for (const data of result.data.Get[this.indexName]) {
        // Pull the text field out; everything else becomes metadata.
        const { [this.textKey]: text, _additional, ...rest }: ResultRow = data;

        documents.push([
          new Document({
            pageContent: text,
            metadata: rest,
          }),
          _additional.distance,
          _additional.vector,
        ]);
      }
      return documents;
    } catch (e) {
      throw Error(`'Error in similaritySearch' ${e}`);
    }
  }

  /**
   * Return documents selected using the maximal marginal relevance.
   * Maximal marginal relevance optimizes for similarity to the query AND diversity
   * among selected documents.
   *
   * @param {string} query - Text to look up documents similar to.
   * @param {number} options.k - Number of documents to return.
   * @param {number} options.fetchK - Number of documents to fetch before passing to the MMR algorithm.
   * @param {number} options.lambda - Number between 0 and 1 that determines the degree of diversity among the results,
   * where 0 corresponds to maximum diversity and 1 to minimum diversity.
   * @param {this["FilterType"]} options.filter - Optional filter
   * @param _callbacks
   *
   * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance.
   */
  override async maxMarginalRelevanceSearch(
    query: string,
    options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>,
    _callbacks?: undefined
  ): Promise<Document[]> {
    const { k, fetchK = 20, lambda = 0.5, filter } = options;
    const queryEmbedding: number[] = await this.embeddings.embedQuery(query);
    // Over-fetch fetchK candidates, then let MMR re-rank down to k.
    const allResults: [Document, number, number[]][] =
      await this.similaritySearchVectorWithScoreAndEmbedding(
        queryEmbedding,
        fetchK,
        filter
      );
    const embeddingList = allResults.map(
      ([_doc, _score, embedding]) => embedding
    );
    const mmrIndexes = maximalMarginalRelevance(
      queryEmbedding,
      embeddingList,
      lambda,
      k
    );
    return mmrIndexes
      .filter((idx) => idx !== -1)
      .map((idx) => allResults[idx][0]);
  }

  /**
   * Static method to create a new `WeaviateStore` instance from a list of
   * texts. It first creates documents from the texts and metadata, then
   * adds the documents to the Weaviate index.
   * @param texts Array of texts.
   * @param metadatas Metadata for the texts. Can be a single object or an array of objects.
   * @param embeddings Embeddings to be used for the texts.
   * @param args Arguments required to create a new `WeaviateStore` instance.
   * @returns A new `WeaviateStore` instance.
   */
  static fromTexts(
    texts: string[],
    metadatas: object | object[],
    embeddings: EmbeddingsInterface,
    args: WeaviateLibArgs
  ): Promise<WeaviateStore> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // A single (non-array) metadata object is shared by every text.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return WeaviateStore.fromDocuments(docs, embeddings, args);
  }

  /**
   * Static method to create a new `WeaviateStore` instance from a list of
   * documents. It adds the documents to the Weaviate index.
   * @param docs Array of documents.
   * @param embeddings Embeddings to be used for the documents.
   * @param args Arguments required to create a new `WeaviateStore` instance.
   * @returns A new `WeaviateStore` instance.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    args: WeaviateLibArgs
  ): Promise<WeaviateStore> {
    const instance = new this(embeddings, args);
    await instance.addDocuments(docs);
    return instance;
  }

  /**
   * Static method to create a new `WeaviateStore` instance from an existing
   * Weaviate index.
   * @param embeddings Embeddings to be used for the Weaviate index.
   * @param args Arguments required to create a new `WeaviateStore` instance.
   * @returns A new `WeaviateStore` instance.
   */
  static async fromExistingIndex(
    embeddings: EmbeddingsInterface,
    args: WeaviateLibArgs
  ): Promise<WeaviateStore> {
    return new this(embeddings, args);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/libsql.ts
import { Document } from "@langchain/core/documents";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import type { Client, InStatement } from "@libsql/client";
import {
  SqliteWhereBuilder,
  WhereCondition,
} from "../utils/sqlite_where_builder.js";

// eslint-disable-next-line @typescript-eslint/no-explicit-any
type MetadataDefault = Record<string, any>;

/**
 * Interface for LibSQLVectorStore configuration options.
 */
export interface LibSQLVectorStoreArgs {
  db: Client;
  /** Name of the table to store vectors. Defaults to "vectors". */
  table?: string;
  /** Name of the column to store embeddings. Defaults to "embedding". */
  column?: string;
  // TODO: Support adding additional columns to the table for metadata.
}

/**
 * A vector store using LibSQL/Turso for storage and retrieval.
 */
export class LibSQLVectorStore<
  Metadata extends MetadataDefault = MetadataDefault
> extends VectorStore {
  // A filter can be a raw SQL WHERE fragment, an InStatement, or a
  // structured WhereCondition handled by SqliteWhereBuilder.
  declare FilterType: string | InStatement | WhereCondition<Metadata>;

  private db;

  private readonly table: string;

  private readonly column: string;

  /**
   * Returns the type of vector store.
   * @returns {string} The string "libsql".
   */
  _vectorstoreType(): string {
    return "libsql";
  }

  /**
   * Initializes a new instance of the LibSQLVectorStore.
   * @param {EmbeddingsInterface} embeddings - The embeddings interface to use.
   * @param {LibSQLVectorStoreArgs} options - Configuration options for the vector store (includes the LibSQL client).
   */
  constructor(embeddings: EmbeddingsInterface, options: LibSQLVectorStoreArgs) {
    super(embeddings, options);

    this.db = options.db;
    this.table = options.table || "vectors";
    this.column = options.column || "embedding";
  }

  /**
   * Adds documents to the vector store.
   * @param {Document<Metadata>[]} documents - The documents to add.
   * @returns {Promise<string[]>} The IDs of the added documents.
   */
  async addDocuments(documents: Document<Metadata>[]): Promise<string[]> {
    const texts = documents.map(({ pageContent }) => pageContent);

    const embeddings = await this.embeddings.embedDocuments(texts);

    return this.addVectors(embeddings, documents);
  }

  /**
   * Adds vectors to the vector store.
   * @param {number[][]} vectors - The vectors to add.
   * @param {Document<Metadata>[]} documents - The documents associated with the vectors.
   * @returns {Promise<string[]>} The IDs of the added vectors.
   */
  async addVectors(
    vectors: number[][],
    documents: Document<Metadata>[]
  ): Promise<string[]> {
    const rows = vectors.map((embedding, idx) => ({
      content: documents[idx].pageContent,
      // libSQL's vector() function takes the embedding as a JSON-style string.
      embedding: `[${embedding.join(",")}]`,
      metadata: JSON.stringify(documents[idx].metadata),
    }));
    // Insert in chunks of 100 so each batch statement stays bounded.
    const batchSize = 100;
    const ids: string[] = [];

    for (let i = 0; i < rows.length; i += batchSize) {
      const chunk = rows.slice(i, i + batchSize);
      const insertQueries: InStatement[] = chunk.map((row) => ({
        sql: `INSERT INTO ${this.table} (content, metadata, ${this.column}) VALUES (:content, :metadata, vector(:embedding)) RETURNING ${this.table}.rowid AS id`,
        args: row,
      }));

      const results = await this.db.batch(insertQueries);

      // Each INSERT returns its new rowid via RETURNING.
      ids.push(
        ...results.flatMap((result) => result.rows.map((row) => String(row.id)))
      );
    }

    return ids;
  }

  /**
   * Performs a similarity search using a vector query and returns documents with their scores.
   * @param {number[]} query - The query vector.
   * @param {number} k - The number of results to return.
   * @param filter - Optional filter: raw SQL fragment, InStatement, or WhereCondition.
   * @returns {Promise<[Document<Metadata>, number][]>} An array of tuples containing the similar documents and their scores.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: this["FilterType"]
  ): Promise<[Document<Metadata>, number][]> {
    // Potential SQL injection risk if query vector is not properly sanitized.
    if (!query.every((num) => typeof num === "number" && !Number.isNaN(num))) {
      throw new Error("Invalid query vector: all elements must be numbers");
    }

    const queryVector = `[${query.join(",")}]`;

    // ANN lookup via the vector index, joined back to the table for data.
    const sql = {
      sql: `SELECT ${this.table}.rowid as id, ${this.table}.content, ${this.table}.metadata, vector_distance_cos(${this.table}.${this.column}, vector(:queryVector)) AS distance FROM vector_top_k('idx_${this.table}_${this.column}', vector(:queryVector), CAST(:k AS INTEGER)) as top_k JOIN ${this.table} ON top_k.rowid = ${this.table}.rowid`,
      args: { queryVector, k },
    } satisfies InStatement;

    // Filter is a raw sql where clause, so append it to the join
    if (typeof filter === "string") {
      sql.sql += ` AND ${filter}`;
    } else if (typeof filter === "object") {
      // Filter is an in statement.
      if ("sql" in filter) {
        sql.sql += ` AND ${filter.sql}`;
        // Base args spread last so :queryVector/:k cannot be overridden.
        sql.args = {
          ...filter.args,
          ...sql.args,
        };
      } else {
        const builder = new SqliteWhereBuilder(filter);
        const where = builder.buildWhereClause();

        sql.sql += ` AND ${where.sql}`;
        sql.args = {
          ...where.args,
          ...sql.args,
        };
      }
    }

    const results = await this.db.execute(sql);

    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return results.rows.map((row: any) => {
      const metadata = JSON.parse(row.metadata);

      const doc = new Document<Metadata>({
        id: String(row.id),
        metadata,
        pageContent: row.content,
      });

      return [doc, row.distance];
    });
  }

  /**
   * Deletes vectors from the store.
   * @param {Object} params - Delete parameters.
   * @param {string[] | number[]} [params.ids] - The ids of the vectors to delete.
   * @param {boolean} [params.deleteAll] - When true, removes every row from the table.
   * @returns {Promise<void>}
   */
  async delete(params: {
    ids?: string[] | number[];
    deleteAll?: boolean;
  }): Promise<void> {
    if (params.deleteAll) {
      await this.db.execute(`DELETE FROM ${this.table}`);
    } else if (params.ids !== undefined) {
      await this.db.batch(
        params.ids.map((id) => ({
          sql: `DELETE FROM ${this.table} WHERE rowid = :id`,
          args: { id },
        }))
      );
    } else {
      throw new Error(
        `You must provide an "ids" parameter or a "deleteAll" parameter.`
      );
    }
  }

  /**
   * Creates a new LibSQLVectorStore instance from texts.
   * @param {string[]} texts - The texts to add to the store.
   * @param {object[] | object} metadatas - The metadata for the texts.
   * @param {EmbeddingsInterface} embeddings - The embeddings interface to use.
   * @param {LibSQLVectorStoreArgs} options - Configuration options for the vector store (includes the LibSQL client).
   * @returns {Promise<LibSQLVectorStore>} A new LibSQLVectorStore instance.
   */
  static async fromTexts<Metadata extends MetadataDefault = MetadataDefault>(
    texts: string[],
    metadatas: Metadata[] | Metadata,
    embeddings: EmbeddingsInterface,
    options: LibSQLVectorStoreArgs
  ): Promise<LibSQLVectorStore<Metadata>> {
    const docs = texts.map((text, i) => {
      // A single (non-array) metadata object is shared by every text.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;

      return new Document({ pageContent: text, metadata });
    });

    return LibSQLVectorStore.fromDocuments(docs, embeddings, options);
  }

  /**
   * Creates a new LibSQLVectorStore instance from documents.
   * @param {Document[]} docs - The documents to add to the store.
   * @param {EmbeddingsInterface} embeddings - The embeddings interface to use.
   * @param {LibSQLVectorStoreArgs} options - Configuration options for the vector store (includes the LibSQL client).
   * @returns {Promise<LibSQLVectorStore>} A new LibSQLVectorStore instance.
   */
  static async fromDocuments<
    Metadata extends MetadataDefault = MetadataDefault
  >(
    docs: Document<Metadata>[],
    embeddings: EmbeddingsInterface,
    options: LibSQLVectorStoreArgs
  ): Promise<LibSQLVectorStore<Metadata>> {
    const instance = new this<Metadata>(embeddings, options);

    await instance.addDocuments(docs);

    return instance;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/prisma.ts
import { Document } from "@langchain/core/documents";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";

// Sentinels used in the `columns` config to mark which model column is the
// row ID and which holds the document content.
const IdColumnSymbol = Symbol("id");
const ContentColumnSymbol = Symbol("content");

type ColumnSymbol = typeof IdColumnSymbol | typeof ContentColumnSymbol;

declare type Value = unknown;
declare type RawValue = Value | Sql;

// NOTE(review): these are structural declarations that appear to mirror the
// Prisma client's Sql helper, so this module carries no hard dependency on
// @prisma/client — confirm they stay in sync with the Prisma API.
declare class Sql {
  strings: string[];

  constructor(
    rawStrings: ReadonlyArray<string>,
    rawValues: ReadonlyArray<RawValue>
  );
}

type PrismaNamespace = {
  ModelName: Record<string, string>;
  Sql: typeof Sql;
  raw: (sql: string) => Sql;
  join: (
    values: RawValue[],
    separator?: string,
    prefix?: string,
    suffix?: string
  ) => Sql;
  sql: (strings: ReadonlyArray<string>, ...values: RawValue[]) => Sql;
};

type PrismaClient = {
  $queryRaw<T = unknown>(
    query: TemplateStringsArray | Sql,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    ...values: any[]
  ): Promise<T>;
  $executeRaw(
    query: TemplateStringsArray | Sql,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    ...values: any[]
  ): // eslint-disable-next-line @typescript-eslint/no-explicit-any
  Promise<any>;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  $transaction<P extends Promise<any>[]>(arg: [...P]): Promise<any>;
};

type ObjectIntersect<A, B> = {
  [P in keyof A & keyof B]: A[P] | B[P];
};

type ModelColumns<TModel extends Record<string, unknown>> = {
  [K in keyof TModel]?: true | ColumnSymbol;
};

export type PrismaSqlFilter<TModel extends Record<string, unknown>> = {
  [K in keyof TModel]?: {
    equals?: TModel[K];
    in?: TModel[K][];
    notIn?: TModel[K][];
    isNull?: TModel[K];
    isNotNull?: TModel[K];
    like?: TModel[K];
    lt?: TModel[K];
    lte?: TModel[K];
    gt?: TModel[K];
    gte?: TModel[K];
    not?: TModel[K];
  };
};

// Maps filter operator names to their SQL spellings.
const OpMap = {
  equals: "=",
  in: "IN",
  notIn: "NOT IN",
  isNull: "IS NULL",
  isNotNull: "IS NOT NULL",
  like: "LIKE",
  lt: "<",
  lte: "<=",
  gt: ">",
  gte: ">=",
  not: "<>",
};

// Shape of a row returned by similarity queries: the selected model columns
// plus the computed `_distance`.
type SimilarityModel<
  TModel extends Record<string, unknown> = Record<string, unknown>,
  TColumns extends ModelColumns<TModel> = ModelColumns<TModel>
> = Pick<TModel, keyof ObjectIntersect<TModel, TColumns>> & {
  _distance: number | null;
};

type DefaultPrismaVectorStore = PrismaVectorStore<
  Record<string, unknown>,
  string,
  ModelColumns<Record<string, unknown>>,
  PrismaSqlFilter<Record<string, unknown>>
>;

/**
 * A specific implementation of the VectorStore class that is designed to
 * work with Prisma. It provides methods for adding models, documents, and
 * vectors, as well as for performing similarity searches.
 */
export class PrismaVectorStore<
  TModel extends Record<string, unknown>,
  TModelName extends string,
  TSelectModel extends ModelColumns<TModel>,
  TFilterModel extends PrismaSqlFilter<TModel>
> extends VectorStore {
  declare FilterType: TFilterModel;

  protected tableName: string;

  protected vectorColumnName: string;

  protected selectColumns: string[];

  // Default filter applied when a search is run without an explicit one.
  filter?: TFilterModel;

  idColumn: keyof TModel & string;

  contentColumn: keyof TModel & string;

  static IdColumn: typeof IdColumnSymbol = IdColumnSymbol;

  static ContentColumn: typeof ContentColumnSymbol = ContentColumnSymbol;

  protected db: PrismaClient;

  protected Prisma: PrismaNamespace;

  _vectorstoreType(): string {
    return "prisma";
  }

  constructor(
    embeddings: EmbeddingsInterface,
    config: {
      db: PrismaClient;
      prisma: PrismaNamespace;
      tableName: TModelName;
      vectorColumnName: string;
      columns: TSelectModel;
      filter?: TFilterModel;
    }
  ) {
    super(embeddings, {});
    this.Prisma = config.prisma;
    this.db = config.db;

    // Locate the columns the caller tagged with the ID/content sentinels.
    const entries = Object.entries(config.columns);
    const idColumn = entries.find((i) => i[1] === IdColumnSymbol)?.[0];
    const contentColumn = entries.find(
      (i) => i[1] === ContentColumnSymbol
    )?.[0];

    if (idColumn == null) throw new Error("Missing ID column");
    if (contentColumn == null) throw new Error("Missing content column");

    this.idColumn = idColumn;
    this.contentColumn = contentColumn;

    this.tableName = config.tableName;
    this.vectorColumnName = config.vectorColumnName;

    // Every truthy-tagged column is selected back on reads.
    this.selectColumns = entries
      .map(([key, alias]) => (alias && key) || null)
      .filter((x): x is string => !!x);

    if (config.filter) {
      this.filter = config.filter;
    }
  }

  /**
   * Creates a new PrismaVectorStore with the specified model.
   * @param db The PrismaClient instance.
   * @returns An object with create, fromTexts, and fromDocuments methods.
   */
  static withModel<TModel extends Record<string, unknown>>(db: PrismaClient) {
    function create<
      TPrisma extends PrismaNamespace,
      TColumns extends ModelColumns<TModel>,
      TFilters extends PrismaSqlFilter<TModel>
    >(
      embeddings: EmbeddingsInterface,
      config: {
        prisma: TPrisma;
        tableName: keyof TPrisma["ModelName"] & string;
        vectorColumnName: string;
        columns: TColumns;
        filter?: TFilters;
      }
    ) {
      type ModelName = keyof TPrisma["ModelName"] & string;
      return new PrismaVectorStore<TModel, ModelName, TColumns, TFilters>(
        embeddings,
        { ...config, db }
      );
    }

    async function fromTexts<
      TPrisma extends PrismaNamespace,
      TColumns extends ModelColumns<TModel>
    >(
      texts: string[],
      metadatas: TModel[],
      embeddings: EmbeddingsInterface,
      dbConfig: {
        prisma: TPrisma;
        tableName: keyof TPrisma["ModelName"] & string;
        vectorColumnName: string;
        columns: TColumns;
      }
    ) {
      const docs: Document[] = [];
      for (let i = 0; i < texts.length; i += 1) {
        // Tolerates a single shared metadata object as well as an array.
        const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
        const newDoc = new Document({
          pageContent: texts[i],
          metadata,
        });
        docs.push(newDoc);
      }

      return PrismaVectorStore.fromDocuments(docs, embeddings, {
        ...dbConfig,
        db,
      });
    }

    async function fromDocuments<
      TPrisma extends PrismaNamespace,
      TColumns extends ModelColumns<TModel>,
      TFilters extends PrismaSqlFilter<TModel>
    >(
      docs: Document<TModel>[],
      embeddings: EmbeddingsInterface,
      dbConfig: {
        prisma: TPrisma;
        tableName: keyof TPrisma["ModelName"] & string;
        vectorColumnName: string;
        columns: TColumns;
      }
    ) {
      type ModelName = keyof TPrisma["ModelName"] & string;
      const instance = new PrismaVectorStore<
        TModel,
        ModelName,
        TColumns,
        TFilters
      >(embeddings, { ...dbConfig, db });
      await instance.addDocuments(docs);
      return instance;
    }

    return { create, fromTexts, fromDocuments };
  }

  /**
   * Adds the specified models to the store.
   * @param models The models to add.
   * @returns A promise that resolves when the models have been added.
   */
  async addModels(models: TModel[]) {
    return this.addDocuments(
      models.map((metadata) => {
        const pageContent = metadata[this.contentColumn];
        if (typeof pageContent !== "string")
          throw new Error("Content column must be a string");
        return new Document({ pageContent, metadata });
      })
    );
  }

  /**
   * Adds the specified documents to the store.
   * @param documents The documents to add.
   * @returns A promise that resolves when the documents have been added.
   */
  async addDocuments(documents: Document<TModel>[]) {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents
    );
  }

  /**
   * Adds the specified vectors to the store.
   *
   * NOTE(review): rows are UPDATEd (matched via the metadata's id column),
   * not INSERTed — the records must already exist in the table. Confirm
   * callers create the rows first.
   * @param vectors The vectors to add.
   * @param documents The documents associated with the vectors.
   * @returns A promise that resolves when the vectors have been added.
   */
  async addVectors(vectors: number[][], documents: Document<TModel>[]) {
    // table name, column name cannot be parametrised
    // these fields are thus not escaped by Prisma and can be dangerous if user input is used
    const idColumnRaw = this.Prisma.raw(`"${this.idColumn}"`);
    const tableNameRaw = this.Prisma.raw(`"${this.tableName}"`);
    const vectorColumnRaw = this.Prisma.raw(`"${this.vectorColumnName}"`);

    await this.db.$transaction(
      vectors.map((vector, idx) =>
        this.db.$executeRaw(
          this.Prisma.sql`UPDATE ${tableNameRaw} SET ${vectorColumnRaw} = ${`[${vector.join(
            ","
          )}]`}::vector WHERE ${idColumnRaw} = ${
            documents[idx].metadata[this.idColumn]
          } `
        )
      )
    );
  }

  /**
   * Performs a similarity search with the specified query.
   * @param query The query to use for the similarity search.
   * @param k The number of results to return.
   * @param filter The filter to apply to the results.
   * @returns A promise that resolves with the search results.
   */
  async similaritySearch(
    query: string,
    k = 4,
    filter: this["FilterType"] | undefined = undefined
  ): Promise<Document<SimilarityModel<TModel, TSelectModel>>[]> {
    const results = await this.similaritySearchVectorWithScore(
      await this.embeddings.embedQuery(query),
      k,
      filter
    );

    return results.map((result) => result[0]);
  }

  /**
   * Performs a similarity search with the specified query and returns the
   * results along with their scores.
   * @param query The query to use for the similarity search.
   * @param k The number of results to return.
   * @param filter The filter to apply to the results.
   * @returns A promise that resolves with the search results and their scores.
   */
  async similaritySearchWithScore(
    query: string,
    k?: number,
    filter?: this["FilterType"]
  ) {
    return super.similaritySearchWithScore(query, k, filter);
  }

  /**
   * Performs a similarity search with the specified vector and returns the
   * results along with their scores.
   * @param query The vector to use for the similarity search.
   * @param k The number of results to return.
   * @param filter The filter to apply to the results.
   * @returns A promise that resolves with the search results and their scores.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: this["FilterType"]
  ): Promise<[Document<SimilarityModel<TModel, TSelectModel>>, number][]> {
    // table name, column names cannot be parametrised
    // these fields are thus not escaped by Prisma and can be dangerous if user input is used
    const vectorColumnRaw = this.Prisma.raw(`"${this.vectorColumnName}"`);
    const tableNameRaw = this.Prisma.raw(`"${this.tableName}"`);
    const selectRaw = this.Prisma.raw(
      this.selectColumns.map((x) => `"${x}"`).join(", ")
    );

    const vector = `[${query.join(",")}]`;
    const articles = await this.db.$queryRaw<
      Array<SimilarityModel<TModel, TSelectModel>>
    >(
      this.Prisma.join(
        [
          this.Prisma.sql` SELECT ${selectRaw}, ${vectorColumnRaw} <=> ${vector}::vector as "_distance" FROM ${tableNameRaw} `,
          // Explicit filter wins; otherwise the constructor's default filter
          // applies. A null fragment is dropped by the filter() below.
          this.buildSqlFilterStr(filter ?? this.filter),
          this.Prisma.sql` ORDER BY "_distance" ASC LIMIT ${k}; `,
        ].filter((x) => x != null),
        ""
      )
    );

    const results: [Document<SimilarityModel<TModel, TSelectModel>>, number][] =
      [];
    for (const article of articles) {
      // Rows with no vector set (null distance) or null content are skipped.
      if (article._distance != null && article[this.contentColumn] != null) {
        results.push([
          new Document({
            pageContent: article[this.contentColumn] as string,
            metadata: article,
          }),
          article._distance,
        ]);
      }
    }

    return results;
  }

  /**
   * Builds a Prisma SQL WHERE clause from the given filter, or returns
   * `null` when no filter is provided (callers drop null fragments).
   */
  buildSqlFilterStr(filter?: this["FilterType"]) {
    if (filter == null) return null;
    return this.Prisma.join(
      Object.entries(filter).flatMap(([key, ops]) =>
        Object.entries(ops).map(([opName, value]) => {
          // column name, operators cannot be parametrised
          // these fields are thus not escaped by Prisma and can be dangerous if user input is used
          const opNameKey = opName as keyof typeof OpMap;
          const colRaw = this.Prisma.raw(`"${key}"`);
          const opRaw = this.Prisma.raw(OpMap[opNameKey]);

          switch (OpMap[opNameKey]) {
            case OpMap.notIn:
            case OpMap.in: {
              if (!Array.isArray(value)) {
                throw new Error(
                  `Invalid filter: IN operator requires an array. Received: ${JSON.stringify(
                    value,
                    null,
                    2
                  )}`
                );
              }
              if (value.length === 0) {
                const isInOperator = OpMap[opNameKey] === OpMap.in;
                // For empty arrays:
                // - IN () should return FALSE (nothing can be in an empty set)
                // - NOT IN () should return TRUE (everything is not in an empty set)
                return this.Prisma.sql`${!isInOperator}`;
              }
              return this.Prisma.sql`${colRaw} ${opRaw} (${this.Prisma.join(
                value
              )})`;
            }
            case OpMap.isNull:
            case OpMap.isNotNull:
              // Unary operators: no right-hand value.
              return this.Prisma.sql`${colRaw} ${opRaw}`;
            default:
              return this.Prisma.sql`${colRaw} ${opRaw} ${value}`;
          }
        })
      ),
      " AND ",
      " WHERE "
    );
  }

  /**
   * Creates a new PrismaVectorStore from the specified texts.
   * @param texts The texts to use to create the store.
   * @param metadatas The metadata for the texts.
   * @param embeddings The embeddings to use.
   * @param dbConfig The database configuration.
   * @returns A promise that resolves with the new PrismaVectorStore.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[],
    embeddings: EmbeddingsInterface,
    dbConfig: {
      db: PrismaClient;
      prisma: PrismaNamespace;
      tableName: string;
      vectorColumnName: string;
      columns: ModelColumns<Record<string, unknown>>;
    }
  ): Promise<DefaultPrismaVectorStore> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // Tolerates a single shared metadata object as well as an array.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }

    return PrismaVectorStore.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * Creates a new PrismaVectorStore from the specified documents.
   * @param docs The documents to use to create the store.
   * @param embeddings The embeddings to use.
   * @param dbConfig The database configuration.
   * @returns A promise that resolves with the new PrismaVectorStore.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig: {
      db: PrismaClient;
      prisma: PrismaNamespace;
      tableName: string;
      vectorColumnName: string;
      columns: ModelColumns<Record<string, unknown>>;
    }
  ): Promise<DefaultPrismaVectorStore> {
    const instance = new PrismaVectorStore(embeddings, dbConfig);
    await instance.addDocuments(docs);
    return instance;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/cassandra.ts
/* eslint-disable prefer-template */
import { v4 as uuidv4 } from "uuid";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import {
  VectorStore,
  MaxMarginalRelevanceSearchOptions,
} from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";
import { maximalMarginalRelevance } from "@langchain/core/utils/math";
import {
  CassandraClientArgs,
  Column,
  Filter,
  Index,
  WhereClause,
  CassandraTableArgs,
  CassandraTable,
} from "../utils/cassandra.js";

/**
 * @deprecated
 * Import from "../utils/cassandra.js" instead.
 */
export { Column, Filter, Index, WhereClause };

// Distance functions supported by the vector index; lower-cased into the
// index's 'similarity_function' option when the store creates a default index.
export type SupportedVectorTypes = "cosine" | "dot_product" | "euclidean";

/**
 * Configuration for a CassandraStore: client connection arguments plus the
 * table layout (keys, columns, indices) and embedding settings.
 */
export interface CassandraLibArgs
  extends CassandraClientArgs,
    Omit<CassandraTableArgs, "nonKeyColumns" | "keyspace"> {
  // keyspace is optional on CassandraClientArgs, but mandatory on CassandraTableArgs; we make it mandatory here
  keyspace: string;
  // Distance function for the vector index; defaults to "cosine" when omitted.
  vectorType?: SupportedVectorTypes;
  // Dimension of the embedding vectors; used to declare the VECTOR<FLOAT,dim> column.
  dimensions: number;
  // Extra columns surfaced as document metadata (merged/de-duped with nonKeyColumns).
  metadataColumns?: Column[];
  nonKeyColumns?: Column | Column[];
}

/**
 * Class for interacting with the Cassandra database. It extends the
 * VectorStore class and provides methods for adding vectors and
 * documents, searching for similar vectors, and creating instances from
 * texts or documents.
 */
export class CassandraStore extends VectorStore {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  declare FilterType: WhereClause;

  private readonly table: CassandraTable;

  // Name of the auto-generated primary-key column used when no primaryKey is supplied.
  private readonly idColumnAutoName = "id";

  private readonly idColumnAutoGenerated: boolean;

  private readonly vectorColumnName = "vector";

  private readonly vectorColumn: Column;

  private readonly textColumnName = "text";

  private readonly textColumn: Column;

  private readonly metadataColumnDefaultName = "metadata";

  private readonly metadataColumns: Column[];

  // Pseudo-column whose name is a CQL similarity function call with a `?` bind
  // placeholder; the query vector is bound into it at search time.
  private readonly similarityColumn: Column;

  // Alias under which the raw embedding is returned (and later stripped) during MMR.
  private readonly embeddingColumnAlias = "embedding";

  _vectorstoreType(): string {
    return "cassandra";
  }

  /**
   * Normalizes and completes the user-supplied arguments: fills in a default
   * primary key, default text/vector/metadata columns, and default indices,
   * returning a config that always has concrete column lists.
   * @param args Raw constructor arguments.
   * @returns The same args with primaryKey/nonKeyColumns/metadataColumns/indices resolved.
   * @throws Error when `table` or `dimensions` is missing.
   */
  private _cleanArgs(
    args: CassandraLibArgs
  ): CassandraLibArgs & { metadataColumns: Column[]; nonKeyColumns: Column[] } {
    const {
      table,
      dimensions,
      primaryKey,
      nonKeyColumns,
      indices,
      metadataColumns,
      vectorType = "cosine",
    } = args;

    if (!table || !dimensions) {
      throw new Error("Missing required arguments");
    }

    // Utility function to ensure the argument is treated as an array
    function _toArray<T>(value: T | T[]): T[] {
      return Array.isArray(value) ? value : [value];
    }

    const indicesArg = indices || [];

    // Use the primary key if provided, else default to a single auto-generated UUID column
    let primaryKeyArg: Column[];
    if (primaryKey) {
      primaryKeyArg = _toArray(primaryKey);
    } else {
      primaryKeyArg = [
        { name: this.idColumnAutoName, type: "uuid", partition: true },
      ];
    }

    // The combined nonKeyColumns and metadataColumns, de-duped by name
    const combinedColumns = [
      ..._toArray(nonKeyColumns || []),
      ..._toArray(metadataColumns || []),
    ];
    const deduplicatedColumns = combinedColumns.filter(
      (col, index, self) =>
        self.findIndex((c) => c.name === col.name) === index
    );
    const nonKeyColumnsArg: Column[] = [...deduplicatedColumns];

    // If no metadata columns are specified, add a default metadata column consistent with Langchain Python
    if (nonKeyColumnsArg.length === 0) {
      nonKeyColumnsArg.push({
        name: this.metadataColumnDefaultName,
        type: "map<text, text>",
      });
      // Index both keys and entries of the map so metadata can be filtered on.
      indicesArg.push({
        name: `idx_${this.metadataColumnDefaultName}_${table}_keys`,
        value: `(keys(${this.metadataColumnDefaultName}))`,
      });
      indicesArg.push({
        name: `idx_${this.metadataColumnDefaultName}_${table}_entries`,
        value: `(entries(${this.metadataColumnDefaultName}))`,
      });
    }

    const addDefaultNonKeyColumnIfNeeded = (defaultColumn: Column) => {
      const column = nonKeyColumnsArg.find(
        (col) => col.name === defaultColumn.name
      );
      if (!column) {
        nonKeyColumnsArg.push(defaultColumn);
      }
    };

    addDefaultNonKeyColumnIfNeeded({ name: this.textColumnName, type: "text" });
    addDefaultNonKeyColumnIfNeeded({
      name: this.vectorColumnName,
      type: `VECTOR<FLOAT,${dimensions}>`,
      alias: this.embeddingColumnAlias,
    });

    // If no index is specified for the vector column, add a default index
    if (
      !indicesArg.some((index) =>
        new RegExp(
          `\\(\\s*${this.vectorColumnName.toLowerCase()}\\s*\\)`
        ).test(index.value.toLowerCase())
      )
    ) {
      indicesArg.push({
        name: `idx_${this.vectorColumnName}_${table}`,
        value: `(${this.vectorColumnName})`,
        options: `{'similarity_function': '${vectorType.toLowerCase()}'}`,
      });
    }

    // Metadata the user will see excludes vector column and text column
    const metadataColumnsArg = [...primaryKeyArg, ...nonKeyColumnsArg].filter(
      (column) =>
        column.name !== this.vectorColumnName &&
        column.name !== this.textColumnName
    );

    return {
      ...args,
      vectorType,
      primaryKey: primaryKeyArg,
      nonKeyColumns: nonKeyColumnsArg,
      metadataColumns: metadataColumnsArg,
      indices: indicesArg,
    };
  }

  /**
   * Finds a column by name in a Column or Column[].
   * @throws Error when the column is not present.
   */
  private _getColumnByName(
    columns: Column | Column[],
    columnName: string
  ): Column {
    const columnsArray = Array.isArray(columns) ? columns : [columns];
    const column = columnsArray.find((col) => col.name === columnName);
    if (!column) {
      throw new Error(`Column ${columnName} not found`);
    }
    return column;
  }

  constructor(embeddings: EmbeddingsInterface, args: CassandraLibArgs) {
    super(embeddings, args);
    const cleanedArgs = this._cleanArgs(args);

    // This check here to help the compiler understand that nonKeyColumns will always
    // have values after the _cleanArgs call. It is the cleanest way to handle the fact
    // that the compiler is not able to make this determination, no matter how hard we try!
    if (!cleanedArgs.nonKeyColumns || cleanedArgs.nonKeyColumns.length === 0) {
      throw new Error("No non-key columns provided");
    }

    this.vectorColumn = this._getColumnByName(
      cleanedArgs.nonKeyColumns,
      this.vectorColumnName
    );
    this.textColumn = this._getColumnByName(
      cleanedArgs.nonKeyColumns,
      this.textColumnName
    );

    this.similarityColumn = {
      name: `similarity_${cleanedArgs.vectorType}(${this.vectorColumnName},?)`,
      alias: "similarity_score",
      type: "",
    };

    // IDs are auto-generated only when the caller did not define a primary key.
    this.idColumnAutoGenerated = !args.primaryKey;
    this.metadataColumns = cleanedArgs.metadataColumns;

    this.table = new CassandraTable(cleanedArgs);
  }

  /**
   * Method to save vectors to the Cassandra database.
   * @param vectors Vectors to save.
   * @param documents The documents associated with the vectors.
   * @returns Promise that resolves when the vectors have been added.
   */
  async addVectors(vectors: number[][], documents: Document[]): Promise<void> {
    if (vectors.length === 0) {
      return;
    }

    // Prepare the values for upsert
    const values = vectors.map((vector, index) => {
      const document = documents[index];
      const docMetadata = document.metadata || {};

      // If idColumnAutoGenerated is true and ID is not provided, generate a UUID
      if (
        this.idColumnAutoGenerated &&
        (docMetadata[this.idColumnAutoName] === undefined ||
          docMetadata[this.idColumnAutoName] === "")
      ) {
        docMetadata[this.idColumnAutoName] = uuidv4();
      }

      // Construct the row
      const row = [];

      // Add values for each metadata column
      // NOTE(review): `|| null` coerces falsy metadata values (0, "", false) to
      // null as well as missing ones — confirm this is intended.
      this.metadataColumns.forEach((col) => {
        row.push(docMetadata[col.name] || null);
      });

      // Add the text content and vector
      row.push(document.pageContent);
      row.push(new Float32Array(vector));

      return row;
    });

    const columns = [
      ...this.metadataColumns,
      { name: this.textColumnName, type: "" },
      { name: this.vectorColumnName, type: "" },
    ];

    return this.table.upsert(values, columns);
  }

  // Exposes the underlying table wrapper for direct access.
  getCassandraTable(): CassandraTable {
    return this.table;
  }

  /**
   * Method to add documents to the Cassandra database.
   * @param documents The documents to add.
   * @returns Promise that resolves when the documents have been added.
   */
  async addDocuments(documents: Document[]): Promise<void> {
    return this.addVectors(
      await this.embeddings.embedDocuments(documents.map((d) => d.pageContent)),
      documents
    );
  }

  /**
   * Helper method to search for vectors that are similar to a given query vector.
   * @param query The query vector.
   * @param k The number of similar Documents to return.
   * @param filter Optional filter to be applied as a WHERE clause.
   * @param includeEmbedding Whether to include the embedding vectors in the results.
   * @returns Promise that resolves with an array of tuples, each containing a Document and a score.
   */
  async search(
    query: number[],
    k: number,
    filter?: WhereClause,
    includeEmbedding?: boolean
  ): Promise<[Document, number][]> {
    const vectorAsFloat32Array = new Float32Array(query);

    // Bind the query vector into the `?` placeholder of the similarity column.
    const similarityColumnWithBinds = {
      ...this.similarityColumn,
      binds: [vectorAsFloat32Array],
    };

    const queryCols = [
      ...this.metadataColumns,
      this.textColumn,
      similarityColumnWithBinds,
    ];

    if (includeEmbedding) {
      queryCols.push(this.vectorColumn);
    }

    // Approximate-nearest-neighbor ordering on the vector column.
    const orderBy: Filter = {
      name: this.vectorColumnName,
      operator: "ANN OF",
      value: [vectorAsFloat32Array],
    };

    const queryResultSet = await this.table.select(
      queryCols,
      filter,
      [orderBy],
      k
    );

    return queryResultSet?.rows.map((row) => {
      const textContent = row[this.textColumnName];
      // Strip the text and score, plus any null-valued fields, so that only
      // meaningful metadata is surfaced on the Document.
      const sanitizedRow = { ...row };
      delete sanitizedRow[this.textColumnName];
      delete sanitizedRow.similarity_score;

      Object.keys(sanitizedRow).forEach((key) => {
        if (sanitizedRow[key] === null) {
          delete sanitizedRow[key];
        }
      });

      return [
        new Document({ pageContent: textContent, metadata: sanitizedRow }),
        row.similarity_score,
      ];
    });
  }

  /**
   * Method to search for vectors that are similar to a given query vector.
   * @param query The query vector.
   * @param k The number of similar Documents to return.
   * @param filter Optional filter to be applied as a WHERE clause.
   * @returns Promise that resolves with an array of tuples, each containing a Document and a score.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: WhereClause
  ): Promise<[Document, number][]> {
    return this.search(query, k, filter, false);
  }

  /**
   * Method to search for vectors that are similar to a given query vector, but with
   * the results selected using the maximal marginal relevance.
   * @param query The query string.
   * @param options.k The number of similar Documents to return.
   * @param options.fetchK=4*k The number of records to fetch before passing to the MMR algorithm.
   * @param options.lambda=0.5 The degree of diversity among the results between 0 (maximum diversity) and 1 (minimum diversity).
   * @param options.filter Optional filter to be applied as a WHERE clause.
   * @returns List of documents selected by maximal marginal relevance.
   */
  async maxMarginalRelevanceSearch(
    query: string,
    options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>
  ): Promise<Document[]> {
    const { k, fetchK = 4 * k, lambda = 0.5, filter } = options;
    const queryEmbedding = await this.embeddings.embedQuery(query);

    // Fetch with embeddings included so MMR can re-rank on them.
    const queryResults = await this.search(
      queryEmbedding,
      fetchK,
      filter,
      true
    );

    const embeddingList = queryResults.map(
      (doc) => doc[0].metadata[this.embeddingColumnAlias]
    );

    const mmrIndexes = maximalMarginalRelevance(
      queryEmbedding,
      embeddingList,
      lambda,
      k
    );

    return mmrIndexes.map((idx) => {
      const doc = queryResults[idx][0];
      // Remove the raw embedding before returning the document to the caller.
      delete doc.metadata[this.embeddingColumnAlias];
      return doc;
    });
  }

  /**
   * Static method to create an instance of CassandraStore from texts.
   * @param texts The texts to use.
   * @param metadatas The metadata associated with the texts.
   * @param embeddings The embeddings to use.
   * @param args The arguments for the CassandraStore.
   * @returns Promise that resolves with a new instance of CassandraStore.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object | object[],
    embeddings: EmbeddingsInterface,
    args: CassandraLibArgs
  ): Promise<CassandraStore> {
    const docs: Document[] = [];

    for (let index = 0; index < texts.length; index += 1) {
      // A single (non-array) metadata object is shared by all documents.
      const metadata = Array.isArray(metadatas) ? metadatas[index] : metadatas;
      const doc = new Document({
        pageContent: texts[index],
        metadata,
      });
      docs.push(doc);
    }

    return CassandraStore.fromDocuments(docs, embeddings, args);
  }

  /**
   * Static method to create an instance of CassandraStore from documents.
   * @param docs The documents to use.
   * @param embeddings The embeddings to use.
   * @param args The arguments for the CassandraStore.
   * @returns Promise that resolves with a new instance of CassandraStore.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    args: CassandraLibArgs
  ): Promise<CassandraStore> {
    const instance = new this(embeddings, args);
    await instance.addDocuments(docs);
    return instance;
  }

  /**
   * Static method to create an instance of CassandraStore from an existing
   * index.
   * @param embeddings The embeddings to use.
   * @param args The arguments for the CassandraStore.
   * @returns Promise that resolves with a new instance of CassandraStore.
   */
  static async fromExistingIndex(
    embeddings: EmbeddingsInterface,
    args: CassandraLibArgs
  ): Promise<CassandraStore> {
    const instance = new this(embeddings, args);
    return instance;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/vectara.ts
import * as uuid from "uuid";
import { Document } from "@langchain/core/documents";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { VectorStore } from "@langchain/core/vectorstores";
import {
  BaseCallbackConfig,
  Callbacks,
} from "@langchain/core/callbacks/manager";
import { FakeEmbeddings } from "@langchain/core/utils/testing";

/**
 * Interface for the arguments required to initialize a VectaraStore
 * instance.
 */
export interface VectaraLibArgs {
  customerId: number;
  corpusId: number | number[];
  apiKey: string;
  verbose?: boolean;
  // Value sent as the X-Source header; defaults to "langchainjs".
  source?: string;
}

/**
 * Interface for the headers required for Vectara API calls.
 */
interface VectaraCallHeader {
  headers: {
    "x-api-key": string;
    "Content-Type": string;
    "customer-id": string;
    "X-Source": string;
  };
}

/**
 * Interface for the file objects to be uploaded to Vectara.
 */
export interface VectaraFile {
  // The contents of the file to be uploaded.
  blob: Blob;
  // The name of the file to be uploaded.
  fileName: string;
}

/**
 * Interface for the context configuration used in Vectara API calls.
 */
export interface VectaraContextConfig {
  // The amount of context before. Ignored if sentences_before is set.
  charsBefore?: number;
  // The amount of context after. Ignored if sentences_after is set.
  charsAfter?: number;
  // The amount of context before, in sentences.
  sentencesBefore?: number;
  // The amount of context after, in sentences.
  sentencesAfter?: number;
  // The tag that wraps the snippet at the start.
  startTag?: string;
  // The tag that wraps the snippet at the end.
  endTag?: string;
}

/**
 * Maximal-marginal-relevance reranking configuration for Vectara queries.
 */
export interface MMRConfig {
  // When true, the MMR reranker is applied to the query results.
  enabled?: boolean;
  // Number of results to fetch for reranking (replaces k as numResults when enabled).
  mmrTopK?: number;
  // Diversity bias passed to the reranker.
  diversityBias?: number;
}

export interface VectaraSummary {
  // Whether to enable summarization.
  enabled: boolean;
  // The name of the summarizer+prompt combination to use for summarization.
  summarizerPromptName?: string;
  // Maximum number of results to summarize.
  maxSummarizedResults: number;
  // ISO 639-1 or ISO 639-3 language code for the response, or "auto" to indicate that
  // the auto-detected language of the incoming query should be used.
  responseLang: string;
}

// VectaraFilter holds all the arguments for result retrieval by Vectara
// It's not really a filter, but a collection of arguments for the Vectara API
// However, it's been named "XXXFilter" in other places, so we keep the name here for consistency.
export interface VectaraFilter extends BaseCallbackConfig {
  // The start position in the result set
  start?: number;
  // Example of a vectara filter string can be: "doc.rating > 3.0 and part.lang = 'deu'"
  // See https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details.
  filter?: string;
  // Improve retrieval accuracy using Hybrid search, by adjusting the value of lambda (0...1)
  // between neural search and keyword-based search factors. Values between 0.01 and 0.2 tend to work well.
  // see https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching for more details.
  lambda?: number;
  // See Vectara Search API docs for more details on the following options: https://docs.vectara.com/docs/api-reference/search-apis/search
  contextConfig?: VectaraContextConfig;
  mmrConfig?: MMRConfig;
}

// Defaults applied when no filter object is supplied to the search methods.
export const DEFAULT_FILTER: VectaraFilter = {
  start: 0,
  filter: "",
  lambda: 0.0,
  contextConfig: {
    sentencesBefore: 2,
    sentencesAfter: 2,
    startTag: "<b>",
    endTag: "</b>",
  },
  mmrConfig: {
    enabled: false,
    mmrTopK: 0,
    diversityBias: 0.0,
  },
};

// Shape of a query result: matched documents, their scores, and an optional summary text.
interface SummaryResult {
  documents: Document[];
  scores: number[];
  summary: string;
}

export interface VectaraRetrieverInput {
  vectara: VectaraStore;
  topK: number;
  summaryConfig?: VectaraSummary;
  callbacks?: Callbacks;
  tags?: string[];
  metadata?: Record<string, unknown>;
  verbose?: boolean;
}

/**
 * Class for interacting with the Vectara API. Extends the VectorStore
 * class.
 */
export class VectaraStore extends VectorStore {
  get lc_secrets(): { [key: string]: string } {
    return {
      apiKey: "VECTARA_API_KEY",
      corpusId: "VECTARA_CORPUS_ID",
      customerId: "VECTARA_CUSTOMER_ID",
    };
  }

  get lc_aliases(): { [key: string]: string } {
    return {
      apiKey: "vectara_api_key",
      corpusId: "vectara_corpus_id",
      customerId: "vectara_customer_id",
    };
  }

  declare FilterType: VectaraFilter;

  private apiEndpoint = "api.vectara.io";

  private apiKey: string;

  private corpusId: number[];

  private customerId: number;

  private verbose: boolean;

  private source: string;

  // Per-request timeout; every fetch is aborted after this many seconds.
  private vectaraApiTimeoutSeconds = 60;

  _vectorstoreType(): string {
    return "vectara";
  }

  constructor(args: VectaraLibArgs) {
    // Vectara doesn't need embeddings, but we need to pass something to the parent constructor
    // The embeddings are abstracted out from the user in Vectara.
    super(new FakeEmbeddings(), args);

    const apiKey = args.apiKey ?? getEnvironmentVariable("VECTARA_API_KEY");
    if (!apiKey) {
      throw new Error("Vectara api key is not provided.");
    }
    this.apiKey = apiKey;
    this.source = args.source ?? "langchainjs";

    // The env var may hold a comma-separated list of corpus ids.
    const corpusId =
      args.corpusId ??
      getEnvironmentVariable("VECTARA_CORPUS_ID")
        ?.split(",")
        .map((id) => {
          const num = Number(id);
          if (Number.isNaN(num))
            throw new Error("Vectara corpus id is not a number.");
          return num;
        });
    if (!corpusId) {
      throw new Error("Vectara corpus id is not provided.");
    }

    if (typeof corpusId === "number") {
      this.corpusId = [corpusId];
    } else {
      if (corpusId.length === 0)
        throw new Error("Vectara corpus id is not provided.");
      this.corpusId = corpusId;
    }

    // NOTE(review): when customerId comes from the environment variable it is a
    // string, yet the field is typed number — confirm downstream usage tolerates this.
    const customerId =
      args.customerId ?? getEnvironmentVariable("VECTARA_CUSTOMER_ID");
    if (!customerId) {
      throw new Error("Vectara customer id is not provided.");
    }
    this.customerId = customerId;

    this.verbose = args.verbose ?? false;
  }

  /**
   * Returns a header for Vectara API calls.
   * @returns A Promise that resolves to a VectaraCallHeader object.
   */
  async getJsonHeader(): Promise<VectaraCallHeader> {
    return {
      headers: {
        "x-api-key": this.apiKey,
        "Content-Type": "application/json",
        "customer-id": this.customerId.toString(),
        "X-Source": this.source,
      },
    };
  }

  /**
   * Throws an error, as this method is not implemented. Use addDocuments
   * instead.
   * @param _vectors Not used.
   * @param _documents Not used.
   * @returns Does not return a value.
   */
  async addVectors(
    _vectors: number[][],
    _documents: Document[]
  ): Promise<void> {
    throw new Error(
      "Method not implemented. Please call addDocuments instead."
    );
  }

  /**
   * Method to delete data from the Vectara corpus. Documents are only deleted
   * from the first configured corpus id.
   * @param ids an array of document IDs to be deleted
   * @returns Promise that resolves when the deletion is complete.
   * @throws Error when no ids are given or a deletion request fails.
   */
  async deleteDocuments(ids: string[]): Promise<void> {
    if (ids && ids.length > 0) {
      const headers = await this.getJsonHeader();
      for (const id of ids) {
        const data = {
          customer_id: this.customerId,
          corpus_id: this.corpusId[0],
          document_id: id,
        };

        try {
          // Abort the request if it exceeds the configured timeout.
          const controller = new AbortController();
          const timeout = setTimeout(
            () => controller.abort(),
            this.vectaraApiTimeoutSeconds * 1000
          );
          const response = await fetch(
            `https://${this.apiEndpoint}/v1/delete-doc`,
            {
              method: "POST",
              headers: headers?.headers,
              body: JSON.stringify(data),
              signal: controller.signal,
            }
          );
          clearTimeout(timeout);
          if (response.status !== 200) {
            throw new Error(
              `Vectara API returned status code ${response.status} when deleting document ${id}`
            );
          }
        } catch (e) {
          const error = new Error(`Error ${(e as Error).message}`);
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          (error as any).code = 500;
          throw error;
        }
      }
    } else {
      throw new Error(`no "ids" specified for deletion`);
    }
  }

  /**
   * Adds documents to the Vectara store.
   * @param documents An array of Document objects to add to the Vectara store.
   * @returns A Promise that resolves to an array of document IDs indexed in Vectara.
   */
  async addDocuments(documents: Document[]): Promise<string[]> {
    if (this.corpusId.length > 1)
      throw new Error("addDocuments does not support multiple corpus ids");

    const headers = await this.getJsonHeader();
    const doc_ids: string[] = [];
    let countAdded = 0;
    for (const document of documents) {
      // Reuse a caller-supplied document_id from metadata, otherwise generate one.
      const doc_id: string = document.metadata?.document_id ?? uuid.v4();
      const data = {
        customer_id: this.customerId,
        corpus_id: this.corpusId[0],
        document: {
          document_id: doc_id,
          title: document.metadata?.title ?? "",
          metadata_json: JSON.stringify(document.metadata ?? {}),
          section: [
            {
              text: document.pageContent,
            },
          ],
        },
      };

      try {
        const controller = new AbortController();
        const timeout = setTimeout(
          () => controller.abort(),
          this.vectaraApiTimeoutSeconds * 1000
        );
        const response = await fetch(`https://${this.apiEndpoint}/v1/index`, {
          method: "POST",
          headers: headers?.headers,
          body: JSON.stringify(data),
          signal: controller.signal,
        });
        clearTimeout(timeout);
        const result = await response.json();
        // ALREADY_EXISTS is treated as success for idempotent re-indexing.
        if (
          result.status?.code !== "OK" &&
          result.status?.code !== "ALREADY_EXISTS"
        ) {
          const error = new Error(
            `Vectara API returned status code ${
              result.status?.code
            }: ${JSON.stringify(result.message)}`
          );
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          (error as any).code = 500;
          throw error;
        } else {
          countAdded += 1;
          doc_ids.push(doc_id);
        }
      } catch (e) {
        const error = new Error(
          `Error ${(e as Error).message} while adding document`
        );
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (error as any).code = 500;
        throw error;
      }
    }
    if (this.verbose) {
      console.log(`Added ${countAdded} documents to Vectara`);
    }

    return doc_ids;
  }

  /**
   * Vectara provides a way to add documents directly via their API. This API handles
   * pre-processing and chunking internally in an optimal manner. This method is a wrapper
   * to utilize that API within LangChain.
   *
   * @param files An array of VectaraFile objects representing the files and their respective file names to be uploaded to Vectara.
   * @param metadatas Optional. Metadata looked up positionally per file (indexed
   *   by the file's position in `files`).
   * @returns A Promise that resolves to the document ids of the uploaded files.
   */
  async addFiles(
    files: VectaraFile[],
    metadatas: Record<string, unknown> | undefined = undefined
  ) {
    if (this.corpusId.length > 1)
      throw new Error("addFiles does not support multiple corpus ids");

    const doc_ids: string[] = [];

    for (const [index, file] of files.entries()) {
      // NOTE(review): metadatas is indexed by number here despite its
      // Record<string, unknown> type — confirm callers pass an array-like value.
      const md = metadatas ? metadatas[index] : {};

      const data = new FormData();
      data.append("file", file.blob, file.fileName);
      data.append("doc-metadata", JSON.stringify(md));

      const response = await fetch(
        `https://api.vectara.io/v1/upload?c=${this.customerId}&o=${this.corpusId[0]}&d=true`,
        {
          method: "POST",
          headers: {
            "x-api-key": this.apiKey,
            "X-Source": this.source,
          },
          body: data,
        }
      );

      const { status } = response;
      if (status === 409) {
        throw new Error(`File at index ${index} already exists in Vectara`);
      } else if (status !== 200) {
        throw new Error(`Vectara API returned status code ${status}`);
      } else {
        const result = await response.json();
        const doc_id = result.document.documentId;
        doc_ids.push(doc_id);
      }
    }

    if (this.verbose) {
      console.log(`Uploaded ${files.length} files to Vectara`);
    }

    return doc_ids;
  }

  /**
   * Performs a Vectara API call based on the arguments provided.
   * @param query The query string for the similarity search.
   * @param k The number of results to return.
   * @param vectaraFilterObject A VectaraFilter object to refine the search results.
   * @param summary Optional summarization configuration.
   * @returns A Promise that resolves to a SummaryResult (documents, scores, summary text).
   */
  async vectaraQuery(
    query: string,
    k: number,
    vectaraFilterObject: VectaraFilter,
    summary: VectaraSummary = {
      enabled: false,
      maxSummarizedResults: 0,
      responseLang: "eng",
    }
  ): Promise<SummaryResult> {
    const headers = await this.getJsonHeader();
    const { start, filter, lambda, contextConfig, mmrConfig } =
      vectaraFilterObject;

    // One corpus key per configured corpus; queries may span several corpora.
    const corpusKeys = this.corpusId.map((corpusId) => ({
      customerId: this.customerId,
      corpusId,
      metadataFilter: filter,
      lexicalInterpolationConfig: { lambda },
    }));

    const data = {
      query: [
        {
          query,
          start,
          // When MMR is enabled, fetch mmrTopK results for the reranker instead of k.
          numResults: mmrConfig?.enabled ? mmrConfig.mmrTopK : k,
          contextConfig,
          ...(mmrConfig?.enabled
            ? {
                rerankingConfig: {
                  rerankerId: 272725718,
                  mmrConfig: { diversityBias: mmrConfig.diversityBias },
                },
              }
            : {}),
          corpusKey: corpusKeys,
          ...(summary?.enabled ? { summary: [summary] } : {}),
        },
      ],
    };

    const controller = new AbortController();
    const timeout = setTimeout(
      () => controller.abort(),
      this.vectaraApiTimeoutSeconds * 1000
    );
    const response = await fetch(`https://${this.apiEndpoint}/v1/query`, {
      method: "POST",
      headers: headers?.headers,
      body: JSON.stringify(data),
      signal: controller.signal,
    });
    clearTimeout(timeout);
    if (response.status !== 200) {
      throw new Error(`Vectara API returned status code ${response.status}`);
    }

    const result = await response.json();
    const responses = result.responseSet[0].response;
    const documents = result.responseSet[0].document;

    // Merge snippet-level and document-level metadata (name/value pairs) into
    // a single flat record per response; document metadata wins on key clashes.
    for (let i = 0; i < responses.length; i += 1) {
      const responseMetadata = responses[i].metadata;
      const documentMetadata = documents[responses[i].documentIndex].metadata;
      const combinedMetadata: Record<string, unknown> = {};

      responseMetadata.forEach((item: { name: string; value: unknown }) => {
        combinedMetadata[item.name] = item.value;
      });

      documentMetadata.forEach((item: { name: string; value: unknown }) => {
        combinedMetadata[item.name] = item.value;
      });

      responses[i].metadata = combinedMetadata;
    }

    const res: SummaryResult = {
      documents: responses.map(
        (response: {
          text: string;
          metadata: Record<string, unknown>;
          score: number;
        }) =>
          new Document({
            pageContent: response.text,
            metadata: response.metadata,
          })
      ),
      scores: responses.map(
        (response: {
          text: string;
          metadata: Record<string, unknown>;
          score: number;
        }) => response.score
      ),
      summary: result.responseSet[0].summary[0]?.text ?? "",
    };
    return res;
  }

  /**
   * Performs a similarity search and returns documents along with their
   * scores.
   * @param query The query string for the similarity search.
   * @param k Optional. The number of results to return. Default is 10.
   * @param filter Optional. A VectaraFilter object to refine the search results.
   * @returns A Promise that resolves to an array of tuples, each containing a Document and its score.
   */
  async similaritySearchWithScore(
    query: string,
    k?: number,
    filter?: VectaraFilter
  ): Promise<[Document, number][]> {
    const summaryResult = await this.vectaraQuery(
      query,
      k || 10,
      filter || DEFAULT_FILTER
    );

    const res = summaryResult.documents.map(
      (document, index) =>
        [document, summaryResult.scores[index]] as [Document, number]
    );

    return res;
  }

  /**
   * Performs a similarity search and returns documents.
   * @param query The query string for the similarity search.
   * @param k Optional. The number of results to return. Default is 10.
   * @param filter Optional. A VectaraFilter object to refine the search results.
   * @returns A Promise that resolves to an array of Document objects.
   */
  async similaritySearch(
    query: string,
    k?: number,
    filter?: VectaraFilter
  ): Promise<Document[]> {
    const documents = await this.similaritySearchWithScore(
      query,
      k || 10,
      filter || DEFAULT_FILTER
    );
    return documents.map((result) => result[0]);
  }

  /**
   * Throws an error, as this method is not implemented. Use
   * similaritySearch or similaritySearchWithScore instead.
   * @param _query Not used.
   * @param _k Not used.
   * @param _filter Not used.
   * @returns Does not return a value.
   */
  async similaritySearchVectorWithScore(
    _query: number[],
    _k: number,
    _filter?: VectaraFilter | undefined
  ): Promise<[Document, number][]> {
    throw new Error(
      "Method not implemented. Please call similaritySearch or similaritySearchWithScore instead."
    );
  }

  /**
   * Creates a VectaraStore instance from texts.
   * @param texts An array of text strings.
   * @param metadatas Metadata for the texts. Can be a single object or an array of objects.
   * @param _embeddings Not used.
   * @param args A VectaraLibArgs object for initializing the VectaraStore instance.
   * @returns A Promise that resolves to a VectaraStore instance.
   */
  static fromTexts(
    texts: string[],
    metadatas: object | object[],
    _embeddings: EmbeddingsInterface,
    args: VectaraLibArgs
  ): Promise<VectaraStore> {
    const docs: Document[] = [];

    for (let i = 0; i < texts.length; i += 1) {
      // A single (non-array) metadata object is shared by every document.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }

    return VectaraStore.fromDocuments(docs, new FakeEmbeddings(), args);
  }

  /**
   * Creates a VectaraStore instance from documents.
   * @param docs An array of Document objects.
   * @param _embeddings Not used.
   * @param args A VectaraLibArgs object for initializing the VectaraStore instance.
   * @returns A Promise that resolves to a VectaraStore instance.
   */
  static async fromDocuments(
    docs: Document[],
    _embeddings: EmbeddingsInterface,
    args: VectaraLibArgs
  ): Promise<VectaraStore> {
    const instance = new this(args);
    await instance.addDocuments(docs);
    return instance;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/neo4j_vector.ts
import neo4j from "neo4j-driver"; import * as uuid from "uuid"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; // eslint-disable-next-line @typescript-eslint/no-explicit-any type Any = any; export type SearchType = "vector" | "hybrid"; export type IndexType = "NODE" | "RELATIONSHIP"; export type DistanceStrategy = "euclidean" | "cosine"; export type Metadata = Record<string, unknown>; interface Neo4jVectorStoreArgs { url: string; username: string; password: string; database?: string; preDeleteCollection?: boolean; textNodeProperty?: string; textNodeProperties?: string[]; embeddingNodeProperty?: string; keywordIndexName?: string; indexName?: string; searchType?: SearchType; indexType?: IndexType; retrievalQuery?: string; nodeLabel?: string; createIdIndex?: boolean; } const DEFAULT_SEARCH_TYPE = "vector"; const DEFAULT_INDEX_TYPE = "NODE"; const DEFAULT_DISTANCE_STRATEGY = "cosine"; const DEFAULT_NODE_EMBEDDING_PROPERTY = "embedding"; /** * @security *Security note*: Make sure that the database connection uses credentials * that are narrowly-scoped to only include necessary permissions. * Failure to do so may result in data corruption or loss, since the calling * code may attempt commands that would result in deletion, mutation * of data if appropriately prompted or reading sensitive data if such * data is present in the database. * The best way to guard against such negative outcomes is to (as appropriate) * limit the permissions granted to the credentials used with this tool. * For example, creating read only users for the database is a good way to * ensure that the calling code cannot mutate or delete data. * * @link See https://js.langchain.com/docs/security for more information. 
*/ export class Neo4jVectorStore extends VectorStore { private driver: neo4j.Driver; private database: string; private preDeleteCollection: boolean; private nodeLabel: string; private embeddingNodeProperty: string; private embeddingDimension: number; private textNodeProperty: string; private keywordIndexName: string; private indexName: string; private retrievalQuery: string; private searchType: SearchType; private indexType: IndexType; private distanceStrategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY; private supportMetadataFilter = true; private isEnterprise = false; _vectorstoreType(): string { return "neo4jvector"; } constructor(embeddings: EmbeddingsInterface, config: Neo4jVectorStoreArgs) { super(embeddings, config); } static async initialize( embeddings: EmbeddingsInterface, config: Neo4jVectorStoreArgs ) { const store = new Neo4jVectorStore(embeddings, config); await store._initializeDriver(config); await store._verifyConnectivity(); const { preDeleteCollection = false, nodeLabel = "Chunk", textNodeProperty = "text", embeddingNodeProperty = DEFAULT_NODE_EMBEDDING_PROPERTY, keywordIndexName = "keyword", indexName = "vector", retrievalQuery = "", searchType = DEFAULT_SEARCH_TYPE, indexType = DEFAULT_INDEX_TYPE, } = config; store.embeddingDimension = (await embeddings.embedQuery("foo")).length; store.preDeleteCollection = preDeleteCollection; store.nodeLabel = nodeLabel; store.textNodeProperty = textNodeProperty; store.embeddingNodeProperty = embeddingNodeProperty; store.keywordIndexName = keywordIndexName; store.indexName = indexName; store.retrievalQuery = retrievalQuery; store.searchType = searchType; store.indexType = indexType; if (store.preDeleteCollection) { await store._dropIndex(); } return store; } async _initializeDriver({ url, username, password, database = "neo4j", }: Neo4jVectorStoreArgs) { try { this.driver = neo4j.driver(url, neo4j.auth.basic(username, password)); this.database = database; } catch (error) { throw new Error( "Could not 
create a Neo4j driver instance. Please check the connection details." ); } } async _verifyConnectivity() { await this.driver.verifyAuthentication(); } async _verifyVersion() { try { const data = await this.query("CALL dbms.components()"); const versionString: string = data[0].versions[0]; const targetVersion = [5, 11, 0]; let version: number[]; if (versionString.includes("aura")) { // Get the 'x.y.z' part before '-aura' const baseVersion = versionString.split("-")[0]; version = baseVersion.split(".").map(Number); version.push(0); } else { version = versionString.split(".").map(Number); } if (isVersionLessThan(version, targetVersion)) { throw new Error( "Version index is only supported in Neo4j version 5.11 or greater" ); } const metadataTargetVersion = [5, 18, 0]; if (isVersionLessThan(version, metadataTargetVersion)) { this.supportMetadataFilter = false; } this.isEnterprise = data[0].edition === "enterprise"; } catch (error) { console.error("Database version check failed:", error); } } async close() { await this.driver.close(); } async _dropIndex() { try { await this.query(` MATCH (n:\`${this.nodeLabel}\`) CALL { WITH n DETACH DELETE n } IN TRANSACTIONS OF 10000 ROWS; `); await this.query(`DROP INDEX ${this.indexName}`); } catch (error) { console.error("An error occurred while dropping the index:", error); } } async query(query: string, params: Any = {}): Promise<Any[]> { const session = this.driver.session({ database: this.database }); const result = await session.run(query, params); return toObjects(result.records); } static async fromTexts( texts: string[], metadatas: Any, embeddings: EmbeddingsInterface, config: Neo4jVectorStoreArgs ): Promise<Neo4jVectorStore> { const docs = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? 
metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return Neo4jVectorStore.fromDocuments(docs, embeddings, config); } static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, config: Neo4jVectorStoreArgs ): Promise<Neo4jVectorStore> { const { searchType = DEFAULT_SEARCH_TYPE, createIdIndex = true, textNodeProperties = [], } = config; const store = await this.initialize(embeddings, config); const embeddingDimension = await store.retrieveExistingIndex(); if (!embeddingDimension) { await store.createNewIndex(); } else if (store.embeddingDimension !== embeddingDimension) { throw new Error( `Index with name "${store.indexName}" already exists. The provided embedding function and vector index dimensions do not match. Embedding function dimension: ${store.embeddingDimension} Vector index dimension: ${embeddingDimension}` ); } if (searchType === "hybrid") { const ftsNodeLabel = await store.retrieveExistingFtsIndex(); if (!ftsNodeLabel) { await store.createNewKeywordIndex(textNodeProperties); } else { if (ftsNodeLabel !== store.nodeLabel) { throw Error( "Vector and keyword index don't index the same node label" ); } } } if (createIdIndex) { await store.query( `CREATE CONSTRAINT IF NOT EXISTS FOR (n:${store.nodeLabel}) REQUIRE n.id IS UNIQUE;` ); } await store.addDocuments(docs); return store; } static async fromExistingIndex( embeddings: EmbeddingsInterface, config: Neo4jVectorStoreArgs ) { const { searchType = DEFAULT_SEARCH_TYPE, keywordIndexName = "keyword" } = config; if (searchType === "hybrid" && !keywordIndexName) { throw Error( "keyword_index name has to be specified when using hybrid search option" ); } const store = await this.initialize(embeddings, config); const embeddingDimension = await store.retrieveExistingIndex(); if (!embeddingDimension) { throw Error( "The specified vector index name does not exist. 
Make sure to check if you spelled it correctly" ); } if (store.embeddingDimension !== embeddingDimension) { throw new Error( `The provided embedding function and vector index dimensions do not match. Embedding function dimension: ${store.embeddingDimension} Vector index dimension: ${embeddingDimension}` ); } if (searchType === "hybrid") { const ftsNodeLabel = await store.retrieveExistingFtsIndex(); if (!ftsNodeLabel) { throw Error( "The specified keyword index name does not exist. Make sure to check if you spelled it correctly" ); } else { if (ftsNodeLabel !== store.nodeLabel) { throw Error( "Vector and keyword index don't index the same node label" ); } } } return store; } static async fromExistingGraph( embeddings: EmbeddingsInterface, config: Neo4jVectorStoreArgs ) { const { textNodeProperties = [], embeddingNodeProperty = DEFAULT_NODE_EMBEDDING_PROPERTY, searchType = DEFAULT_SEARCH_TYPE, retrievalQuery = "", nodeLabel, } = config; let _retrievalQuery = retrievalQuery; if (textNodeProperties.length === 0) { throw Error( "Parameter `text_node_properties` must not be an empty array" ); } if (!retrievalQuery) { _retrievalQuery = ` RETURN reduce(str='', k IN ${JSON.stringify(textNodeProperties)} | str + '\\n' + k + ': ' + coalesce(node[k], '')) AS text, node {.*, \`${embeddingNodeProperty}\`: Null, id: Null, ${textNodeProperties .map((prop) => `\`${prop}\`: Null`) .join(", ")} } AS metadata, score `; } const store = await this.initialize(embeddings, { ...config, retrievalQuery: _retrievalQuery, }); const embeddingDimension = await store.retrieveExistingIndex(); if (!embeddingDimension) { await store.createNewIndex(); } else if (store.embeddingDimension !== embeddingDimension) { throw new Error( `Index with name ${store.indexName} already exists. 
The provided embedding function and vector index dimensions do not match.\nEmbedding function dimension: ${store.embeddingDimension}\nVector index dimension: ${embeddingDimension}` ); } if (searchType === "hybrid") { const ftsNodeLabel = await store.retrieveExistingFtsIndex( textNodeProperties ); if (!ftsNodeLabel) { await store.createNewKeywordIndex(textNodeProperties); } else { if (ftsNodeLabel !== store.nodeLabel) { throw Error( "Vector and keyword index don't index the same node label" ); } } } // eslint-disable-next-line no-constant-condition while (true) { const fetchQuery = ` MATCH (n:\`${nodeLabel}\`) WHERE n.${embeddingNodeProperty} IS null AND any(k in $props WHERE n[k] IS NOT null) RETURN elementId(n) AS id, reduce(str='', k IN $props | str + '\\n' + k + ':' + coalesce(n[k], '')) AS text LIMIT 1000 `; const data = await store.query(fetchQuery, { props: textNodeProperties }); if (!data) { break; } const textEmbeddings = await embeddings.embedDocuments( data.map((el) => el.text) ); const params = { data: data.map((el, index) => ({ id: el.id, embedding: textEmbeddings[index], })), }; await store.query( ` UNWIND $data AS row MATCH (n:\`${nodeLabel}\`) WHERE elementId(n) = row.id CALL db.create.setVectorProperty(n, '${embeddingNodeProperty}', row.embedding) YIELD node RETURN count(*) `, params ); if (data.length < 1000) { break; } } return store; } async createNewIndex(): Promise<void> { const indexQuery = ` CALL db.index.vector.createNodeIndex( $index_name, $node_label, $embedding_node_property, toInteger($embedding_dimension), $similarity_metric ) `; const parameters = { index_name: this.indexName, node_label: this.nodeLabel, embedding_node_property: this.embeddingNodeProperty, embedding_dimension: this.embeddingDimension, similarity_metric: this.distanceStrategy, }; await this.query(indexQuery, parameters); } async retrieveExistingIndex() { let indexInformation = await this.query( ` SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options WHERE 
type = 'VECTOR' AND (name = $index_name OR (labelsOrTypes[0] = $node_label AND properties[0] = $embedding_node_property)) RETURN name, labelsOrTypes, properties, options `, { index_name: this.indexName, node_label: this.nodeLabel, embedding_node_property: this.embeddingNodeProperty, } ); if (indexInformation) { indexInformation = this.sortByIndexName(indexInformation, this.indexName); try { const [index] = indexInformation; const [labelOrType] = index.labelsOrTypes; const [property] = index.properties; this.indexName = index.name; this.nodeLabel = labelOrType; this.embeddingNodeProperty = property; const embeddingDimension = index.options.indexConfig["vector.dimensions"]; return Number(embeddingDimension); } catch (error) { return null; } } return null; } async retrieveExistingFtsIndex( textNodeProperties: string[] = [] ): Promise<string | null> { const indexInformation = await this.query( ` SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options WHERE type = 'FULLTEXT' AND (name = $keyword_index_name OR (labelsOrTypes = [$node_label] AND properties = $text_node_property)) RETURN name, labelsOrTypes, properties, options `, { keyword_index_name: this.keywordIndexName, node_label: this.nodeLabel, text_node_property: textNodeProperties.length > 0 ? textNodeProperties : [this.textNodeProperty], } ); if (indexInformation) { // Sort the index information by index name const sortedIndexInformation = this.sortByIndexName( indexInformation, this.indexName ); try { const [index] = sortedIndexInformation; const [labelOrType] = index.labelsOrTypes; const [property] = index.properties; this.keywordIndexName = index.name; this.textNodeProperty = property; this.nodeLabel = labelOrType; return labelOrType; } catch (error) { return null; } } return null; } async createNewKeywordIndex( textNodeProperties: string[] = [] ): Promise<void> { const nodeProps = textNodeProperties.length > 0 ? 
textNodeProperties : [this.textNodeProperty]; // Construct the Cypher query to create a new full text index const ftsIndexQuery = ` CREATE FULLTEXT INDEX ${this.keywordIndexName} FOR (n:\`${this.nodeLabel}\`) ON EACH [${nodeProps.map((prop) => `n.\`${prop}\``).join(", ")}] `; await this.query(ftsIndexQuery); } sortByIndexName( values: Array<{ [key: string]: Any }>, indexName: string ): Array<{ [key: string]: Any }> { return values.sort( (a, b) => (a.name === indexName ? -1 : 0) - (b.name === indexName ? -1 : 0) ); } async addVectors( vectors: number[][], documents: Document[], metadatas?: Record<string, Any>[], ids?: string[] ): Promise<string[]> { let _ids = ids; const _metadatas = metadatas; if (!_ids) { _ids = documents.map(() => uuid.v1()); } const importQuery = ` UNWIND $data AS row CALL { WITH row MERGE (c:\`${this.nodeLabel}\` {id: row.id}) WITH c, row CALL db.create.setVectorProperty(c, '${this.embeddingNodeProperty}', row.embedding) YIELD node SET c.\`${this.textNodeProperty}\` = row.text SET c += row.metadata } IN TRANSACTIONS OF 1000 ROWS `; const parameters = { data: documents.map(({ pageContent, metadata }, index) => ({ text: pageContent, metadata: _metadatas ? _metadatas[index] : metadata, embedding: vectors[index], id: _ids ? 
_ids[index] : null, })), }; await this.query(importQuery, parameters); return _ids; } async addDocuments(documents: Document[]): Promise<string[]> { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents ); } async similaritySearch( query: string, k = 4, params: Record<string, Any> = {} ): Promise<Document[]> { const embedding = await this.embeddings.embedQuery(query); const results = await this.similaritySearchVectorWithScore( embedding, k, query, params ); return results.map((result) => result[0]); } async similaritySearchWithScore( query: string, k = 4, params: Record<string, Any> = {} ): Promise<[Document, number][]> { const embedding = await this.embeddings.embedQuery(query); return this.similaritySearchVectorWithScore(embedding, k, query, params); } async similaritySearchVectorWithScore( vector: number[], k: number, query: string, params: Record<string, Any> = {} ): Promise<[Document, number][]> { let indexQuery: string; let filterParams: Record<string, Any>; const { filter } = params; if (filter) { if (!this.supportMetadataFilter) { throw new Error( "Metadata filtering is only supported in Neo4j version 5.18 or greater." ); } if (this.searchType === "hybrid") { throw new Error( "Metadata filtering can't be use in combination with a hybrid search approach." ); } const parallelQuery = this.isEnterprise ? 
"CYPHER runtime = parallel parallelRuntimeSupport=all " : ""; const baseIndexQuery = ` ${parallelQuery} MATCH (n:\`${this.nodeLabel}\`) WHERE n.\`${this.embeddingNodeProperty}\` IS NOT NULL AND size(n.\`${this.embeddingNodeProperty}\`) = toInteger(${this.embeddingDimension}) AND `; const baseCosineQuery = ` WITH n as node, vector.similarity.cosine( n.\`${this.embeddingNodeProperty}\`, $embedding ) AS score ORDER BY score DESC LIMIT toInteger($k) `; const [fSnippets, fParams] = constructMetadataFilter(filter); indexQuery = baseIndexQuery + fSnippets + baseCosineQuery; filterParams = fParams; } else { indexQuery = getSearchIndexQuery(this.searchType, this.indexType); filterParams = {}; } let defaultRetrieval: string; if (this.indexType === "RELATIONSHIP") { defaultRetrieval = ` RETURN relationship.${this.textNodeProperty} AS text, score, relationship {.*, ${this.textNodeProperty}: Null, ${this.embeddingNodeProperty}: Null, id: Null } AS metadata `; } else { defaultRetrieval = ` RETURN node.${this.textNodeProperty} AS text, score, node {.*, ${this.textNodeProperty}: Null, ${this.embeddingNodeProperty}: Null, id: Null } AS metadata `; } const retrievalQuery = this.retrievalQuery ? 
this.retrievalQuery : defaultRetrieval; const readQuery = `${indexQuery} ${retrievalQuery}`; const parameters = { index: this.indexName, k: Number(k), embedding: vector, keyword_index: this.keywordIndexName, query: removeLuceneChars(query), ...params, ...filterParams, }; const results = await this.query(readQuery, parameters); if (results) { if (results.some((result) => result.text == null)) { if (!this.retrievalQuery) { throw new Error( "Make sure that none of the '" + this.textNodeProperty + "' properties on nodes with label '" + this.nodeLabel + "' are missing or empty" ); } else { throw new Error( "Inspect the 'retrievalQuery' and ensure it doesn't return null for the 'text' column" ); } } const docs: [Document, number][] = results.map((result: Any) => [ new Document({ pageContent: result.text, metadata: Object.fromEntries( Object.entries(result.metadata).filter(([_, v]) => v !== null) ), }), result.score, ]); return docs; } return []; } } function toObjects(records: neo4j.Record[]) { const recordValues: Record<string, Any>[] = records.map((record) => { const rObj = record.toObject(); const out: { [key: string]: Any } = {}; Object.keys(rObj).forEach((key) => { out[key] = itemIntToString(rObj[key]); }); return out; }); return recordValues; } function itemIntToString(item: Any): Any { if (neo4j.isInt(item)) return item.toString(); if (Array.isArray(item)) return item.map((ii) => itemIntToString(ii)); if (["number", "string", "boolean"].indexOf(typeof item) !== -1) return item; if (item === null) return item; if (typeof item === "object") return objIntToString(item); } function objIntToString(obj: Any) { const entry = extractFromNeoObjects(obj); let newObj: Any = null; if (Array.isArray(entry)) { newObj = entry.map((item) => itemIntToString(item)); } else if (entry !== null && typeof entry === "object") { newObj = {}; Object.keys(entry).forEach((key) => { newObj[key] = itemIntToString(entry[key]); }); } return newObj; } function extractFromNeoObjects(obj: Any) { 
if ( // eslint-disable-next-line obj instanceof (neo4j.types.Node as any) || // eslint-disable-next-line obj instanceof (neo4j.types.Relationship as any) ) { return obj.properties; // eslint-disable-next-line } else if (obj instanceof (neo4j.types.Path as any)) { // eslint-disable-next-line return [].concat.apply<any[], any[], any[]>([], extractPathForRows(obj)); } return obj; } function extractPathForRows(path: neo4j.Path) { let { segments } = path; // Zero length path. No relationship, end === start if (!Array.isArray(path.segments) || path.segments.length < 1) { segments = [{ ...path, end: null } as Any]; } return segments.map((segment: Any) => [ objIntToString(segment.start), objIntToString(segment.relationship), objIntToString(segment.end), ].filter((part) => part !== null) ); } function getSearchIndexQuery( searchType: SearchType, indexType: IndexType = DEFAULT_INDEX_TYPE ): string { if (indexType === "NODE") { const typeToQueryMap: { [key in SearchType]: string } = { vector: "CALL db.index.vector.queryNodes($index, $k, $embedding) YIELD node, score", hybrid: ` CALL { CALL db.index.vector.queryNodes($index, $k, $embedding) YIELD node, score WITH collect({node:node, score:score}) AS nodes, max(score) AS max UNWIND nodes AS n // We use 0 as min RETURN n.node AS node, (n.score / max) AS score UNION CALL db.index.fulltext.queryNodes($keyword_index, $query, {limit: $k}) YIELD node, score WITH collect({node: node, score: score}) AS nodes, max(score) AS max UNWIND nodes AS n RETURN n.node AS node, (n.score / max) AS score } WITH node, max(score) AS score ORDER BY score DESC LIMIT toInteger($k) `, }; return typeToQueryMap[searchType]; } else { return ` CALL db.index.vector.queryRelationships($index, $k, $embedding) YIELD relationship, score `; } } function removeLuceneChars(text: string | null) { if (text === undefined || text === null) { return null; } // Remove Lucene special characters const specialChars = [ "+", "-", "&", "|", "!", "(", ")", "{", "}", "[", "]", 
"^", '"', "~", "*", "?", ":", "\\", ]; let modifiedText = text; for (const char of specialChars) { modifiedText = modifiedText.split(char).join(" "); } return modifiedText.trim(); } function isVersionLessThan(v1: number[], v2: number[]): boolean { for (let i = 0; i < Math.min(v1.length, v2.length); i += 1) { if (v1[i] < v2[i]) { return true; } else if (v1[i] > v2[i]) { return false; } } // If all the corresponding parts are equal, the shorter version is less return v1.length < v2.length; } // Filter utils const COMPARISONS_TO_NATIVE: Record<string, string> = { $eq: "=", $ne: "<>", $lt: "<", $lte: "<=", $gt: ">", $gte: ">=", }; const COMPARISONS_TO_NATIVE_OPERATORS = new Set( Object.keys(COMPARISONS_TO_NATIVE) ); const TEXT_OPERATORS = new Set(["$like", "$ilike"]); const LOGICAL_OPERATORS = new Set(["$and", "$or"]); const SPECIAL_CASED_OPERATORS = new Set(["$in", "$nin", "$between"]); const SUPPORTED_OPERATORS = new Set([ ...COMPARISONS_TO_NATIVE_OPERATORS, ...TEXT_OPERATORS, ...LOGICAL_OPERATORS, ...SPECIAL_CASED_OPERATORS, ]); const IS_IDENTIFIER_REGEX = /^[a-zA-Z_][a-zA-Z0-9_]*$/; function combineQueries( inputQueries: [string, Record<string, Any>][], operator: string ): [string, Record<string, Any>] { let combinedQuery = ""; const combinedParams: Record<string, Any> = {}; const paramCounter: Record<string, number> = {}; for (const [query, params] of inputQueries) { let newQuery = query; for (const [param, value] of Object.entries(params)) { if (param in paramCounter) { paramCounter[param] += 1; } else { paramCounter[param] = 1; } const newParamName = `${param}_${paramCounter[param]}`; newQuery = newQuery.replace(`$${param}`, `$${newParamName}`); combinedParams[newParamName] = value; } if (combinedQuery) { combinedQuery += ` ${operator} `; } combinedQuery += `(${newQuery})`; } return [combinedQuery, combinedParams]; } function collectParams( inputData: [string, Record<string, string>][] ): [string[], Record<string, Any>] { const queryParts: string[] = []; const 
params: Record<string, Any> = {}; for (const [queryPart, param] of inputData) { queryParts.push(queryPart); Object.assign(params, param); } return [queryParts, params]; } function handleFieldFilter( field: string, value: Any, paramNumber = 1 ): [string, Record<string, Any>] { if (typeof field !== "string") { throw new Error( `field should be a string but got: ${typeof field} with value: ${field}` ); } if (field.startsWith("$")) { throw new Error( `Invalid filter condition. Expected a field but got an operator: ${field}` ); } // Allow [a - zA - Z0 -9_], disallow $ for now until we support escape characters if (!IS_IDENTIFIER_REGEX.test(field)) { throw new Error( `Invalid field name: ${field}. Expected a valid identifier.` ); } let operator: string; let filterValue: Any; if (typeof value === "object" && value !== null && !Array.isArray(value)) { const keys = Object.keys(value); if (keys.length !== 1) { throw new Error(`Invalid filter condition. Expected a value which is a dictionary with a single key that corresponds to an operator but got a dictionary with ${keys.length} keys. The first few keys are: ${keys .slice(0, 3) .join(", ")} `); } // eslint-disable-next-line prefer-destructuring operator = keys[0]; filterValue = value[operator]; if (!SUPPORTED_OPERATORS.has(operator)) { throw new Error( `Invalid operator: ${operator}. 
Expected one of ${SUPPORTED_OPERATORS}` ); } } else { operator = "$eq"; filterValue = value; } if (COMPARISONS_TO_NATIVE_OPERATORS.has(operator)) { const native = COMPARISONS_TO_NATIVE[operator]; const querySnippet = `n.${field} ${native} $param_${paramNumber}`; const queryParam = { [`param_${paramNumber}`]: filterValue }; return [querySnippet, queryParam]; } else if (operator === "$between") { const [low, high] = filterValue; const querySnippet = `$param_${paramNumber}_low <= n.${field} <= $param_${paramNumber}_high`; const queryParam = { [`param_${paramNumber}_low`]: low, [`param_${paramNumber}_high`]: high, }; return [querySnippet, queryParam]; } else if (["$in", "$nin", "$like", "$ilike"].includes(operator)) { if (["$in", "$nin"].includes(operator)) { filterValue.forEach((val: Any) => { if ( typeof val !== "string" && typeof val !== "number" && typeof val !== "boolean" ) { throw new Error(`Unsupported type: ${typeof val} for value: ${val}`); } }); } if (operator === "$in") { const querySnippet = `n.${field} IN $param_${paramNumber}`; const queryParam = { [`param_${paramNumber}`]: filterValue }; return [querySnippet, queryParam]; } else if (operator === "$nin") { const querySnippet = `n.${field} NOT IN $param_${paramNumber}`; const queryParam = { [`param_${paramNumber}`]: filterValue }; return [querySnippet, queryParam]; } else if (operator === "$like") { const querySnippet = `n.${field} CONTAINS $param_${paramNumber}`; const queryParam = { [`param_${paramNumber}`]: filterValue.slice(0, -1) }; return [querySnippet, queryParam]; } else if (operator === "$ilike") { const querySnippet = `toLower(n.${field}) CONTAINS $param_${paramNumber}`; const queryParam = { [`param_${paramNumber}`]: filterValue.slice(0, -1) }; return [querySnippet, queryParam]; } else { throw new Error("Not Implemented"); } } else { throw new Error("Not Implemented"); } } function constructMetadataFilter( filter: Record<string, Any> ): [string, Record<string, Any>] { if (typeof filter !== 
"object" || filter === null) { throw new Error("Expected a dictionary representing the filter condition."); } const entries = Object.entries(filter); if (entries.length === 1) { const [key, value] = entries[0]; if (key.startsWith("$")) { if (!["$and", "$or"].includes(key.toLowerCase())) { throw new Error( `Invalid filter condition. Expected $and or $or but got: ${key}` ); } if (!Array.isArray(value)) { throw new Error( `Expected an array for logical conditions, but got ${typeof value} for value: ${value}` ); } const operation = key.toLowerCase() === "$and" ? "AND" : "OR"; const combinedQueries = combineQueries( value.map((v) => constructMetadataFilter(v)), operation ); return combinedQueries; } else { return handleFieldFilter(key, value); } } else if (entries.length > 1) { for (const [key] of entries) { if (key.startsWith("$")) { throw new Error( `Invalid filter condition. Expected a field but got an operator: ${key}` ); } } const and_multiple = collectParams( entries.map(([field, val], index) => handleFieldFilter(field, val, index + 1) ) ); if (and_multiple.length >= 1) { return [and_multiple[0].join(" AND "), and_multiple[1]]; } else { throw Error( "Invalid filter condition. Expected a dictionary but got an empty dictionary" ); } } else { throw new Error("Filter condition contains no entries."); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/analyticdb.ts
import * as uuid from "uuid"; import pg, { Pool, PoolConfig } from "pg"; import { from as copyFrom } from "pg-copy-streams"; import { pipeline } from "node:stream/promises"; import { Readable } from "node:stream"; import { VectorStore } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; const _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain_document"; /** * Interface defining the arguments required to create an instance of * `AnalyticDBVectorStore`. */ export interface AnalyticDBArgs { connectionOptions: PoolConfig; embeddingDimension?: number; collectionName?: string; preDeleteCollection?: boolean; } /** * Interface defining the structure of data to be stored in the * AnalyticDB. */ interface DataType { id: string; embedding: number[]; document: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any metadata: Record<string, any>; } /** * Class that provides methods for creating and managing a collection of * documents in an AnalyticDB, adding documents or vectors to the * collection, performing similarity search on vectors, and creating an * instance of `AnalyticDBVectorStore` from texts or documents. 
*/ export class AnalyticDBVectorStore extends VectorStore { // eslint-disable-next-line @typescript-eslint/no-explicit-any declare FilterType: Record<string, any>; private pool: Pool; private embeddingDimension?: number; private collectionName: string; private preDeleteCollection: boolean; private isCreateCollection = false; _vectorstoreType(): string { return "analyticdb"; } constructor(embeddings: EmbeddingsInterface, args: AnalyticDBArgs) { super(embeddings, args); this.pool = new pg.Pool({ host: args.connectionOptions.host, port: args.connectionOptions.port, database: args.connectionOptions.database, user: args.connectionOptions.user, password: args.connectionOptions.password, }); this.embeddingDimension = args.embeddingDimension; this.collectionName = args.collectionName || _LANGCHAIN_DEFAULT_COLLECTION_NAME; this.preDeleteCollection = args.preDeleteCollection || false; } /** * Closes all the clients in the pool and terminates the pool. * @returns Promise that resolves when all clients are closed and the pool is terminated. */ async end(): Promise<void> { return this.pool.end(); } /** * Creates a new table in the database if it does not already exist. The * table is created with columns for id, embedding, document, and * metadata. An index is also created on the embedding column if it does * not already exist. * @returns Promise that resolves when the table and index are created. 
*/ async createTableIfNotExists(): Promise<void> { if (!this.embeddingDimension) { this.embeddingDimension = ( await this.embeddings.embedQuery("test") ).length; } const client = await this.pool.connect(); try { await client.query("BEGIN"); // Create the table if it doesn't exist await client.query(` CREATE TABLE IF NOT EXISTS ${this.collectionName} ( id TEXT PRIMARY KEY DEFAULT NULL, embedding REAL[], document TEXT, metadata JSON ); `); // Check if the index exists const indexName = `${this.collectionName}_embedding_idx`; const indexQuery = ` SELECT 1 FROM pg_indexes WHERE indexname = '${indexName}'; `; const result = await client.query(indexQuery); // Create the index if it doesn't exist if (result.rowCount === 0) { const indexStatement = ` CREATE INDEX ${indexName} ON ${this.collectionName} USING ann(embedding) WITH ( "dim" = ${this.embeddingDimension}, "hnsw_m" = 100 ); `; await client.query(indexStatement); } await client.query("COMMIT"); } catch (err) { await client.query("ROLLBACK"); throw err; } finally { client.release(); } } /** * Deletes the collection from the database if it exists. * @returns Promise that resolves when the collection is deleted. */ async deleteCollection(): Promise<void> { const dropStatement = `DROP TABLE IF EXISTS ${this.collectionName};`; await this.pool.query(dropStatement); } /** * Creates a new collection in the database. If `preDeleteCollection` is * true, any existing collection with the same name is deleted before the * new collection is created. * @returns Promise that resolves when the collection is created. */ async createCollection(): Promise<void> { if (this.preDeleteCollection) { await this.deleteCollection(); } await this.createTableIfNotExists(); this.isCreateCollection = true; } /** * Adds an array of documents to the collection. The documents are first * converted to vectors using the `embedDocuments` method of the * `embeddings` instance. * @param documents Array of Document instances to be added to the collection. 
* @returns Promise that resolves when the documents are added. */ async addDocuments(documents: Document[]): Promise<void> { // When the pageContent is empty in certain scenarios (such as when using unstructuredIo), an error occurs during embedding. const filteredDocs = documents.filter((doc) => doc.pageContent); if (filteredDocs.length !== documents.length) { console.warn( `[AnalyticDB]: Filtered out ${ documents.length - filteredDocs.length } empty documents.` ); } const texts = filteredDocs.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), filteredDocs ); } /** * Adds an array of vectors and corresponding documents to the collection. * The vectors and documents are batch inserted into the database. * @param vectors Array of vectors to be added to the collection. * @param documents Array of Document instances corresponding to the vectors. * @returns Promise that resolves when the vectors and documents are added. */ async addVectors(vectors: number[][], documents: Document[]): Promise<void> { if (vectors.length === 0) { return; } if (vectors.length !== documents.length) { throw new Error(`Vectors and documents must have the same length`); } if (!this.embeddingDimension) { this.embeddingDimension = ( await this.embeddings.embedQuery("test") ).length; } if (vectors[0].length !== this.embeddingDimension) { throw new Error( `Vectors must have the same length as the number of dimensions (${this.embeddingDimension})` ); } if (!this.isCreateCollection) { await this.createCollection(); } const client = await this.pool.connect(); try { const chunkSize = 500; const chunksTableData: DataType[] = []; for (let i = 0; i < documents.length; i += 1) { chunksTableData.push({ id: uuid.v4(), embedding: vectors[i], document: documents[i].pageContent, metadata: documents[i].metadata, }); // Execute the batch insert when the batch size is reached if (chunksTableData.length === chunkSize) { const rs = new Readable(); let 
currentIndex = 0; rs._read = function () { if (currentIndex === chunkSize) { rs.push(null); } else { const data = chunksTableData[currentIndex]; rs.push( `${data.id}\t{${data.embedding.join(",")}}\t${ data.document }\t${JSON.stringify(data.metadata)}\n` ); currentIndex += 1; } }; const ws = client.query( copyFrom( `COPY ${this.collectionName}(id, embedding, document, metadata) FROM STDIN` ) ); await pipeline(rs, ws); // Clear the chunksTableData list for the next batch chunksTableData.length = 0; } } // Insert any remaining records that didn't make up a full batch if (chunksTableData.length > 0) { const rs = new Readable(); let currentIndex = 0; rs._read = function () { if (currentIndex === chunksTableData.length) { rs.push(null); } else { const data = chunksTableData[currentIndex]; rs.push( `${data.id}\t{${data.embedding.join(",")}}\t${ data.document }\t${JSON.stringify(data.metadata)}\n` ); currentIndex += 1; } }; const ws = client.query( copyFrom( `COPY ${this.collectionName}(id, embedding, document, metadata) FROM STDIN` ) ); await pipeline(rs, ws); } } finally { client.release(); } } /** * Performs a similarity search on the vectors in the collection. The * search is performed using the given query vector and returns the top k * most similar vectors along with their corresponding documents and * similarity scores. * @param query Query vector for the similarity search. * @param k Number of top similar vectors to return. * @param filter Optional. Filter to apply on the metadata of the documents. * @returns Promise that resolves to an array of tuples, each containing a Document instance and its similarity score. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { if (!this.isCreateCollection) { await this.createCollection(); } let filterCondition = ""; const filterEntries = filter ? 
Object.entries(filter) : []; if (filterEntries.length > 0) { const conditions = filterEntries.map( (_, index) => `metadata->>$${2 * index + 3} = $${2 * index + 4}` ); filterCondition = `WHERE ${conditions.join(" AND ")}`; } const sqlQuery = ` SELECT *, l2_distance(embedding, $1::real[]) AS distance FROM ${this.collectionName} ${filterCondition} ORDER BY embedding <-> $1 LIMIT $2; `; // Execute the query and fetch the results const { rows } = await this.pool.query(sqlQuery, [ query, k, ...filterEntries.flatMap(([key, value]) => [key, value]), ]); const result: [Document, number][] = rows.map((row) => [ new Document({ pageContent: row.document, metadata: row.metadata }), row.distance, ]); return result; } /** * Creates an instance of `AnalyticDBVectorStore` from an array of texts * and corresponding metadata. The texts are first converted to Document * instances before being added to the collection. * @param texts Array of texts to be added to the collection. * @param metadatas Array or object of metadata corresponding to the texts. * @param embeddings Embeddings instance used to convert the texts to vectors. * @param dbConfig Configuration for the AnalyticDB. * @returns Promise that resolves to an instance of `AnalyticDBVectorStore`. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: AnalyticDBArgs ): Promise<AnalyticDBVectorStore> { const docs = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return AnalyticDBVectorStore.fromDocuments(docs, embeddings, dbConfig); } /** * Creates an instance of `AnalyticDBVectorStore` from an array of * Document instances. The documents are added to the collection. * @param docs Array of Document instances to be added to the collection. 
* @param embeddings Embeddings instance used to convert the documents to vectors. * @param dbConfig Configuration for the AnalyticDB. * @returns Promise that resolves to an instance of `AnalyticDBVectorStore`. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: AnalyticDBArgs ): Promise<AnalyticDBVectorStore> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } /** * Creates an instance of `AnalyticDBVectorStore` from an existing index * in the database. A new collection is created in the database. * @param embeddings Embeddings instance used to convert the documents to vectors. * @param dbConfig Configuration for the AnalyticDB. * @returns Promise that resolves to an instance of `AnalyticDBVectorStore`. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig: AnalyticDBArgs ): Promise<AnalyticDBVectorStore> { const instance = new this(embeddings, dbConfig); await instance.createCollection(); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/rockset.ts
import { MainApi } from "@rockset/client";
import type { CreateCollectionRequest } from "@rockset/client/dist/codegen/api.d.ts";
import { Collection } from "@rockset/client/dist/codegen/api.js";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";

/**
 * Generic Rockset vector storage error
 */
export class RocksetStoreError extends Error {
  /**
   * Constructs a RocksetStoreError
   * @param message The error message
   */
  constructor(message: string) {
    super(message);
    // Use the subclass name so thrown errors identify themselves when logged
    this.name = this.constructor.name;
  }
}

/**
 * Error that is thrown when a RocksetStore function is called
 * after `destroy()` is called (meaning the collection would be
 * deleted).
 */
export class RocksetStoreDestroyedError extends RocksetStoreError {
  constructor() {
    super("The Rockset store has been destroyed");
    this.name = this.constructor.name;
  }
}

/**
 * Functions to measure vector distance/similarity by.
 * See https://rockset.com/docs/vector-functions/#vector-distance-functions
 * @enum SimilarityMetric
 */
export const SimilarityMetric = {
  CosineSimilarity: "COSINE_SIM",
  EuclideanDistance: "EUCLIDEAN_DIST",
  DotProduct: "DOT_PRODUCT",
} as const;

export type SimilarityMetric =
  (typeof SimilarityMetric)[keyof typeof SimilarityMetric];

// Shape of the error payload Rockset returns when a collection is missing.
interface CollectionNotFoundError {
  message_key: string;
}

/**
 * Vector store arguments
 * @interface RocksetStore
 */
export interface RocksetLibArgs {
  /**
   * The rockset client object constructed with `rocksetConfigure`
   * @type {MainAPI}
   */
  client: MainApi;
  /**
   * The name of the Rockset collection to store vectors
   * @type {string}
   */
  collectionName: string;
  /**
   * The name of the Rockset workspace that holds @member collectionName
   * @type {string}
   */
  workspaceName?: string;
  /**
   * The name of the collection column to contain page content of documents
   * @type {string}
   */
  textKey?: string;
  /**
   * The name of the collection column to contain vectors
   * @type {string}
   */
  embeddingKey?: string;
  /**
   * The SQL `WHERE` clause to filter by
   * @type {string}
   */
  filter?: string;
  /**
   * The metric used to measure vector relationship
   * @type {SimilarityMetric}
   */
  similarityMetric?: SimilarityMetric;
}

/**
 * Exposes Rockset's vector store/search functionality
 */
export class RocksetStore extends VectorStore {
  declare FilterType: string;

  client: MainApi;

  collectionName: string;

  workspaceName: string;

  textKey: string;

  embeddingKey: string;

  filter?: string;

  private _similarityMetric: SimilarityMetric;

  // Sort direction derived from the metric: for Euclidean distance a
  // smaller value means more similar (ASC); for the other metrics larger
  // means more similar (DESC).
  private similarityOrder: "ASC" | "DESC";

  private destroyed: boolean;

  /**
   * Gets a string representation of the type of this VectorStore
   * @returns {"rockset"}
   */
  _vectorstoreType(): "rockset" {
    return "rockset";
  }

  /**
   * Constructs a new RocksetStore
   * @param {Embeddings} embeddings Object used to embed queries and
   *                                page content
   * @param {RocksetLibArgs} args
   */
  constructor(embeddings: EmbeddingsInterface, args: RocksetLibArgs) {
    super(embeddings, args);

    this.embeddings = embeddings;
    this.client = args.client;
    this.collectionName = args.collectionName;
    this.workspaceName = args.workspaceName ?? "commons";
    this.textKey = args.textKey ?? "text";
    this.embeddingKey = args.embeddingKey ?? "embedding";
    this.filter = args.filter;
    // The setter also recomputes similarityOrder via setSimilarityOrder().
    this.similarityMetric =
      args.similarityMetric ?? SimilarityMetric.CosineSimilarity;
    this.setSimilarityOrder();
  }

  /**
   * Sets the object's similarity order based on what
   * SimilarityMetric is being used
   */
  private setSimilarityOrder() {
    this.checkIfDestroyed();
    this.similarityOrder =
      this.similarityMetric === SimilarityMetric.EuclideanDistance
        ? "ASC"
        : "DESC";
  }

  /**
   * Embeds and adds Documents to the store.
   * @param {Documents[]} documents The documents to store
   * @returns {Promise<string[]?>} The _id's of the documents added
   */
  async addDocuments(documents: Document[]): Promise<string[] | undefined> {
    const texts = documents.map(({ pageContent }) => pageContent);
    return await this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents
    );
  }

  /**
   * Adds vectors to the store given their corresponding Documents
   * @param {number[][]} vectors The vectors to store
   * @param {Document[]} documents The Documents they represent
   * @return {Promise<string[]?>} The _id's of the added documents
   */
  async addVectors(vectors: number[][], documents: Document[]) {
    this.checkIfDestroyed();
    const rocksetDocs = [];
    for (let i = 0; i < documents.length; i += 1) {
      const currDoc = documents[i];
      const currVector = vectors[i];
      // Flatten metadata into the top-level Rockset document alongside the
      // text and embedding columns.
      rocksetDocs.push({
        [this.textKey]: currDoc.pageContent,
        [this.embeddingKey]: currVector,
        ...currDoc.metadata,
      });
    }

    return (
      await this.client.documents.addDocuments(
        this.workspaceName,
        this.collectionName,
        {
          data: rocksetDocs,
        }
      )
    ).data?.map((docStatus) => docStatus._id || "");
  }

  /**
   * Deletes Rockset documents given their _id's
   * @param {string[]} ids The IDs to remove documents with
   */
  async delete(ids: string[]): Promise<void> {
    this.checkIfDestroyed();
    await this.client.documents.deleteDocuments(
      this.workspaceName,
      this.collectionName,
      {
        data: ids.map((id) => ({ _id: id })),
      }
    );
  }

  /**
   * Gets the most relevant documents to a query along
   * with their similarity score. The returned documents
   * are ordered by similarity (most similar at the first
   * index)
   * @param {number[]} query The embedded query to search
   *                         the store by
   * @param {number} k The number of documents to retrieve
   * @param {string?} filter The SQL `WHERE` clause to filter by
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: string
  ): Promise<[Document, number][]> {
    this.checkIfDestroyed();
    if (filter && this.filter) {
      throw new RocksetStoreError(
        "cannot provide both `filter` and `this.filter`"
      );
    }
    const similarityKey = "similarity";
    const _filter = filter ?? this.filter;
    return (
      (
        await this.client.queries.query({
          sql: {
            query: `
              SELECT
                * EXCEPT("${this.embeddingKey}"),
                "${this.textKey}",
                ${this.similarityMetric}(:query, "${
              this.embeddingKey
            }") AS "${similarityKey}"
              FROM
                "${this.workspaceName}"."${this.collectionName}"
              ${_filter ? `WHERE ${_filter}` : ""}
              ORDER BY
                "${similarityKey}" ${this.similarityOrder}
              LIMIT ${k}
            `,
            parameters: [
              {
                name: "query",
                type: "",
                value: `[${query.toString()}]`,
              },
            ],
          },
        })
      ).results?.map((rocksetDoc) => [
        new Document<Record<string, object>>({
          pageContent: rocksetDoc[this.textKey],
          // Strip the text and similarity columns; everything else is metadata.
          metadata: (({ [this.textKey]: t, [similarityKey]: s, ...rocksetDoc }) =>
            rocksetDoc)(rocksetDoc),
        }),
        rocksetDoc[similarityKey] as number,
      ]) ?? []
    );
  }

  /**
   * Constructs and returns a RocksetStore object given texts to store.
   * @param {string[]} texts The texts to store
   * @param {object[] | object} metadatas The metadatas that correspond
   *                                      to @param texts
   * @param {Embeddings} embeddings The object used to embed queries
   *                                and page content
   * @param {RocksetLibArgs} dbConfig The options to be passed into the
   *                                  RocksetStore constructor
   * @returns {RocksetStore}
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig: RocksetLibArgs
  ): Promise<RocksetStore> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // A single metadata object is shared by all texts; an array is zipped.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return RocksetStore.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * Constructs, adds docs to, and returns a RocksetStore object
   * @param {Document[]} docs The Documents to store
   * @param {Embeddings} embeddings The object used to embed queries
   *                                and page content
   * @param {RocksetLibArgs} dbConfig The options to be passed into the
   *                                  RocksetStore constructor
   * @returns {RocksetStore}
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig: RocksetLibArgs
  ): Promise<RocksetStore> {
    const args = { ...dbConfig, textKey: dbConfig.textKey ?? "text" };
    const instance = new this(embeddings, args);
    await instance.addDocuments(docs);
    return instance;
  }

  /**
   * Checks if a Rockset collection exists.
   * @param {RocksetLibArgs} dbConfig The object containing the collection
   *                                  and workspace names
   * @return {boolean} whether the collection exists
   */
  private static async collectionExists(dbConfig: RocksetLibArgs) {
    try {
      await dbConfig.client.collections.getCollection(
        dbConfig.workspaceName ?? "commons",
        dbConfig.collectionName
      );
    } catch (err) {
      // A "not found" response means the collection is absent; anything
      // else is a real failure and is rethrown.
      if (
        (err as CollectionNotFoundError).message_key ===
        "COLLECTION_DOES_NOT_EXIST"
      ) {
        return false;
      }
      throw err;
    }
    return true;
  }

  /**
   * Checks whether a Rockset collection is ready to be queried.
   * @param {RocksetLibArgs} dbConfig The object containing the collection
   *                                  name and workspace
   * @return {boolean} whether the collection is ready
   */
  private static async collectionReady(dbConfig: RocksetLibArgs) {
    return (
      (
        await dbConfig.client.collections.getCollection(
          dbConfig.workspaceName ?? "commons",
          dbConfig.collectionName
        )
      ).data?.status === Collection.StatusEnum.READY
    );
  }

  /**
   * Deletes the collection this RocksetStore uses
   * @param {boolean?} waitUntilDeletion Whether to sleep until the
   *                                     collection is ready to be
   *                                     queried
   */
  async destroy(waitUntilDeletion?: boolean) {
    await this.client.collections.deleteCollection(
      this.workspaceName,
      this.collectionName
    );
    this.destroyed = true;
    if (waitUntilDeletion) {
      // Busy-poll until the collection no longer exists.
      while (
        await RocksetStore.collectionExists({
          collectionName: this.collectionName,
          client: this.client,
        })
      );
    }
  }

  /**
   * Checks if this RocksetStore has been destroyed.
   * @throws {RocksetStoreDestroyedError} if it has.
   */
  private checkIfDestroyed() {
    if (this.destroyed) {
      throw new RocksetStoreDestroyedError();
    }
  }

  /**
   * Creates a new Rockset collection and returns a RocksetStore that
   * uses it
   * @param {Embeddings} embeddings Object used to embed queries and
   *                                page content
   * @param {RocksetLibArgs} dbConfig The options to be passed into the
   *                                  RocksetStore constructor
   * @param {CreateCollectionRequest?} collectionOptions The arguments to send with the
   *                                                     HTTP request when creating the
   *                                                     collection. Setting a field mapping
   *                                                     that `VECTOR_ENFORCE`s is recommended
   *                                                     when using this function. See
   *                                                     https://rockset.com/docs/vector-functions/#vector_enforce
   * @returns {RocksetStore}
   */
  static async withNewCollection(
    embeddings: EmbeddingsInterface,
    dbConfig: RocksetLibArgs,
    collectionOptions?: CreateCollectionRequest
  ): Promise<RocksetStore> {
    if (
      collectionOptions?.name &&
      dbConfig.collectionName !== collectionOptions?.name
    ) {
      throw new RocksetStoreError(
        "`dbConfig.name` and `collectionOptions.name` do not match"
      );
    }
    await dbConfig.client.collections.createCollection(
      dbConfig.workspaceName ?? "commons",
      collectionOptions || { name: dbConfig.collectionName }
    );
    // Busy-poll until the new collection exists and reports READY.
    while (
      !(await this.collectionExists(dbConfig)) ||
      !(await this.collectionReady(dbConfig))
    );
    return new this(embeddings, dbConfig);
  }

  public get similarityMetric() {
    return this._similarityMetric;
  }

  public set similarityMetric(metric: SimilarityMetric) {
    this._similarityMetric = metric;
    // Keep the derived sort direction in sync with the new metric.
    this.setSimilarityOrder();
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/hnswlib.ts
import type {
  HierarchicalNSW as HierarchicalNSWT,
  SpaceName,
} from "hnswlib-node";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { SaveableVectorStore } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";
import { SynchronousInMemoryDocstore } from "../stores/doc/in_memory.js";

/**
 * Interface for the base configuration of HNSWLib. It includes the space
 * name and the number of dimensions.
 */
export interface HNSWLibBase {
  space: SpaceName;
  numDimensions?: number;
}

/**
 * Interface for the arguments that can be passed to the HNSWLib
 * constructor. It extends HNSWLibBase and includes properties for the
 * document store and HNSW index.
 */
export interface HNSWLibArgs extends HNSWLibBase {
  docstore?: SynchronousInMemoryDocstore;
  index?: HierarchicalNSWT;
}

/**
 * Class that implements a vector store using Hierarchical Navigable Small
 * World (HNSW) graphs. It extends the SaveableVectorStore class and
 * provides methods for adding documents and vectors, performing
 * similarity searches, and saving and loading the vector store.
 */
export class HNSWLib extends SaveableVectorStore {
  declare FilterType: (doc: Document) => boolean;

  // Backing field for the lazily-initialized index; accessed via the
  // `index` getter/setter below.
  _index?: HierarchicalNSWT;

  docstore: SynchronousInMemoryDocstore;

  args: HNSWLibBase;

  _vectorstoreType(): string {
    return "hnswlib";
  }

  constructor(embeddings: EmbeddingsInterface, args: HNSWLibArgs) {
    super(embeddings, args);
    this._index = args.index;
    this.args = args;
    this.embeddings = embeddings;
    this.docstore = args?.docstore ?? new SynchronousInMemoryDocstore();
  }

  /**
   * Method to add documents to the vector store. It first converts the
   * documents to vectors using the embeddings, then adds the vectors to the
   * vector store.
   * @param documents The documents to be added to the vector store.
   * @returns A Promise that resolves when the documents have been added.
   */
  async addDocuments(documents: Document[]): Promise<void> {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents
    );
  }

  // Dynamically imports hnswlib-node and constructs a HierarchicalNSW
  // instance, validating that the required space/numDimensions are present.
  private static async getHierarchicalNSW(args: HNSWLibBase) {
    const { HierarchicalNSW } = await HNSWLib.imports();
    if (!args.space) {
      throw new Error("hnswlib-node requires a space argument");
    }
    if (args.numDimensions === undefined) {
      throw new Error("hnswlib-node requires a numDimensions argument");
    }
    return new HierarchicalNSW(args.space, args.numDimensions);
  }

  // Lazily creates the index on first use, inferring numDimensions from
  // the first vector when it was not configured explicitly.
  private async initIndex(vectors: number[][]) {
    if (!this._index) {
      if (this.args.numDimensions === undefined) {
        this.args.numDimensions = vectors[0].length;
      }
      this.index = await HNSWLib.getHierarchicalNSW(this.args);
    }
    if (!this.index.getCurrentCount()) {
      this.index.initIndex(vectors.length);
    }
  }

  // Accessor that fails loudly when the index has not been initialized yet.
  public get index(): HierarchicalNSWT {
    if (!this._index) {
      throw new Error(
        "Vector store not initialised yet. Try calling `addTexts` first."
      );
    }
    return this._index;
  }

  private set index(index: HierarchicalNSWT) {
    this._index = index;
  }

  /**
   * Method to add vectors to the vector store. It first initializes the
   * index if it hasn't been initialized yet, then adds the vectors to the
   * index and the documents to the document store.
   * @param vectors The vectors to be added to the vector store.
   * @param documents The documents corresponding to the vectors.
   * @returns A Promise that resolves when the vectors and documents have been added.
   */
  async addVectors(vectors: number[][], documents: Document[]) {
    if (vectors.length === 0) {
      return;
    }
    await this.initIndex(vectors);

    // TODO here we could optionally normalise the vectors to unit length
    // so that dot product is equivalent to cosine similarity, like this
    // https://github.com/nmslib/hnswlib/issues/384#issuecomment-1155737730
    // While we only support OpenAI embeddings this isn't necessary
    if (vectors.length !== documents.length) {
      throw new Error(`Vectors and metadatas must have the same length`);
    }
    if (vectors[0].length !== this.args.numDimensions) {
      throw new Error(
        `Vectors must have the same length as the number of dimensions (${this.args.numDimensions})`
      );
    }
    // Grow the index capacity if this batch would overflow it.
    const capacity = this.index.getMaxElements();
    const needed = this.index.getCurrentCount() + vectors.length;
    if (needed > capacity) {
      this.index.resizeIndex(needed);
    }
    // Index labels continue from the current element count, and the same
    // numeric label (as a string) keys the document in the docstore.
    const docstoreSize = this.index.getCurrentCount();
    const toSave: Record<string, Document> = {};
    for (let i = 0; i < vectors.length; i += 1) {
      this.index.addPoint(vectors[i], docstoreSize + i);
      toSave[docstoreSize + i] = documents[i];
    }
    this.docstore.add(toSave);
  }

  /**
   * Method to perform a similarity search in the vector store using a query
   * vector. It returns the k most similar documents along with their
   * similarity scores. An optional filter function can be provided to
   * filter the documents.
   * @param query The query vector.
   * @param k The number of most similar documents to return.
   * @param filter An optional filter function to filter the documents.
   * @returns A Promise that resolves to an array of tuples, where each tuple contains a document and its similarity score.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: this["FilterType"]
  ) {
    if (this.args.numDimensions && !this._index) {
      await this.initIndex([[]]);
    }
    if (query.length !== this.args.numDimensions) {
      throw new Error(
        `Query vector must have the same length as the number of dimensions (${this.args.numDimensions})`
      );
    }
    if (k > this.index.getCurrentCount()) {
      const total = this.index.getCurrentCount();
      console.warn(
        `k (${k}) is greater than the number of elements in the index (${total}), setting k to ${total}`
      );
      // eslint-disable-next-line no-param-reassign
      k = total;
    }
    // Adapt the user-supplied document filter to the label-based filter
    // hnswlib expects.
    const filterFunction = (label: number): boolean => {
      if (!filter) {
        return true;
      }
      const document = this.docstore.search(String(label));
      // eslint-disable-next-line no-instanceof/no-instanceof
      if (typeof document !== "string") {
        return filter(document);
      }
      return false;
    };
    const result = this.index.searchKnn(
      query,
      k,
      filter ? filterFunction : undefined
    );
    return result.neighbors.map(
      (docIndex, resultIndex) =>
        [
          this.docstore.search(String(docIndex)),
          result.distances[resultIndex],
        ] as [Document, number]
    );
  }

  /**
   * Method to delete the vector store from a directory. It deletes the
   * hnswlib.index file, the docstore.json file, and the args.json file from
   * the directory.
   * @param params An object with a directory property that specifies the directory from which to delete the vector store.
   * @returns A Promise that resolves when the vector store has been deleted.
   */
  async delete(params: { directory: string }) {
    const fs = await import("node:fs/promises");
    const path = await import("node:path");
    try {
      // Probe for the index file first so a missing store produces a
      // descriptive error instead of silently deleting nothing.
      await fs.access(path.join(params.directory, "hnswlib.index"));
    } catch (err) {
      throw new Error(
        `Directory ${params.directory} does not contain a hnswlib.index file.`
      );
    }

    await Promise.all([
      await fs.rm(path.join(params.directory, "hnswlib.index"), {
        force: true,
      }),
      await fs.rm(path.join(params.directory, "docstore.json"), {
        force: true,
      }),
      await fs.rm(path.join(params.directory, "args.json"), { force: true }),
    ]);
  }

  /**
   * Method to save the vector store to a directory. It saves the HNSW
   * index, the arguments, and the document store to the directory.
   * @param directory The directory to which to save the vector store.
   * @returns A Promise that resolves when the vector store has been saved.
   */
  async save(directory: string) {
    const fs = await import("node:fs/promises");
    const path = await import("node:path");
    await fs.mkdir(directory, { recursive: true });
    await Promise.all([
      this.index.writeIndex(path.join(directory, "hnswlib.index")),
      await fs.writeFile(
        path.join(directory, "args.json"),
        JSON.stringify(this.args)
      ),
      await fs.writeFile(
        path.join(directory, "docstore.json"),
        JSON.stringify(Array.from(this.docstore._docs.entries()))
      ),
    ]);
  }

  /**
   * Static method to load a vector store from a directory. It reads the
   * HNSW index, the arguments, and the document store from the directory,
   * then creates a new HNSWLib instance with these values.
   * @param directory The directory from which to load the vector store.
   * @param embeddings The embeddings to be used by the HNSWLib instance.
   * @returns A Promise that resolves to a new HNSWLib instance.
   */
  static async load(directory: string, embeddings: EmbeddingsInterface) {
    const fs = await import("node:fs/promises");
    const path = await import("node:path");
    const args = JSON.parse(
      await fs.readFile(path.join(directory, "args.json"), "utf8")
    );
    const index = await HNSWLib.getHierarchicalNSW(args);
    // Read the docstore and the index file concurrently.
    const [docstoreFiles] = await Promise.all([
      fs
        .readFile(path.join(directory, "docstore.json"), "utf8")
        .then(JSON.parse),
      index.readIndex(path.join(directory, "hnswlib.index")),
    ]);
    args.docstore = new SynchronousInMemoryDocstore(new Map(docstoreFiles));

    args.index = index;

    return new HNSWLib(embeddings, args);
  }

  /**
   * Static method to create a new HNSWLib instance from texts and metadata.
   * It creates a new Document instance for each text and metadata, then
   * calls the fromDocuments method to create the HNSWLib instance.
   * @param texts The texts to be used to create the documents.
   * @param metadatas The metadata to be used to create the documents.
   * @param embeddings The embeddings to be used by the HNSWLib instance.
   * @param dbConfig An optional configuration object for the document store.
   * @returns A Promise that resolves to a new HNSWLib instance.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig?: {
      docstore?: SynchronousInMemoryDocstore;
    }
  ): Promise<HNSWLib> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // A single metadata object is shared by all texts; an array is zipped.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return HNSWLib.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * Static method to create a new HNSWLib instance from documents. It
   * creates a new HNSWLib instance, adds the documents to it, then returns
   * the instance.
   * @param docs The documents to be added to the HNSWLib instance.
   * @param embeddings The embeddings to be used by the HNSWLib instance.
   * @param dbConfig An optional configuration object for the document store.
   * @returns A Promise that resolves to a new HNSWLib instance.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig?: {
      docstore?: SynchronousInMemoryDocstore;
    }
  ): Promise<HNSWLib> {
    const args: HNSWLibArgs = {
      docstore: dbConfig?.docstore,
      space: "cosine",
    };
    const instance = new this(embeddings, args);
    await instance.addDocuments(docs);
    return instance;
  }

  // Dynamically imports hnswlib-node so the optional dependency only needs
  // to be installed when this store is actually used.
  static async imports(): Promise<{
    HierarchicalNSW: typeof HierarchicalNSWT;
  }> {
    try {
      const {
        default: { HierarchicalNSW },
      } = await import("hnswlib-node");

      return { HierarchicalNSW };
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (err: any) {
      throw new Error(
        `Could not import hnswlib-node. Please install hnswlib-node as a dependency with, e.g. \`npm install -S hnswlib-node\`.\n\nError: ${err?.message}`
      );
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/supabase.ts
import type { SupabaseClient } from "@supabase/supabase-js"; import type { PostgrestFilterBuilder } from "@supabase/postgrest-js"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; /** * Interface for the parameters required for searching embeddings. */ interface SearchEmbeddingsParams { query_embedding: number[]; match_count: number; // int filter?: SupabaseMetadata | SupabaseFilterRPCCall; } // eslint-disable-next-line @typescript-eslint/ban-types, @typescript-eslint/no-explicit-any export type SupabaseMetadata = Record<string, any>; // eslint-disable-next-line @typescript-eslint/ban-types, @typescript-eslint/no-explicit-any export type SupabaseFilter = PostgrestFilterBuilder<any, any, any>; export type SupabaseFilterRPCCall = (rpcCall: SupabaseFilter) => SupabaseFilter; /** * Interface for the response returned when searching embeddings. */ interface SearchEmbeddingsResponse { id: number; content: string; metadata: object; embedding: number[]; similarity: number; } /** * Interface for the arguments required to initialize a Supabase library. */ export interface SupabaseLibArgs { client: SupabaseClient; tableName?: string; queryName?: string; filter?: SupabaseMetadata | SupabaseFilterRPCCall; upsertBatchSize?: number; } /** * Supabase vector store integration. * * Setup: * Install `@langchain/community` and `@supabase/supabase-js`. * * ```bash * npm install @langchain/community @supabase/supabase-js * ``` * * See https://js.langchain.com/docs/integrations/vectorstores/supabase for * instructions on how to set up your Supabase instance. 
* * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_community.vectorstores_supabase.SupabaseVectorStore.html#constructor) * * <details open> * <summary><strong>Instantiate</strong></summary> * * ```typescript * import { SupabaseVectorStore } from "@langchain/community/vectorstores/supabase"; * import { OpenAIEmbeddings } from "@langchain/openai"; * * import { createClient } from "@supabase/supabase-js"; * * const embeddings = new OpenAIEmbeddings({ * model: "text-embedding-3-small", * }); * * const supabaseClient = createClient( * process.env.SUPABASE_URL, * process.env.SUPABASE_PRIVATE_KEY * ); * * const vectorStore = new SupabaseVectorStore(embeddings, { * client: supabaseClient, * tableName: "documents", * queryName: "match_documents", * }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Add documents</strong></summary> * * ```typescript * import type { Document } from '@langchain/core/documents'; * * const document1 = { pageContent: "foo", metadata: { baz: "bar" } }; * const document2 = { pageContent: "thud", metadata: { bar: "baz" } }; * const document3 = { pageContent: "i will be deleted :(", metadata: {} }; * * const documents: Document[] = [document1, document2, document3]; * const ids = ["1", "2", "3"]; * await vectorStore.addDocuments(documents, { ids }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Delete documents</strong></summary> * * ```typescript * await vectorStore.delete({ ids: ["3"] }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Similarity search</strong></summary> * * ```typescript * const results = await vectorStore.similaritySearch("thud", 1); * for (const doc of results) { * console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`); * } * // Output: * thud [{"baz":"bar"}] * ``` * </details> * * <br /> * * * <details> * <summary><strong>Similarity search with filter</strong></summary> * * ```typescript * const resultsWithFilter = await 
vectorStore.similaritySearch("thud", 1, { baz: "bar" }); * * for (const doc of resultsWithFilter) { * console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`); * } * // Output: * foo [{"baz":"bar"}] * ``` * </details> * * <br /> * * * <details> * <summary><strong>Similarity search with score</strong></summary> * * ```typescript * const resultsWithScore = await vectorStore.similaritySearchWithScore("qux", 1); * for (const [doc, score] of resultsWithScore) { * console.log(`* [SIM=${score.toFixed(6)}] ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`); * } * // Output: * [SIM=0.000000] qux [{"bar":"baz","baz":"bar"}] * ``` * </details> * * <br /> * * <details> * <summary><strong>As a retriever</strong></summary> * * ```typescript * const retriever = vectorStore.asRetriever({ * searchType: "mmr", // Leave blank for standard similarity search * k: 1, * }); * const resultAsRetriever = await retriever.invoke("thud"); * console.log(resultAsRetriever); * * // Output: [Document({ metadata: { "baz":"bar" }, pageContent: "thud" })] * ``` * </details> * * <br /> */ export class SupabaseVectorStore extends VectorStore { declare FilterType: SupabaseMetadata | SupabaseFilterRPCCall; client: SupabaseClient; tableName: string; queryName: string; filter?: SupabaseMetadata | SupabaseFilterRPCCall; upsertBatchSize = 500; _vectorstoreType(): string { return "supabase"; } constructor(embeddings: EmbeddingsInterface, args: SupabaseLibArgs) { super(embeddings, args); this.client = args.client; this.tableName = args.tableName || "documents"; this.queryName = args.queryName || "match_documents"; this.filter = args.filter; this.upsertBatchSize = args.upsertBatchSize ?? this.upsertBatchSize; } /** * Adds documents to the vector store. * @param documents The documents to add. * @param options Optional parameters for adding the documents. * @returns A promise that resolves when the documents have been added. 
*/ async addDocuments( documents: Document[], options?: { ids?: string[] | number[] } ) { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } /** * Adds vectors to the vector store. * @param vectors The vectors to add. * @param documents The documents associated with the vectors. * @param options Optional parameters for adding the vectors. * @returns A promise that resolves with the IDs of the added vectors when the vectors have been added. */ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] | number[] } ) { const rows = vectors.map((embedding, idx) => ({ content: documents[idx].pageContent, embedding, metadata: documents[idx].metadata, })); // upsert returns 500/502/504 (yes really any of them) if given too many rows/characters // ~2000 trips it, but my data is probably smaller than average pageContent and metadata let returnedIds: string[] = []; for (let i = 0; i < rows.length; i += this.upsertBatchSize) { const chunk = rows.slice(i, i + this.upsertBatchSize).map((row, j) => { if (options?.ids) { return { id: options.ids[i + j], ...row }; } return row; }); const res = await this.client.from(this.tableName).upsert(chunk).select(); if (res.error) { throw new Error( `Error inserting: ${res.error.message} ${res.status} ${res.statusText}` ); } if (res.data) { returnedIds = returnedIds.concat(res.data.map((row) => row.id)); } } return returnedIds; } /** * Deletes vectors from the vector store. * @param params The parameters for deleting vectors. * @returns A promise that resolves when the vectors have been deleted. 
*/ async delete(params: { ids: string[] | number[] }): Promise<void> { const { ids } = params; for (const id of ids) { await this.client.from(this.tableName).delete().eq("id", id); } } protected async _searchSupabase( query: number[], k: number, filter?: this["FilterType"] ): Promise<SearchEmbeddingsResponse[]> { if (filter && this.filter) { throw new Error("cannot provide both `filter` and `this.filter`"); } const _filter = filter ?? this.filter ?? {}; const matchDocumentsParams: Partial<SearchEmbeddingsParams> = { query_embedding: query, }; let filterFunction: SupabaseFilterRPCCall; if (typeof _filter === "function") { filterFunction = (rpcCall) => _filter(rpcCall).limit(k); } else if (typeof _filter === "object") { matchDocumentsParams.filter = _filter; matchDocumentsParams.match_count = k; filterFunction = (rpcCall) => rpcCall; } else { throw new Error("invalid filter type"); } const rpcCall = this.client.rpc(this.queryName, matchDocumentsParams); const { data: searches, error } = await filterFunction(rpcCall); if (error) { throw new Error( `Error searching for documents: ${error.code} ${error.message} ${error.details}` ); } return searches; } /** * Performs a similarity search on the vector store. * @param query The query vector. * @param k The number of results to return. * @param filter Optional filter to apply to the search. * @returns A promise that resolves with the search results when the search is complete. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { const searches = await this._searchSupabase(query, k, filter); const result: [Document, number][] = searches.map((resp) => [ new Document({ metadata: resp.metadata, pageContent: resp.content, }), resp.similarity, ]); return result; } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. 
* * @param {string} query - Text to look up documents similar to. * @param {number} options.k - Number of documents to return. * @param {number} options.fetchK=20- Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda=0.5 - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. * @param {SupabaseLibArgs} options.filter - Optional filter to apply to the search. * * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance. */ async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]> ): Promise<Document[]> { const queryEmbedding = await this.embeddings.embedQuery(query); const searches = await this._searchSupabase( queryEmbedding, options.fetchK ?? 20, options.filter ); const embeddingList = searches.map((searchResp) => searchResp.embedding); const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, options.lambda, options.k ); return mmrIndexes.map( (idx) => new Document({ metadata: searches[idx].metadata, pageContent: searches[idx].content, }) ); } /** * Creates a new SupabaseVectorStore instance from an array of texts. * @param texts The texts to create documents from. * @param metadatas The metadata for the documents. * @param embeddings The embeddings to use. * @param dbConfig The configuration for the Supabase database. * @returns A promise that resolves with a new SupabaseVectorStore instance when the instance has been created. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: SupabaseLibArgs ): Promise<SupabaseVectorStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? 
metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return SupabaseVectorStore.fromDocuments(docs, embeddings, dbConfig); } /** * Creates a new SupabaseVectorStore instance from an array of documents. * @param docs The documents to create the instance from. * @param embeddings The embeddings to use. * @param dbConfig The configuration for the Supabase database. * @returns A promise that resolves with a new SupabaseVectorStore instance when the instance has been created. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: SupabaseLibArgs ): Promise<SupabaseVectorStore> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } /** * Creates a new SupabaseVectorStore instance from an existing index. * @param embeddings The embeddings to use. * @param dbConfig The configuration for the Supabase database. * @returns A promise that resolves with a new SupabaseVectorStore instance when the instance has been created. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig: SupabaseLibArgs ): Promise<SupabaseVectorStore> { const instance = new this(embeddings, dbConfig); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/turbopuffer.ts
import { v4 as uuidv4 } from "uuid"; import { type DocumentInterface, Document } from "@langchain/core/documents"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { AsyncCaller, AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import { chunkArray } from "@langchain/core/utils/chunk_array"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { VectorStore } from "@langchain/core/vectorstores"; export type TurbopufferDistanceMetric = "cosine_distance" | "euclidean_squared"; export type TurbopufferFilterType = Record< string, Array<[string, string[] | string]> >; export interface TurbopufferParams extends AsyncCallerParams { apiKey?: string; namespace?: string; distanceMetric?: TurbopufferDistanceMetric; apiUrl?: string; batchSize?: number; } export interface TurbopufferQueryResult { dist: number; id: number; vector?: number[]; attributes: Record<string, string>; } export class TurbopufferVectorStore extends VectorStore { declare FilterType: TurbopufferFilterType; get lc_secrets(): { [key: string]: string } { return { apiKey: "TURBOPUFFER_API_KEY", }; } get lc_aliases(): { [key: string]: string } { return { apiKey: "TURBOPUFFER_API_KEY", }; } // Handle minification for tracing static lc_name(): string { return "TurbopufferVectorStore"; } protected distanceMetric: TurbopufferDistanceMetric = "cosine_distance"; protected apiKey: string; protected namespace = "default"; protected apiUrl = "https://api.turbopuffer.com/v1"; caller: AsyncCaller; batchSize = 3000; public _vectorstoreType(): string { return "turbopuffer"; } constructor(embeddings: EmbeddingsInterface, args: TurbopufferParams) { super(embeddings, args); const { apiKey: argsApiKey, namespace, distanceMetric, apiUrl, batchSize, ...asyncCallerArgs } = args; const apiKey = argsApiKey ?? 
getEnvironmentVariable("TURBOPUFFER_API_KEY"); if (!apiKey) { throw new Error( `Turbopuffer API key not found.\nPlease pass it in as "apiKey" or set it as an environment variable called "TURBOPUFFER_API_KEY"` ); } this.apiKey = apiKey; this.namespace = namespace ?? this.namespace; this.distanceMetric = distanceMetric ?? this.distanceMetric; this.apiUrl = apiUrl ?? this.apiUrl; this.batchSize = batchSize ?? this.batchSize; this.caller = new AsyncCaller({ maxConcurrency: 6, maxRetries: 0, ...asyncCallerArgs, }); } defaultHeaders() { return { Authorization: `Bearer ${this.apiKey}`, "Content-Type": "application/json", }; } async callWithRetry( fetchUrl: string, stringifiedBody: string | undefined, method = "POST" ) { const json = await this.caller.call(async () => { const headers: Record<string, string> = { Authorization: `Bearer ${this.apiKey}`, }; if (stringifiedBody !== undefined) { headers["Content-Type"] = "application/json"; } const response = await fetch(fetchUrl, { method, headers, body: stringifiedBody, }); if (response.status !== 200) { const error = new Error( `Failed to call turbopuffer. 
Response status ${ response.status }\nFull response: ${await response.text()}` ); // eslint-disable-next-line @typescript-eslint/no-explicit-any (error as any).response = response; throw error; } return response.json(); }); return json; } async addVectors( vectors: number[][], documents: DocumentInterface[], options?: { ids?: string[] } ): Promise<string[]> { if (options?.ids && options.ids.length !== vectors.length) { throw new Error( "Number of ids provided does not match number of vectors" ); } if (documents.length !== vectors.length) { throw new Error( "Number of documents provided does not match number of vectors" ); } if (documents.length === 0) { throw new Error("No documents provided"); } const batchedVectors: number[][][] = chunkArray(vectors, this.batchSize); const batchedDocuments: DocumentInterface[][] = chunkArray( documents, this.batchSize ); const batchedIds = options?.ids ? chunkArray(options.ids, this.batchSize) : batchedDocuments.map((docs) => docs.map((_) => uuidv4())); const batchRequests = batchedVectors.map(async (batchVectors, index) => { const batchDocs = batchedDocuments[index]; const batchIds = batchedIds[index]; if (batchIds.length !== batchVectors.length) { throw new Error( "Number of ids provided does not match number of vectors" ); } const attributes: Record<string, (string | null)[]> = { __lc_page_content: batchDocs.map((doc) => doc.pageContent), }; const usedMetadataFields = new Set( batchDocs.map((doc) => Object.keys(doc.metadata)).flat() ); for (const key of usedMetadataFields) { attributes[key] = batchDocs.map((doc) => { if (doc.metadata[key] !== undefined) { if (typeof doc.metadata[key] === "string") { return doc.metadata[key]; } else { console.warn( [ `[WARNING]: Dropping non-string metadata key "${key}" with value "${JSON.stringify( doc.metadata[key] )}".`, `turbopuffer currently supports only string metadata values.`, ].join("\n") ); return null; } } else { return null; } }); } const data = { ids: batchIds, vectors: 
batchVectors, attributes, }; return this.callWithRetry( `${this.apiUrl}/vectors/${this.namespace}`, JSON.stringify(data) ); }); // Execute all batch requests in parallel await Promise.all(batchRequests); return batchedIds.flat(); } async delete(params: { deleteIndex?: boolean }): Promise<void> { if (params.deleteIndex) { await this.callWithRetry( `${this.apiUrl}/vectors/${this.namespace}`, undefined, "DELETE" ); } else { throw new Error(`You must provide a "deleteIndex" flag.`); } } async addDocuments( documents: DocumentInterface[], options?: { ids?: string[] } ): Promise<string[]> { const vectors = await this.embeddings.embedDocuments( documents.map((doc) => doc.pageContent) ); return this.addVectors(vectors, documents, options); } protected async queryVectors( query: number[], k: number, includeVector?: boolean, // See https://Turbopuffer.com/docs/reference/query for more info filter?: this["FilterType"] ): Promise<TurbopufferQueryResult[]> { const data = { vector: query, top_k: k, distance_metric: this.distanceMetric, filters: filter, include_attributes: true, include_vectors: includeVector, }; return this.callWithRetry( `${this.apiUrl}/vectors/${this.namespace}/query`, JSON.stringify(data) ); } async similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[DocumentInterface, number][]> { const search = await this.queryVectors(query, k, false, filter); const result: [DocumentInterface, number][] = search.map((res) => { const { __lc_page_content, ...metadata } = res.attributes; return [ new Document({ pageContent: __lc_page_content, metadata, }), res.dist, ]; }); return result; } static async fromDocuments( docs: DocumentInterface[], embeddings: EmbeddingsInterface, dbConfig: TurbopufferParams ): Promise<TurbopufferVectorStore> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/googlevertexai.ts
import * as uuid from "uuid"; import flatten from "flat"; import { GoogleAuth, GoogleAuthOptions } from "google-auth-library"; import { VectorStore } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document, DocumentInput } from "@langchain/core/documents"; import { AsyncCaller, AsyncCallerCallOptions, AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import { GoogleVertexAIConnection } from "../utils/googlevertexai-connection.js"; import { Docstore } from "../stores/doc/base.js"; import { GoogleVertexAIConnectionParams, GoogleResponse, GoogleAbstractedClientOpsMethod, } from "../types/googlevertexai-types.js"; /** * Allows us to create IdDocument classes that contain the ID. */ export interface IdDocumentInput extends DocumentInput { id?: string; } /** * A Document that optionally includes the ID of the document. */ export class IdDocument extends Document implements IdDocumentInput { id?: string; constructor(fields: IdDocumentInput) { super(fields); this.id = fields.id; } } interface IndexEndpointConnectionParams extends GoogleVertexAIConnectionParams<GoogleAuthOptions> { indexEndpoint: string; } interface DeployedIndex { id: string; index: string; // There are other attributes, but we don't care about them right now } interface IndexEndpointResponse extends GoogleResponse { data: { deployedIndexes: DeployedIndex[]; publicEndpointDomainName: string; // There are other attributes, but we don't care about them right now }; } class IndexEndpointConnection extends GoogleVertexAIConnection< AsyncCallerCallOptions, IndexEndpointResponse, GoogleAuthOptions > { indexEndpoint: string; constructor(fields: IndexEndpointConnectionParams, caller: AsyncCaller) { super(fields, caller, new GoogleAuth(fields.authOptions)); this.indexEndpoint = fields.indexEndpoint; } async buildUrl(): Promise<string> { const projectId = await this.client.getProjectId(); const url = 
`https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/indexEndpoints/${this.indexEndpoint}`; return url; } buildMethod(): GoogleAbstractedClientOpsMethod { return "GET"; } async request( options: AsyncCallerCallOptions ): Promise<IndexEndpointResponse> { return this._request(undefined, options); } } /** * Used to represent parameters that are necessary to delete documents * from the matching engine. These must be a list of string IDs */ export interface MatchingEngineDeleteParams { ids: string[]; } interface RemoveDatapointParams extends GoogleVertexAIConnectionParams<GoogleAuthOptions> { index: string; } interface RemoveDatapointRequest { datapointIds: string[]; } interface RemoveDatapointResponse extends GoogleResponse { // Should be empty } class RemoveDatapointConnection extends GoogleVertexAIConnection< AsyncCallerCallOptions, RemoveDatapointResponse, GoogleAuthOptions > { index: string; constructor(fields: RemoveDatapointParams, caller: AsyncCaller) { super(fields, caller, new GoogleAuth(fields.authOptions)); this.index = fields.index; } async buildUrl(): Promise<string> { const projectId = await this.client.getProjectId(); const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/indexes/${this.index}:removeDatapoints`; return url; } buildMethod(): GoogleAbstractedClientOpsMethod { return "POST"; } async request( datapointIds: string[], options: AsyncCallerCallOptions ): Promise<RemoveDatapointResponse> { const data: RemoveDatapointRequest = { datapointIds, }; return this._request(data, options); } } interface UpsertDatapointParams extends GoogleVertexAIConnectionParams<GoogleAuthOptions> { index: string; } export interface Restriction { namespace: string; allowList?: string[]; denyList?: string[]; } interface CrowdingTag { crowdingAttribute: string; } interface IndexDatapoint { datapointId: string; featureVector: number[]; restricts?: Restriction[]; crowdingTag?: 
CrowdingTag; } interface UpsertDatapointRequest { datapoints: IndexDatapoint[]; } interface UpsertDatapointResponse extends GoogleResponse { // Should be empty } class UpsertDatapointConnection extends GoogleVertexAIConnection< AsyncCallerCallOptions, UpsertDatapointResponse, GoogleAuthOptions > { index: string; constructor(fields: UpsertDatapointParams, caller: AsyncCaller) { super(fields, caller, new GoogleAuth(fields.authOptions)); this.index = fields.index; } async buildUrl(): Promise<string> { const projectId = await this.client.getProjectId(); const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/indexes/${this.index}:upsertDatapoints`; return url; } buildMethod(): GoogleAbstractedClientOpsMethod { return "POST"; } async request( datapoints: IndexDatapoint[], options: AsyncCallerCallOptions ): Promise<UpsertDatapointResponse> { const data: UpsertDatapointRequest = { datapoints, }; return this._request(data, options); } } interface FindNeighborsConnectionParams extends GoogleVertexAIConnectionParams<GoogleAuthOptions> { indexEndpoint: string; deployedIndexId: string; } interface FindNeighborsRequestQuery { datapoint: { datapointId: string; featureVector: number[]; restricts?: Restriction[]; }; neighborCount: number; } interface FindNeighborsRequest { deployedIndexId: string; queries: FindNeighborsRequestQuery[]; } interface FindNeighborsResponseNeighbor { datapoint: { datapointId: string; crowdingTag: { crowdingTagAttribute: string; }; }; distance: number; } interface FindNeighborsResponseNearestNeighbor { id: string; neighbors: FindNeighborsResponseNeighbor[]; } interface FindNeighborsResponse extends GoogleResponse { data: { nearestNeighbors: FindNeighborsResponseNearestNeighbor[]; }; } class FindNeighborsConnection extends GoogleVertexAIConnection< AsyncCallerCallOptions, FindNeighborsResponse, GoogleAuthOptions > implements FindNeighborsConnectionParams { indexEndpoint: string; deployedIndexId: string; 
constructor(params: FindNeighborsConnectionParams, caller: AsyncCaller) { super(params, caller, new GoogleAuth(params.authOptions)); this.indexEndpoint = params.indexEndpoint; this.deployedIndexId = params.deployedIndexId; } async buildUrl(): Promise<string> { const projectId = await this.client.getProjectId(); const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/indexEndpoints/${this.indexEndpoint}:findNeighbors`; return url; } buildMethod(): GoogleAbstractedClientOpsMethod { return "POST"; } async request( request: FindNeighborsRequest, options: AsyncCallerCallOptions ): Promise<FindNeighborsResponse> { return this._request(request, options); } } /** * Information about the Matching Engine public API endpoint. * Primarily exported to allow for testing. */ export interface PublicAPIEndpointInfo { apiEndpoint?: string; deployedIndexId?: string; } /** * Parameters necessary to configure the Matching Engine. */ export interface MatchingEngineArgs extends GoogleVertexAIConnectionParams<GoogleAuthOptions>, IndexEndpointConnectionParams, UpsertDatapointParams { docstore: Docstore; callerParams?: AsyncCallerParams; callerOptions?: AsyncCallerCallOptions; apiEndpoint?: string; deployedIndexId?: string; } /** * A class that represents a connection to a Google Vertex AI Matching Engine * instance. */ export class MatchingEngine extends VectorStore implements MatchingEngineArgs { declare FilterType: Restriction[]; /** * Docstore that retains the document, stored by ID */ docstore: Docstore; /** * The host to connect to for queries and upserts. 
*/ apiEndpoint: string; apiVersion = "v1"; endpoint = "us-central1-aiplatform.googleapis.com"; location = "us-central1"; /** * The id for the index endpoint */ indexEndpoint: string; /** * The id for the index */ index: string; /** * Explicitly set Google Auth credentials if you cannot get them from google auth application-default login * This is useful for serverless or autoscaling environments like Fargate */ authOptions: GoogleAuthOptions; /** * The id for the "deployed index", which is an identifier in the * index endpoint that references the index (but is not the index id) */ deployedIndexId: string; callerParams: AsyncCallerParams; callerOptions: AsyncCallerCallOptions; caller: AsyncCaller; indexEndpointClient: IndexEndpointConnection; removeDatapointClient: RemoveDatapointConnection; upsertDatapointClient: UpsertDatapointConnection; constructor(embeddings: EmbeddingsInterface, args: MatchingEngineArgs) { super(embeddings, args); this.embeddings = embeddings; this.docstore = args.docstore; this.apiEndpoint = args.apiEndpoint ?? this.apiEndpoint; this.deployedIndexId = args.deployedIndexId ?? this.deployedIndexId; this.apiVersion = args.apiVersion ?? this.apiVersion; this.endpoint = args.endpoint ?? this.endpoint; this.location = args.location ?? this.location; this.indexEndpoint = args.indexEndpoint ?? this.indexEndpoint; this.index = args.index ?? this.index; this.authOptions = args.authOptions ?? this.authOptions; this.callerParams = args.callerParams ?? this.callerParams; this.callerOptions = args.callerOptions ?? 
this.callerOptions; this.caller = new AsyncCaller(this.callerParams || {}); const indexClientParams: IndexEndpointConnectionParams = { endpoint: this.endpoint, location: this.location, apiVersion: this.apiVersion, indexEndpoint: this.indexEndpoint, authOptions: this.authOptions, }; this.indexEndpointClient = new IndexEndpointConnection( indexClientParams, this.caller ); const removeClientParams: RemoveDatapointParams = { endpoint: this.endpoint, location: this.location, apiVersion: this.apiVersion, index: this.index, authOptions: this.authOptions, }; this.removeDatapointClient = new RemoveDatapointConnection( removeClientParams, this.caller ); const upsertClientParams: UpsertDatapointParams = { endpoint: this.endpoint, location: this.location, apiVersion: this.apiVersion, index: this.index, authOptions: this.authOptions, }; this.upsertDatapointClient = new UpsertDatapointConnection( upsertClientParams, this.caller ); } _vectorstoreType(): string { return "googlevertexai"; } async addDocuments(documents: Document[]): Promise<void> { const texts: string[] = documents.map((doc) => doc.pageContent); const vectors: number[][] = await this.embeddings.embedDocuments(texts); return this.addVectors(vectors, documents); } async addVectors(vectors: number[][], documents: Document[]): Promise<void> { if (vectors.length !== documents.length) { throw new Error(`Vectors and metadata must have the same length`); } const datapoints: IndexDatapoint[] = vectors.map((vector, idx) => this.buildDatapoint(vector, documents[idx]) ); const options = {}; const response = await this.upsertDatapointClient.request( datapoints, options ); if (Object.keys(response?.data ?? 
{}).length === 0) { // Nothing in the response in the body means we saved it ok const idDoc = documents as IdDocument[]; const docsToStore: Record<string, Document> = {}; idDoc.forEach((doc) => { if (doc.id) { docsToStore[doc.id] = doc; } }); await this.docstore.add(docsToStore); } } // TODO: Refactor this into a utility type and use with pinecone as well? // eslint-disable-next-line @typescript-eslint/no-explicit-any cleanMetadata(documentMetadata: Record<string, any>): { [key: string]: string | number | boolean | string[] | null; } { type metadataType = { [key: string]: string | number | boolean | string[] | null; }; function getStringArrays( prefix: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any m: Record<string, any> ): Record<string, string[]> { let ret: Record<string, string[]> = {}; Object.keys(m).forEach((key) => { const newPrefix = prefix.length > 0 ? `${prefix}.${key}` : key; const val = m[key]; if (!val) { // Ignore it } else if (Array.isArray(val)) { // Make sure everything in the array is a string ret[newPrefix] = val.map((v) => `${v}`); } else if (typeof val === "object") { const subArrays = getStringArrays(newPrefix, val); ret = { ...ret, ...subArrays }; } }); return ret; } const stringArrays: Record<string, string[]> = getStringArrays( "", documentMetadata ); const flatMetadata: metadataType = flatten(documentMetadata); Object.keys(flatMetadata).forEach((key) => { Object.keys(stringArrays).forEach((arrayKey) => { const matchKey = `${arrayKey}.`; if (key.startsWith(matchKey)) { delete flatMetadata[key]; } }); }); const metadata: metadataType = { ...flatMetadata, ...stringArrays, }; return metadata; } /** * Given the metadata from a document, convert it to an array of Restriction * objects that may be passed to the Matching Engine and stored. * The default implementation flattens any metadata and includes it as * an "allowList". 
Subclasses can choose to convert some of these to * "denyList" items or to add additional restrictions (for example, to format * dates into a different structure or to add additional restrictions * based on the date). * @param documentMetadata - The metadata from a document * @returns a Restriction[] (or an array of a subclass, from the FilterType) */ metadataToRestrictions( // eslint-disable-next-line @typescript-eslint/no-explicit-any documentMetadata: Record<string, any> ): this["FilterType"] { const metadata = this.cleanMetadata(documentMetadata); const restrictions: this["FilterType"] = []; for (const key of Object.keys(metadata)) { // Make sure the value is an array (or that we'll ignore it) let valArray; const val = metadata[key]; if (val === null) { valArray = null; } else if (Array.isArray(val) && val.length > 0) { valArray = val; } else { valArray = [`${val}`]; } // Add to the restrictions if we do have a valid value if (valArray) { // Determine if this key is for the allowList or denyList // TODO: get which ones should be on the deny list const listType = "allowList"; // Create the restriction const restriction: Restriction = { namespace: key, [listType]: valArray, }; // Add it to the restriction list restrictions.push(restriction); } } return restrictions; } /** * Create an index datapoint for the vector and document id. * If an id does not exist, create it and set the document to its value. 
* @param vector * @param document */ buildDatapoint(vector: number[], document: IdDocument): IndexDatapoint { if (!document.id) { // eslint-disable-next-line no-param-reassign document.id = uuid.v4(); } const ret: IndexDatapoint = { datapointId: document.id, featureVector: vector, }; const restrictions = this.metadataToRestrictions(document.metadata); if (restrictions?.length > 0) { ret.restricts = restrictions; } return ret; } async delete(params: MatchingEngineDeleteParams): Promise<void> { const options = {}; await this.removeDatapointClient.request(params.ids, options); } async similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[Document, number][]> { // Format the query into the request const deployedIndexId = await this.getDeployedIndexId(); const requestQuery: FindNeighborsRequestQuery = { neighborCount: k, datapoint: { datapointId: `0`, featureVector: query, }, }; if (filter) { requestQuery.datapoint.restricts = filter; } const request: FindNeighborsRequest = { deployedIndexId, queries: [requestQuery], }; // Build the connection. // Has to be done here, since we defer getting the endpoint until // we need it. const apiEndpoint = await this.getPublicAPIEndpoint(); const findNeighborsParams: FindNeighborsConnectionParams = { endpoint: apiEndpoint, indexEndpoint: this.indexEndpoint, apiVersion: this.apiVersion, location: this.location, deployedIndexId, authOptions: this.authOptions, }; const connection = new FindNeighborsConnection( findNeighborsParams, this.caller ); // Make the call const options = {}; const response = await connection.request(request, options); // Get the document for each datapoint id and return them const nearestNeighbors = response?.data?.nearestNeighbors ?? []; const nearestNeighbor = nearestNeighbors[0]; const neighbors = nearestNeighbor?.neighbors ?? 
[]; const ret: [Document, number][] = await Promise.all( neighbors.map(async (neighbor) => { const id = neighbor?.datapoint?.datapointId; const distance = neighbor?.distance; let doc: IdDocument; try { doc = await this.docstore.search(id); } catch (xx) { // Documents that are in the index are returned, even if they // are not in the document store, to allow for some way to get // the id so they can be deleted. console.error(xx); console.warn( [ `Document with id "${id}" is missing from the backing docstore.`, `This can occur if you clear the docstore without deleting from the corresponding Matching Engine index.`, `To resolve this, you should call .delete() with this id as part of the "ids" parameter.`, ].join("\n") ); doc = new Document({ pageContent: `Missing document ${id}` }); } doc.id ??= id; return [doc, distance]; }) ); return ret; } /** * For this index endpoint, figure out what API Endpoint URL and deployed * index ID should be used to do upserts and queries. * Also sets the `apiEndpoint` and `deployedIndexId` property for future use. * @return The URL */ async determinePublicAPIEndpoint(): Promise<PublicAPIEndpointInfo> { const response: IndexEndpointResponse = await this.indexEndpointClient.request(this.callerOptions); // Get the endpoint const publicEndpointDomainName = response?.data?.publicEndpointDomainName; this.apiEndpoint = publicEndpointDomainName; // Determine which of the deployed indexes match the index id // and get the deployed index id. The list of deployed index ids // contain the "index name" or path, but not the index id by itself, // so we need to extract it from the name const indexPathPattern = /projects\/.+\/locations\/.+\/indexes\/(.+)$/; const deployedIndexes = response?.data?.deployedIndexes ?? 
[]; const deployedIndex = deployedIndexes.find((index) => { const deployedIndexPath = index.index; const match = deployedIndexPath.match(indexPathPattern); if (match) { const [, potentialIndexId] = match; if (potentialIndexId === this.index) { return true; } } return false; }); if (deployedIndex) { this.deployedIndexId = deployedIndex.id; } return { apiEndpoint: this.apiEndpoint, deployedIndexId: this.deployedIndexId, }; } async getPublicAPIEndpoint(): Promise<string> { return ( this.apiEndpoint ?? (await this.determinePublicAPIEndpoint()).apiEndpoint ); } async getDeployedIndexId(): Promise<string> { return ( this.deployedIndexId ?? (await this.determinePublicAPIEndpoint()).deployedIndexId ); } static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: MatchingEngineArgs ): Promise<VectorStore> { const docs: Document[] = texts.map( (text, index): Document => ({ pageContent: text, metadata: Array.isArray(metadatas) ? metadatas[index] : metadatas, }) ); return this.fromDocuments(docs, embeddings, dbConfig); } static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: MatchingEngineArgs ): Promise<VectorStore> { const ret = new MatchingEngine(embeddings, dbConfig); await ret.addDocuments(docs); return ret; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/elasticsearch.ts
import * as uuid from "uuid"; import { Client, estypes } from "@elastic/elasticsearch"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; /** * Type representing the k-nearest neighbors (k-NN) engine used in * Elasticsearch. */ type ElasticKnnEngine = "hnsw"; /** * Type representing the similarity measure used in Elasticsearch. */ type ElasticSimilarity = "l2_norm" | "dot_product" | "cosine"; /** * Interface defining the options for vector search in Elasticsearch. */ interface VectorSearchOptions { readonly engine?: ElasticKnnEngine; readonly similarity?: ElasticSimilarity; readonly m?: number; readonly efConstruction?: number; readonly candidates?: number; } /** * Interface defining the arguments required to create an Elasticsearch * client. */ export interface ElasticClientArgs { readonly client: Client; readonly indexName?: string; readonly vectorSearchOptions?: VectorSearchOptions; } /** * Type representing a filter object in Elasticsearch. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any type ElasticFilter = object | { field: string; operator: string; value: any }[]; type ElasticMetadataTerms = { // eslint-disable-next-line @typescript-eslint/no-explicit-any must: { [operator: string]: { [field: string]: any } }[]; // eslint-disable-next-line @typescript-eslint/no-explicit-any must_not: { [operator: string]: { [field: string]: any } }[]; // eslint-disable-next-line @typescript-eslint/no-explicit-any should?: { [operator: string]: { [field: string]: any } }[]; minimum_should_match?: number; }; /** * Class for interacting with an Elasticsearch database. It extends the * VectorStore base class and provides methods for adding documents and * vectors to the Elasticsearch database, performing similarity searches, * deleting documents, and more. 
*/ export class ElasticVectorSearch extends VectorStore { declare FilterType: ElasticFilter; private readonly client: Client; private readonly indexName: string; private readonly engine: ElasticKnnEngine; private readonly similarity: ElasticSimilarity; private readonly efConstruction: number; private readonly m: number; private readonly candidates: number; _vectorstoreType(): string { return "elasticsearch"; } constructor(embeddings: EmbeddingsInterface, args: ElasticClientArgs) { super(embeddings, args); this.engine = args.vectorSearchOptions?.engine ?? "hnsw"; this.similarity = args.vectorSearchOptions?.similarity ?? "l2_norm"; this.m = args.vectorSearchOptions?.m ?? 16; this.efConstruction = args.vectorSearchOptions?.efConstruction ?? 100; this.candidates = args.vectorSearchOptions?.candidates ?? 200; this.client = args.client.child({ headers: { "user-agent": "langchain-js-vs/0.0.1" }, }); this.indexName = args.indexName ?? "documents"; } /** * Method to add documents to the Elasticsearch database. It first * converts the documents to vectors using the embeddings, then adds the * vectors to the database. * @param documents The documents to add to the database. * @param options Optional parameter that can contain the IDs for the documents. * @returns A promise that resolves with the IDs of the added documents. */ async addDocuments(documents: Document[], options?: { ids?: string[] }) { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } /** * Method to add vectors to the Elasticsearch database. It ensures the * index exists, then adds the vectors and their corresponding documents * to the database. * @param vectors The vectors to add to the database. * @param documents The documents corresponding to the vectors. * @param options Optional parameter that can contain the IDs for the documents. * @returns A promise that resolves with the IDs of the added documents. 
*/ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } ) { await this.ensureIndexExists( vectors[0].length, this.engine, this.similarity, this.efConstruction, this.m ); const documentIds = options?.ids ?? Array.from({ length: vectors.length }, () => uuid.v4()); const operations = vectors.flatMap((embedding, idx) => [ { index: { _id: documentIds[idx], _index: this.indexName, }, }, { embedding, metadata: documents[idx].metadata, text: documents[idx].pageContent, }, ]); const results = await this.client.bulk({ refresh: true, operations }); if (results.errors) { const reasons = results.items.map( (result) => result.index?.error?.reason ); throw new Error(`Failed to insert documents:\n${reasons.join("\n")}`); } return documentIds; } /** * Method to perform a similarity search in the Elasticsearch database * using a vector. It returns the k most similar documents along with * their similarity scores. * @param query The query vector. * @param k The number of most similar documents to return. * @param filter Optional filter to apply to the search. * @returns A promise that resolves with an array of tuples, where each tuple contains a Document and its similarity score. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: ElasticFilter ): Promise<[Document, number][]> { const result = await this.client.search({ index: this.indexName, size: k, knn: { field: "embedding", query_vector: query, filter: { bool: this.buildMetadataTerms(filter) }, k, num_candidates: this.candidates, }, }); // eslint-disable-next-line @typescript-eslint/no-explicit-any return result.hits.hits.map((hit: any) => [ new Document({ pageContent: hit._source.text, metadata: hit._source.metadata, }), hit._score, ]); } /** * Method to delete documents from the Elasticsearch database. * @param params Object containing the IDs of the documents to delete. * @returns A promise that resolves when the deletion is complete. 
*/ async delete(params: { ids: string[] }): Promise<void> { const operations = params.ids.map((id) => ({ delete: { _id: id, _index: this.indexName, }, })); if (operations.length > 0) await this.client.bulk({ refresh: true, operations }); } /** * Static method to create an ElasticVectorSearch instance from texts. It * creates Document instances from the texts and their corresponding * metadata, then calls the fromDocuments method to create the * ElasticVectorSearch instance. * @param texts The texts to create the ElasticVectorSearch instance from. * @param metadatas The metadata corresponding to the texts. * @param embeddings The embeddings to use for the documents. * @param args The arguments to create the Elasticsearch client. * @returns A promise that resolves with the created ElasticVectorSearch instance. */ static fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, args: ElasticClientArgs ): Promise<ElasticVectorSearch> { const documents = texts.map((text, idx) => { const metadata = Array.isArray(metadatas) ? metadatas[idx] : metadatas; return new Document({ pageContent: text, metadata }); }); return ElasticVectorSearch.fromDocuments(documents, embeddings, args); } /** * Static method to create an ElasticVectorSearch instance from Document * instances. It adds the documents to the Elasticsearch database, then * returns the ElasticVectorSearch instance. * @param docs The Document instances to create the ElasticVectorSearch instance from. * @param embeddings The embeddings to use for the documents. * @param dbConfig The configuration for the Elasticsearch database. * @returns A promise that resolves with the created ElasticVectorSearch instance. 
*/ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: ElasticClientArgs ): Promise<ElasticVectorSearch> { const store = new ElasticVectorSearch(embeddings, dbConfig); await store.addDocuments(docs).then(() => store); return store; } /** * Static method to create an ElasticVectorSearch instance from an * existing index in the Elasticsearch database. It checks if the index * exists, then returns the ElasticVectorSearch instance if it does. * @param embeddings The embeddings to use for the documents. * @param dbConfig The configuration for the Elasticsearch database. * @returns A promise that resolves with the created ElasticVectorSearch instance if the index exists, otherwise it throws an error. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig: ElasticClientArgs ): Promise<ElasticVectorSearch> { const store = new ElasticVectorSearch(embeddings, dbConfig); const exists = await store.doesIndexExist(); if (exists) { return store; } throw new Error(`The index ${store.indexName} does not exist.`); } private async ensureIndexExists( dimension: number, engine = "hnsw", similarity = "l2_norm", efConstruction = 100, m = 16 ): Promise<void> { const request: estypes.IndicesCreateRequest = { index: this.indexName, mappings: { dynamic_templates: [ { // map all metadata properties to be keyword except loc metadata_except_loc: { match_mapping_type: "*", match: "metadata.*", unmatch: "metadata.loc", mapping: { type: "keyword" }, }, }, ], properties: { text: { type: "text" }, metadata: { type: "object", properties: { loc: { type: "object" }, // explicitly define loc as an object }, }, embedding: { type: "dense_vector", dims: dimension, index: true, similarity, index_options: { type: engine, m, ef_construction: efConstruction, }, }, }, }, }; const indexExists = await this.doesIndexExist(); if (indexExists) return; await this.client.indices.create(request); } private buildMetadataTerms(filter?: ElasticFilter): 
ElasticMetadataTerms { if (filter == null) return { must: [], must_not: [] }; const filters = Array.isArray(filter) ? filter : Object.entries(filter).map(([key, value]) => ({ operator: "term", field: key, value, })); const must = []; const must_not = []; const should = []; for (const condition of filters) { const metadataField = `metadata.${condition.field}`; if (condition.operator === "exists") { must.push({ [condition.operator]: { field: metadataField, }, }); } else if (condition.operator === "not_exists") { must_not.push({ exists: { field: metadataField, }, }); } else if (condition.operator === "exclude") { const toExclude = { [metadataField]: condition.value }; must_not.push({ ...(Array.isArray(condition.value) ? { terms: toExclude } : { term: toExclude }), }); } else if (condition.operator === "or") { should.push({ term: { [metadataField]: condition.value, }, }); } else { must.push({ [condition.operator]: { [metadataField]: condition.value, }, }); } } const result: ElasticMetadataTerms = { must, must_not }; if (should.length > 0) { result.should = should; result.minimum_should_match = 1; } return result; } /** * Method to check if an index exists in the Elasticsearch database. * @returns A promise that resolves with a boolean indicating whether the index exists. */ async doesIndexExist(): Promise<boolean> { return await this.client.indices.exists({ index: this.indexName }); } /** * Method to delete an index from the Elasticsearch database if it exists. * @returns A promise that resolves when the deletion is complete. */ async deleteIfExists(): Promise<void> { const indexExists = await this.doesIndexExist(); if (!indexExists) return; await this.client.indices.delete({ index: this.indexName }); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/cloudflare_vectorize.ts
import * as uuid from "uuid"; import { VectorizeIndex, VectorizeVectorMetadata, } from "@cloudflare/workers-types"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; import { AsyncCaller, type AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import { chunkArray } from "@langchain/core/utils/chunk_array"; /** * @deprecated Install and import from "@langchain/cloudflare" instead. */ export interface VectorizeLibArgs extends AsyncCallerParams { index: VectorizeIndex; textKey?: string; } /** * @deprecated Install and import from "@langchain/cloudflare" instead. * * Type that defines the parameters for the delete operation in the * CloudflareVectorizeStore class. It includes ids, deleteAll flag, and namespace. */ export type VectorizeDeleteParams = { ids: string[]; }; /** * @deprecated Install and import from "@langchain/cloudflare" instead. * * Class that extends the VectorStore class and provides methods to * interact with the Cloudflare Vectorize vector database. */ export class CloudflareVectorizeStore extends VectorStore { textKey: string; namespace?: string; index: VectorizeIndex; caller: AsyncCaller; _vectorstoreType(): string { return "cloudflare_vectorize"; } constructor(embeddings: EmbeddingsInterface, args: VectorizeLibArgs) { super(embeddings, args); this.embeddings = embeddings; const { index, textKey, ...asyncCallerArgs } = args; if (!index) { throw new Error( "Must supply a Vectorize index binding, eg { index: env.VECTORIZE }" ); } this.index = index; this.textKey = textKey ?? "text"; this.caller = new AsyncCaller({ maxConcurrency: 6, maxRetries: 0, ...asyncCallerArgs, }); } /** * Method that adds documents to the Vectorize database. * @param documents Array of documents to add. * @param options Optional ids for the documents. * @returns Promise that resolves with the ids of the added documents. 
*/ async addDocuments( documents: Document[], options?: { ids?: string[] } | string[] ) { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } /** * Method that adds vectors to the Vectorize database. * @param vectors Array of vectors to add. * @param documents Array of documents associated with the vectors. * @param options Optional ids for the vectors. * @returns Promise that resolves with the ids of the added vectors. */ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } | string[] ) { const ids = Array.isArray(options) ? options : options?.ids; const documentIds = ids == null ? documents.map(() => uuid.v4()) : ids; const vectorizeVectors = vectors.map((values, idx) => { const metadata: Record<string, VectorizeVectorMetadata> = { ...documents[idx].metadata, [this.textKey]: documents[idx].pageContent, }; return { id: documentIds[idx], metadata, values, }; }); // Stick to a limit of 500 vectors per upsert request const chunkSize = 500; const chunkedVectors = chunkArray(vectorizeVectors, chunkSize); const batchRequests = chunkedVectors.map((chunk) => this.caller.call(async () => this.index.upsert(chunk)) ); await Promise.all(batchRequests); return documentIds; } /** * Method that deletes vectors from the Vectorize database. * @param params Parameters for the delete operation. * @returns Promise that resolves when the delete operation is complete. */ async delete(params: VectorizeDeleteParams): Promise<void> { const batchSize = 1000; const batchedIds = chunkArray(params.ids, batchSize); const batchRequests = batchedIds.map((batchIds) => this.caller.call(async () => this.index.deleteByIds(batchIds)) ); await Promise.all(batchRequests); } /** * Method that performs a similarity search in the Vectorize database and * returns the results along with their scores. * @param query Query vector for the similarity search. 
* @param k Number of top results to return. * @returns Promise that resolves with an array of documents and their scores. */ async similaritySearchVectorWithScore( query: number[], k: number ): Promise<[Document, number][]> { const results = await this.index.query(query, { returnVectors: true, topK: k, }); const result: [Document, number][] = []; if (results.matches) { for (const res of results.matches) { const { [this.textKey]: pageContent, ...metadata } = res.vector?.metadata ?? {}; result.push([ new Document({ metadata, pageContent: pageContent as string }), res.score, ]); } } return result; } /** * Static method that creates a new instance of the CloudflareVectorizeStore class * from texts. * @param texts Array of texts to add to the Vectorize database. * @param metadatas Metadata associated with the texts. * @param embeddings Embeddings to use for the texts. * @param dbConfig Configuration for the Vectorize database. * @param options Optional ids for the vectors. * @returns Promise that resolves with a new instance of the CloudflareVectorizeStore class. */ static async fromTexts( texts: string[], metadatas: | Record<string, VectorizeVectorMetadata>[] | Record<string, VectorizeVectorMetadata>, embeddings: EmbeddingsInterface, dbConfig: VectorizeLibArgs ): Promise<CloudflareVectorizeStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return CloudflareVectorizeStore.fromDocuments(docs, embeddings, dbConfig); } /** * Static method that creates a new instance of the CloudflareVectorizeStore class * from documents. * @param docs Array of documents to add to the Vectorize database. * @param embeddings Embeddings to use for the documents. * @param dbConfig Configuration for the Vectorize database. * @param options Optional ids for the vectors. 
* @returns Promise that resolves with a new instance of the CloudflareVectorizeStore class. */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: VectorizeLibArgs ): Promise<CloudflareVectorizeStore> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } /** * Static method that creates a new instance of the CloudflareVectorizeStore class * from an existing index. * @param embeddings Embeddings to use for the documents. * @param dbConfig Configuration for the Vectorize database. * @returns Promise that resolves with a new instance of the CloudflareVectorizeStore class. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, dbConfig: VectorizeLibArgs ): Promise<CloudflareVectorizeStore> { const instance = new this(embeddings, dbConfig); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/xata.ts
import { BaseClient } from "@xata.io/client"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; /** * Interface for the arguments required to create a XataClient. Includes * the client instance and the table name. */ export interface XataClientArgs<XataClient> { readonly client: XataClient; readonly table: string; } /** * Type for the filter object used in Xata database queries. */ type XataFilter = object; /** * Class for interacting with a Xata database as a VectorStore. Provides * methods to add documents and vectors to the database, delete entries, * and perform similarity searches. */ export class XataVectorSearch< XataClient extends BaseClient > extends VectorStore { declare FilterType: XataFilter; private readonly client: XataClient; private readonly table: string; _vectorstoreType(): string { return "xata"; } constructor( embeddings: EmbeddingsInterface, args: XataClientArgs<XataClient> ) { super(embeddings, args); this.client = args.client; this.table = args.table; } /** * Method to add documents to the Xata database. Maps the page content of * each document, embeds the documents using the embeddings, and adds the * vectors to the database. * @param documents Array of documents to be added. * @param options Optional object containing an array of ids. * @returns Promise resolving to an array of ids of the added documents. */ async addDocuments(documents: Document[], options?: { ids?: string[] }) { const texts = documents.map(({ pageContent }) => pageContent); return this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } /** * Method to add vectors to the Xata database. Maps each vector to a row * with the document's content, embedding, and metadata. Creates or * replaces these rows in the Xata database. * @param vectors Array of vectors to be added. 
* @param documents Array of documents corresponding to the vectors. * @param options Optional object containing an array of ids. * @returns Promise resolving to an array of ids of the added vectors. */ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } ) { const rows = vectors .map((embedding, idx) => ({ content: documents[idx].pageContent, embedding, ...documents[idx].metadata, })) .map((row, idx) => { if (options?.ids) { return { id: options.ids[idx], ...row }; } return row; }); const res = await this.client.db[this.table].createOrReplace(rows); // Since we have an untyped BaseClient, it doesn't know the // actual return type of the overload. const results = res as unknown as { id: string }[]; const returnedIds = results.map((row) => row.id); return returnedIds; } /** * Method to delete entries from the Xata database. Deletes the entries * with the provided ids. * @param params Object containing an array of ids of the entries to be deleted. * @returns Promise resolving to void. */ async delete(params: { ids: string[] }): Promise<void> { const { ids } = params; await this.client.db[this.table].delete(ids); } /** * Method to perform a similarity search in the Xata database. Returns the * k most similar documents along with their scores. * @param query Query vector for the similarity search. * @param k Number of most similar documents to return. * @param filter Optional filter for the search. * @returns Promise resolving to an array of tuples, each containing a Document and its score. 
*/ async similaritySearchVectorWithScore( query: number[], k: number, filter?: XataFilter | undefined ): Promise<[Document, number][]> { const { records } = await this.client.db[this.table].vectorSearch( "embedding", query, { size: k, filter, } ); return ( // eslint-disable-next-line @typescript-eslint/no-explicit-any records?.map((record: any) => [ new Document({ pageContent: record.content, metadata: Object.fromEntries( Object.entries(record).filter( ([key]) => key !== "content" && key !== "embedding" && key !== "xata" && key !== "id" ) ), }), record.xata ? record.xata.score : record.xata_score, ]) ?? [] ); } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/couchbase.ts
/* eslint-disable no-param-reassign */ /* eslint-disable @typescript-eslint/no-explicit-any */ /* eslint-disable import/no-extraneous-dependencies */ import { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Bucket, Cluster, Collection, Scope, SearchRequest, VectorQuery, VectorSearch, } from "couchbase"; import { Document } from "@langchain/core/documents"; import { v4 as uuid } from "uuid"; /** * This interface define the optional fields for adding vector * - `ids` - vector of ids for each document. If undefined, then uuid will be used * - `metadata` - vector of metadata object for each document */ export interface AddVectorOptions { ids?: string[]; metadata?: Record<string, any>[]; } /** * This interface defines the fields required to initialize a vector store * These are the fields part of config: * @property {Cluster} cluster - The Couchbase cluster that the store will interact with. * @property {string} bucketName - The name of the bucket in the Couchbase cluster. * @property {string} scopeName - The name of the scope within the bucket. * @property {string} collectionName - The name of the collection within the scope. * @property {string} indexName - The name of the index to be used for vector search. * @property {string} textKey - The key to be used for text in the documents. Defaults to "text". * @property {string} embeddingKey - The key to be used for embeddings in the documents. Defaults to "embedding". * @property {boolean} scopedIndex - Whether to use a scoped index for vector search. Defaults to true. 
* @property {AddVectorOptions} addVectorOptions - Options for adding vectors with specific id/metadata */ export interface CouchbaseVectorStoreArgs { cluster: Cluster; bucketName: string; scopeName: string; collectionName: string; indexName: string; textKey?: string; embeddingKey?: string; scopedIndex?: boolean; addVectorOptions?: AddVectorOptions; } /** * This type defines the search filters used in couchbase vector search * - `fields`: Optional list of fields to include in the * metadata of results. Note that these need to be stored in the index. * If nothing is specified, defaults to all the fields stored in the index. * - `searchOptions`: Optional search options that are passed to Couchbase search. Defaults to empty object. */ type CouchbaseVectorStoreFilter = { fields?: any; searchOptions?: any; }; /** * Class for interacting with the Couchbase database. It extends the * VectorStore class and provides methods for adding vectors and * documents, and searching for similar vectors. * Initiate the class using initialize() method. */ export class CouchbaseVectorStore extends VectorStore { declare FilterType: CouchbaseVectorStoreFilter; private metadataKey = "metadata"; private readonly defaultTextKey = "text"; private readonly defaultScopedIndex = true; private readonly defaultEmbeddingKey = "embedding"; private cluster: Cluster; private _bucket: Bucket; private _scope: Scope; private _collection: Collection; private bucketName: string; private scopeName: string; private collectionName: string; private indexName: string; private textKey = this.defaultTextKey; private embeddingKey = this.defaultEmbeddingKey; private scopedIndex: boolean; /** * The private constructor used to provide embedding to parent class. 
* Initialize the class using static initialize() method * @param embedding - object to generate embedding * @param config - the fields required to initialize a vector store */ private constructor( embedding: EmbeddingsInterface, config: CouchbaseVectorStoreArgs ) { super(embedding, config); } /** * initialize class for interacting with the Couchbase database. * It extends the VectorStore class and provides methods * for adding vectors and documents, and searching for similar vectors. * This also verifies the params * * @param embeddings - object to generate embedding * @param config - the fields required to initialize a vector store */ static async initialize( embeddings: EmbeddingsInterface, config: CouchbaseVectorStoreArgs ) { const store = new CouchbaseVectorStore(embeddings, config); const { cluster, bucketName, scopeName, collectionName, indexName, textKey, embeddingKey, scopedIndex, } = config; store.cluster = cluster; store.bucketName = bucketName; store.scopeName = scopeName; store.collectionName = collectionName; store.indexName = indexName; if (textKey) { store.textKey = textKey; } else { store.textKey = store.defaultTextKey; } if (embeddingKey) { store.embeddingKey = embeddingKey; } else { store.embeddingKey = store.defaultEmbeddingKey; } if (scopedIndex !== undefined) { store.scopedIndex = scopedIndex; } else { store.scopedIndex = store.defaultScopedIndex; } try { store._bucket = store.cluster.bucket(store.bucketName); store._scope = store._bucket.scope(store.scopeName); store._collection = store._scope.collection(store.collectionName); } catch (err) { throw new Error( "Error connecting to couchbase, Please check connection and credentials" ); } try { if ( !(await store.checkBucketExists()) || !(await store.checkIndexExists()) || !(await store.checkScopeAndCollectionExists()) ) { throw new Error("Error while initializing vector store"); } } catch (err) { throw new Error(`Error while initializing vector store: ${err}`); } return store; } /** * An 
asynchrononous method to verify the search indexes. * It retrieves all indexes and checks if specified index is present. * * @throws - If the specified index does not exist in the database. * * @returns - returns promise true if no error is found */ private async checkIndexExists(): Promise<boolean> { if (this.scopedIndex) { const allIndexes = await this._scope.searchIndexes().getAllIndexes(); const indexNames = allIndexes.map((index) => index.name); if (!indexNames.includes(this.indexName)) { throw new Error( `Index ${this.indexName} does not exist. Please create the index before searching.` ); } } else { const allIndexes = await this.cluster.searchIndexes().getAllIndexes(); const indexNames = allIndexes.map((index) => index.name); if (!indexNames.includes(this.indexName)) { throw new Error( `Index ${this.indexName} does not exist. Please create the index before searching.` ); } } return true; } /** * An asynchronous method to verify the existence of a bucket. * It retrieves the bucket using the bucket manager and checks if the specified bucket is present. * * @throws - If the specified bucket does not exist in the database. * * @returns - Returns a promise that resolves to true if no error is found, indicating the bucket exists. */ private async checkBucketExists(): Promise<boolean> { const bucketManager = this.cluster.buckets(); try { await bucketManager.getBucket(this.bucketName); return true; } catch (error) { throw new Error( `Bucket ${this.bucketName} does not exist. Please create the bucket before searching.` ); } } /** * An asynchronous method to verify the existence of a scope and a collection within that scope. * It retrieves all scopes and collections in the bucket, and checks if the specified scope and collection are present. * * @throws - If the specified scope does not exist in the bucket, or if the specified collection does not exist in the scope. 
* * @returns - Returns a promise that resolves to true if no error is found, indicating the scope and collection exist. */ private async checkScopeAndCollectionExists(): Promise<boolean> { const scopeCollectionMap: Record<string, any> = {}; // Get a list of all scopes in the bucket const scopes = await this._bucket.collections().getAllScopes(); for (const scope of scopes) { scopeCollectionMap[scope.name] = []; // Get a list of all the collections in the scope for (const collection of scope.collections) { scopeCollectionMap[scope.name].push(collection.name); } } // Check if the scope exists if (!Object.keys(scopeCollectionMap).includes(this.scopeName)) { throw new Error( `Scope ${this.scopeName} not found in Couchbase bucket ${this.bucketName}` ); } // Check if the collection exists in the scope if (!scopeCollectionMap[this.scopeName].includes(this.collectionName)) { throw new Error( `Collection ${this.collectionName} not found in scope ${this.scopeName} in Couchbase bucket ${this.bucketName}` ); } return true; } _vectorstoreType(): string { return "couchbase"; } /** * Formats couchbase metadata by removing `metadata.` from initials * @param fields - all the fields of row * @returns - formatted metadata fields */ private formatMetadata = (fields: any) => { delete fields[this.textKey]; const metadataFields: { [key: string]: any } = {}; // eslint-disable-next-line guard-for-in for (const key in fields) { const newKey = key.replace(`${this.metadataKey}.`, ""); metadataFields[newKey] = fields[key]; } return metadataFields; }; /** * Performs a similarity search on the vectors in the Couchbase database and returns the documents and their corresponding scores. * * @param queryEmbeddings - Embedding vector to look up documents similar to. * @param k - Number of documents to return. Defaults to 4. * @param filter - Optional search filter that are passed to Couchbase search. Defaults to empty object. 
* - `fields`: Optional list of fields to include in the * metadata of results. Note that these need to be stored in the index. * If nothing is specified, defaults to all the fields stored in the index. * - `searchOptions`: Optional search options that are passed to Couchbase search. Defaults to empty object. * * @returns - Promise of list of [document, score] that are the most similar to the query vector. * * @throws If the search operation fails. */ async similaritySearchVectorWithScore( queryEmbeddings: number[], k = 4, filter: CouchbaseVectorStoreFilter = {} ): Promise<[Document, number][]> { let { fields } = filter; const { searchOptions } = filter; if (!fields) { fields = ["*"]; } if ( !(fields.length === 1 && fields[0] === "*") && !fields.includes(this.textKey) ) { fields.push(this.textKey); } const searchRequest = new SearchRequest( VectorSearch.fromVectorQuery( new VectorQuery(this.embeddingKey, queryEmbeddings).numCandidates(k) ) ); let searchIterator; const docsWithScore: [Document, number][] = []; try { if (this.scopedIndex) { searchIterator = this._scope.search(this.indexName, searchRequest, { limit: k, fields, raw: searchOptions, }); } else { searchIterator = this.cluster.search(this.indexName, searchRequest, { limit: k, fields, raw: searchOptions, }); } const searchRows = (await searchIterator).rows; for (const row of searchRows) { const text = row.fields[this.textKey]; const metadataFields = this.formatMetadata(row.fields); const searchScore = row.score; const doc = new Document({ pageContent: text, metadata: metadataFields, }); docsWithScore.push([doc, searchScore]); } } catch (err) { console.log("error received"); throw new Error(`Search failed with error: ${err}`); } return docsWithScore; } /** * Return documents that are most similar to the vector embedding. * * @param queryEmbeddings - Embedding to look up documents similar to. * @param k - The number of similar documents to return. Defaults to 4. 
* @param filter - Optional search filter that are passed to Couchbase search. Defaults to empty object. * - `fields`: Optional list of fields to include in the * metadata of results. Note that these need to be stored in the index. * If nothing is specified, defaults to all the fields stored in the index. * - `searchOptions`: Optional search options that are passed to Couchbase search. Defaults to empty object. * * @returns - A promise that resolves to an array of documents that match the similarity search. */ async similaritySearchByVector( queryEmbeddings: number[], k = 4, filter: CouchbaseVectorStoreFilter = {} ): Promise<Document[]> { const docsWithScore = await this.similaritySearchVectorWithScore( queryEmbeddings, k, filter ); const docs = []; for (const doc of docsWithScore) { docs.push(doc[0]); } return docs; } /** * Return documents that are most similar to the query. * * @param query - Query to look up for similar documents * @param k - The number of similar documents to return. Defaults to 4. * @param filter - Optional search filter that are passed to Couchbase search. Defaults to empty object. * - `fields`: Optional list of fields to include in the * metadata of results. Note that these need to be stored in the index. * If nothing is specified, defaults to all the fields stored in the index. * - `searchOptions`: Optional search options that are passed to Couchbase search. Defaults to empty object. * * @returns - Promise of list of documents that are most similar to the query. */ async similaritySearch( query: string, k = 4, filter: CouchbaseVectorStoreFilter = {} ): Promise<Document[]> { const queryEmbeddings = await this.embeddings.embedQuery(query); const docsWithScore = await this.similaritySearchVectorWithScore( queryEmbeddings, k, filter ); const docs = []; for (const doc of docsWithScore) { docs.push(doc[0]); } return docs; } /** * Return documents that are most similar to the query with their scores. 
* * @param query - Query to look up for similar documents * @param k - The number of similar documents to return. Defaults to 4. * @param filter - Optional search filter that are passed to Couchbase search. Defaults to empty object. * - `fields`: Optional list of fields to include in the * metadata of results. Note that these need to be stored in the index. * If nothing is specified, defaults to all the fields stored in the index. * - `searchOptions`: Optional search options that are passed to Couchbase search. Defaults to empty object. * * @returns - Promise of list of documents that are most similar to the query. */ async similaritySearchWithScore( query: string, k = 4, filter: CouchbaseVectorStoreFilter = {} ): Promise<[Document, number][]> { const queryEmbeddings = await this.embeddings.embedQuery(query); const docsWithScore = await this.similaritySearchVectorWithScore( queryEmbeddings, k, filter ); return docsWithScore; } /** * upsert documents asynchronously into a couchbase collection * @param documentsToInsert Documents to be inserted into couchbase collection with embeddings, original text and metadata * @returns DocIds of the inserted documents */ private async upsertDocuments( documentsToInsert: { [x: string]: any; }[] ) { // Create promises for each document to be upserted const upsertDocumentsPromises = documentsToInsert.map((document) => { const currentDocumentKey = Object.keys(document)[0]; return this._collection .upsert(currentDocumentKey, document[currentDocumentKey]) .then(() => currentDocumentKey) .catch((e) => { console.error("error received while upserting document", e); throw new Error(`Upsert failed with error: ${e}`); }); }); try { // Upsert all documents asynchronously const docIds = await Promise.all(upsertDocumentsPromises); const successfulDocIds: string[] = []; for (const id of docIds) { if (id) { successfulDocIds.push(id); } } return successfulDocIds; } catch (e) { console.error( "An error occurred with Promise.all at upserting all 
documents", e ); throw e; } } /** * Add vectors and corresponding documents to a couchbase collection * If the document IDs are passed, the existing documents (if any) will be * overwritten with the new ones. * @param vectors - The vectors to be added to the collection. * @param documents - The corresponding documents to be added to the collection. * @param options - Optional parameters for adding vectors. * This may include the IDs and metadata of the documents to be added. Defaults to an empty object. * * @returns - A promise that resolves to an array of document IDs that were added to the collection. */ public async addVectors( vectors: number[][], documents: Document[], options: AddVectorOptions = {} ): Promise<string[]> { // Get document ids. if ids are not available then use UUIDs for each document let ids: string[] | undefined = options ? options.ids : undefined; if (ids === undefined) { ids = Array.from({ length: documents.length }, () => uuid()); } // Get metadata for each document. if metadata is not available, use empty object for each document let metadata: any = options ? options.metadata : undefined; if (metadata === undefined) { metadata = Array.from({ length: documents.length }, () => ({})); } const documentsToInsert = ids.map((id: string, index: number) => ({ [id]: { [this.textKey]: documents[index].pageContent, [this.embeddingKey]: vectors[index], [this.metadataKey]: metadata[index], }, })); let docIds: string[] = []; try { docIds = await this.upsertDocuments(documentsToInsert); } catch (err) { console.error("Error while adding vectors", err); throw err; } return docIds; } /** * Run texts through the embeddings and persist in vectorstore. * If the document IDs are passed, the existing documents (if any) will be * overwritten with the new ones. * @param documents - The corresponding documents to be added to the collection. * @param options - Optional parameters for adding documents. 
* This may include the IDs and metadata of the documents to be added. Defaults to an empty object. * * @returns - A promise that resolves to an array of document IDs that were added to the collection. */ public async addDocuments( documents: Document[], options: AddVectorOptions = {} ) { const texts = documents.map(({ pageContent }) => pageContent); const metadatas = documents.map((doc) => doc.metadata); if (!options.metadata) { options.metadata = metadatas; } return this.addVectors( await this.embeddings.embedDocuments(texts), documents, options ); } /** * Create a new CouchbaseVectorStore from a set of documents. * This function will initialize a new store, add the documents to it, and then return the store. * @param documents - The documents to be added to the new store. * @param embeddings - The embeddings to be used for the documents. * @param config - The configuration for the new CouchbaseVectorStore. This includes the options for adding vectors. * * @returns - A promise that resolves to the new CouchbaseVectorStore that contains the added documents. */ static async fromDocuments( documents: Document[], embeddings: EmbeddingsInterface, config: CouchbaseVectorStoreArgs ): Promise<CouchbaseVectorStore> { const store = await this.initialize(embeddings, config); await store.addDocuments(documents, config.addVectorOptions); return store; } /** * Create a new CouchbaseVectorStore from a set of texts. * This function will convert each text and its corresponding metadata into a Document, * initialize a new store, add the documents to it, and then return the store. * @param texts - The texts to be converted into Documents and added to the new store. * @param metadatas - The metadata for each text. If an array is passed, each text will have its corresponding metadata. * If not, all texts will have the same metadata. * @param embeddings - The embeddings to be used for the documents. * @param config - The configuration for the new CouchbaseVectorStore. 
This includes the options for adding vectors. * * @returns - A promise that resolves to the new CouchbaseVectorStore that contains the added documents. */ static async fromTexts( texts: string[], metadatas: any, embeddings: EmbeddingsInterface, config: CouchbaseVectorStoreArgs ): Promise<CouchbaseVectorStore> { const docs = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return await this.fromDocuments(docs, embeddings, config); } /** * Delete documents asynchronously from the collection. * This function will attempt to remove each document in the provided list of IDs from the collection. * If an error occurs during the deletion of a document, an error will be thrown with the ID of the document and the error message. * @param ids - An array of document IDs to be deleted from the collection. * * @returns - A promise that resolves when all documents have been attempted to be deleted. If a document could not be deleted, an error is thrown. */ public async delete(ids: string[]): Promise<void> { const deleteDocumentsPromises = ids.map((id) => this._collection.remove(id).catch((err) => { throw new Error( `Error while deleting document - Document Id: ${id}, Error: ${err}` ); }) ); try { await Promise.all(deleteDocumentsPromises); } catch (err) { throw new Error(`Error while deleting documents, Error: ${err}`); } } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/milvus.ts
import * as uuid from "uuid";
import {
  MilvusClient,
  DataType,
  DataTypeMap,
  ErrorCode,
  FieldType,
  ClientConfig,
  InsertReq,
  keyValueObj,
} from "@zilliz/milvus2-sdk-node";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";
import { getEnvironmentVariable } from "@langchain/core/utils/env";

/**
 * Interface for the arguments required by the Milvus class constructor.
 */
export interface MilvusLibArgs {
  collectionName?: string;
  partitionName?: string;
  primaryField?: string;
  vectorField?: string;
  textField?: string;
  url?: string; // db address
  ssl?: boolean;
  username?: string;
  password?: string;
  textFieldMaxLength?: number;
  clientConfig?: ClientConfig;
  autoId?: boolean;
  indexCreateOptions?: IndexCreateOptions;
  partitionKey?: string; // doc: https://milvus.io/docs/use-partition-key.md
  partitionKeyMaxLength?: number;
}

export interface IndexCreateOptions {
  index_type: IndexType;
  metric_type: MetricType;
  params?: keyValueObj; // index search params
  search_params?: keyValueObj;
}

export type MetricType = "L2" | "IP" | "COSINE";

/**
 * Type representing the type of index used in the Milvus database.
 */
type IndexType =
  | "FLAT"
  | "IVF_FLAT"
  | "IVF_SQ8"
  | "IVF_PQ"
  | "HNSW"
  | "RHNSW_FLAT"
  | "RHNSW_SQ"
  | "RHNSW_PQ"
  | "IVF_HNSW"
  | "ANNOY";

/**
 * Interface for vector search parameters.
 */
interface IndexSearchParam {
  params: { nprobe?: number; ef?: number; search_k?: number };
}

// A single row to be inserted into Milvus: field name -> text or vector.
interface InsertRow {
  [x: string]: string | number[];
}

const MILVUS_PRIMARY_FIELD_NAME = "langchain_primaryid";
const MILVUS_VECTOR_FIELD_NAME = "langchain_vector";
const MILVUS_TEXT_FIELD_NAME = "langchain_text";
const MILVUS_COLLECTION_NAME_PREFIX = "langchain_col";
const MILVUS_PARTITION_KEY_MAX_LENGTH = 512;

/**
 * Default parameters for index searching.
 */
const DEFAULT_INDEX_SEARCH_PARAMS: Record<IndexType, IndexSearchParam> = {
  FLAT: { params: {} },
  IVF_FLAT: { params: { nprobe: 10 } },
  IVF_SQ8: { params: { nprobe: 10 } },
  IVF_PQ: { params: { nprobe: 10 } },
  HNSW: { params: { ef: 10 } },
  RHNSW_FLAT: { params: { ef: 10 } },
  RHNSW_SQ: { params: { ef: 10 } },
  RHNSW_PQ: { params: { ef: 10 } },
  IVF_HNSW: { params: { nprobe: 10, ef: 10 } },
  ANNOY: { params: { search_k: 10 } },
};

/**
 * Class for interacting with a Milvus database. Extends the VectorStore
 * class.
 */
export class Milvus extends VectorStore {
  get lc_secrets(): { [key: string]: string } {
    return {
      ssl: "MILVUS_SSL",
      username: "MILVUS_USERNAME",
      password: "MILVUS_PASSWORD",
    };
  }

  _vectorstoreType(): string {
    return "milvus";
  }

  declare FilterType: string;

  collectionName: string;

  partitionName?: string;

  numDimensions?: number;

  autoId?: boolean;

  primaryField: string;

  vectorField: string;

  textField: string;

  textFieldMaxLength: number;

  partitionKey?: string;

  partitionKeyMaxLength?: number;

  // Names of all non-autoID fields in the collection schema.
  fields: string[];

  client: MilvusClient;

  indexCreateParams: IndexCreateOptions;

  indexSearchParams: keyValueObj;

  constructor(public embeddings: EmbeddingsInterface, args: MilvusLibArgs) {
    super(embeddings, args);
    this.collectionName = args.collectionName ?? genCollectionName();
    this.partitionName = args.partitionName;
    this.textField = args.textField ?? MILVUS_TEXT_FIELD_NAME;
    this.autoId = args.autoId ?? true;
    this.primaryField = args.primaryField ?? MILVUS_PRIMARY_FIELD_NAME;
    this.vectorField = args.vectorField ?? MILVUS_VECTOR_FIELD_NAME;
    this.textFieldMaxLength = args.textFieldMaxLength ?? 0;
    this.partitionKey = args.partitionKey;
    this.partitionKeyMaxLength =
      args.partitionKeyMaxLength ?? MILVUS_PARTITION_KEY_MAX_LENGTH;
    this.fields = [];

    // args.url takes precedence; otherwise fall back to the MILVUS_URL env var.
    const url = args.url ?? getEnvironmentVariable("MILVUS_URL");
    const {
      address = "",
      username = "",
      password = "",
      ssl,
    } = args.clientConfig || {};

    // Index creation parameters
    const { indexCreateOptions } = args;
    if (indexCreateOptions) {
      const {
        metric_type,
        index_type,
        params,
        search_params = {},
      } = indexCreateOptions;
      this.indexCreateParams = {
        metric_type,
        index_type,
        params,
      };
      // Merge user search params over the per-index-type defaults.
      this.indexSearchParams = {
        ...DEFAULT_INDEX_SEARCH_PARAMS[index_type].params,
        ...search_params,
      };
    } else {
      // Default index creation parameters.
      this.indexCreateParams = {
        index_type: "HNSW",
        metric_type: "L2",
        params: { M: 8, efConstruction: 64 },
      };
      // Default index search parameters.
      this.indexSearchParams = {
        ...DEFAULT_INDEX_SEARCH_PARAMS.HNSW.params,
      };
    }

    // combine args clientConfig and env variables
    // (top-level args values win over those nested in clientConfig)
    const clientConfig: ClientConfig = {
      ...(args.clientConfig || {}),
      address: url || address,
      username: args.username || username,
      password: args.password || password,
      ssl: args.ssl || ssl,
    };

    if (!clientConfig.address) {
      throw new Error("Milvus URL address is not provided.");
    }
    this.client = new MilvusClient(clientConfig);
  }

  /**
   * Adds documents to the Milvus database.
   * @param documents Array of Document instances to be added to the database.
   * @param options Optional parameter that can include specific IDs for the documents.
   * @returns Promise resolving to void.
   */
  async addDocuments(
    documents: Document[],
    options?: { ids?: string[] }
  ): Promise<void> {
    const texts = documents.map(({ pageContent }) => pageContent);
    await this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents,
      options
    );
  }

  /**
   * Adds vectors to the Milvus database.
   * @param vectors Array of vectors to be added to the database.
   * @param documents Array of Document instances associated with the vectors.
   * @param options Optional parameter that can include specific IDs for the documents.
   * @returns Promise resolving to void.
   */
  async addVectors(
    vectors: number[][],
    documents: Document[],
    options?: { ids?: string[] }
  ): Promise<void> {
    if (vectors.length === 0) {
      return;
    }
    await this.ensureCollection(vectors, documents);
    if (this.partitionName !== undefined) {
      await this.ensurePartition();
    }

    const documentIds = options?.ids ?? [];
    const insertDatas: InsertRow[] = [];
    // eslint-disable-next-line no-plusplus
    for (let index = 0; index < vectors.length; index++) {
      const vec = vectors[index];
      const doc = documents[index];
      const data: InsertRow = {
        [this.textField]: doc.pageContent,
        [this.vectorField]: vec,
      };
      // Populate every schema field: primary key (from options.ids or
      // metadata), text, vector, and all remaining metadata fields.
      this.fields.forEach((field) => {
        switch (field) {
          case this.primaryField:
            if (documentIds[index] !== undefined) {
              data[field] = documentIds[index];
            } else if (!this.autoId) {
              if (doc.metadata[this.primaryField] === undefined) {
                throw new Error(
                  `The Collection's primaryField is configured with autoId=false, thus its value must be provided through metadata.`
                );
              }
              data[field] = doc.metadata[this.primaryField];
            }
            break;
          case this.textField:
            data[field] = doc.pageContent;
            break;
          case this.vectorField:
            data[field] = vec;
            break;
          default: // metadata fields
            if (doc.metadata[field] === undefined) {
              throw new Error(
                `The field "${field}" is not provided in documents[${index}].metadata.`
              );
            } else if (typeof doc.metadata[field] === "object") {
              // Objects are serialized to JSON strings for storage.
              data[field] = JSON.stringify(doc.metadata[field]);
            } else {
              data[field] = doc.metadata[field];
            }
            break;
        }
      });

      insertDatas.push(data);
    }

    const params: InsertReq = {
      collection_name: this.collectionName,
      fields_data: insertDatas,
    };
    if (this.partitionName !== undefined) {
      params.partition_name = this.partitionName;
    }
    // autoId collections use insert (server assigns primary keys);
    // user-keyed collections use upsert so existing ids are overwritten.
    const insertResp = this.autoId
      ? await this.client.insert(params)
      : await this.client.upsert(params);
    if (insertResp.status.error_code !== ErrorCode.SUCCESS) {
      throw new Error(
        `Error ${
          this.autoId ? "inserting" : "upserting"
        } data: ${JSON.stringify(insertResp)}`
      );
    }
    await this.client.flushSync({ collection_names: [this.collectionName] });
  }

  /**
   * Searches for vectors in the Milvus database that are similar to a given
   * vector.
   * @param query Vector to compare with the vectors in the database.
   * @param k Number of similar vectors to return.
   * @param filter Optional filter to apply to the search.
   * @returns Promise resolving to an array of tuples, each containing a Document instance and a similarity score.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: string
  ): Promise<[Document, number][]> {
    const hasColResp = await this.client.hasCollection({
      collection_name: this.collectionName,
    });
    if (hasColResp.status.error_code !== ErrorCode.SUCCESS) {
      throw new Error(`Error checking collection: ${hasColResp}`);
    }
    if (hasColResp.value === false) {
      throw new Error(
        `Collection not found: ${this.collectionName}, please create collection before search.`
      );
    }

    // An absent filter is passed through as an empty expression string.
    const filterStr = filter ?? "";

    await this.grabCollectionFields();

    const loadResp = await this.client.loadCollectionSync({
      collection_name: this.collectionName,
    });
    if (loadResp.error_code !== ErrorCode.SUCCESS) {
      throw new Error(`Error loading collection: ${loadResp}`);
    }

    // clone this.field and remove vectorField
    const outputFields = this.fields.filter(
      (field) => field !== this.vectorField
    );

    const searchResp = await this.client.search({
      collection_name: this.collectionName,
      search_params: {
        anns_field: this.vectorField,
        topk: k,
        metric_type: this.indexCreateParams.metric_type,
        params: JSON.stringify(this.indexSearchParams),
      },
      output_fields: outputFields,
      vector_type: DataType.FloatVector,
      vectors: [query],
      filter: filterStr,
    });
    if (searchResp.status.error_code !== ErrorCode.SUCCESS) {
      throw new Error(`Error searching data: ${JSON.stringify(searchResp)}`);
    }
    const results: [Document, number][] = [];
    searchResp.results.forEach((result) => {
      const fields = {
        pageContent: "",
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        metadata: {} as Record<string, any>,
      };
      Object.keys(result).forEach((key) => {
        if (key === this.textField) {
          fields.pageContent = result[key];
        } else if (this.fields.includes(key) || key === this.primaryField) {
          // String values that parse as JSON are deserialized back to
          // objects (reverse of the serialization done in addVectors).
          if (typeof result[key] === "string") {
            const { isJson, obj } = checkJsonString(result[key]);
            fields.metadata[key] = isJson ? obj : result[key];
          } else {
            fields.metadata[key] = result[key];
          }
        }
      });
      results.push([new Document(fields), result.score]);
    });
    // console.log("Search result: " + JSON.stringify(results, null, 2));
    return results;
  }

  /**
   * Ensures that a collection exists in the Milvus database.
   * @param vectors Optional array of vectors to be used if a new collection needs to be created.
   * @param documents Optional array of Document instances to be used if a new collection needs to be created.
   * @returns Promise resolving to void.
   */
  async ensureCollection(vectors?: number[][], documents?: Document[]) {
    const hasColResp = await this.client.hasCollection({
      collection_name: this.collectionName,
    });
    if (hasColResp.status.error_code !== ErrorCode.SUCCESS) {
      throw new Error(
        `Error checking collection: ${JSON.stringify(hasColResp, null, 2)}`
      );
    }

    if (hasColResp.value === false) {
      if (vectors === undefined || documents === undefined) {
        throw new Error(
          `Collection not found: ${this.collectionName}, please provide vectors and documents to create collection.`
        );
      }
      await this.createCollection(vectors, documents);
    } else {
      // Collection already exists: sync this.fields with its schema.
      await this.grabCollectionFields();
    }
  }

  /**
   * Ensures that a partition exists in the Milvus collection.
   * @returns Promise resolving to void.
   */
  async ensurePartition() {
    if (this.partitionName === undefined) {
      return;
    }
    const hasPartResp = await this.client.hasPartition({
      collection_name: this.collectionName,
      partition_name: this.partitionName,
    });
    if (hasPartResp.status.error_code !== ErrorCode.SUCCESS) {
      throw new Error(
        `Error checking partition: ${JSON.stringify(hasPartResp, null, 2)}`
      );
    }

    if (hasPartResp.value === false) {
      await this.client.createPartition({
        collection_name: this.collectionName,
        partition_name: this.partitionName,
      });
    }
  }

  /**
   * Creates a collection in the Milvus database.
   * @param vectors Array of vectors to be added to the new collection.
   * @param documents Array of Document instances to be added to the new collection.
   * @returns Promise resolving to void.
   */
  async createCollection(
    vectors: number[][],
    documents: Document[]
  ): Promise<void> {
    const fieldList: FieldType[] = [];

    // Derive metadata field schemas from the first document's metadata.
    fieldList.push(
      ...createFieldTypeForMetadata(
        documents,
        this.primaryField,
        this.partitionKey
      )
    );

    if (this.autoId) {
      fieldList.push({
        name: this.primaryField,
        description: "Primary key",
        data_type: DataType.Int64,
        is_primary_key: true,
        autoID: true,
      });
    } else {
      // User-supplied string primary keys.
      fieldList.push({
        name: this.primaryField,
        description: "Primary key",
        data_type: DataType.VarChar,
        is_primary_key: true,
        autoID: false,
        max_length: 65535,
      });
    }

    fieldList.push(
      {
        name: this.textField,
        description: "Text field",
        data_type: DataType.VarChar,
        type_params: {
          // Explicit textFieldMaxLength wins; otherwise size to the data.
          max_length:
            this.textFieldMaxLength > 0
              ? this.textFieldMaxLength.toString()
              : getTextFieldMaxLength(documents).toString(),
        },
      },
      {
        name: this.vectorField,
        description: "Vector field",
        data_type: DataType.FloatVector,
        type_params: {
          dim: getVectorFieldDim(vectors).toString(),
        },
      }
    );

    if (this.partitionKey) {
      fieldList.push({
        name: this.partitionKey,
        description: "Partition key",
        data_type: DataType.VarChar,
        max_length: this.partitionKeyMaxLength,
        is_partition_key: true,
      });
    }

    // Track every field except the auto-generated primary key; autoID
    // fields must not be supplied at insert time.
    fieldList.forEach((field) => {
      if (!field.autoID) {
        this.fields.push(field.name);
      }
    });

    const createRes = await this.client.createCollection({
      collection_name: this.collectionName,
      fields: fieldList,
    });

    if (createRes.error_code !== ErrorCode.SUCCESS) {
      throw new Error(`Failed to create collection: ${createRes}`);
    }

    const extraParams = {
      ...this.indexCreateParams,
      params: JSON.stringify(this.indexCreateParams.params),
    };

    await this.client.createIndex({
      collection_name: this.collectionName,
      field_name: this.vectorField,
      extra_params: extraParams,
    });
  }

  /**
   * Retrieves the fields of a collection in the Milvus database.
   * @returns Promise resolving to void.
   */
  async grabCollectionFields(): Promise<void> {
    if (!this.collectionName) {
      throw new Error("Need collection name to grab collection fields");
    }
    // Already populated: nothing to do.
    if (
      this.primaryField &&
      this.vectorField &&
      this.textField &&
      this.fields.length > 0
    ) {
      return;
    }
    const desc = await this.client.describeCollection({
      collection_name: this.collectionName,
    });
    desc.schema.fields.forEach((field) => {
      this.fields.push(field.name);
      // autoID fields are removed again right away — they must not be
      // supplied at insert time.
      if (field.autoID) {
        const index = this.fields.indexOf(field.name);
        if (index !== -1) {
          this.fields.splice(index, 1);
        }
      }
      if (field.is_primary_key) {
        this.primaryField = field.name;
      }
      const dtype = DataTypeMap[field.data_type];
      if (dtype === DataType.FloatVector || dtype === DataType.BinaryVector) {
        this.vectorField = field.name;
      }

      // NOTE(review): only a VarChar field with the default langchain text
      // name is recognized; a custom textField name in an existing
      // collection would not be picked up here — confirm intended.
      if (dtype === DataType.VarChar && field.name === MILVUS_TEXT_FIELD_NAME) {
        this.textField = field.name;
      }
    });
  }

  /**
   * Creates a Milvus instance from a set of texts and their associated
   * metadata.
   * @param texts Array of texts to be added to the database.
   * @param metadatas Array of metadata objects associated with the texts.
   * @param embeddings Embeddings instance used to generate vector embeddings for the texts.
   * @param dbConfig Optional configuration for the Milvus database.
   * @returns Promise resolving to a new Milvus instance.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig?: MilvusLibArgs
  ): Promise<Milvus> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // An array of metadatas pairs entry i with text i; a single object is
      // shared by all texts.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return Milvus.fromDocuments(docs, embeddings, dbConfig);
  }

  /**
   * Creates a Milvus instance from a set of Document instances.
   * @param docs Array of Document instances to be added to the database.
   * @param embeddings Embeddings instance used to generate vector embeddings for the documents.
   * @param dbConfig Optional configuration for the Milvus database.
   * @returns Promise resolving to a new Milvus instance.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig?: MilvusLibArgs
  ): Promise<Milvus> {
    const args: MilvusLibArgs = {
      ...dbConfig,
      collectionName: dbConfig?.collectionName ?? genCollectionName(),
    };
    const instance = new this(embeddings, args);
    await instance.addDocuments(docs);
    return instance;
  }

  /**
   * Creates a Milvus instance from an existing collection in the Milvus
   * database.
   * @param embeddings Embeddings instance used to generate vector embeddings for the documents in the collection.
   * @param dbConfig Configuration for the Milvus database.
   * @returns Promise resolving to a new Milvus instance.
   */
  static async fromExistingCollection(
    embeddings: EmbeddingsInterface,
    dbConfig: MilvusLibArgs
  ): Promise<Milvus> {
    const instance = new this(embeddings, dbConfig);
    await instance.ensureCollection();
    return instance;
  }

  /**
   * Deletes data from the Milvus database.
   * @param params Object containing a filter to apply to the deletion.
   * @returns Promise resolving to void.
   */
  async delete(params: { filter?: string; ids?: string[] }): Promise<void> {
    const hasColResp = await this.client.hasCollection({
      collection_name: this.collectionName,
    });
    if (hasColResp.status.error_code !== ErrorCode.SUCCESS) {
      throw new Error(`Error checking collection: ${hasColResp}`);
    }
    if (hasColResp.value === false) {
      throw new Error(
        `Collection not found: ${this.collectionName}, please create collection before search.`
      );
    }

    const { filter, ids } = params;

    // filter and ids are mutually exclusive; passing both (or neither) is
    // silently a no-op.
    if (filter && !ids) {
      const deleteResp = await this.client.deleteEntities({
        collection_name: this.collectionName,
        expr: filter,
      });

      if (deleteResp.status.error_code !== ErrorCode.SUCCESS) {
        throw new Error(`Error deleting data: ${JSON.stringify(deleteResp)}`);
      }
    } else if (!filter && ids && ids.length > 0) {
      const deleteResp = await this.client.delete({
        collection_name: this.collectionName,
        ids,
      });

      if (deleteResp.status.error_code !== ErrorCode.SUCCESS) {
        throw new Error(
          `Error deleting data with ids: ${JSON.stringify(deleteResp)}`
        );
      }
    }
  }
}

function createFieldTypeForMetadata(
  documents: Document[],
  primaryFieldName: string,
  partitionKey?: string
): FieldType[] {
  const sampleMetadata = documents[0].metadata;
  let textFieldMaxLength = 0;
  let jsonFieldMaxLength = 0;
  documents.forEach(({ metadata }) => {
    // check all keys name and count in metadata is same as sampleMetadata
    Object.keys(metadata).forEach((key) => {
      // NOTE(review): `key in metadata` is always true here because `key`
      // comes from metadata's own keys — this was presumably meant to be
      // `key in sampleMetadata`; also key *count* is never compared despite
      // the comment above. Confirm intended behavior.
      if (
        !(key in metadata) ||
        typeof metadata[key] !== typeof sampleMetadata[key]
      ) {
        throw new Error(
          "All documents must have same metadata keys and datatype"
        );
      }

      // find max length of string field and json field, cache json string value
      if (typeof metadata[key] === "string") {
        if (metadata[key].length > textFieldMaxLength) {
          textFieldMaxLength = metadata[key].length;
        }
      } else if (typeof metadata[key] === "object") {
        const json = JSON.stringify(metadata[key]);
        if (json.length > jsonFieldMaxLength) {
          jsonFieldMaxLength = json.length;
        }
      }
    });
  });

  const fields: FieldType[] = [];
  for (const [key,
value] of Object.entries(sampleMetadata)) { const type = typeof value; if (key === primaryFieldName || key === partitionKey) { /** * skip primary field and partition key * because we will create primary field and partition key in createCollection * */ } else if (type === "string") { fields.push({ name: key, description: `Metadata String field`, data_type: DataType.VarChar, type_params: { max_length: textFieldMaxLength.toString(), }, }); } else if (type === "number") { fields.push({ name: key, description: `Metadata Number field`, data_type: DataType.Float, }); } else if (type === "boolean") { fields.push({ name: key, description: `Metadata Boolean field`, data_type: DataType.Bool, }); } else if (value === null) { // skip } else { // use json for other types try { fields.push({ name: key, description: `Metadata JSON field`, data_type: DataType.VarChar, type_params: { max_length: jsonFieldMaxLength.toString(), }, }); } catch (e) { throw new Error("Failed to parse metadata field as JSON"); } } } return fields; } function genCollectionName(): string { return `${MILVUS_COLLECTION_NAME_PREFIX}_${uuid.v4().replaceAll("-", "")}`; } function getTextFieldMaxLength(documents: Document[]) { let textMaxLength = 0; const textEncoder = new TextEncoder(); // eslint-disable-next-line no-plusplus for (let i = 0; i < documents.length; i++) { const text = documents[i].pageContent; const textLengthInBytes = textEncoder.encode(text).length; if (textLengthInBytes > textMaxLength) { textMaxLength = textLengthInBytes; } } return textMaxLength; } function getVectorFieldDim(vectors: number[][]) { if (vectors.length === 0) { throw new Error("No vectors found"); } return vectors[0].length; } // eslint-disable-next-line @typescript-eslint/no-explicit-any function checkJsonString(value: string): { isJson: boolean; obj: any } { try { const result = JSON.parse(value); return { isJson: true, obj: result }; } catch (e) { return { isJson: false, obj: null }; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/singlestore.ts
import type {
  Pool,
  RowDataPacket,
  OkPacket,
  ResultSetHeader,
  FieldPacket,
  PoolOptions,
} from "mysql2/promise";
import { format } from "mysql2";
import { createPool } from "mysql2/promise";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Document, DocumentInterface } from "@langchain/core/documents";
import { Callbacks } from "@langchain/core/callbacks/manager";

// Arbitrary JSON-serializable metadata attached to a document row.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type Metadata = Record<string, any>;

// Distance metric used by the vector score column in search queries.
export type DistanceMetrics = "DOT_PRODUCT" | "EUCLIDEAN_DISTANCE";

// How text and vector relevance are combined during a search.
export type SearchStrategy =
  | "VECTOR_ONLY"
  | "TEXT_ONLY"
  | "FILTER_BY_TEXT"
  | "FILTER_BY_VECTOR"
  | "WEIGHTED_SUM";

// ORDER BY direction per metric: higher dot product is better (DESC),
// smaller Euclidean distance is better (default ASC, empty string).
const OrderingDirective: Record<DistanceMetrics, string> = {
  DOT_PRODUCT: "DESC",
  EUCLIDEAN_DISTANCE: "",
};

export interface ConnectionOptions extends PoolOptions {}

// Exactly one of connectionURI / connectionOptions may be supplied;
// the `never` fields make the union mutually exclusive.
type ConnectionWithUri = {
  connectionOptions?: never;
  connectionURI: string;
};

type ConnectionWithOptions = {
  connectionURI?: never;
  connectionOptions: ConnectionOptions;
};

type ConnectionConfig = ConnectionWithUri | ConnectionWithOptions;

// Tuning knobs for the search strategies; see setSearchConfig for defaults.
type SearchConfig = {
  searchStrategy?: SearchStrategy;
  filterThreshold?: number;
  textWeight?: number;
  vectorWeight?: number;
  vectorselectCountMultiplier?: number;
};

export type SingleStoreVectorStoreConfig = ConnectionConfig & {
  tableName?: string;
  idColumnName?: string;
  contentColumnName?: string;
  vectorColumnName?: string;
  metadataColumnName?: string;
  distanceMetric?: DistanceMetrics;
  useVectorIndex?: boolean;
  vectorIndexName?: string;
  vectorIndexOptions?: Metadata;
  vectorSize?: number;
  useFullTextIndex?: boolean;
  searchConfig?: SearchConfig;
};

/**
 * Adds the connect attributes to the connection options.
 * @param config A SingleStoreVectorStoreConfig object.
*/ function withConnectAttributes( config: SingleStoreVectorStoreConfig ): ConnectionOptions { let newOptions: ConnectionOptions = {}; if (config.connectionURI) { newOptions = { uri: config.connectionURI, }; } else if (config.connectionOptions) { newOptions = { ...config.connectionOptions, }; } const result: ConnectionOptions = { ...newOptions, connectAttributes: { ...newOptions.connectAttributes, }, }; if (!result.connectAttributes) { result.connectAttributes = {}; } result.connectAttributes = { ...result.connectAttributes, _connector_name: "langchain js sdk", _connector_version: "2.0.0", _driver_name: "Node-MySQL-2", }; return result; } /** * Class for interacting with SingleStoreDB, a high-performance * distributed SQL database. It provides vector storage and vector * functions. */ export class SingleStoreVectorStore extends VectorStore { connectionPool: Pool; tableName: string; idColumnName: string; contentColumnName: string; vectorColumnName: string; metadataColumnName: string; distanceMetric: DistanceMetrics; useVectorIndex: boolean; vectorIndexName: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any vectorIndexOptions: Metadata; vectorSize: number; useFullTextIndex: boolean; searchConfig: SearchConfig; _vectorstoreType(): string { return "singlestore"; } constructor( embeddings: EmbeddingsInterface, config: SingleStoreVectorStoreConfig ) { super(embeddings, config); this.connectionPool = createPool(withConnectAttributes(config)); this.tableName = config.tableName ?? "embeddings"; this.idColumnName = config.idColumnName ?? "id"; this.contentColumnName = config.contentColumnName ?? "content"; this.vectorColumnName = config.vectorColumnName ?? "vector"; this.metadataColumnName = config.metadataColumnName ?? "metadata"; this.distanceMetric = config.distanceMetric ?? "DOT_PRODUCT"; this.useVectorIndex = config.useVectorIndex ?? false; this.vectorIndexName = config.vectorIndexName ?? ""; this.vectorIndexOptions = config.vectorIndexOptions ?? 
{}; this.vectorSize = config.vectorSize ?? 1536; this.useFullTextIndex = config.useFullTextIndex ?? false; this.searchConfig = config.searchConfig ?? { searchStrategy: "VECTOR_ONLY", filterThreshold: 1.0, textWeight: 0.5, vectorWeight: 0.5, vectorselectCountMultiplier: 10, }; } /** * Creates a new table in the SingleStoreDB database if it does not * already exist. */ async createTableIfNotExists(): Promise<void> { let fullTextIndex = ""; if (this.useFullTextIndex) { fullTextIndex = `, FULLTEXT(${this.contentColumnName})`; } if (this.useVectorIndex) { let vectorIndexOptions = ""; if (Object.keys(this.vectorIndexOptions).length > 0) { vectorIndexOptions = `INDEX_OPTIONS '${JSON.stringify( this.vectorIndexOptions )}'`; } await this.connectionPool .execute(`CREATE TABLE IF NOT EXISTS ${this.tableName} ( ${this.idColumnName} BIGINT AUTO_INCREMENT PRIMARY KEY, ${this.contentColumnName} LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, ${this.vectorColumnName} VECTOR(${this.vectorSize}, F32) NOT NULL, ${this.metadataColumnName} JSON, VECTOR INDEX ${this.vectorIndexName} (${this.vectorColumnName}) ${vectorIndexOptions} ${fullTextIndex});`); } else { await this.connectionPool .execute(`CREATE TABLE IF NOT EXISTS ${this.tableName} ( ${this.idColumnName} BIGINT AUTO_INCREMENT PRIMARY KEY, ${this.contentColumnName} LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, ${this.vectorColumnName} BLOB, ${this.metadataColumnName} JSON ${fullTextIndex});`); } } /** * Ends the connection to the SingleStoreDB database. */ async end(): Promise<void> { return this.connectionPool.end(); } /** * Sets the search configuration for the SingleStoreVectorStore instance. * @param config A SearchConfig object. */ async setSearchConfig(config: SearchConfig): Promise<void> { this.searchConfig = { searchStrategy: config.searchStrategy ?? "VECTOR_ONLY", filterThreshold: config.filterThreshold ?? 1.0, textWeight: config.textWeight ?? 0.5, vectorWeight: config.vectorWeight ?? 
0.5, vectorselectCountMultiplier: config.vectorselectCountMultiplier ?? 10, }; } /** * Adds new documents to the SingleStoreDB database. * @param documents An array of Document objects. */ async addDocuments(documents: Document[]): Promise<void> { const texts = documents.map(({ pageContent }) => pageContent); const vectors = await this.embeddings.embedDocuments(texts); return this.addVectors(vectors, documents); } /** * Adds new vectors to the SingleStoreDB database. * @param vectors An array of vectors. * @param documents An array of Document objects. */ async addVectors(vectors: number[][], documents: Document[]): Promise<void> { await this.createTableIfNotExists(); const { tableName } = this; await Promise.all( vectors.map(async (vector, idx) => { try { await this.connectionPool.query( format( `INSERT INTO ${tableName}( ${this.contentColumnName}, ${this.vectorColumnName}, ${this.metadataColumnName}) VALUES (?, JSON_ARRAY_PACK('[?]'), ?);`, [ documents[idx].pageContent, vector, JSON.stringify(documents[idx].metadata), ] ) ); } catch (error) { console.error(`Error adding vector at index ${idx}:`, error); } }) ); if (this.useFullTextIndex || this.useVectorIndex) { await this.connectionPool.query(`OPTIMIZE TABLE ${tableName} FLUSH;`); } } /** * * Performs a similarity search on the texts stored in the SingleStoreDB * using the specified search strategy and distance metric. * @param query A string representing the query text. * @param vector An array of numbers representing the query vector. * @param k The number of nearest neighbors to return. * @param filter Optional metadata to filter the texts by. 
 * @returns Top matching documents with score
 */
  async similaritySearchTextAndVectorWithScore(
    query: string,
    vector: number[],
    k: number,
    filter?: Metadata
  ): Promise<[Document, number][]> {
    // Validate that the configured strategy has every knob it needs before
    // building any SQL.
    if (!this.searchConfig.searchStrategy) {
      throw new Error("Search strategy is required.");
    }
    if (
      this.searchConfig.searchStrategy !== "VECTOR_ONLY" &&
      !this.useFullTextIndex
    ) {
      throw new Error(
        "Full text index is required for text-based search strategies."
      );
    }
    if (
      (this.searchConfig.searchStrategy === "FILTER_BY_TEXT" ||
        this.searchConfig.searchStrategy === "FILTER_BY_VECTOR") &&
      !this.searchConfig.filterThreshold &&
      this.searchConfig.filterThreshold !== 0
    ) {
      throw new Error(
        "Filter threshold is required for filter-based search strategies."
      );
    }
    if (
      this.searchConfig.searchStrategy === "WEIGHTED_SUM" &&
      ((!this.searchConfig.textWeight && this.searchConfig.textWeight !== 0) ||
        (!this.searchConfig.vectorWeight &&
          this.searchConfig.vectorWeight !== 0) ||
        (!this.searchConfig.vectorselectCountMultiplier &&
          this.searchConfig.vectorselectCountMultiplier !== 0))
    ) {
      throw new Error(
        "Text and vector weight and vector select count multiplier are required for weighted sum search strategy."
      );
    }
    if (
      this.searchConfig.searchStrategy === "WEIGHTED_SUM" &&
      this.distanceMetric !== "DOT_PRODUCT"
    ) {
      throw new Error(
        "Weighted sum search strategy is only available for DOT_PRODUCT distance metric."
      );
    }
    const filterThreshold = this.searchConfig.filterThreshold ?? 1.0;
    // build the where clause from filter
    // NOTE: whereArgs is mutated in clause-construction order; the order of
    // the clause builders below must match the `?` placeholder order in the
    // final SQL, so do not reorder these calls.
    const whereArgs: string[] = [];
    const buildWhereClause = (record: Metadata, argList: string[]): string => {
      const whereTokens: string[] = [];
      for (const key in record)
        if (record[key] !== undefined) {
          if (
            typeof record[key] === "object" &&
            record[key] != null &&
            !Array.isArray(record[key])
          ) {
            // Nested object: recurse, extending the JSON path by this key.
            whereTokens.push(
              buildWhereClause(record[key], argList.concat([key]))
            );
          } else {
            // Leaf: one `?` per path segment plus one for the key, then the
            // JSON-encoded expected value.
            whereTokens.push(
              `JSON_EXTRACT_JSON(${this.metadataColumnName}, `.concat(
                Array.from({ length: argList.length + 1 }, () => "?").join(
                  ", "
                ),
                ") = ?"
              )
            );
            whereArgs.push(...argList, key, JSON.stringify(record[key]));
          }
        }
      return whereTokens.join(" AND ");
    };
    const filterByTextClause = (): string => {
      whereArgs.push(query, filterThreshold.toString());
      return `MATCH (${this.contentColumnName}) AGAINST (?) > ?`;
    };
    const filterByVectorClause = (): string => {
      whereArgs.push(JSON.stringify(vector), filterThreshold.toString());
      // Dot product: larger is better (>); Euclidean: smaller is better (<).
      return this.distanceMetric === "DOT_PRODUCT"
        ? `${this.distanceMetric}(${this.vectorColumnName}, JSON_ARRAY_PACK(?)) > ?`
        : `${this.distanceMetric}(${this.vectorColumnName}, JSON_ARRAY_PACK(?)) < ?`;
    };
    const whereClauses: string[] = [];
    if (filter) {
      whereClauses.push(buildWhereClause(filter, []));
    }
    if (this.searchConfig.searchStrategy === "FILTER_BY_TEXT") {
      whereClauses.push(filterByTextClause());
    }
    if (this.searchConfig.searchStrategy === "FILTER_BY_VECTOR") {
      whereClauses.push(filterByVectorClause());
    }
    const whereClause =
      whereClauses.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : "";
    let queryText = "";
    switch (this.searchConfig.searchStrategy) {
      // Rank by full-text score (FILTER_BY_VECTOR pre-filters rows by
      // vector similarity in the WHERE clause above).
      case "TEXT_ONLY":
      case "FILTER_BY_VECTOR":
        queryText = format(
          `SELECT ${this.contentColumnName}, ${this.metadataColumnName}, MATCH (${this.contentColumnName}) AGAINST (?) as __score
          FROM ${this.tableName} ${whereClause} ORDER BY __score DESC LIMIT ?;`,
          [query, ...whereArgs, k]
        );
        break;
      // Rank by vector distance (FILTER_BY_TEXT pre-filters rows by text
      // relevance in the WHERE clause above).
      case "VECTOR_ONLY":
      case "FILTER_BY_TEXT":
        queryText = format(
          `SELECT ${this.contentColumnName}, ${this.metadataColumnName}, ${
            this.distanceMetric
          }(${
            this.vectorColumnName
          }, JSON_ARRAY_PACK('[?]')) as __score FROM ${
            this.tableName
          } ${whereClause} ORDER BY __score ${
            OrderingDirective[this.distanceMetric]
          } LIMIT ?;`,
          [vector, ...whereArgs, k]
        );
        break;
      // Combine both scores: the vector side pre-selects
      // k * vectorselectCountMultiplier candidates before the weighted join.
      case "WEIGHTED_SUM":
        queryText = format(
          `SELECT ${this.contentColumnName}, ${
            this.metadataColumnName
          }, __score1 * ? + __score2 * ? as __score
          FROM (
            SELECT ${this.idColumnName}, ${this.contentColumnName}, ${
            this.metadataColumnName
          }, MATCH (${this.contentColumnName}) AGAINST (?) as __score1
            FROM ${this.tableName} ${whereClause}) r1 FULL OUTER JOIN (
            SELECT ${this.idColumnName}, ${this.distanceMetric}(${
            this.vectorColumnName
          }, JSON_ARRAY_PACK('[?]')) as __score2 FROM ${
            this.tableName
          } ${whereClause} ORDER BY __score2 ${
            OrderingDirective[this.distanceMetric]
          } LIMIT ?
          ) r2 ON r1.${this.idColumnName} = r2.${
            this.idColumnName
          } ORDER BY __score ${OrderingDirective[this.distanceMetric]} LIMIT ?`,
          [
            this.searchConfig.textWeight,
            this.searchConfig.vectorWeight,
            query,
            ...whereArgs,
            vector,
            ...whereArgs,
            k * (this.searchConfig.vectorselectCountMultiplier ??
10), k, ] ); break; default: throw new Error("Invalid search strategy."); } const [rows]: [ ( | RowDataPacket[] | RowDataPacket[][] | OkPacket | OkPacket[] | ResultSetHeader ), FieldPacket[] ] = await this.connectionPool.query(queryText); const result: [Document, number][] = []; for (const row of rows as RowDataPacket[]) { const rowData = row as unknown as Record<string, unknown>; result.push([ new Document({ pageContent: rowData[this.contentColumnName] as string, metadata: rowData[this.metadataColumnName] as Record<string, unknown>, }), Number(rowData.score), ]); } return result; } /** * Performs a similarity search on the texts stored in the SingleStoreDB * @param query A string representing the query text. * @param k The number of nearest neighbors to return. By default, it is 4. * @param filter Optional metadata to filter the texts by. * @param _callbacks - Callbacks object, not used in this implementation. * @returns Top matching documents */ async similaritySearch( query: string, k?: number, filter?: Metadata, _callbacks?: Callbacks | undefined ): Promise<DocumentInterface<Metadata>[]> { // @typescript-eslint/no-explicit-any const queryVector = await this.embeddings.embedQuery(query); return this.similaritySearchTextAndVectorWithScore( query, queryVector, k ?? 4, filter ).then((result) => result.map(([doc]) => doc)); } /** * Performs a similarity search on the texts stored in the SingleStoreDB * @param query A string representing the query text. * @param k The number of nearest neighbors to return. By default, it is 4. * @param filter Optional metadata to filter the texts by. 
* @param _callbacks * @returns Top matching documents with score */ async similaritySearchWithScore( query: string, k?: number, filter?: Metadata, _callbacks?: Callbacks | undefined ): Promise<[DocumentInterface<Metadata>, number][]> { // @typescript-eslint/no-explicit-any const queryVector = await this.embeddings.embedQuery(query); return this.similaritySearchTextAndVectorWithScore( query, queryVector, k ?? 4, filter ); } /** * Performs a similarity search on the vectors stored in the SingleStoreDB * database. * @param query An array of numbers representing the query vector. * @param k The number of nearest neighbors to return. * @param filter Optional metadata to filter the vectors by. * @returns Top matching vectors with score */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: Metadata ): Promise<[Document, number][]> { if (this.searchConfig.searchStrategy !== "VECTOR_ONLY") { throw new Error( "similaritySearchVectorWithScore is only available for VECTOR_ONLY search strategy." ); } return this.similaritySearchTextAndVectorWithScore("", query, k, filter); } /** * Creates a new instance of the SingleStoreVectorStore class from a list * of texts. * @param texts An array of strings. * @param metadatas An array of metadata objects. * @param embeddings An Embeddings object. * @param dbConfig A SingleStoreVectorStoreConfig object. * @returns A new SingleStoreVectorStore instance */ static async fromTexts( texts: string[], metadatas: object[], embeddings: EmbeddingsInterface, dbConfig: SingleStoreVectorStoreConfig ): Promise<SingleStoreVectorStore> { const docs = texts.map((text, idx) => { const metadata = Array.isArray(metadatas) ? metadatas[idx] : metadatas; return new Document({ pageContent: text, metadata, }); }); return SingleStoreVectorStore.fromDocuments(docs, embeddings, dbConfig); } /** * Creates a new instance of the SingleStoreVectorStore class from a list * of Document objects. * @param docs An array of Document objects. 
* @param embeddings An Embeddings object. * @param dbConfig A SingleStoreVectorStoreConfig object. * @returns A new SingleStoreVectorStore instance */ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: SingleStoreVectorStoreConfig ): Promise<SingleStoreVectorStore> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } }
0
lc_public_repos/langchainjs/libs/langchain-community/src
lc_public_repos/langchainjs/libs/langchain-community/src/vectorstores/chroma.ts
import * as uuid from "uuid";
import type {
  ChromaClient as ChromaClientT,
  Collection,
  ChromaClientParams,
  CollectionMetadata,
  Where,
} from "chromadb";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";

/**
 * Arguments shared by every way of constructing a `Chroma` store,
 * regardless of whether a server `url` or an existing client `index`
 * is supplied.
 */
type SharedChromaLibArgs = {
  // Expected embedding dimensionality; inferred from the first added vector
  // when omitted (see addVectors).
  numDimensions?: number;
  // Collection name; a random `langchain-<uuid>` name is generated when
  // omitted (see ensureCollectionName).
  collectionName?: string;
  // Default metadata filter applied to similarity searches when no
  // per-call filter is given.
  filter?: object;
  // Metadata attached to the collection when it is created.
  collectionMetadata?: CollectionMetadata;
  // Extra ChromaClient options; `path` is excluded because it is derived
  // from `url`.
  clientParams?: Omit<ChromaClientParams, "path">;
};

/**
 * Defines the arguments that can be passed to the `Chroma` class
 * constructor. It can either contain a `url` for the Chroma database, the
 * number of dimensions for the vectors (`numDimensions`), a
 * `collectionName` for the collection to be used in the database, and a
 * `filter` object; or it can contain an `index` which is an instance of
 * `ChromaClientT`, along with the `numDimensions`, `collectionName`, and
 * `filter`.
 */
export type ChromaLibArgs =
  | ({
      url?: string;
    } & SharedChromaLibArgs)
  | ({
      index?: ChromaClientT;
    } & SharedChromaLibArgs);

/**
 * Defines the parameters for the `delete` method in the `Chroma` class.
 * It can either contain an array of `ids` of the documents to be deleted
 * or a `filter` object to specify the documents to be deleted.
 */
export interface ChromaDeleteParams<T> {
  ids?: string[];
  filter?: T;
}

/**
 * Chroma vector store integration.
 *
 * Setup:
 * Install `@langchain/community` and `chromadb`.
 *
 * ```bash
 * npm install @langchain/community chromadb
 * ```
 *
 * ## [Constructor args](https://api.js.langchain.com/classes/langchain_community_vectorstores_chroma.Chroma.html#constructor)
 *
 * <details open>
 * <summary><strong>Instantiate</strong></summary>
 *
 * ```typescript
 * import { Chroma } from '@langchain/community/vectorstores/chroma';
 * // Or other embeddings
 * import { OpenAIEmbeddings } from '@langchain/openai';
 *
 * const embeddings = new OpenAIEmbeddings({
 *   model: "text-embedding-3-small",
 * })
 *
 * const vectorStore = new Chroma(
 *   embeddings,
 *   {
 *     collectionName: "foo",
 *     url: "http://localhost:8000", // URL of the Chroma server
 *   }
 * );
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Add documents</strong></summary>
 *
 * ```typescript
 * import type { Document } from '@langchain/core/documents';
 *
 * const document1 = { pageContent: "foo", metadata: { baz: "bar" } };
 * const document2 = { pageContent: "thud", metadata: { bar: "baz" } };
 * const document3 = { pageContent: "i will be deleted :(", metadata: {} };
 *
 * const documents: Document[] = [document1, document2, document3];
 * const ids = ["1", "2", "3"];
 * await vectorStore.addDocuments(documents, { ids });
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Delete documents</strong></summary>
 *
 * ```typescript
 * await vectorStore.delete({ ids: ["3"] });
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Similarity search</strong></summary>
 *
 * ```typescript
 * const results = await vectorStore.similaritySearch("thud", 1);
 * for (const doc of results) {
 *   console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);
 * }
 * // Output: * thud [{"baz":"bar"}]
 * ```
 * </details>
 *
 * <br />
 *
 *
 * <details>
 * <summary><strong>Similarity search with filter</strong></summary>
 *
 * ```typescript
 * const resultsWithFilter = await vectorStore.similaritySearch("thud", 1, { baz: "bar" });
 *
 * for (const doc of resultsWithFilter) {
 *   console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);
 * }
 * // Output: * foo [{"baz":"bar"}]
 * ```
 * </details>
 *
 * <br />
 *
 *
 * <details>
 * <summary><strong>Similarity search with score</strong></summary>
 *
 * ```typescript
 * const resultsWithScore = await vectorStore.similaritySearchWithScore("qux", 1);
 * for (const [doc, score] of resultsWithScore) {
 *   console.log(`* [SIM=${score.toFixed(6)}] ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);
 * }
 * // Output: * [SIM=0.000000] qux [{"bar":"baz","baz":"bar"}]
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>As a retriever</strong></summary>
 *
 * ```typescript
 * const retriever = vectorStore.asRetriever({
 *   searchType: "mmr", // Leave blank for standard similarity search
 *   k: 1,
 * });
 * const resultAsRetriever = await retriever.invoke("thud");
 * console.log(resultAsRetriever);
 *
 * // Output: [Document({ metadata: { "baz":"bar" }, pageContent: "thud" })]
 * ```
 * </details>
 *
 * <br />
 */
export class Chroma extends VectorStore {
  declare FilterType: Where;

  // Chroma client; created lazily by ensureCollection() when only a url was given.
  index?: ChromaClientT;

  // Cached collection handle; populated by ensureCollection().
  collection?: Collection;

  collectionName: string;

  collectionMetadata?: CollectionMetadata;

  // Embedding dimensionality; inferred from the first added vector if unset.
  numDimensions?: number;

  clientParams?: Omit<ChromaClientParams, "path">;

  url: string;

  // Instance-level default search filter; mutually exclusive with a
  // per-call filter (see similaritySearchVectorWithScore).
  filter?: object;

  _vectorstoreType(): string {
    return "chroma";
  }

  constructor(embeddings: EmbeddingsInterface, args: ChromaLibArgs) {
    super(embeddings, args);
    this.numDimensions = args.numDimensions;
    this.embeddings = embeddings;
    // Generates a random collection name when none was supplied.
    this.collectionName = ensureCollectionName(args.collectionName);
    this.collectionMetadata = args.collectionMetadata;
    this.clientParams = args.clientParams;
    // Either reuse the caller's client or remember the server URL so a
    // client can be created lazily in ensureCollection().
    if ("index" in args) {
      this.index = args.index;
    } else if ("url" in args) {
      this.url = args.url || "http://localhost:8000";
    }
    this.filter = args.filter;
  }

  /**
   * Adds documents to the Chroma database. The documents are first
   * converted to vectors using the `embeddings` instance, and then added to
   * the database.
   * @param documents An array of `Document` instances to be added to the database.
   * @param options Optional. An object containing an array of `ids` for the documents.
   * @returns A promise that resolves when the documents have been added to the database.
   */
  async addDocuments(documents: Document[], options?: { ids?: string[] }) {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents,
      options
    );
  }

  /**
   * Ensures that a collection exists in the Chroma database. If the
   * collection does not exist, it is created.
   * @returns A promise that resolves with the `Collection` instance.
   * @throws If getOrCreateCollection fails on the Chroma server.
   */
  async ensureCollection(): Promise<Collection> {
    if (!this.collection) {
      // Lazily create the client if the store was configured with a url
      // instead of an existing client instance.
      if (!this.index) {
        const chromaClient = new (await Chroma.imports()).ChromaClient({
          path: this.url,
          ...(this.clientParams ?? {}),
        });
        this.index = chromaClient;
      }
      try {
        this.collection = await this.index.getOrCreateCollection({
          name: this.collectionName,
          ...(this.collectionMetadata && { metadata: this.collectionMetadata }),
        });
      } catch (err) {
        throw new Error(`Chroma getOrCreateCollection error: ${err}`);
      }
    }
    return this.collection;
  }

  /**
   * Adds vectors to the Chroma database. The vectors are associated with
   * the provided documents.
   * @param vectors An array of vectors to be added to the database.
   * @param documents An array of `Document` instances associated with the vectors.
   * @param options Optional. An object containing an array of `ids` for the vectors.
   * @returns A promise that resolves with an array of document IDs when the vectors have been added to the database.
   */
  async addVectors(
    vectors: number[][],
    documents: Document[],
    options?: { ids?: string[] }
  ) {
    if (vectors.length === 0) {
      return [];
    }
    // Lock in the dimensionality from the first batch if not configured.
    if (this.numDimensions === undefined) {
      this.numDimensions = vectors[0].length;
    }
    if (vectors.length !== documents.length) {
      throw new Error(`Vectors and metadatas must have the same length`);
    }
    if (vectors[0].length !== this.numDimensions) {
      throw new Error(
        `Vectors must have the same length as the number of dimensions (${this.numDimensions})`
      );
    }
    const documentIds =
      options?.ids ?? Array.from({ length: vectors.length }, () => uuid.v1());
    const collection = await this.ensureCollection();
    // Flatten the nested `loc.lines.{from,to}` metadata into scalar
    // locFrom/locTo fields before storage — presumably because Chroma
    // metadata values must be flat (TODO confirm); the nested structure is
    // restored by similaritySearchVectorWithScore on the way out.
    const mappedMetadatas = documents.map(({ metadata }) => {
      let locFrom;
      let locTo;

      if (metadata?.loc) {
        if (metadata.loc.lines?.from !== undefined)
          locFrom = metadata.loc.lines.from;
        if (metadata.loc.lines?.to !== undefined) locTo = metadata.loc.lines.to;
      }

      const newMetadata: Document["metadata"] = {
        ...metadata,
        ...(locFrom !== undefined && { locFrom }),
        ...(locTo !== undefined && { locTo }),
      };

      if (newMetadata.loc) delete newMetadata.loc;

      return newMetadata;
    });
    await collection.upsert({
      ids: documentIds,
      embeddings: vectors,
      metadatas: mappedMetadatas,
      documents: documents.map(({ pageContent }) => pageContent),
    });
    return documentIds;
  }

  /**
   * Deletes documents from the Chroma database. The documents to be deleted
   * can be specified by providing an array of `ids` or a `filter` object.
   * @param params An object containing either an array of `ids` of the documents to be deleted or a `filter` object to specify the documents to be deleted.
   * @returns A promise that resolves when the specified documents have been deleted from the database.
*/ async delete(params: ChromaDeleteParams<this["FilterType"]>): Promise<void> { const collection = await this.ensureCollection(); if (Array.isArray(params.ids)) { await collection.delete({ ids: params.ids }); } else if (params.filter) { await collection.delete({ where: { ...params.filter }, }); } else { throw new Error(`You must provide one of "ids or "filter".`); } } /** * Searches for vectors in the Chroma database that are similar to the * provided query vector. The search can be filtered using the provided * `filter` object or the `filter` property of the `Chroma` instance. * @param query The query vector. * @param k The number of similar vectors to return. * @param filter Optional. A `filter` object to filter the search results. * @returns A promise that resolves with an array of tuples, each containing a `Document` instance and a similarity score. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ) { if (filter && this.filter) { throw new Error("cannot provide both `filter` and `this.filter`"); } const _filter = filter ?? this.filter; const where = _filter === undefined ? undefined : { ..._filter }; const collection = await this.ensureCollection(); // similaritySearchVectorWithScore supports one query vector at a time // chroma supports multiple query vectors at a time const result = await collection.query({ queryEmbeddings: query, nResults: k, where, }); const { ids, distances, documents, metadatas } = result; if (!ids || !distances || !documents || !metadatas) { return []; } // get the result data from the first and only query vector const [firstIds] = ids; const [firstDistances] = distances; const [firstDocuments] = documents; const [firstMetadatas] = metadatas; const results: [Document, number][] = []; for (let i = 0; i < firstIds.length; i += 1) { let metadata: Document["metadata"] = firstMetadatas?.[i] ?? 
{}; if (metadata.locFrom && metadata.locTo) { metadata = { ...metadata, loc: { lines: { from: metadata.locFrom, to: metadata.locTo, }, }, }; delete metadata.locFrom; delete metadata.locTo; } results.push([ new Document({ pageContent: firstDocuments?.[i] ?? "", metadata, id: firstIds[i], }), firstDistances[i], ]); } return results; } /** * Creates a new `Chroma` instance from an array of text strings. The text * strings are converted to `Document` instances and added to the Chroma * database. * @param texts An array of text strings. * @param metadatas An array of metadata objects or a single metadata object. If an array is provided, it must have the same length as the `texts` array. * @param embeddings An `Embeddings` instance used to generate embeddings for the documents. * @param dbConfig A `ChromaLibArgs` object containing the configuration for the Chroma database. * @returns A promise that resolves with a new `Chroma` instance. */ static async fromTexts( texts: string[], metadatas: object[] | object, embeddings: EmbeddingsInterface, dbConfig: ChromaLibArgs ): Promise<Chroma> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return this.fromDocuments(docs, embeddings, dbConfig); } /** * Creates a new `Chroma` instance from an array of `Document` instances. * The documents are added to the Chroma database. * @param docs An array of `Document` instances. * @param embeddings An `Embeddings` instance used to generate embeddings for the documents. * @param dbConfig A `ChromaLibArgs` object containing the configuration for the Chroma database. * @returns A promise that resolves with a new `Chroma` instance. 
*/ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, dbConfig: ChromaLibArgs ): Promise<Chroma> { const instance = new this(embeddings, dbConfig); await instance.addDocuments(docs); return instance; } /** * Creates a new `Chroma` instance from an existing collection in the * Chroma database. * @param embeddings An `Embeddings` instance used to generate embeddings for the documents. * @param dbConfig A `ChromaLibArgs` object containing the configuration for the Chroma database. * @returns A promise that resolves with a new `Chroma` instance. */ static async fromExistingCollection( embeddings: EmbeddingsInterface, dbConfig: ChromaLibArgs ): Promise<Chroma> { const instance = new this(embeddings, dbConfig); await instance.ensureCollection(); return instance; } /** @ignore */ static async imports(): Promise<{ ChromaClient: typeof ChromaClientT; }> { try { const { ChromaClient } = await import("chromadb"); return { ChromaClient }; } catch (e) { throw new Error( "Please install chromadb as a dependency with, e.g. `npm install -S chromadb`" ); } } } /** * Generates a unique collection name if none is provided. */ function ensureCollectionName(collectionName?: string) { if (!collectionName) { return `langchain-${uuid.v4()}`; } return collectionName; }