index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-mistralai/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
 * Resolve a path relative to the directory that contains this config file.
 * @param {string} relativePath - Path relative to this file's directory.
 * @returns {string} The resolved absolute path.
 */
function abs(relativePath) {
  const configDir = dirname(fileURLToPath(import.meta.url));
  return resolve(configDir, relativePath);
}
export const config = {
internals: [/node\:/, /@langchain\/core\//],
entrypoints: {
index: "index",
},
tsConfigPath: resolve("./tsconfig.json"),
cjsSource: "./dist-cjs",
cjsDestination: "./dist",
abs,
} |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-mistralai/package.json | {
"name": "@langchain/mistralai",
"version": "0.2.0",
"description": "MistralAI integration for LangChain.js",
"type": "module",
"engines": {
"node": ">=18"
},
"main": "./index.js",
"types": "./index.d.ts",
"repository": {
"type": "git",
"url": "git@github.com:langchain-ai/langchainjs.git"
},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-mistralai/",
"scripts": {
"build": "yarn turbo:command build:internal --filter=@langchain/mistralai",
"build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking",
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
"lint": "yarn lint:eslint && yarn lint:dpdm",
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"clean": "rm -rf .turbo dist/",
"prepack": "yarn build",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"test:standard": "yarn test:standard:unit && yarn test:standard:int",
"format": "prettier --config .prettierrc --write \"src\"",
"format:check": "prettier --config .prettierrc --check \"src\""
},
"author": "LangChain",
"license": "MIT",
"dependencies": {
"@mistralai/mistralai": "^1.3.1",
"uuid": "^10.0.0",
"zod": "^3.23.8",
"zod-to-json-schema": "^3.22.4"
},
"peerDependencies": {
"@langchain/core": ">=0.3.7 <0.4.0"
},
"devDependencies": {
"@jest/globals": "^29.5.0",
"@langchain/core": "workspace:*",
"@langchain/scripts": ">=0.1.0 <0.2.0",
"@langchain/standard-tests": "0.0.0",
"@swc/core": "^1.3.90",
"@swc/jest": "^0.2.29",
"@tsconfig/recommended": "^1.0.3",
"@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.12.0",
"dotenv": "^16.3.1",
"dpdm": "^3.12.0",
"eslint": "^8.33.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-prettier": "^8.6.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.5.0",
"jest-environment-node": "^29.6.4",
"prettier": "^2.8.3",
"release-it": "^17.6.0",
"rollup": "^4.5.2",
"ts-jest": "^29.1.0",
"typescript": "<5.2.0"
},
"publishConfig": {
"access": "public"
},
"exports": {
".": {
"types": {
"import": "./index.d.ts",
"require": "./index.d.cts",
"default": "./index.d.ts"
},
"import": "./index.js",
"require": "./index.cjs"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"index.cjs",
"index.js",
"index.d.ts",
"index.d.cts"
]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-mistralai/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"declaration": false
},
"exclude": [
"node_modules",
"dist",
"docs",
"**/tests"
]
} |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-mistralai/turbo.json | {
"extends": ["//"],
"pipeline": {
"build": {
"outputs": ["**/dist/**"]
},
"build:internal": {
"dependsOn": ["^build:internal"]
}
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-mistralai/.prettierrc | {
"$schema": "https://json.schemastore.org/prettierrc",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"arrowParens": "always",
"requirePragma": false,
"insertPragma": false,
"proseWrap": "preserve",
"htmlWhitespaceSensitivity": "css",
"vueIndentScriptAndStyle": false,
"endOfLine": "lf"
}
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai | lc_public_repos/langchainjs/libs/langchain-mistralai/src/llms.ts | import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { BaseLLMParams, LLM } from "@langchain/core/language_models/llms";
import { type BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { GenerationChunk, LLMResult } from "@langchain/core/outputs";
import { FIMCompletionRequest as MistralAIFIMCompletionRequest } from "@mistralai/mistralai/models/components/fimcompletionrequest.js";
import { FIMCompletionStreamRequest as MistralAIFIMCompletionStreamRequest } from "@mistralai/mistralai/models/components/fimcompletionstreamrequest.js";
import { FIMCompletionResponse as MistralAIFIMCompletionResponse } from "@mistralai/mistralai/models/components/fimcompletionresponse.js";
import { ChatCompletionChoice as MistralAIChatCompletionChoice } from "@mistralai/mistralai/models/components/chatcompletionchoice.js";
import { CompletionEvent as MistralAIChatCompletionEvent } from "@mistralai/mistralai/models/components/completionevent.js";
import { CompletionChunk as MistralAICompetionChunk } from "@mistralai/mistralai/models/components/completionchunk.js";
import {
BeforeRequestHook,
RequestErrorHook,
ResponseHook,
HTTPClient as MistralAIHTTPClient,
} from "@mistralai/mistralai/lib/http.js";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { chunkArray } from "@langchain/core/utils/chunk_array";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
/**
 * Per-invocation call options for the {@link MistralAI} completions LLM.
 */
export interface MistralAICallOptions extends BaseLanguageModelCallOptions {
  /**
   * Optional text/code that adds more context for the model.
   * When given a prompt and a suffix the model will fill what
   * is between them. When suffix is not provided, the model
   * will simply execute completion starting with prompt.
   */
  suffix?: string;
}
/**
 * Constructor fields for the {@link MistralAI} completions LLM.
 */
export interface MistralAIInput extends BaseLLMParams {
  /**
   * The name of the model to use.
   * @default "codestral-latest"
   */
  model?: string;
  /**
   * The API key to use.
   * @default {process.env.MISTRAL_API_KEY}
   */
  apiKey?: string;
  /**
   * Override the default server URL used by the Mistral SDK.
   * @deprecated use serverURL instead
   */
  endpoint?: string;
  /**
   * Override the default server URL used by the Mistral SDK.
   */
  serverURL?: string;
  /**
   * What sampling temperature to use, between 0.0 and 2.0.
   * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
   * @default {0}
   */
  temperature?: number;
  /**
   * Nucleus sampling, where the model considers the results of the tokens with `topP` probability mass.
   * So 0.1 means only the tokens comprising the top 10% probability mass are considered.
   * Should be between 0 and 1.
   * @default {1}
   */
  topP?: number;
  /**
   * The maximum number of tokens to generate in the completion.
   * The token count of your prompt plus maxTokens cannot exceed the model's context length.
   */
  maxTokens?: number;
  /**
   * Whether or not to stream the response.
   * @default {false}
   */
  streaming?: boolean;
  /**
   * The seed to use for random sampling. If set, different calls will generate deterministic results.
   * Alias for `seed`
   */
  randomSeed?: number;
  /**
   * Batch size to use when passing multiple documents to generate
   */
  batchSize?: number;
  /**
   * A list of custom hooks that must follow (req: Request) => Awaitable<Request | void>
   * They are automatically added when a ChatMistralAI instance is created
   */
  beforeRequestHooks?: BeforeRequestHook[];
  /**
   * A list of custom hooks that must follow (err: unknown, req: Request) => Awaitable<void>
   * They are automatically added when a ChatMistralAI instance is created
   */
  requestErrorHooks?: RequestErrorHook[];
  /**
   * A list of custom hooks that must follow (res: Response, req: Request) => Awaitable<void>
   * They are automatically added when a ChatMistralAI instance is created
   */
  responseHooks?: ResponseHook[];
  /**
   * Optional custom HTTP client to manage API requests
   * Allows users to add custom fetch implementations, hooks, as well as error and response processing.
   */
  httpClient?: MistralAIHTTPClient;
}
/**
 * MistralAI completions LLM.
 *
 * Wraps the MistralAI fill-in-the-middle ("FIM") completion endpoints
 * (`client.fim.complete` / `client.fim.stream`) behind LangChain's `LLM`
 * interface. Requires an API key, supplied either via the `apiKey`
 * constructor field or the `MISTRAL_API_KEY` environment variable.
 */
export class MistralAI
  extends LLM<MistralAICallOptions>
  implements MistralAIInput
{
  static lc_name() {
    return "MistralAI";
  }

  // Serialization namespace used by LangChain's load/save machinery.
  lc_namespace = ["langchain", "llms", "mistralai"];

  lc_serializable = true;

  model = "codestral-latest";

  temperature = 0;

  topP?: number;

  maxTokens?: number | undefined;

  randomSeed?: number | undefined;

  streaming = false;

  // Number of prompts per sub-batch sent sequentially by `_generate`.
  batchSize = 20;

  apiKey: string;

  /**
   * @deprecated use serverURL instead
   */
  // NOTE(review): declared to satisfy MistralAIInput but never assigned in
  // the constructor — a user-provided `endpoint` field appears to be ignored.
  endpoint: string;

  serverURL?: string;

  maxRetries?: number;

  maxConcurrency?: number;

  beforeRequestHooks?: Array<BeforeRequestHook>;

  requestErrorHooks?: Array<RequestErrorHook>;

  responseHooks?: Array<ResponseHook>;

  httpClient?: MistralAIHTTPClient;

  /**
   * @param fields Optional configuration; see {@link MistralAIInput}.
   * @throws If no API key is provided and `MISTRAL_API_KEY` is unset.
   */
  constructor(fields?: MistralAIInput) {
    super(fields ?? {});
    this.model = fields?.model ?? this.model;
    this.temperature = fields?.temperature ?? this.temperature;
    this.topP = fields?.topP ?? this.topP;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.randomSeed = fields?.randomSeed ?? this.randomSeed;
    this.batchSize = fields?.batchSize ?? this.batchSize;
    this.streaming = fields?.streaming ?? this.streaming;
    this.serverURL = fields?.serverURL ?? this.serverURL;
    this.maxRetries = fields?.maxRetries;
    this.maxConcurrency = fields?.maxConcurrency;
    this.beforeRequestHooks =
      fields?.beforeRequestHooks ?? this.beforeRequestHooks;
    this.requestErrorHooks =
      fields?.requestErrorHooks ?? this.requestErrorHooks;
    this.responseHooks = fields?.responseHooks ?? this.responseHooks;
    this.httpClient = fields?.httpClient ?? this.httpClient;
    const apiKey = fields?.apiKey ?? getEnvironmentVariable("MISTRAL_API_KEY");
    if (!apiKey) {
      throw new Error(
        `MistralAI requires an API key to be set.
Either provide one via the "apiKey" field in the constructor, or set the "MISTRAL_API_KEY" environment variable.`
      );
    }
    this.apiKey = apiKey;
    // Register any user-supplied hooks on the (possibly lazily created)
    // HTTP client.
    this.addAllHooksToHttpClient();
  }

  // Maps secret fields to environment-variable names for serialization.
  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "MISTRAL_API_KEY",
    };
  }

  // Field aliases used when (de)serializing this model.
  get lc_aliases(): { [key: string]: string } | undefined {
    return {
      apiKey: "mistral_api_key",
    };
  }

  _llmType() {
    return "mistralai";
  }

  /**
   * Builds the request parameters (everything except `prompt`) sent to the
   * MistralAI FIM endpoints for a given set of parsed call options.
   */
  invocationParams(
    options: this["ParsedCallOptions"]
  ): Omit<
    MistralAIFIMCompletionRequest | MistralAIFIMCompletionStreamRequest,
    "prompt"
  > {
    return {
      model: this.model,
      suffix: options.suffix,
      temperature: this.temperature,
      maxTokens: this.maxTokens,
      topP: this.topP,
      randomSeed: this.randomSeed,
      stop: options.stop,
    };
  }

  /**
   * For some given input string and options, return a string output.
   *
   * Despite the fact that `invoke` is overridden below, we still need this
   * in order to handle public APi calls to `generate()`.
   *
   * NOTE(review): no `invoke` override is visible in this class — the
   * sentence above may be stale; confirm against the full file history.
   */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    const params = {
      ...this.invocationParams(options),
      prompt,
    };
    const result = await this.completionWithRetry(params, options, false);
    // NOTE(review): `choices?.[0].message` will throw if `choices` is an
    // empty array — assumes the API always returns at least one choice.
    let content = result?.choices?.[0].message.content ?? "";
    if (Array.isArray(content)) {
      // Only the first content chunk is used, and only when it is text.
      content = content[0].type === "text" ? content[0].text : "";
    }
    return content;
  }

  /**
   * Generates completions for a batch of prompts, `batchSize` prompts at a
   * time. In streaming mode, chunks are accumulated per prompt into
   * synthetic non-streaming choices so both paths produce the same shape.
   */
  async _generate(
    prompts: string[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<LLMResult> {
    const subPrompts = chunkArray(prompts, this.batchSize);
    // One entry (an array of choices) per prompt, across all sub-batches.
    const choices: MistralAIChatCompletionChoice[][] = [];
    const params = this.invocationParams(options);
    for (let i = 0; i < subPrompts.length; i += 1) {
      const data = await (async () => {
        if (this.streaming) {
          const responseData: Array<
            { choices: MistralAIChatCompletionChoice[] } & Partial<
              Omit<MistralAICompetionChunk, "choices">
            >
          > = [];
          for (let x = 0; x < subPrompts[i].length; x += 1) {
            // Shadows the outer `choices`; accumulates this prompt's
            // streamed deltas into complete choice objects, keyed by index.
            const choices: MistralAIChatCompletionChoice[] = [];
            let response:
              | Omit<MistralAICompetionChunk, "choices" | "usage">
              | undefined;
            const stream = await this.completionWithRetry(
              {
                ...params,
                prompt: subPrompts[i][x],
              },
              options,
              true
            );
            for await (const { data } of stream) {
              // on the first message set the response properties
              if (!response) {
                response = {
                  id: data.id,
                  object: "chat.completion",
                  created: data.created,
                  model: data.model,
                };
              }
              // on all messages, update choice
              for (const part of data.choices) {
                let content = part.delta.content ?? "";
                // Convert MistralContentChunk data into a string
                if (Array.isArray(content)) {
                  let strContent = "";
                  for (const contentChunk of content) {
                    if (contentChunk.type === "text") {
                      strContent += contentChunk.text;
                    } else if (contentChunk.type === "image_url") {
                      // imageUrl may be a bare string or an object with a
                      // `url` property; normalize to the string form.
                      const imageURL =
                        typeof contentChunk.imageUrl === "string"
                          ? contentChunk.imageUrl
                          : contentChunk.imageUrl.url;
                      strContent += imageURL;
                    }
                  }
                  content = strContent;
                }
                if (!choices[part.index]) {
                  // First delta for this choice index: seed the choice.
                  choices[part.index] = {
                    index: part.index,
                    message: {
                      role: "assistant",
                      content,
                      toolCalls: null,
                    },
                    finishReason: part.finishReason ?? "length",
                  };
                } else {
                  // Subsequent deltas: append content, update finish reason.
                  const choice = choices[part.index];
                  choice.message.content += content;
                  choice.finishReason = part.finishReason ?? "length";
                }
                // Fire-and-forget token callback; not awaited by design.
                void runManager?.handleLLMNewToken(content, {
                  prompt: part.index,
                  completion: part.index,
                });
              }
            }
            if (options.signal?.aborted) {
              throw new Error("AbortError");
            }
            responseData.push({
              ...response,
              choices,
            });
          }
          return responseData;
        } else {
          // Non-streaming: issue one request per prompt, sequentially.
          const responseData: Array<MistralAIFIMCompletionResponse> = [];
          for (let x = 0; x < subPrompts[i].length; x += 1) {
            const res = await this.completionWithRetry(
              {
                ...params,
                prompt: subPrompts[i][x],
              },
              options,
              false
            );
            responseData.push(res);
          }
          return responseData;
        }
      })();
      choices.push(...data.map((d) => d.choices ?? []));
    }
    // Flatten choices into LangChain generations (text + finish reason).
    const generations = choices.map((promptChoices) =>
      promptChoices.map((choice) => {
        let text = choice.message?.content ?? "";
        if (Array.isArray(text)) {
          text = text[0].type === "text" ? text[0].text : "";
        }
        return {
          text,
          generationInfo: {
            finishReason: choice.finishReason,
          },
        };
      })
    );
    return {
      generations,
    };
  }

  /** Non-streaming overload: returns the full completion response. */
  async completionWithRetry(
    request: MistralAIFIMCompletionRequest,
    options: this["ParsedCallOptions"],
    stream: false
  ): Promise<MistralAIFIMCompletionResponse>;

  /** Streaming overload: returns an async iterable of completion events. */
  async completionWithRetry(
    request: MistralAIFIMCompletionStreamRequest,
    options: this["ParsedCallOptions"],
    stream: true
  ): Promise<AsyncIterable<MistralAIChatCompletionEvent>>;

  /**
   * Calls the MistralAI FIM endpoint with retry/concurrency control via
   * `AsyncCaller`. A fresh SDK client is constructed for every call.
   */
  async completionWithRetry(
    request:
      | MistralAIFIMCompletionRequest
      | MistralAIFIMCompletionStreamRequest,
    options: this["ParsedCallOptions"],
    stream: boolean
  ): Promise<
    MistralAIFIMCompletionResponse | AsyncIterable<MistralAIChatCompletionEvent>
  > {
    const { Mistral } = await this.imports();
    const caller = new AsyncCaller({
      maxConcurrency: options.maxConcurrency || this.maxConcurrency,
      maxRetries: this.maxRetries,
    });
    const client = new Mistral({
      apiKey: this.apiKey,
      serverURL: this.serverURL,
      timeoutMs: options.timeout,
      // If httpClient exists, pass it into constructor
      ...(this.httpClient ? { httpClient: this.httpClient } : {}),
    });
    return caller.callWithOptions(
      {
        signal: options.signal,
      },
      async () => {
        try {
          let res:
            | MistralAIFIMCompletionResponse
            | AsyncIterable<MistralAIChatCompletionEvent>;
          if (stream) {
            res = await client.fim.stream(request);
          } else {
            res = await client.fim.complete(request);
          }
          return res;
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
        } catch (e: any) {
          // Heuristically tag client errors with `status = 400` based on
          // the message text — presumably so the retry logic treats them
          // as non-retryable; confirm against AsyncCaller's behavior.
          if (
            e.message?.includes("status: 400") ||
            e.message?.toLowerCase().includes("status 400") ||
            e.message?.includes("validation failed")
          ) {
            e.status = 400;
          }
          throw e;
        }
      }
    );
  }

  /**
   * Streams completion chunks for a single prompt, yielding one
   * `GenerationChunk` per received delta.
   */
  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const params = {
      ...this.invocationParams(options),
      prompt,
    };
    const stream = await this.completionWithRetry(params, options, true);
    for await (const message of stream) {
      const { data } = message;
      // Only the first choice of each chunk is surfaced.
      const choice = data?.choices[0];
      if (!choice) {
        continue;
      }
      let text = choice.delta.content ?? "";
      if (Array.isArray(text)) {
        text = text[0].type === "text" ? text[0].text : "";
      }
      const chunk = new GenerationChunk({
        text,
        generationInfo: {
          finishReason: choice.finishReason,
          tokenUsage: data.usage,
        },
      });
      yield chunk;
      // eslint-disable-next-line no-void
      void runManager?.handleLLMNewToken(chunk.text ?? "");
    }
    if (options.signal?.aborted) {
      throw new Error("AbortError");
    }
  }

  /**
   * Registers all configured hooks on the HTTP client, creating a client
   * if hooks exist but no client was supplied. Existing registrations are
   * removed first to avoid duplicates.
   */
  addAllHooksToHttpClient() {
    try {
      // To prevent duplicate hooks
      this.removeAllHooksFromHttpClient();
      // If the user wants to use hooks, but hasn't created an HTTPClient yet
      const hasHooks = [
        this.beforeRequestHooks,
        this.requestErrorHooks,
        this.responseHooks,
      ].some((hook) => hook && hook.length > 0);
      if (hasHooks && !this.httpClient) {
        this.httpClient = new MistralAIHTTPClient();
      }
      if (this.beforeRequestHooks) {
        for (const hook of this.beforeRequestHooks) {
          this.httpClient?.addHook("beforeRequest", hook);
        }
      }
      if (this.requestErrorHooks) {
        for (const hook of this.requestErrorHooks) {
          this.httpClient?.addHook("requestError", hook);
        }
      }
      if (this.responseHooks) {
        for (const hook of this.responseHooks) {
          this.httpClient?.addHook("response", hook);
        }
      }
      // NOTE(review): the catch below discards the original error, which
      // hides the real failure cause from callers.
    } catch {
      throw new Error("Error in adding all hooks");
    }
  }

  /**
   * Deregisters every configured hook from the HTTP client (no-op when no
   * client exists).
   */
  removeAllHooksFromHttpClient() {
    try {
      if (this.beforeRequestHooks) {
        for (const hook of this.beforeRequestHooks) {
          this.httpClient?.removeHook("beforeRequest", hook);
        }
      }
      if (this.requestErrorHooks) {
        for (const hook of this.requestErrorHooks) {
          this.httpClient?.removeHook("requestError", hook);
        }
      }
      if (this.responseHooks) {
        for (const hook of this.responseHooks) {
          this.httpClient?.removeHook("response", hook);
        }
      }
      // NOTE(review): original error discarded here as well.
    } catch {
      throw new Error("Error in removing hooks");
    }
  }

  /**
   * Removes a single hook from the HTTP client. The hook's concrete kind is
   * unknown, so removal is attempted against all three hook registries.
   */
  removeHookFromHttpClient(
    hook: BeforeRequestHook | RequestErrorHook | ResponseHook
  ) {
    try {
      this.httpClient?.removeHook("beforeRequest", hook as BeforeRequestHook);
      this.httpClient?.removeHook("requestError", hook as RequestErrorHook);
      this.httpClient?.removeHook("response", hook as ResponseHook);
    } catch {
      throw new Error("Error in removing hook");
    }
  }

  /** @ignore */
  // Lazily imports the MistralAI SDK so module load stays cheap.
  private async imports() {
    const { Mistral } = await import("@mistralai/mistralai");
    return { Mistral };
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai | lc_public_repos/langchainjs/libs/langchain-mistralai/src/index.ts | export * from "./chat_models.js";
export * from "./embeddings.js";
export * from "./llms.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai | lc_public_repos/langchainjs/libs/langchain-mistralai/src/chat_models.ts | import { v4 as uuidv4 } from "uuid";
import { Mistral as MistralClient } from "@mistralai/mistralai";
import {
ChatCompletionRequest as MistralAIChatCompletionRequest,
ChatCompletionRequestToolChoice as MistralAIToolChoice,
Messages as MistralAIMessage,
} from "@mistralai/mistralai/models/components/chatcompletionrequest.js";
import { ContentChunk as MistralAIContentChunk } from "@mistralai/mistralai/models/components/contentchunk.js";
import { Tool as MistralAITool } from "@mistralai/mistralai/models/components/tool.js";
import { ToolCall as MistralAIToolCall } from "@mistralai/mistralai/models/components/toolcall.js";
import { ChatCompletionStreamRequest as MistralAIChatCompletionStreamRequest } from "@mistralai/mistralai/models/components/chatcompletionstreamrequest.js";
import { UsageInfo as MistralAITokenUsage } from "@mistralai/mistralai/models/components/usageinfo.js";
import { CompletionEvent as MistralAIChatCompletionEvent } from "@mistralai/mistralai/models/components/completionevent.js";
import { ChatCompletionResponse as MistralAIChatCompletionResponse } from "@mistralai/mistralai/models/components/chatcompletionresponse.js";
import {
type BeforeRequestHook,
type RequestErrorHook,
type ResponseHook,
HTTPClient as MistralAIHTTPClient,
} from "@mistralai/mistralai/lib/http.js";
import {
BaseMessage,
MessageType,
MessageContent,
MessageContentComplex,
AIMessage,
HumanMessage,
HumanMessageChunk,
AIMessageChunk,
ToolMessageChunk,
ChatMessageChunk,
FunctionMessageChunk,
isAIMessage,
} from "@langchain/core/messages";
import type {
BaseLanguageModelInput,
BaseLanguageModelCallOptions,
StructuredOutputMethodOptions,
FunctionDefinition,
} from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
type BaseChatModelParams,
BaseChatModel,
BindToolsInput,
LangSmithParams,
} from "@langchain/core/language_models/chat_models";
import {
ChatGeneration,
ChatGenerationChunk,
ChatResult,
} from "@langchain/core/outputs";
import { AsyncCaller } from "@langchain/core/utils/async_caller";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { NewTokenIndices } from "@langchain/core/callbacks/base";
import { z } from "zod";
import {
type BaseLLMOutputParser,
JsonOutputParser,
StructuredOutputParser,
} from "@langchain/core/output_parsers";
import {
JsonOutputKeyToolsParser,
convertLangChainToolCallToOpenAI,
makeInvalidToolCall,
parseToolCall,
} from "@langchain/core/output_parsers/openai_tools";
import {
Runnable,
RunnablePassthrough,
RunnableSequence,
} from "@langchain/core/runnables";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ToolCallChunk } from "@langchain/core/messages/tool";
import {
_convertToolCallIdToMistralCompatible,
_mistralContentChunkToMessageContentComplex,
} from "./utils.js";
/**
 * Aggregated token usage counts accumulated over a chat request.
 * All fields are optional because some responses omit usage info.
 */
interface TokenUsage {
  completionTokens?: number;
  promptTokens?: number;
  totalTokens?: number;
}

/**
 * Union of the tool representations this integration accepts: raw MistralAI
 * tool calls or tools, or LangChain's generic `BindToolsInput`.
 */
type ChatMistralAIToolType = MistralAIToolCall | MistralAITool | BindToolsInput;
/**
 * Per-invocation call options for the ChatMistralAI chat model.
 * Note that `stop` is omitted from the inherited base options.
 */
export interface ChatMistralAICallOptions
  extends Omit<BaseLanguageModelCallOptions, "stop"> {
  /** Response format: plain text or a JSON object. */
  response_format?: {
    type: "text" | "json_object";
  };
  /** Tools the model may call during this invocation. */
  tools?: ChatMistralAIToolType[];
  /** Controls whether/which tool the model is required to call. */
  tool_choice?: MistralAIToolChoice;
  /**
   * Whether or not to include token usage in the stream.
   * @default {true}
   */
  streamUsage?: boolean;
}
/**
 * Input to chat model class.
 * Constructor fields for the ChatMistralAI chat model.
 */
export interface ChatMistralAIInput
  extends BaseChatModelParams,
    Pick<ChatMistralAICallOptions, "streamUsage"> {
  /**
   * The API key to use.
   * @default {process.env.MISTRAL_API_KEY}
   */
  apiKey?: string;
  /**
   * The name of the model to use.
   * Alias for `model`
   * @deprecated Use `model` instead.
   * @default {"mistral-small-latest"}
   */
  modelName?: string;
  /**
   * The name of the model to use.
   * @default {"mistral-small-latest"}
   */
  model?: string;
  /**
   * Override the default server URL used by the Mistral SDK.
   * @deprecated use serverURL instead
   */
  endpoint?: string;
  /**
   * Override the default server URL used by the Mistral SDK.
   */
  serverURL?: string;
  /**
   * What sampling temperature to use, between 0.0 and 2.0.
   * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
   * @default {0.7}
   */
  temperature?: number;
  /**
   * Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass.
   * So 0.1 means only the tokens comprising the top 10% probability mass are considered.
   * Should be between 0 and 1.
   * @default {1}
   */
  topP?: number;
  /**
   * The maximum number of tokens to generate in the completion.
   * The token count of your prompt plus max_tokens cannot exceed the model's context length.
   */
  maxTokens?: number;
  /**
   * Whether or not to stream the response.
   * @default {false}
   */
  streaming?: boolean;
  /**
   * Whether to inject a safety prompt before all conversations.
   * @default {false}
   * @deprecated use safePrompt instead
   */
  safeMode?: boolean;
  /**
   * Whether to inject a safety prompt before all conversations.
   * @default {false}
   */
  safePrompt?: boolean;
  /**
   * The seed to use for random sampling. If set, different calls will generate deterministic results.
   * Alias for `seed`
   */
  randomSeed?: number;
  /**
   * The seed to use for random sampling. If set, different calls will generate deterministic results.
   */
  seed?: number;
  /**
   * A list of custom hooks that must follow (req: Request) => Awaitable<Request | void>
   * They are automatically added when a ChatMistralAI instance is created.
   */
  beforeRequestHooks?: BeforeRequestHook[];
  /**
   * A list of custom hooks that must follow (err: unknown, req: Request) => Awaitable<void>.
   * They are automatically added when a ChatMistralAI instance is created.
   */
  requestErrorHooks?: RequestErrorHook[];
  /**
   * A list of custom hooks that must follow (res: Response, req: Request) => Awaitable<void>.
   * They are automatically added when a ChatMistralAI instance is created.
   */
  responseHooks?: ResponseHook[];
  /**
   * Custom HTTP client to manage API requests.
   * Allows users to add custom fetch implementations, hooks, as well as error and response processing.
   */
  httpClient?: MistralAIHTTPClient;
  /**
   * Determines how much the model penalizes the repetition of words or phrases. A higher presence
   * penalty encourages the model to use a wider variety of words and phrases, making the output
   * more diverse and creative.
   */
  presencePenalty?: number;
  /**
   * Penalizes the repetition of words based on their frequency in the generated text. A higher
   * frequency penalty discourages the model from repeating words that have already appeared frequently
   * in the output, promoting diversity and reducing repetition.
   */
  frequencyPenalty?: number;
  /**
   * Number of completions to return for each request, input tokens are only billed once.
   */
  numCompletions?: number;
}
/**
 * Converts LangChain `BaseMessage`s into the MistralAI SDK message format.
 *
 * Role mapping: human → "user", ai/function → "assistant", system → "system",
 * tool → "tool". Complex content is limited to "text" chunks, plus
 * "image_url" chunks for the user/assistant roles. Tool messages carry a
 * Mistral-compatible `toolCallId`; assistant messages carry either content
 * OR tool calls, never both.
 *
 * @param messages - The LangChain messages to convert.
 * @returns The equivalent MistralAI messages.
 * @throws On unknown message types, unsupported content chunk types, or
 *   non-string/non-array content.
 */
function convertMessagesToMistralMessages(
  messages: Array<BaseMessage>
): Array<MistralAIMessage> {
  // Maps a LangChain message type to the MistralAI role string.
  const getRole = (role: MessageType) => {
    switch (role) {
      case "human":
        return "user";
      case "ai":
        return "assistant";
      case "system":
        return "system";
      case "tool":
        return "tool";
      case "function":
        return "assistant";
      default:
        throw new Error(`Unknown message type: ${role}`);
    }
  };
  // Converts LangChain message content (string or complex array) into the
  // MistralAI content shape, validating chunk types against the role.
  const getContent = (
    content: MessageContent,
    type: MessageType
  ): string | MistralAIContentChunk[] => {
    const _generateContentChunk = (
      complex: MessageContentComplex,
      role: string
    ): MistralAIContentChunk => {
      if (
        complex.type === "image_url" &&
        (role === "user" || role === "assistant")
      ) {
        return {
          type: complex.type,
          imageUrl: complex?.image_url,
        };
      }
      if (complex.type === "text") {
        return {
          type: complex.type,
          text: complex?.text,
        };
      }
      throw new Error(
        `ChatMistralAI only supports messages of "image_url" for roles "user" and "assistant", and "text" for all others.\n\nReceived: ${JSON.stringify(
          content,
          null,
          2
        )}`
      );
    };
    if (typeof content === "string") {
      return content;
    }
    if (Array.isArray(content)) {
      const mistralRole = getRole(type);
      // Mistral "assistant" and "user" roles can support Mistral ContentChunks
      // Mistral "system" role can support Mistral TextChunks
      const newContent: MistralAIContentChunk[] = [];
      content.forEach((messageContentComplex) => {
        // Mistral content chunks only support type "text" and "image_url"
        if (
          messageContentComplex.type === "text" ||
          messageContentComplex.type === "image_url"
        ) {
          newContent.push(
            _generateContentChunk(messageContentComplex, mistralRole)
          );
        } else {
          throw new Error(
            `Mistral only supports types "text" or "image_url" for complex message types.`
          );
        }
      });
      return newContent;
    }
    throw new Error(
      `Message content must be a string or an array.\n\nReceived: ${JSON.stringify(
        content,
        null,
        2
      )}`
    );
  };
  // Extracts an AI message's tool calls (if any), normalizing each id to a
  // Mistral-compatible form and converting to the OpenAI-style shape.
  const getTools = (message: BaseMessage): MistralAIToolCall[] | undefined => {
    if (isAIMessage(message) && !!message.tool_calls?.length) {
      return message.tool_calls
        .map((toolCall) => ({
          ...toolCall,
          id: _convertToolCallIdToMistralCompatible(toolCall.id ?? ""),
        }))
        .map(convertLangChainToolCallToOpenAI) as MistralAIToolCall[];
    }
    return undefined;
  };
  return messages.map((message) => {
    const toolCalls = getTools(message);
    const content = getContent(message.content, message.getType());
    // Tool messages are detected structurally via `tool_call_id`.
    if ("tool_call_id" in message && typeof message.tool_call_id === "string") {
      return {
        role: getRole(message.getType()),
        content,
        name: message.name,
        toolCallId: _convertToolCallIdToMistralCompatible(message.tool_call_id),
      };
      // Mistral "assistant" role can only support either content or tool calls but not both
    } else if (isAIMessage(message)) {
      if (toolCalls === undefined) {
        return {
          role: getRole(message.getType()),
          content,
        };
      } else {
        return {
          role: getRole(message.getType()),
          toolCalls,
        };
      }
    }
    return {
      role: getRole(message.getType()),
      content,
    };
  }) as MistralAIMessage[];
}
function mistralAIResponseToChatMessage(
choice: NonNullable<MistralAIChatCompletionResponse["choices"]>[0],
usage?: MistralAITokenUsage
): BaseMessage {
const { message } = choice;
if (message === undefined) {
throw new Error("No message found in response");
}
// MistralAI SDK does not include toolCalls in the non
// streaming return type, so we need to extract it like this
// to satisfy typescript.
let rawToolCalls: MistralAIToolCall[] = [];
if ("toolCalls" in message && Array.isArray(message.toolCalls)) {
rawToolCalls = message.toolCalls;
}
const content = _mistralContentChunkToMessageContentComplex(message.content);
switch (message.role) {
case "assistant": {
const toolCalls = [];
const invalidToolCalls = [];
for (const rawToolCall of rawToolCalls) {
try {
const parsed = parseToolCall(rawToolCall, { returnId: true });
toolCalls.push({
...parsed,
id: parsed.id ?? uuidv4().replace(/-/g, ""),
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} catch (e: any) {
invalidToolCalls.push(makeInvalidToolCall(rawToolCall, e.message));
}
}
return new AIMessage({
content,
tool_calls: toolCalls,
invalid_tool_calls: invalidToolCalls,
additional_kwargs: {},
usage_metadata: usage
? {
input_tokens: usage.promptTokens,
output_tokens: usage.completionTokens,
total_tokens: usage.totalTokens,
}
: undefined,
});
}
default:
return new HumanMessage({ content });
}
}
/**
 * Converts a streaming delta from the MistralAI API into the matching
 * LangChain message chunk class for its role.
 *
 * Special cases:
 * - A delta with neither content nor tool calls returns a usage-only
 *   `AIMessageChunk` when usage is present, otherwise `null` (the caller
 *   is expected to skip it).
 * - Tool calls get an `index` injected so LangChain's kwargs-merging
 *   utility can align them, and a generated id when the API omits one.
 *
 * @param delta - The streamed role/content/toolCalls fragment.
 * @param usage - Optional token usage to attach as `usage_metadata`.
 * @returns A role-appropriate message chunk, or `null` for empty deltas.
 */
function _convertDeltaToMessageChunk(
  delta: {
    role?: string | null | undefined;
    content?: string | MistralAIContentChunk[] | null | undefined;
    toolCalls?: MistralAIToolCall[] | null | undefined;
  },
  usage?: MistralAITokenUsage | null
) {
  if (!delta.content && !delta.toolCalls) {
    if (usage) {
      // Usage-only chunk: emit an empty AI chunk carrying the metadata.
      return new AIMessageChunk({
        content: "",
        usage_metadata: usage
          ? {
              input_tokens: usage.promptTokens,
              output_tokens: usage.completionTokens,
              total_tokens: usage.totalTokens,
            }
          : undefined,
      });
    }
    return null;
  }
  // Our merge additional kwargs util function will throw unless there
  // is an index key in each tool object (as seen in OpenAI's) so we
  // need to insert it here.
  const rawToolCallChunksWithIndex = delta.toolCalls?.length
    ? delta.toolCalls?.map(
        (toolCall, index): MistralAIToolCall & { index: number } => ({
          ...toolCall,
          index,
          id: toolCall.id ?? uuidv4().replace(/-/g, ""),
          type: "function",
        })
      )
    : undefined;
  // Deltas may omit the role; default to "assistant".
  let role = "assistant";
  if (delta.role) {
    role = delta.role;
  }
  const content = _mistralContentChunkToMessageContentComplex(delta.content);
  let additional_kwargs;
  const toolCallChunks: ToolCallChunk[] = [];
  if (rawToolCallChunksWithIndex !== undefined) {
    for (const rawToolCallChunk of rawToolCallChunksWithIndex) {
      const rawArgs = rawToolCallChunk.function?.arguments;
      // Arguments may arrive pre-parsed; normalize to a JSON string.
      const args =
        rawArgs === undefined || typeof rawArgs === "string"
          ? rawArgs
          : JSON.stringify(rawArgs);
      toolCallChunks.push({
        name: rawToolCallChunk.function?.name,
        args,
        id: rawToolCallChunk.id,
        index: rawToolCallChunk.index,
        type: "tool_call_chunk",
      });
    }
  } else {
    // NOTE(review): additional_kwargs is only set (to {}) when there are NO
    // tool calls, and left undefined otherwise — confirm this asymmetry is
    // intended by downstream merging logic.
    additional_kwargs = {};
  }
  if (role === "user") {
    return new HumanMessageChunk({ content });
  } else if (role === "assistant") {
    return new AIMessageChunk({
      content,
      tool_call_chunks: toolCallChunks,
      additional_kwargs,
      usage_metadata: usage
        ? {
            input_tokens: usage.promptTokens,
            output_tokens: usage.completionTokens,
            total_tokens: usage.totalTokens,
          }
        : undefined,
    });
  } else if (role === "tool") {
    return new ToolMessageChunk({
      content,
      additional_kwargs,
      // The first tool call's id identifies which call this result answers.
      tool_call_id: rawToolCallChunksWithIndex?.[0].id ?? "",
    });
  } else if (role === "function") {
    return new FunctionMessageChunk({
      content,
      additional_kwargs,
    });
  } else {
    // Unknown roles are preserved verbatim in a generic chat chunk.
    return new ChatMessageChunk({ content, role });
  }
}
/**
 * Normalizes a heterogeneous list of tool definitions into Mistral's
 * native `{ type: "function", function: {...} }` shape. Objects that
 * already have a `function` key are passed through untouched; LangChain
 * structured tools have their zod schema converted to JSON Schema.
 */
function _convertToolToMistralTool(
  tools: ChatMistralAIToolType[]
): MistralAITool[] {
  return tools.map((tool) => {
    // Already Mistral-native — no conversion required.
    if ("function" in tool) {
      return tool as MistralAITool;
    }
    return {
      type: "function",
      function: {
        name: tool.name,
        // Fall back to a generated description when none is provided.
        description: tool.description ?? `Tool: ${tool.name}`,
        parameters: zodToJsonSchema(tool.schema),
      },
    };
  });
}
/**
* Mistral AI chat model integration.
*
* Setup:
* Install `@langchain/mistralai` and set an environment variable named `MISTRAL_API_KEY`.
*
* ```bash
* npm install @langchain/mistralai
* export MISTRAL_API_KEY="your-api-key"
* ```
*
* ## [Constructor args](https://api.js.langchain.com/classes/_langchain_mistralai.ChatMistralAI.html#constructor)
*
* ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_mistralai.ChatMistralAICallOptions.html)
*
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
* They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
*
* ```typescript
* // When calling `.bind`, call options should be passed via the first argument
* const llmWithArgsBound = llm.bind({
* stop: ["\n"],
* tools: [...],
* });
*
* // When calling `.bindTools`, call options should be passed via the second argument
* const llmWithTools = llm.bindTools(
* [...],
* {
* tool_choice: "auto",
* }
* );
* ```
*
* ## Examples
*
* <details open>
* <summary><strong>Instantiate</strong></summary>
*
* ```typescript
* import { ChatMistralAI } from '@langchain/mistralai';
*
* const llm = new ChatMistralAI({
* model: "mistral-large-2402",
* temperature: 0,
* // other params...
* });
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Invoking</strong></summary>
*
* ```typescript
* const input = `Translate "I love programming" into French.`;
*
* // Models also accept a list of chat messages or a formatted prompt
* const result = await llm.invoke(input);
* console.log(result);
* ```
*
* ```txt
* AIMessage {
* "content": "The translation of \"I love programming\" into French is \"J'aime la programmation\". Here's the breakdown:\n\n- \"I\" translates to \"Je\"\n- \"love\" translates to \"aime\"\n- \"programming\" translates to \"la programmation\"\n\nSo, \"J'aime la programmation\" means \"I love programming\" in French.",
* "additional_kwargs": {},
* "response_metadata": {
* "tokenUsage": {
* "completionTokens": 89,
* "promptTokens": 13,
* "totalTokens": 102
* },
* "finish_reason": "stop"
* },
* "tool_calls": [],
* "invalid_tool_calls": [],
* "usage_metadata": {
* "input_tokens": 13,
* "output_tokens": 89,
* "total_tokens": 102
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Streaming Chunks</strong></summary>
*
* ```typescript
* for await (const chunk of await llm.stream(input)) {
* console.log(chunk);
* }
* ```
*
* ```txt
* AIMessageChunk {
* "content": "The",
* "additional_kwargs": {},
* "response_metadata": {
* "prompt": 0,
* "completion": 0
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": " translation",
* "additional_kwargs": {},
* "response_metadata": {
* "prompt": 0,
* "completion": 0
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": " of",
* "additional_kwargs": {},
* "response_metadata": {
* "prompt": 0,
* "completion": 0
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": " \"",
* "additional_kwargs": {},
* "response_metadata": {
* "prompt": 0,
* "completion": 0
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": "I",
* "additional_kwargs": {},
* "response_metadata": {
* "prompt": 0,
* "completion": 0
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": ".",
* "additional_kwargs": {},
* "response_metadata": {
* "prompt": 0,
* "completion": 0
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
*}
*AIMessageChunk {
* "content": "",
* "additional_kwargs": {},
* "response_metadata": {
* "prompt": 0,
* "completion": 0
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": [],
* "usage_metadata": {
* "input_tokens": 13,
* "output_tokens": 89,
* "total_tokens": 102
* }
*}
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Aggregate Streamed Chunks</strong></summary>
*
* ```typescript
* import { AIMessageChunk } from '@langchain/core/messages';
* import { concat } from '@langchain/core/utils/stream';
*
* const stream = await llm.stream(input);
* let full: AIMessageChunk | undefined;
* for await (const chunk of stream) {
* full = !full ? chunk : concat(full, chunk);
* }
* console.log(full);
* ```
*
* ```txt
* AIMessageChunk {
* "content": "The translation of \"I love programming\" into French is \"J'aime la programmation\". Here's the breakdown:\n\n- \"I\" translates to \"Je\"\n- \"love\" translates to \"aime\"\n- \"programming\" translates to \"la programmation\"\n\nSo, \"J'aime la programmation\" means \"I love programming\" in French.",
* "additional_kwargs": {},
* "response_metadata": {
* "prompt": 0,
* "completion": 0
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": [],
* "usage_metadata": {
* "input_tokens": 13,
* "output_tokens": 89,
* "total_tokens": 102
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Bind tools</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const GetWeather = {
* name: "GetWeather",
* description: "Get the current weather in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const GetPopulation = {
* name: "GetPopulation",
* description: "Get the current population in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
* const aiMsg = await llmWithTools.invoke(
* "Which city is hotter today and which is bigger: LA or NY?"
* );
* console.log(aiMsg.tool_calls);
* ```
*
* ```txt
* [
* {
* name: 'GetWeather',
* args: { location: 'Los Angeles, CA' },
* type: 'tool_call',
* id: '47i216yko'
* },
* {
* name: 'GetWeather',
* args: { location: 'New York, NY' },
* type: 'tool_call',
* id: 'nb3v8Fpcn'
* },
* {
* name: 'GetPopulation',
* args: { location: 'Los Angeles, CA' },
* type: 'tool_call',
* id: 'EedWzByIB'
* },
* {
* name: 'GetPopulation',
* args: { location: 'New York, NY' },
* type: 'tool_call',
* id: 'jLdLia7zC'
* }
* ]
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Structured Output</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const Joke = z.object({
* setup: z.string().describe("The setup of the joke"),
* punchline: z.string().describe("The punchline to the joke"),
* rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
* }).describe('Joke to tell user.');
*
* const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
* const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
* console.log(jokeResult);
* ```
*
* ```txt
* {
* setup: "Why don't cats play poker in the jungle?",
* punchline: 'Too many cheetahs!',
* rating: 7
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Usage Metadata</strong></summary>
*
* ```typescript
* const aiMsgForMetadata = await llm.invoke(input);
* console.log(aiMsgForMetadata.usage_metadata);
* ```
*
* ```txt
* { input_tokens: 13, output_tokens: 89, total_tokens: 102 }
* ```
* </details>
*
* <br />
*/
export class ChatMistralAI<
  CallOptions extends ChatMistralAICallOptions = ChatMistralAICallOptions
>
  extends BaseChatModel<CallOptions, AIMessageChunk>
  implements ChatMistralAIInput
{
  // Used for tracing, replace with the same name as your class
  static lc_name() {
    return "ChatMistralAI";
  }

  // Serialization namespace used by LangChain's load/dump machinery.
  lc_namespace = ["langchain", "chat_models", "mistralai"];

  // Model name sent to the API; overridable via `model`/`modelName` fields.
  model = "mistral-small-latest";

  apiKey: string;

  /**
   * @deprecated use serverURL instead
   */
  endpoint: string;

  // Optional override for the Mistral SDK's default server URL.
  serverURL?: string;

  temperature = 0.7;

  streaming = false;

  topP = 1;

  maxTokens: number;

  /**
   * @deprecated use safePrompt instead
   */
  safeMode = false;

  safePrompt = false;

  // `randomSeed` and `seed` are kept in sync by the constructor;
  // `seed` is what invocationParams forwards to the API.
  randomSeed?: number;

  seed?: number;

  maxRetries?: number;

  lc_serializable = true;

  // Instance-level default for streaming token-usage metadata.
  streamUsage = true;

  beforeRequestHooks?: Array<BeforeRequestHook>;

  requestErrorHooks?: Array<RequestErrorHook>;

  responseHooks?: Array<ResponseHook>;

  // Optional custom HTTP client; created lazily when hooks are supplied
  // without one (see addAllHooksToHttpClient).
  httpClient?: MistralAIHTTPClient;

  presencePenalty?: number;

  frequencyPenalty?: number;

  numCompletions?: number;

  constructor(fields?: ChatMistralAIInput) {
    super(fields ?? {});
    const apiKey = fields?.apiKey ?? getEnvironmentVariable("MISTRAL_API_KEY");
    if (!apiKey) {
      throw new Error(
        "API key MISTRAL_API_KEY is missing for MistralAI, but it is required."
      );
    }
    this.apiKey = apiKey;
    this.streaming = fields?.streaming ?? this.streaming;
    this.serverURL = fields?.serverURL ?? this.serverURL;
    this.temperature = fields?.temperature ?? this.temperature;
    this.topP = fields?.topP ?? this.topP;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.safePrompt = fields?.safePrompt ?? this.safePrompt;
    // `seed` takes precedence over the deprecated `randomSeed` alias.
    // NOTE(review): the last fallback is `this.seed` (undefined here)
    // rather than `this.randomSeed` — harmless while both default to
    // undefined, but confirm if a class-level default is ever added.
    this.randomSeed = fields?.seed ?? fields?.randomSeed ?? this.seed;
    this.seed = this.randomSeed;
    this.maxRetries = fields?.maxRetries;
    this.httpClient = fields?.httpClient;
    // `model` takes precedence over the deprecated `modelName` alias.
    this.model = fields?.model ?? fields?.modelName ?? this.model;
    this.streamUsage = fields?.streamUsage ?? this.streamUsage;
    this.beforeRequestHooks =
      fields?.beforeRequestHooks ?? this.beforeRequestHooks;
    this.requestErrorHooks =
      fields?.requestErrorHooks ?? this.requestErrorHooks;
    this.responseHooks = fields?.responseHooks ?? this.responseHooks;
    this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty;
    this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
    this.numCompletions = fields?.numCompletions ?? this.numCompletions;
    // Register any user-supplied hooks on the (possibly new) HTTP client.
    this.addAllHooksToHttpClient();
  }

  // Maps secret constructor fields to their environment-variable names.
  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "MISTRAL_API_KEY",
    };
  }

  // Serialization aliases for constructor arguments.
  get lc_aliases(): { [key: string]: string } | undefined {
    return {
      apiKey: "mistral_api_key",
    };
  }

  /**
   * Standard parameters reported to LangSmith tracing.
   */
  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
    const params = this.invocationParams(options);
    return {
      ls_provider: "mistral",
      ls_model_name: this.model,
      ls_model_type: "chat",
      ls_temperature: params.temperature ?? undefined,
      ls_max_tokens: params.maxTokens ?? undefined,
    };
  }

  _llmType() {
    return "mistral_ai";
  }

  /**
   * Get the parameters used to invoke the model
   */
  invocationParams(
    options?: this["ParsedCallOptions"]
  ): Omit<
    MistralAIChatCompletionRequest | MistralAIChatCompletionStreamRequest,
    "messages"
  > {
    const { response_format, tools, tool_choice } = options ?? {};
    // Normalize any LangChain tools into Mistral's native tool shape.
    const mistralAITools: Array<MistralAITool> | undefined = tools?.length
      ? _convertToolToMistralTool(tools)
      : undefined;
    const params: Omit<MistralAIChatCompletionRequest, "messages"> = {
      model: this.model,
      tools: mistralAITools,
      temperature: this.temperature,
      maxTokens: this.maxTokens,
      topP: this.topP,
      randomSeed: this.seed,
      safePrompt: this.safePrompt,
      toolChoice: tool_choice,
      responseFormat: response_format,
      presencePenalty: this.presencePenalty,
      frequencyPenalty: this.frequencyPenalty,
      n: this.numCompletions,
    };
    return params;
  }

  /**
   * Returns a runnable bound to the given tools (converted to Mistral's
   * native format) plus any extra call options.
   */
  override bindTools(
    tools: ChatMistralAIToolType[],
    kwargs?: Partial<CallOptions>
  ): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions> {
    return this.bind({
      tools: _convertToolToMistralTool(tools),
      ...kwargs,
    } as CallOptions);
  }

  /**
   * Calls the MistralAI API with retry logic in case of failures.
   * @param {ChatRequest} input The input to send to the MistralAI API.
   * @returns {Promise<MistralAIChatCompletionResult | AsyncIterable<MistralAIChatCompletionEvent>>} The response from the MistralAI API.
   */
  async completionWithRetry(
    input: MistralAIChatCompletionStreamRequest,
    streaming: true
  ): Promise<AsyncIterable<MistralAIChatCompletionEvent>>;

  async completionWithRetry(
    input: MistralAIChatCompletionRequest,
    streaming: false
  ): Promise<MistralAIChatCompletionResponse>;

  async completionWithRetry(
    input:
      | MistralAIChatCompletionRequest
      | MistralAIChatCompletionStreamRequest,
    streaming: boolean
  ): Promise<
    | MistralAIChatCompletionResponse
    | AsyncIterable<MistralAIChatCompletionEvent>
  > {
    // Retries are delegated to AsyncCaller rather than the SDK.
    const caller = new AsyncCaller({
      maxRetries: this.maxRetries,
    });
    const client = new MistralClient({
      apiKey: this.apiKey,
      serverURL: this.serverURL,
      // If httpClient exists, pass it into constructor
      ...(this.httpClient ? { httpClient: this.httpClient } : {}),
    });
    return caller.call(async () => {
      try {
        let res:
          | MistralAIChatCompletionResponse
          | AsyncIterable<MistralAIChatCompletionEvent>;
        if (streaming) {
          res = await client.chat.stream(input);
        } else {
          res = await client.chat.complete(input);
        }
        return res;
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
      } catch (e: any) {
        // Tag obvious request-validation failures with a 400 status so
        // callers can treat them as non-retryable client errors.
        if (
          e.message?.includes("status: 400") ||
          e.message?.toLowerCase().includes("status 400") ||
          e.message?.includes("validation failed")
        ) {
          e.status = 400;
        }
        throw e;
      }
    });
  }

  /** @ignore */
  async _generate(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<ChatResult> {
    const tokenUsage: TokenUsage = {};
    const params = this.invocationParams(options);
    const mistralMessages = convertMessagesToMistralMessages(messages);
    const input = {
      ...params,
      messages: mistralMessages,
    };
    // Enable streaming for signal controller or timeout due
    // to SDK limitations on canceling requests.
    // NOTE(review): `options.signal ?? ...` evaluates to the AbortSignal
    // object itself (truthy) when a signal is present, so any signal
    // forces the streaming path — confirm this is intended.
    const shouldStream = options.signal ?? !!options.timeout;
    // Handle streaming
    if (this.streaming || shouldStream) {
      const stream = this._streamResponseChunks(messages, options, runManager);
      // Accumulate chunks per completion index (n > 1 yields several).
      const finalChunks: Record<number, ChatGenerationChunk> = {};
      for await (const chunk of stream) {
        const index =
          (chunk.generationInfo as NewTokenIndices)?.completion ?? 0;
        if (finalChunks[index] === undefined) {
          finalChunks[index] = chunk;
        } else {
          finalChunks[index] = finalChunks[index].concat(chunk);
        }
      }
      // Sort numerically by completion index before returning.
      const generations = Object.entries(finalChunks)
        .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
        .map(([_, value]) => value);
      return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };
    }
    // Not streaming, so we can just call the API once.
    const response = await this.completionWithRetry(input, false);
    const { completionTokens, promptTokens, totalTokens } =
      response?.usage ?? {};
    if (completionTokens) {
      tokenUsage.completionTokens =
        (tokenUsage.completionTokens ?? 0) + completionTokens;
    }
    if (promptTokens) {
      tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;
    }
    if (totalTokens) {
      tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;
    }
    const generations: ChatGeneration[] = [];
    for (const part of response?.choices ?? []) {
      if ("delta" in part) {
        throw new Error("Delta not supported in non-streaming mode.");
      }
      if (!("message" in part)) {
        throw new Error("No message found in the choice.");
      }
      let text = part.message?.content ?? "";
      // Multi-chunk content: surface only the first text chunk as `text`.
      if (Array.isArray(text)) {
        text = text[0].type === "text" ? text[0].text : "";
      }
      const generation: ChatGeneration = {
        text,
        message: mistralAIResponseToChatMessage(part, response?.usage),
      };
      if (part.finishReason) {
        generation.generationInfo = { finishReason: part.finishReason };
      }
      generations.push(generation);
    }
    return {
      generations,
      llmOutput: { tokenUsage },
    };
  }

  /**
   * Streams the model response as ChatGenerationChunks, emitting one
   * chunk per SSE delta and notifying the run manager for each token.
   */
  async *_streamResponseChunks(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    const mistralMessages = convertMessagesToMistralMessages(messages);
    const params = this.invocationParams(options);
    const input = {
      ...params,
      messages: mistralMessages,
    };
    const streamIterable = await this.completionWithRetry(input, true);
    for await (const { data } of streamIterable) {
      // The SDK cannot cancel in-flight requests; honor aborts manually.
      if (options.signal?.aborted) {
        throw new Error("AbortError");
      }
      const choice = data?.choices[0];
      if (!choice || !("delta" in choice)) {
        continue;
      }
      const { delta } = choice;
      if (!delta) {
        continue;
      }
      const newTokenIndices = {
        prompt: 0,
        completion: choice.index ?? 0,
      };
      // NOTE(review): with `||`, a per-call `streamUsage: false` cannot
      // disable usage streaming while the instance default is true —
      // confirm whether call options should take precedence here.
      const shouldStreamUsage = this.streamUsage || options.streamUsage;
      const message = _convertDeltaToMessageChunk(
        delta,
        shouldStreamUsage ? data.usage : null
      );
      if (message === null) {
        // Do not yield a chunk if the message is empty
        continue;
      }
      let text = delta.content ?? "";
      if (Array.isArray(text)) {
        text = text[0].type === "text" ? text[0].text : "";
      }
      const generationChunk = new ChatGenerationChunk({
        message,
        text,
        generationInfo: newTokenIndices,
      });
      yield generationChunk;
      // eslint-disable-next-line no-void
      void runManager?.handleLLMNewToken(
        generationChunk.text ?? "",
        newTokenIndices,
        undefined,
        undefined,
        undefined,
        { chunk: generationChunk }
      );
    }
  }

  /**
   * Registers all configured hooks on the HTTP client, creating a client
   * first when hooks exist but no client was supplied. Clears previously
   * registered hooks first to avoid duplicates.
   */
  addAllHooksToHttpClient() {
    try {
      // To prevent duplicate hooks
      this.removeAllHooksFromHttpClient();
      // If the user wants to use hooks, but hasn't created an HTTPClient yet
      const hasHooks = [
        this.beforeRequestHooks,
        this.requestErrorHooks,
        this.responseHooks,
      ].some((hook) => hook && hook.length > 0);
      if (hasHooks && !this.httpClient) {
        this.httpClient = new MistralAIHTTPClient();
      }
      if (this.beforeRequestHooks) {
        for (const hook of this.beforeRequestHooks) {
          this.httpClient?.addHook("beforeRequest", hook);
        }
      }
      if (this.requestErrorHooks) {
        for (const hook of this.requestErrorHooks) {
          this.httpClient?.addHook("requestError", hook);
        }
      }
      if (this.responseHooks) {
        for (const hook of this.responseHooks) {
          this.httpClient?.addHook("response", hook);
        }
      }
    } catch {
      throw new Error("Error in adding all hooks");
    }
  }

  /**
   * Removes every configured hook from the HTTP client (no-op when no
   * client or no hooks exist).
   */
  removeAllHooksFromHttpClient() {
    try {
      if (this.beforeRequestHooks) {
        for (const hook of this.beforeRequestHooks) {
          this.httpClient?.removeHook("beforeRequest", hook);
        }
      }
      if (this.requestErrorHooks) {
        for (const hook of this.requestErrorHooks) {
          this.httpClient?.removeHook("requestError", hook);
        }
      }
      if (this.responseHooks) {
        for (const hook of this.responseHooks) {
          this.httpClient?.removeHook("response", hook);
        }
      }
    } catch {
      throw new Error("Error in removing hooks")
;    }
  }

  /**
   * Removes a single hook from all three hook slots (the hook type is
   * not known here, so it is attempted against each slot).
   */
  removeHookFromHttpClient(
    hook: BeforeRequestHook | RequestErrorHook | ResponseHook
  ) {
    try {
      this.httpClient?.removeHook("beforeRequest", hook as BeforeRequestHook);
      this.httpClient?.removeHook("requestError", hook as RequestErrorHook);
      this.httpClient?.removeHook("response", hook as ResponseHook);
    } catch {
      throw new Error("Error in removing hook");
    }
  }

  /** @ignore */
  _combineLLMOutput() {
    return [];
  }

  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<false>
  ): Runnable<BaseLanguageModelInput, RunOutput>;

  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<true>
  ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;

  /**
   * Returns a runnable that produces structured output matching the
   * given zod or JSON schema, using either JSON mode or forced tool
   * calling depending on `config.method`.
   */
  withStructuredOutput<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    outputSchema:
      | z.ZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<boolean>
  ):
    | Runnable<BaseLanguageModelInput, RunOutput>
    | Runnable<
        BaseLanguageModelInput,
        { raw: BaseMessage; parsed: RunOutput }
      > {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const schema: z.ZodType<RunOutput> | Record<string, any> = outputSchema;
    const name = config?.name;
    const method = config?.method;
    const includeRaw = config?.includeRaw;
    let llm: Runnable<BaseLanguageModelInput>;
    let outputParser: BaseLLMOutputParser<RunOutput>;
    if (method === "jsonMode") {
      // JSON mode: ask for a JSON object and parse the raw text output.
      llm = this.bind({
        response_format: { type: "json_object" },
      } as Partial<CallOptions>);
      if (isZodSchema(schema)) {
        outputParser = StructuredOutputParser.fromZodSchema(schema);
      } else {
        outputParser = new JsonOutputParser<RunOutput>();
      }
    } else {
      let functionName = name ?? "extract";
      // Is function calling
      if (isZodSchema(schema)) {
        const asJsonSchema = zodToJsonSchema(schema);
        llm = this.bind({
          tools: [
            {
              type: "function" as const,
              function: {
                name: functionName,
                description: asJsonSchema.description,
                parameters: asJsonSchema,
              },
            },
          ],
          // "any" forces the model to call the (single) provided tool.
          tool_choice: "any",
        } as Partial<CallOptions>);
        outputParser = new JsonOutputKeyToolsParser({
          returnSingle: true,
          keyName: functionName,
          zodSchema: schema,
        });
      } else {
        let openAIFunctionDefinition: FunctionDefinition;
        // Accept either a full function definition or a bare JSON schema.
        if (
          typeof schema.name === "string" &&
          typeof schema.parameters === "object" &&
          schema.parameters != null
        ) {
          openAIFunctionDefinition = schema as FunctionDefinition;
          functionName = schema.name;
        } else {
          openAIFunctionDefinition = {
            name: functionName,
            description: schema.description ?? "",
            parameters: schema,
          };
        }
        llm = this.bind({
          tools: [
            {
              type: "function" as const,
              function: openAIFunctionDefinition,
            },
          ],
          tool_choice: "any",
        } as Partial<CallOptions>);
        outputParser = new JsonOutputKeyToolsParser<RunOutput>({
          returnSingle: true,
          keyName: functionName,
        });
      }
    }
    if (!includeRaw) {
      return llm.pipe(outputParser) as Runnable<
        BaseLanguageModelInput,
        RunOutput
      >;
    }
    // includeRaw: return both the raw message and the parsed value,
    // falling back to `parsed: null` if parsing fails.
    const parserAssign = RunnablePassthrough.assign({
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      parsed: (input: any, config) => outputParser.invoke(input.raw, config),
    });
    const parserNone = RunnablePassthrough.assign({
      parsed: () => null,
    });
    const parsedWithFallback = parserAssign.withFallbacks({
      fallbacks: [parserNone],
    });
    return RunnableSequence.from<
      BaseLanguageModelInput,
      { raw: BaseMessage; parsed: RunOutput }
    >([
      {
        raw: llm,
      },
      parsedWithFallback,
    ]);
  }
}
/**
 * Type guard distinguishing a zod schema from a plain JSON-schema-like
 * record, by duck-typing on the `parse` method every zod schema exposes.
 */
function isZodSchema<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunOutput extends Record<string, any> = Record<string, any>
>(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  input: z.ZodType<RunOutput> | Record<string, any>
): input is z.ZodType<RunOutput> {
  const candidate = input as z.ZodType<RunOutput>;
  return typeof candidate?.parse === "function";
}
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Embeddings, type EmbeddingsParams } from "@langchain/core/embeddings";
import { chunkArray } from "@langchain/core/utils/chunk_array";
import { EmbeddingRequest as MistralAIEmbeddingsRequest } from "@mistralai/mistralai/src/models/components/embeddingrequest.js";
import { EmbeddingResponse as MistralAIEmbeddingsResponse } from "@mistralai/mistralai/src/models/components/embeddingresponse.js";
import {
BeforeRequestHook,
RequestErrorHook,
ResponseHook,
HTTPClient as MistralAIHTTPClient,
} from "@mistralai/mistralai/lib/http.js";
/**
* Interface for MistralAIEmbeddings parameters. Extends EmbeddingsParams and
* defines additional parameters specific to the MistralAIEmbeddings class.
*/
export interface MistralAIEmbeddingsParams extends EmbeddingsParams {
  /**
   * The API key to use.
   * @default {process.env.MISTRAL_API_KEY}
   */
  apiKey?: string;

  /**
   * The name of the model to use.
   * Alias for `model`.
   * @default {"mistral-embed"}
   */
  modelName?: string;

  /**
   * The name of the model to use.
   * @default {"mistral-embed"}
   */
  model?: string;

  /**
   * The format of the output data.
   * @default {"float"}
   */
  encodingFormat?: string;

  /**
   * Override the default server URL used by the Mistral SDK.
   * @deprecated use serverURL instead
   */
  endpoint?: string;

  /**
   * Override the default server URL used by the Mistral SDK.
   */
  serverURL?: string;

  /**
   * The maximum number of documents to embed in a single request.
   * @default {512}
   */
  batchSize?: number;

  /**
   * Whether to strip new lines from the input text. This is recommended,
   * but may not be suitable for all use cases.
   * @default {true}
   */
  stripNewLines?: boolean;

  /**
   * A list of custom hooks that must follow (req: Request) => Awaitable<Request | void>
   * They are automatically added when a MistralAIEmbeddings instance is created
   */
  beforeRequestHooks?: BeforeRequestHook[];

  /**
   * A list of custom hooks that must follow (err: unknown, req: Request) => Awaitable<void>
   * They are automatically added when a MistralAIEmbeddings instance is created
   */
  requestErrorHooks?: RequestErrorHook[];

  /**
   * A list of custom hooks that must follow (res: Response, req: Request) => Awaitable<void>
   * They are automatically added when a MistralAIEmbeddings instance is created
   */
  responseHooks?: ResponseHook[];

  /**
   * Optional custom HTTP client to manage API requests
   * Allows users to add custom fetch implementations, hooks, as well as error and response processing.
   */
  httpClient?: MistralAIHTTPClient;
}
/**
* Class for generating embeddings using the MistralAI API.
*/
export class MistralAIEmbeddings
  extends Embeddings
  implements MistralAIEmbeddingsParams
{
  // Deprecated alias; the constructor keeps it in sync with `model`.
  modelName = "mistral-embed";

  model = "mistral-embed";

  encodingFormat = "float";

  // Maximum number of documents embedded per API request.
  batchSize = 512;

  stripNewLines = true;

  apiKey: string;

  /**
   * @deprecated use serverURL instead
   */
  endpoint: string;

  serverURL?: string;

  beforeRequestHooks?: Array<BeforeRequestHook>;

  requestErrorHooks?: Array<RequestErrorHook>;

  responseHooks?: Array<ResponseHook>;

  // Optional custom HTTP client; created lazily when hooks are supplied
  // without one (see addAllHooksToHttpClient).
  httpClient?: MistralAIHTTPClient;

  constructor(fields?: Partial<MistralAIEmbeddingsParams>) {
    super(fields ?? {});
    const apiKey = fields?.apiKey ?? getEnvironmentVariable("MISTRAL_API_KEY");
    if (!apiKey) {
      throw new Error("API key missing for MistralAI, but it is required.");
    }
    this.apiKey = apiKey;
    this.serverURL = fields?.serverURL ?? this.serverURL;
    // `model` takes precedence over the deprecated `modelName` alias;
    // both fields end up holding the same value.
    this.modelName = fields?.model ?? fields?.modelName ?? this.model;
    this.model = this.modelName;
    this.encodingFormat = fields?.encodingFormat ?? this.encodingFormat;
    this.batchSize = fields?.batchSize ?? this.batchSize;
    this.stripNewLines = fields?.stripNewLines ?? this.stripNewLines;
    this.beforeRequestHooks =
      fields?.beforeRequestHooks ?? this.beforeRequestHooks;
    this.requestErrorHooks =
      fields?.requestErrorHooks ?? this.requestErrorHooks;
    this.responseHooks = fields?.responseHooks ?? this.responseHooks;
    this.httpClient = fields?.httpClient ?? this.httpClient;
    // Register any user-supplied hooks on the (possibly new) HTTP client.
    this.addAllHooksToHttpClient();
  }

  /**
   * Method to generate embeddings for an array of documents. Splits the
   * documents into batches and makes requests to the MistralAI API to generate
   * embeddings.
   * @param {Array<string>} texts Array of documents to generate embeddings for.
   * @returns {Promise<number[][]>} Promise that resolves to a 2D array of embeddings for each document.
   */
  async embedDocuments(texts: string[]): Promise<number[][]> {
    const batches = chunkArray(
      this.stripNewLines ? texts.map((t) => t.replace(/\n/g, " ")) : texts,
      this.batchSize
    );
    // All batches are requested in parallel.
    const batchRequests = batches.map((batch) =>
      this.embeddingWithRetry(batch)
    );
    const batchResponses = await Promise.all(batchRequests);
    const embeddings: number[][] = [];
    for (let i = 0; i < batchResponses.length; i += 1) {
      const batch = batches[i];
      const { data: batchResponse } = batchResponses[i];
      // NOTE(review): assumes the API returns embeddings in input order;
      // response items carry an index field — confirm ordering guarantee.
      for (let j = 0; j < batch.length; j += 1) {
        embeddings.push(batchResponse[j].embedding ?? []);
      }
    }
    return embeddings;
  }

  /**
   * Method to generate an embedding for a single document. Calls the
   * embeddingWithRetry method with the document as the input.
   * @param {string} text Document to generate an embedding for.
   * @returns {Promise<number[]>} Promise that resolves to an embedding for the document.
   */
  async embedQuery(text: string): Promise<number[]> {
    const { data } = await this.embeddingWithRetry(
      this.stripNewLines ? text.replace(/\n/g, " ") : text
    );
    return data[0].embedding ?? [];
  }

  /**
   * Private method to make a request to the MistralAI API to generate
   * embeddings. Handles the retry logic and returns the response from the
   * API.
   * @param {string | Array<string>} inputs Text to send to the MistralAI API.
   * @returns {Promise<MistralAIEmbeddingsResponse>} Promise that resolves to the response from the API.
   */
  private async embeddingWithRetry(
    inputs: string | Array<string>
  ): Promise<MistralAIEmbeddingsResponse> {
    // SDK is imported lazily to keep module load light.
    const { Mistral } = await this.imports();
    const client = new Mistral({
      apiKey: this.apiKey,
      serverURL: this.serverURL,
      // If httpClient exists, pass it into constructor
      ...(this.httpClient ? { httpClient: this.httpClient } : {}),
    });
    const embeddingsRequest: MistralAIEmbeddingsRequest = {
      model: this.model,
      inputs,
      encodingFormat: this.encodingFormat,
    };
    // Retries are handled by the base Embeddings class's caller.
    return this.caller.call(async () => {
      const res = await client.embeddings.create(embeddingsRequest);
      return res;
    });
  }

  /**
   * Registers all configured hooks on the HTTP client, creating a client
   * first when hooks exist but no client was supplied. Clears previously
   * registered hooks first to avoid duplicates.
   */
  addAllHooksToHttpClient() {
    try {
      // To prevent duplicate hooks
      this.removeAllHooksFromHttpClient();
      // If the user wants to use hooks, but hasn't created an HTTPClient yet
      const hasHooks = [
        this.beforeRequestHooks,
        this.requestErrorHooks,
        this.responseHooks,
      ].some((hook) => hook && hook.length > 0);
      if (hasHooks && !this.httpClient) {
        this.httpClient = new MistralAIHTTPClient();
      }
      if (this.beforeRequestHooks) {
        for (const hook of this.beforeRequestHooks) {
          this.httpClient?.addHook("beforeRequest", hook);
        }
      }
      if (this.requestErrorHooks) {
        for (const hook of this.requestErrorHooks) {
          this.httpClient?.addHook("requestError", hook);
        }
      }
      if (this.responseHooks) {
        for (const hook of this.responseHooks) {
          this.httpClient?.addHook("response", hook);
        }
      }
    } catch {
      throw new Error("Error in adding all hooks");
    }
  }

  /**
   * Removes every configured hook from the HTTP client (no-op when no
   * client or no hooks exist).
   */
  removeAllHooksFromHttpClient() {
    try {
      if (this.beforeRequestHooks) {
        for (const hook of this.beforeRequestHooks) {
          this.httpClient?.removeHook("beforeRequest", hook);
        }
      }
      if (this.requestErrorHooks) {
        for (const hook of this.requestErrorHooks) {
          this.httpClient?.removeHook("requestError", hook);
        }
      }
      if (this.responseHooks) {
        for (const hook of this.responseHooks) {
          this.httpClient?.removeHook("response", hook);
        }
      }
    } catch {
      throw new Error("Error in removing hooks");
    }
  }

  /**
   * Removes a single hook from all three hook slots (the hook type is
   * not known here, so it is attempted against each slot).
   */
  removeHookFromHttpClient(
    hook: BeforeRequestHook | RequestErrorHook | ResponseHook
  ) {
    try {
      this.httpClient?.removeHook("beforeRequest", hook as BeforeRequestHook);
      this.httpClient?.removeHook("requestError", hook as RequestErrorHook);
      this.httpClient?.removeHook("response", hook as ResponseHook);
    } catch {
      throw new Error("Error in removing hook");
    }
  }

  /** @ignore */
  private async imports() {
    const { Mistral } = await import("@mistralai/mistralai");
    return { Mistral };
  }
}
import { ContentChunk as MistralAIContentChunk } from "@mistralai/mistralai/models/components/contentchunk.js";
import { MessageContentComplex } from "@langchain/core/messages";
// Mistral enforces a specific pattern for tool call IDs:
// exactly 9 ASCII alphanumeric characters.
const TOOL_CALL_ID_PATTERN = /^[a-zA-Z0-9]{9}$/;

/**
 * Checks whether a tool call ID already satisfies Mistral's required
 * format (exactly 9 alphanumeric characters).
 *
 * @param toolCallId - The tool call ID to validate.
 * @returns True if the ID can be sent to Mistral as-is.
 */
export function _isValidMistralToolCallId(toolCallId: string): boolean {
  return TOOL_CALL_ID_PATTERN.test(toolCallId);
}
/**
 * Encodes a non-negative integer as a base-62 string over the alphabet
 * 0-9, a-z, A-Z.
 *
 * @param num - Non-negative integer to encode.
 * @returns The base-62 representation of `num` ("0" for zero).
 */
function _base62Encode(num: number): string {
  const alphabet =
    "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
  if (num === 0) {
    return alphabet[0];
  }
  let encoded = "";
  let remaining = num;
  // Peel off the least-significant digit each round and prepend it,
  // so no final reverse step is needed.
  while (remaining !== 0) {
    encoded = alphabet[remaining % alphabet.length] + encoded;
    remaining = Math.floor(remaining / alphabet.length);
  }
  return encoded;
}
/**
 * Deterministically hashes a string to a non-negative integer using a
 * djb2-style rolling hash: h = h * 31 + code, truncated to a signed
 * 32-bit integer at every step, then made non-negative.
 *
 * @param str - Input string; hashed per UTF-16 code unit.
 * @returns A non-negative integer hash of `str` (0 for the empty string).
 */
function _simpleHash(str: string): number {
  const hashed = str
    .split("")
    // (acc << 5) - acc === acc * 31; `| 0` keeps the accumulator 32-bit.
    .reduce((acc, ch) => ((acc << 5) - acc + ch.charCodeAt(0)) | 0, 0);
  return Math.abs(hashed);
}
/**
 * Converts an arbitrary tool call ID into Mistral's required format
 * (exactly 9 alphanumeric characters). IDs that already conform are
 * returned unchanged; all others are deterministically hashed, base-62
 * encoded, and then truncated or zero-padded to exactly 9 characters.
 *
 * @param toolCallId - The original tool call ID (any format).
 * @returns A 9-character alphanumeric ID accepted by Mistral.
 */
export function _convertToolCallIdToMistralCompatible(
  toolCallId: string
): string {
  // Already compatible — pass through untouched.
  if (_isValidMistralToolCallId(toolCallId)) {
    return toolCallId;
  }
  const encoded = _base62Encode(_simpleHash(toolCallId));
  // Normalize the encoded hash to exactly 9 characters.
  return encoded.length >= 9
    ? encoded.slice(0, 9)
    : encoded.padStart(9, "0");
}
/**
 * Converts Mistral content chunks into LangChain `MessageContentComplex`
 * entries. Nullish content becomes an empty string and string content
 * passes through; within an array, only `image_url` chunks need their
 * `imageUrl` field remapped to `image_url`, and a `detail` value that
 * LangChain does not recognize is dropped.
 *
 * @param content - Raw Mistral message content (string, chunk array, or nullish).
 * @returns A plain string, or an array of LangChain-compatible content parts.
 */
export function _mistralContentChunkToMessageContentComplex(
  content: string | MistralAIContentChunk[] | null | undefined
): string | MessageContentComplex[] {
  if (!content) {
    return "";
  }
  if (typeof content === "string") {
    return content;
  }
  return content.map((chunk) => {
    // Only Mistral ImageURLChunks need conversion; everything else is
    // already in a compatible shape.
    if (chunk.type !== "image_url") {
      return chunk;
    }
    const { imageUrl } = chunk;
    if (typeof imageUrl !== "string" && imageUrl?.detail) {
      const { detail } = imageUrl;
      // Mistral allows any string here, but MessageContentComplex only
      // supports "high" | "auto" | "low" — strip anything else.
      const detailIsSupported =
        detail === "high" || detail === "auto" || detail === "low";
      if (!detailIsSupported) {
        return {
          type: chunk.type,
          image_url: { url: imageUrl.url },
        };
      }
    }
    return {
      type: chunk.type,
      image_url: imageUrl,
    };
  });
}
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai/src | lc_public_repos/langchainjs/libs/langchain-mistralai/src/tests/agent.int.test.ts | // import { test, expect } from "@jest/globals";
// import { TavilySearchResults } from "@langchain/community/tools/tavily_search";
// import { Calculator } from "@langchain/community/tools/calculator";
// import { BaseChatModel } from "@langchain/core/language_models/chat_models";
// import { SystemMessagePromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, ChatPromptTemplate } from "@langchain/core/prompts";
// import { DynamicStructuredTool } from "@langchain/core/tools";
// import { z } from "zod";
// import { ChatMistralAI } from "../chat_models.js";
// import { AgentExecutor, createOpenAIToolsAgent, createToolCallingAgent } from "langchain/agents";
// const tool = new TavilySearchResults({ maxResults: 1 });
// tool.description = tool.description += " You can also use this tool to check the current weather.";
// const tools = [tool, new Calculator()];
// TODO: This test breaks CI build due to dependencies. Figure out a way around it.
// NOTE: Both tests below are intentionally empty stubs. Their real bodies are
// kept as comments because the agent dependencies currently break the CI
// build (see the TODO above); restore them once that is resolved.
test("createToolCallingAgent works", async () => {
  // const prompt = ChatPromptTemplate.fromMessages([
  //   ["system", "You are a helpful assistant. Use tools as often as possible"],
  //   ["placeholder", "{chat_history}"],
  //   ["human", "{input}"],
  //   ["placeholder", "{agent_scratchpad}"],
  // ]);
  // const llm = new ChatMistralAI({
  //   model: "mistral-large-latest",
  //   temperature: 0,
  // });
  // const agent = await createToolCallingAgent({
  //   llm,
  //   tools,
  //   prompt,
  // });
  // const agentExecutor = new AgentExecutor({
  //   agent,
  //   tools,
  // });
  // const input = "What is the current weather in SF?";
  // const result = await agentExecutor.invoke({
  //   input,
  // });
  // console.log(result);
  // expect(result.input).toBe(input);
  // expect(typeof result.output).toBe("string");
  // // Length greater than 10 because any less than that would warrant
  // // an investigation into why such a short generation was returned.
  // expect(result.output.length).toBeGreaterThan(10);
});

test("Model is compatible with OpenAI tools agent and Agent Executor", async () => {
  // const llm: BaseChatModel = new ChatMistralAI({
  //   temperature: 0,
  //   model: "mistral-large-latest",
  // });
  // const systemMessage = SystemMessagePromptTemplate.fromTemplate(
  //   "You are an agent capable of retrieving current weather information."
  // );
  // const humanMessage = HumanMessagePromptTemplate.fromTemplate("{input}");
  // const agentScratchpad = new MessagesPlaceholder("agent_scratchpad");
  // const prompt = ChatPromptTemplate.fromMessages([
  //   systemMessage,
  //   humanMessage,
  //   agentScratchpad,
  // ]);
  // const currentWeatherTool = new DynamicStructuredTool({
  //   name: "get_current_weather",
  //   description: "Get the current weather in a given location",
  //   schema: z.object({
  //     location: z
  //       .string()
  //       .describe("The city and state, e.g. San Francisco, CA"),
  //   }),
  //   func: async () => Promise.resolve("28 °C"),
  // });
  // const agent = await createOpenAIToolsAgent({
  //   llm,
  //   tools: [currentWeatherTool],
  //   prompt,
  // });
  // const agentExecutor = new AgentExecutor({
  //   agent,
  //   tools: [currentWeatherTool],
  // });
  // const input = "What's the weather like in Paris?";
  // const { output } = await agentExecutor.invoke({ input });
  // console.log(output);
  // expect(output).toBeDefined();
  // expect(output).toContain("The current temperature in Paris is 28 °C");
});
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai/src | lc_public_repos/langchainjs/libs/langchain-mistralai/src/tests/llms.test.ts | import { MistralAI } from "../llms.js";
// Verifies that MistralAI serializes to the stable LangChain constructor
// format, and that the API key is emitted as a secret reference
// (MISTRAL_API_KEY) rather than being embedded in the serialized payload.
test("Serialization", () => {
  const model = new MistralAI({
    apiKey: "foo",
  });
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","llms","mistralai","MistralAI"],"kwargs":{"mistral_api_key":{"lc":1,"type":"secret","id":["MISTRAL_API_KEY"]}}}`
  );
});
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai/src | lc_public_repos/langchainjs/libs/langchain-mistralai/src/tests/chat_models.standard.int.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatMistralAI, ChatMistralAICallOptions } from "../chat_models.js";
// Runs the shared LangChain standard integration test suite against
// ChatMistralAI, with Mistral-specific overrides.
class ChatMistralAIStandardIntegrationTests extends ChatModelIntegrationTests<
  ChatMistralAICallOptions,
  AIMessageChunk
> {
  constructor() {
    // Fail fast with one clear message rather than letting every suite
    // test error out individually when the API key is missing.
    if (!process.env.MISTRAL_API_KEY) {
      throw new Error(
        "Can not run Mistral AI integration tests because MISTRAL_API_KEY is not set"
      );
    }
    super({
      Cls: ChatMistralAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {},
      // Mistral requires function call IDs to be a-z, A-Z, 0-9, with a length of 9.
      functionId: "123456789",
    });
  }

  // Override: skipped because Mistral does not accept tool_use-style
  // list-content message blocks, so the shared test cannot pass.
  async testToolMessageHistoriesListContent() {
    this.skipTestMessage(
      "testToolMessageHistoriesListContent",
      "ChatMistralAI",
      "tool_use message blocks not supported"
    );
  }
}

const testClass = new ChatMistralAIStandardIntegrationTests();

test("ChatMistralAIStandardIntegrationTests", async () => {
  const testResults = await testClass.runTests();
  expect(testResults).toBe(true);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai/src | lc_public_repos/langchainjs/libs/langchain-mistralai/src/tests/llms.int.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { HTTPClient } from "@mistralai/mistralai/lib/http.js";
import { MistralAI } from "../llms.js";
// Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
test("Test MistralAI default", async () => {
const model = new MistralAI({
maxTokens: 5,
model: "codestral-latest",
});
const res = await model.invoke(
"Log 'Hello world' to the console in javascript: "
);
// console.log({ res }, "Test MistralAI");
expect(res.length).toBeGreaterThan(1);
});
test("Test MistralAI with stop in object", async () => {
const model = new MistralAI({
maxTokens: 5,
model: "codestral-latest",
});
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await model.invoke("console.log 'Hello world' in javascript:", {
stop: ["world"],
});
// console.log({ res }, "Test MistralAI with stop in object");
});
test("Test MistralAI with timeout in call options", async () => {
const model = new MistralAI({
maxTokens: 5,
maxRetries: 0,
model: "codestral-latest",
});
await expect(() =>
model.invoke("Log 'Hello world' to the console in javascript: ", {
timeout: 10,
})
).rejects.toThrow();
}, 5000);
test("Test MistralAI with timeout in call options and node adapter", async () => {
const model = new MistralAI({
maxTokens: 5,
maxRetries: 0,
model: "codestral-latest",
});
await expect(() =>
model.invoke("Log 'Hello world' to the console in javascript: ", {
timeout: 10,
})
).rejects.toThrow();
}, 5000);
test("Test MistralAI with signal in call options", async () => {
const model = new MistralAI({
maxTokens: 5,
model: "codestral-latest",
});
const controller = new AbortController();
await expect(async () => {
const ret = await model.stream(
"Log 'Hello world' to the console in javascript 100 times: ",
{
signal: controller.signal,
}
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
for await (const chunk of ret) {
// console.log({ chunk }, "Test MistralAI with signal in call options");
controller.abort();
}
return ret;
}).rejects.toThrow();
}, 5000);
test("Test MistralAI in streaming mode", async () => {
// Running LangChain callbacks in the background will sometimes cause the callbackManager to execute
// after the test/llm call has already finished & returned. Set that environment variable to false
// to prevent that from happening.
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false";
try {
let nrNewTokens = 0;
let streamedCompletion = "";
const model = new MistralAI({
maxTokens: 5,
model: "codestral-latest",
streaming: true,
callbacks: CallbackManager.fromHandlers({
async handleLLMNewToken(token: string) {
nrNewTokens += 1;
streamedCompletion += token;
},
}),
});
const res = await model.invoke(
"Log 'Hello world' to the console in javascript: "
);
// console.log({ res }, "Test MistralAI in streaming mode");
expect(nrNewTokens > 0).toBe(true);
expect(res).toBe(streamedCompletion);
} finally {
// Reset the environment variable
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
}
});
test("Test MistralAI stream method", async () => {
const model = new MistralAI({
maxTokens: 50,
model: "codestral-latest",
});
const stream = await model.stream(
"Log 'Hello world' to the console in javascript: ."
);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(1);
});
test("Test MistralAI stream method with abort", async () => {
await expect(async () => {
const model = new MistralAI({
maxTokens: 250,
maxRetries: 0,
model: "codestral-latest",
});
const stream = await model.stream(
"How is your day going? Be extremely verbose.",
{
signal: AbortSignal.timeout(1000),
}
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
for await (const chunk of stream) {
// console.log({ chunk }, "Test MistralAI stream method with abort");
}
}).rejects.toThrow();
});
test("Test MistralAI stream method with early break", async () => {
const model = new MistralAI({
maxTokens: 50,
model: "codestral-latest",
});
const stream = await model.stream(
"How is your day going? Be extremely verbose."
);
let i = 0;
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
for await (const chunk of stream) {
// console.log({ chunk }, "Test MistralAI stream method with early break");
i += 1;
if (i > 5) {
break;
}
}
expect(i).toBeGreaterThan(5);
});
test("Test MistralAI can register BeforeRequestHook function", async () => {
const model = new MistralAI({
model: "codestral-latest",
});
let count = 0;
const addCount = () => {
count += 1;
};
const beforeRequestHook = (): void => {
addCount();
};
model.beforeRequestHooks = [beforeRequestHook];
model.addAllHooksToHttpClient();
await model.invoke("Log 'Hello world' to the console in javascript: .");
// console.log(count);
expect(count).toEqual(1);
});
test("Test MistralAI can register RequestErrorHook function", async () => {
const fetcher = (): Promise<Response> =>
Promise.reject(new Error("Intended fetcher error"));
const customHttpClient = new HTTPClient({ fetcher });
const model = new MistralAI({
model: "codestral-latest",
httpClient: customHttpClient,
maxRetries: 0,
});
let count = 0;
const addCount = () => {
count += 1;
};
const RequestErrorHook = (): void => {
addCount();
console.log("In request error hook");
};
model.requestErrorHooks = [RequestErrorHook];
model.addAllHooksToHttpClient();
try {
await model.invoke("Log 'Hello world' to the console in javascript: .");
} catch (e: unknown) {
// Intended error, do not rethrow
}
// console.log(count);
expect(count).toEqual(1);
});
test("Test MistralAI can register ResponseHook function", async () => {
const model = new MistralAI({
model: "codestral-latest",
});
let count = 0;
const addCount = () => {
count += 1;
};
const ResponseHook = (): void => {
addCount();
};
model.responseHooks = [ResponseHook];
model.addAllHooksToHttpClient();
await model.invoke("Log 'Hello world' to the console in javascript: .");
// console.log(count);
expect(count).toEqual(1);
});
test("Test MistralAI can register multiple hook functions with success", async () => {
const model = new MistralAI({
model: "codestral-latest",
});
let count = 0;
const addCount = () => {
count += 1;
};
const beforeRequestHook = (): void => {
addCount();
};
const ResponseHook = (): void => {
addCount();
};
model.beforeRequestHooks = [beforeRequestHook];
model.responseHooks = [ResponseHook];
model.addAllHooksToHttpClient();
await model.invoke("Log 'Hello world' to the console in javascript: ");
// console.log(count);
expect(count).toEqual(2);
});
test("Test MistralAI can register multiple hook functions with error", async () => {
const fetcher = (): Promise<Response> =>
Promise.reject(new Error("Intended fetcher error"));
const customHttpClient = new HTTPClient({ fetcher });
const model = new MistralAI({
model: "codestral-latest",
httpClient: customHttpClient,
maxRetries: 0,
});
let count = 0;
const addCount = () => {
count += 1;
};
const beforeRequestHook = (): void => {
addCount();
};
const RequestErrorHook = (): void => {
addCount();
};
model.beforeRequestHooks = [beforeRequestHook];
model.requestErrorHooks = [RequestErrorHook];
model.addAllHooksToHttpClient();
try {
await model.invoke("Log 'Hello world' to the console in javascript: ");
} catch (e: unknown) {
// Intended error, do not rethrow
}
// console.log(count);
expect(count).toEqual(2);
});
test("Test MistralAI can remove hook", async () => {
const model = new MistralAI({
model: "codestral-latest",
});
let count = 0;
const addCount = () => {
count += 1;
};
const beforeRequestHook = (): void => {
addCount();
};
model.beforeRequestHooks = [beforeRequestHook];
model.addAllHooksToHttpClient();
await model.invoke("Log 'Hello world' to the console in javascript: ");
// console.log(count);
expect(count).toEqual(1);
model.removeHookFromHttpClient(beforeRequestHook);
await model.invoke("Log 'Hello world' to the console in javascript: ");
// console.log(count);
expect(count).toEqual(1);
});
test("Test MistralAI can remove all hooks", async () => {
const model = new MistralAI({
model: "codestral-latest",
});
let count = 0;
const addCount = () => {
count += 1;
};
const beforeRequestHook = (): void => {
addCount();
};
const ResponseHook = (): void => {
addCount();
};
model.beforeRequestHooks = [beforeRequestHook];
model.responseHooks = [ResponseHook];
model.addAllHooksToHttpClient();
await model.invoke("Log 'Hello world' to the console in javascript: ");
// console.log(count);
expect(count).toEqual(2);
model.removeAllHooksFromHttpClient();
await model.invoke("Log 'Hello world' to the console in javascript: ");
// console.log(count);
expect(count).toEqual(2);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai/src | lc_public_repos/langchainjs/libs/langchain-mistralai/src/tests/chat_models.standard.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelUnitTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { LangSmithParams } from "@langchain/core/language_models/chat_models";
import { ChatMistralAI, ChatMistralAICallOptions } from "../chat_models.js";
// Runs the shared LangChain standard unit test suite against ChatMistralAI,
// with Mistral-specific overrides for LangSmith params and API key handling.
class ChatMistralAIStandardUnitTests extends ChatModelUnitTests<
  ChatMistralAICallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: ChatMistralAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {},
    });
    // This must be set so method like `.bindTools` or `.withStructuredOutput`
    // which we call after instantiating the model will work.
    // (constructor will throw if API key is not set)
    process.env.MISTRAL_API_KEY = "test";
  }

  // Override: the suite's default expected params include stop sequences,
  // which ChatMistralAI does not support.
  expectedLsParams(): Partial<LangSmithParams> {
    console.warn(
      "Overriding testStandardParams. ChatMistralAI does not support stop sequences."
    );
    return {
      ls_provider: "string",
      ls_model_name: "string",
      ls_model_type: "chat",
      ls_temperature: 0,
      ls_max_tokens: 0,
    };
  }

  testChatModelInitApiKey() {
    // Unset the API key env var here so this test can properly check
    // the API key class arg.
    process.env.MISTRAL_API_KEY = "";
    super.testChatModelInitApiKey();
    // Re-set the API key env var here so other tests can run properly.
    process.env.MISTRAL_API_KEY = "test";
  }
}

const testClass = new ChatMistralAIStandardUnitTests();

test("ChatMistralAIStandardUnitTests", () => {
  const testResults = testClass.runTests();
  expect(testResults).toBe(true);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai/src | lc_public_repos/langchainjs/libs/langchain-mistralai/src/tests/chat_models.int.test.ts | import { test } from "@jest/globals";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import {
AIMessage,
AIMessageChunk,
HumanMessage,
SystemMessage,
ToolMessage,
} from "@langchain/core/messages";
import { ContentChunk as MistralAIContentChunk } from "@mistralai/mistralai/models/components/contentchunk.js";
import { HTTPClient } from "@mistralai/mistralai/lib/http.js";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatMistralAI } from "../chat_models.js";
import { _mistralContentChunkToMessageContentComplex } from "../utils.js";
// Smoke test: a simple system+human prompt piped through ChatMistralAI
// should return a non-trivial text response.
test("Test ChatMistralAI can invoke hello", async () => {
  const model = new ChatMistralAI({
    model: "mistral-tiny",
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  const response = await prompt.pipe(model).invoke({
    input: "Hello",
  });
  expect(response.content.length).toBeGreaterThan(1);
});
// Verifies that streaming yields multiple chunks and that the chunk
// contents concatenate into a non-empty message.
test("Test ChatMistralAI can stream", async () => {
  const model = new ChatMistralAI();
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  const response = await prompt.pipe(model).stream({
    input: "Hello",
  });
  let itters = 0;
  let fullMessage = "";
  for await (const item of response) {
    itters += 1;
    fullMessage += item.content;
  }
  expect(itters).toBeGreaterThan(1);
  // The original test accumulated `fullMessage` but never asserted on it;
  // check that the streamed chunks actually carry content.
  expect(fullMessage.length).toBeGreaterThan(0);
});
test("Can call tools using structured tools", async () => {
class Calculator extends StructuredTool {
name = "calculator";
description = "Calculate the answer to a math equation";
schema = z.object({
calculator: z
.string()
.describe("The math equation to calculate the answer for."),
});
async _call(_input: { input: string }) {
return "the answer!";
}
}
const model = new ChatMistralAI({
model: "mistral-large-latest",
}).bind({
tools: [new Calculator()],
});
const prompt = ChatPromptTemplate.fromMessages([
["system", "you are very bad at math and always must use a calculator"],
[
"human",
"what is the sum of 223 + 228 divided by 718236 multiplied by 1234?",
],
]);
const chain = prompt.pipe(model);
const response = await chain.invoke({});
expect("tool_calls" in response).toBe(true);
// console.log(response.additional_kwargs.tool_calls?.[0]);
expect(response.tool_calls?.[0].name).toBe("calculator");
expect(response.tool_calls?.[0].args?.calculator).toBeDefined();
});
test("Can call tools using raw tools", async () => {
const tools = [
{
type: "function",
function: {
name: "calculator",
description: "Calculate the answer to a math equation",
parameters: {
type: "object",
properties: {
calculator: {
type: "string",
description: "The math equation to calculate the answer for.",
},
},
required: ["calculator"],
},
},
},
];
const model = new ChatMistralAI({
model: "mistral-large-latest",
}).bind({
tools,
});
const prompt = ChatPromptTemplate.fromMessages([
["system", "you are very bad at math and always must use a calculator"],
[
"human",
"what is the sum of 223 + 228 divided by 718236 multiplied by 1234?",
],
]);
const chain = prompt.pipe(model);
const response = await chain.invoke({});
// console.log(response);
expect(response.tool_calls?.length).toEqual(1);
expect(response.tool_calls?.[0].name).toBe("calculator");
expect(response.tool_calls?.[0].args?.calculator).toBeDefined();
});
test("Can call .stream with tool calling", async () => {
class Calculator extends StructuredTool {
name = "calculator";
description = "Calculate the answer to a math equation";
schema = z.object({
calculator: z
.string()
.describe("The math equation to calculate the answer for."),
});
async _call(_input: { input: string }) {
return "the answer!";
}
}
const model = new ChatMistralAI({
model: "mistral-large-latest",
}).bind({
tools: [new Calculator()],
});
const prompt = ChatPromptTemplate.fromMessages([
["system", "you are very bad at math and always must use a calculator"],
[
"human",
"what is the sum of 223 + 228 divided by 718236 multiplied by 1234?",
],
]);
const chain = prompt.pipe(model);
const response = await chain.stream({});
let finalRes: AIMessageChunk | null = null;
for await (const chunk of response) {
// console.log(chunk);
finalRes = chunk;
}
if (!finalRes) {
throw new Error("No final response found");
}
expect("tool_calls" in finalRes).toBe(true);
// console.log(finalRes.additional_kwargs.tool_calls?.[0]);
expect(finalRes.tool_calls?.[0].name).toBe("calculator");
expect(finalRes.tool_calls?.[0].args.calculator).toBeDefined();
});
test("Can use json mode response format", async () => {
const model = new ChatMistralAI({
model: "mistral-large-latest",
}).bind({
response_format: {
type: "json_object",
},
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
`you are very bad at math and always must use a calculator.
To use a calculator respond with valid JSON containing a single key: 'calculator' which should contain the math equation to calculate the answer for.`,
[
"human",
"what is the sum of 223 + 228 divided by 718236 multiplied by 1234?",
],
]);
const chain = prompt.pipe(model);
const response = await chain.invoke({});
// console.log(response);
const parsedRes = JSON.parse(response.content as string);
expect(parsedRes.calculator).toBeDefined();
});
test("Can call .stream with json mode", async () => {
const model = new ChatMistralAI({
model: "mistral-large-latest",
}).bind({
response_format: {
type: "json_object",
},
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
`you are very bad at math and always must use a calculator.
To use a calculator respond with valid JSON containing a single key: 'calculator' which should contain the math equation to calculate the answer for.`,
[
"human",
"what is the sum of 223 + 228 divided by 718236 multiplied by 1234?",
],
]);
const chain = prompt.pipe(model);
const response = await chain.stream({});
let finalRes = "";
for await (const chunk of response) {
// console.log(chunk);
finalRes += chunk.content;
}
// console.log(finalRes);
const parsedRes = JSON.parse(finalRes);
expect(parsedRes.calculator).toBeDefined();
});
test("Can stream and concat responses for a complex tool", async () => {
class PersonTraits extends StructuredTool {
name = "person_traits";
description = "Log the traits of a person";
schema = z.object({
person: z.object({
name: z.string().describe("Name of the person"),
age: z.number().describe("Age of the person"),
friends: z.array(
z.object({
name: z.string().describe("Name of the friend"),
age: z.number().describe("Age of the friend"),
})
),
friendsCount: z.number().describe("Number of friends"),
areFriendsCool: z
.boolean()
.describe("Whether or not the user thinks the friends are cool"),
}),
});
async _call(_input: { input: string }) {
return "the answer!";
}
}
const model = new ChatMistralAI({
model: "mistral-large-latest",
}).bind({
tools: [new PersonTraits()],
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
"You are a helpful assistant, who always logs the traits of a person and their friends because the user has a bad memory.",
"human",
"Hi!!! My name's John Doe, and I'm almost 4 years old!. I have 6 friends: Mary, age 24, May, age 22, and Jane, age 30, Joey, age 18, Sam, age 19 and MacFarland age 66. They're all super cool!",
]);
const chain = prompt.pipe(model);
const response = await chain.stream({});
let finalRes: AIMessageChunk[] = [];
for await (const chunk of response) {
// console.log(chunk);
finalRes = finalRes.concat(chunk);
}
if (!finalRes) {
throw new Error("No final response found");
}
expect(finalRes[0].tool_calls?.[0]).toBeDefined();
const toolCall = finalRes[0].tool_calls?.[0];
expect(toolCall?.name).toBe("person_traits");
const person = toolCall?.args?.person;
expect(person).toBeDefined();
expect(person.name).toBeDefined();
expect(person.age).toBeDefined();
expect(person.friends.length).toBeGreaterThan(0);
expect(person.friendsCount).toBeDefined();
expect(person.areFriendsCool).toBeDefined();
});
test("Few shotting with tool calls", async () => {
const chat = new ChatMistralAI({
model: "mistral-large-latest",
temperature: 0,
}).bind({
tools: [
{
type: "function",
function: {
name: "get_current_weather",
description: "Get the current weather in a given location",
parameters: {
type: "object",
properties: {
location: {
type: "string",
description: "The city and state, e.g. San Francisco, CA",
},
unit: { type: "string", enum: ["celsius", "fahrenheit"] },
},
required: ["location"],
},
},
},
],
});
const res = await chat.invoke([
new HumanMessage("What is the weather in SF?"),
new AIMessage({
content: "",
tool_calls: [
{
id: "12345",
name: "get_current_weather",
args: {
location: "SF",
},
},
],
}),
new ToolMessage({
tool_call_id: "12345",
content: "It is currently 24 degrees with hail in SF.",
}),
new AIMessage("It is currently 24 degrees in SF with hail in SF."),
new HumanMessage("What did you say the weather was?"),
]);
// console.log(res);
expect(res.content).toContain("24");
});
describe("withStructuredOutput", () => {
test("withStructuredOutput zod schema function calling", async () => {
const model = new ChatMistralAI({
temperature: 0,
model: "mistral-large-latest",
});
const calculatorSchema = z
.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute."),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
})
.describe("A calculator schema");
const modelWithStructuredOutput = model.withStructuredOutput(
calculatorSchema,
{
name: "calculator",
}
);
const prompt = ChatPromptTemplate.fromMessages([
"system",
"You are VERY bad at math and must always use a calculator.",
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
console.log(result);
expect("operation" in result).toBe(true);
expect("number1" in result).toBe(true);
expect("number2" in result).toBe(true);
});
test("withStructuredOutput zod schema JSON mode", async () => {
const model = new ChatMistralAI({
temperature: 0,
model: "mistral-large-latest",
});
const calculatorSchema = z.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
number1: z.number(),
number2: z.number(),
});
const modelWithStructuredOutput = model.withStructuredOutput(
calculatorSchema,
{
name: "calculator",
method: "jsonMode",
}
);
const prompt = ChatPromptTemplate.fromMessages([
"system",
`You are VERY bad at math and must always use a calculator.
Respond with a JSON object containing three keys:
'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide',
'number1': the first number to operate on,
'number2': the second number to operate on.
`,
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
// console.log(result);
expect("operation" in result).toBe(true);
expect("number1" in result).toBe(true);
expect("number2" in result).toBe(true);
});
test("withStructuredOutput JSON schema function calling", async () => {
const model = new ChatMistralAI({
temperature: 0,
model: "mistral-large-latest",
});
const calculatorSchema = z
.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute."),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
})
.describe("A calculator schema");
const modelWithStructuredOutput = model.withStructuredOutput(
zodToJsonSchema(calculatorSchema),
{
name: "calculator",
}
);
const prompt = ChatPromptTemplate.fromMessages([
"system",
`You are VERY bad at math and must always use a calculator.`,
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
// console.log(result);
expect("operation" in result).toBe(true);
expect("number1" in result).toBe(true);
expect("number2" in result).toBe(true);
});
test("withStructuredOutput OpenAI function definition function calling", async () => {
const model = new ChatMistralAI({
temperature: 0,
model: "mistral-large-latest",
});
const calculatorSchema = z
.object({
operation: z
.enum(["add", "subtract", "multiply", "divide"])
.describe("The type of operation to execute."),
number1: z.number().describe("The first number to operate on."),
number2: z.number().describe("The second number to operate on."),
})
.describe("A calculator schema");
const modelWithStructuredOutput = model.withStructuredOutput({
name: "calculator",
parameters: zodToJsonSchema(calculatorSchema),
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
`You are VERY bad at math and must always use a calculator.`,
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
// console.log(result);
expect("operation" in result).toBe(true);
expect("number1" in result).toBe(true);
expect("number2" in result).toBe(true);
});
// JSON mode: the model is steered by the system prompt (not a tool schema),
// so the prompt itself must spell out the expected JSON shape.
test("withStructuredOutput JSON schema JSON mode", async () => {
  const model = new ChatMistralAI({
    temperature: 0,
    model: "mistral-large-latest",
  });
  const calculatorSchema = z.object({
    operation: z.enum(["add", "subtract", "multiply", "divide"]),
    number1: z.number(),
    number2: z.number(),
  });
  const modelWithStructuredOutput = model.withStructuredOutput(
    zodToJsonSchema(calculatorSchema),
    {
      name: "calculator",
      method: "jsonMode",
    }
  );
  const prompt = ChatPromptTemplate.fromMessages([
    "system",
    `You are VERY bad at math and must always use a calculator.
Respond with a JSON object containing three keys:
'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide',
'number1': the first number to operate on,
'number2': the second number to operate on.
`,
    "human",
    "Please help me!! What is 2 + 2?",
  ]);
  const chain = prompt.pipe(modelWithStructuredOutput);
  const result = await chain.invoke({});
  // console.log(result);
  // All three keys described in the system prompt must be present.
  expect("operation" in result).toBe(true);
  expect("number1" in result).toBe(true);
  expect("number2" in result).toBe(true);
});
// includeRaw: true wraps output as { raw, parsed } so callers can inspect the
// underlying AIMessage (and its tool calls) alongside the parsed result.
test("withStructuredOutput includeRaw true", async () => {
  const model = new ChatMistralAI({
    temperature: 0,
    model: "mistral-large-latest",
  });
  const calculatorSchema = z
    .object({
      operation: z
        .enum(["add", "subtract", "multiply", "divide"])
        .describe("The type of operation to execute."),
      number1: z.number().describe("The first number to operate on."),
      number2: z.number().describe("The second number to operate on."),
    })
    .describe("A calculator schema");
  const modelWithStructuredOutput = model.withStructuredOutput(
    calculatorSchema,
    {
      name: "calculator",
      includeRaw: true,
    }
  );
  const prompt = ChatPromptTemplate.fromMessages([
    "system",
    "You are VERY bad at math and must always use a calculator.",
    "human",
    "Please help me!! What is 2 + 2?",
  ]);
  const chain = prompt.pipe(modelWithStructuredOutput);
  const result = await chain.invoke({});
  // console.log(result);
  expect("parsed" in result).toBe(true);
  // Need to make TS happy :)
  if (!("parsed" in result)) {
    throw new Error("parsed not in result");
  }
  const { parsed } = result;
  // Parsed side carries the three schema fields.
  expect("operation" in parsed).toBe(true);
  expect("number1" in parsed).toBe(true);
  expect("number2" in parsed).toBe(true);
  expect("raw" in result).toBe(true);
  // Need to make TS happy :)
  if (!("raw" in result)) {
    throw new Error("raw not in result");
  }
  const { raw } = result as { raw: AIMessage };
  // Raw side should contain the forced "calculator" tool call whose args
  // mirror the schema fields.
  expect(raw.tool_calls?.length).toBeGreaterThan(0);
  expect(raw.tool_calls?.[0].name).toBe("calculator");
  expect("operation" in (raw.tool_calls?.[0]?.args ?? {})).toBe(true);
  expect("number1" in (raw.tool_calls?.[0]?.args ?? {})).toBe(true);
  expect("number2" in (raw.tool_calls?.[0]?.args ?? {})).toBe(true);
});
});
// Abort & timeout behavior: both .stream and .invoke should reject with an
// error whose message is "AbortError" when cancelled via an AbortSignal or a
// request timeout.
describe("ChatMistralAI aborting", () => {
  test("ChatMistralAI can abort request via .stream", async () => {
    const controller = new AbortController();
    const model = new ChatMistralAI().bind({
      signal: controller.signal,
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You're super good at counting!"],
      [
        "human",
        "Count from 0-100, remember to say 'woof' after every even number!",
      ],
    ]);
    const stream = await prompt.pipe(model).stream({});
    let finalRes = "";
    let iters = 0;
    try {
      for await (const item of stream) {
        finalRes += item.content;
        // console.log(finalRes);
        iters += 1;
        // Abort after the first chunk; the next loop iteration should throw.
        controller.abort();
      }
      // If the loop completes without error, fail the test
      fail(
        "Expected for-await loop to throw an error due to abort, but it did not."
      );
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (error: any) {
      // Check if the error is due to the abort action
      expect(error.message).toBe("AbortError");
    }
    // Exactly one chunk should have been consumed before the abort took effect.
    expect(iters).toBe(1);
  });
  test("ChatMistralAI can timeout requests via .stream", async () => {
    const model = new ChatMistralAI().bind({
      timeout: 1000,
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You're super good at counting!"],
      [
        "human",
        "Count from 0-100, remember to say 'woof' after every even number!",
      ],
    ]);
    let didError = false;
    let finalRes = "";
    try {
      // Stream is inside the for-await loop because sometimes
      // the abort will occur before the first stream event is emitted
      const stream = await prompt.pipe(model).stream({});
      for await (const item of stream) {
        finalRes += item.content;
        // console.log(finalRes);
      }
      // If the loop completes without error, fail the test
      fail(
        "Expected for-await loop to throw an error due to abort, but it did not."
      );
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (error: any) {
      didError = true;
      // Check if the error is due to the abort action
      expect(error.message).toBe("AbortError");
    }
    expect(didError).toBeTruthy();
  });
  test("ChatMistralAI can abort request via .invoke", async () => {
    const controller = new AbortController();
    const model = new ChatMistralAI().bind({
      signal: controller.signal,
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You're super good at counting!"],
      [
        "human",
        "Count from 0-100, remember to say 'woof' after every even number!",
      ],
    ]);
    let didError = false;
    setTimeout(() => controller.abort(), 1000); // Abort after 1 second
    try {
      await prompt.pipe(model).invoke({});
      // If the loop completes without error, fail the test
      fail(
        "Expected for-await loop to throw an error due to abort, but it did not."
      );
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (error: any) {
      didError = true;
      // Check if the error is due to the abort action
      expect(error.message).toBe("AbortError");
    }
    expect(didError).toBeTruthy();
  });
  test("ChatMistralAI can timeout requests via .invoke", async () => {
    const model = new ChatMistralAI().bind({
      timeout: 1000,
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You're super good at counting!"],
      [
        "human",
        "Count from 0-100, remember to say 'woof' after every even number!",
      ],
    ]);
    let didError = false;
    try {
      await prompt.pipe(model).invoke({});
      // If the loop completes without error, fail the test
      fail(
        "Expected for-await loop to throw an error due to abort, but it did not."
      );
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (error: any) {
      didError = true;
      // Check if the error is due to the abort action
      expect(error.message).toBe("AbortError");
    }
    expect(didError).toBeTruthy();
  });
});
// Integration coverage for the `codestral-latest` code model: plain invoke,
// streaming, and forced tool calling with a structured tool.
describe("codestral-latest", () => {
  test("Test ChatMistralAI can invoke codestral-latest", async () => {
    const model = new ChatMistralAI({
      model: "codestral-latest",
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You are a helpful assistant"],
      ["human", "{input}"],
    ]);
    const response = await prompt.pipe(model).invoke({
      input: "How can I log 'Hello, World!' in Python?",
    });
    // console.log("response", response);
    expect(response.content.length).toBeGreaterThan(1);
    expect((response.content as string).toLowerCase()).toContain("hello");
    expect((response.content as string).toLowerCase()).toContain("world");
  });
  test("Test ChatMistralAI can stream codestral-latest", async () => {
    const model = new ChatMistralAI({
      model: "codestral-latest",
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You are a helpful assistant"],
      ["human", "{input}"],
    ]);
    const response = await prompt.pipe(model).stream({
      input: "How can I log 'Hello, World!' in Python?",
    });
    // Fix: renamed the misspelled `itters` counter to `iters` for consistency
    // with the other streaming tests in this file.
    let iters = 0;
    let fullMessage = "";
    for await (const item of response) {
      // console.log(item);
      iters += 1;
      fullMessage += item.content;
    }
    // console.log("fullMessage", fullMessage);
    // More than one chunk proves the response actually streamed.
    expect(iters).toBeGreaterThan(1);
    expect(fullMessage.toLowerCase()).toContain("hello");
    expect(fullMessage.toLowerCase()).toContain("world");
  });
  test("Can call tools using codestral-latest structured tools", async () => {
    // Minimal StructuredTool whose _call simply echoes its validated input.
    class CodeSandbox extends StructuredTool {
      name = "code_sandbox";
      description =
        "A tool which can run Python code in an isolated environment";
      schema = z.object({
        code: z
          .string()
          .describe(
            "The Python code to execute. Must only contain valid Python code."
          ),
      });
      async _call(input: z.infer<typeof this.schema>) {
        return JSON.stringify(input, null, 2);
      }
    }
    const model = new ChatMistralAI({
      model: "codestral-latest",
    }).bind({
      tools: [new CodeSandbox()],
      // "any" forces the model to call one of the provided tools.
      tool_choice: "any",
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You are an excellent python engineer."],
      ["human", "{input}"],
    ]);
    const chain = prompt.pipe(model);
    const response = await chain.invoke({
      input:
        "Write a function that takes in a single argument and logs it to the console. Ensure the code is in Python.",
    });
    // console.log(response);
    expect("tool_calls" in response).toBe(true);
    // console.log(response.tool_calls?.[0]);
    if (!response.tool_calls?.[0]) {
      throw new Error("No tool call found");
    }
    const sandboxTool = response.tool_calls[0];
    expect(sandboxTool.name).toBe("code_sandbox");
    expect(sandboxTool.args?.code).toBeDefined();
    // console.log(sandboxTool.args?.code);
  });
});
// Token accounting on streamed calls: concatenated chunks should carry
// internally consistent usage_metadata.
test("Stream token count usage_metadata", async () => {
  const model = new ChatMistralAI({
    model: "codestral-latest",
    temperature: 0,
    maxTokens: 10,
  });
  let res: AIMessageChunk | null = null;
  for await (const chunk of await model.stream(
    "Why is the sky blue? Be concise."
  )) {
    if (!res) {
      res = chunk;
    } else {
      // Concatenate chunks so usage_metadata accumulates across the stream.
      res = res.concat(chunk);
    }
  }
  // console.log(res);
  expect(res?.usage_metadata).toBeDefined();
  if (!res?.usage_metadata) {
    return;
  }
  expect(res.usage_metadata.input_tokens).toBeGreaterThan(1);
  expect(res.usage_metadata.output_tokens).toBeGreaterThan(1);
  // Total must equal input + output.
  expect(res.usage_metadata.total_tokens).toBe(
    res.usage_metadata.input_tokens + res.usage_metadata.output_tokens
  );
});
// With streamUsage: false, no usage_metadata should be attached to chunks.
test("streamUsage excludes token usage", async () => {
  const model = new ChatMistralAI({
    model: "codestral-latest",
    temperature: 0,
    streamUsage: false,
  });
  let res: AIMessageChunk | null = null;
  for await (const chunk of await model.stream(
    "Why is the sky blue? Be concise."
  )) {
    if (!res) {
      res = chunk;
    } else {
      res = res.concat(chunk);
    }
  }
  // console.log(res);
  expect(res?.usage_metadata).not.toBeDefined();
});
// Non-streaming invoke should also report consistent token usage.
test("Invoke token count usage_metadata", async () => {
  const model = new ChatMistralAI({
    model: "codestral-latest",
    temperature: 0,
    maxTokens: 10,
  });
  const res = await model.invoke("Why is the sky blue? Be concise.");
  // console.log(res);
  expect(res?.usage_metadata).toBeDefined();
  if (!res?.usage_metadata) {
    return;
  }
  expect(res.usage_metadata.input_tokens).toBeGreaterThan(1);
  expect(res.usage_metadata.output_tokens).toBeGreaterThan(1);
  expect(res.usage_metadata.total_tokens).toBe(
    res.usage_metadata.input_tokens + res.usage_metadata.output_tokens
  );
});
// withStructuredOutput must force a tool call even when the user's question
// is unrelated to the provided schema (weather tool vs. arithmetic question).
test("withStructuredOutput will always force tool usage", async () => {
  const model = new ChatMistralAI({
    temperature: 0,
    model: "mistral-large-latest",
  });
  const weatherTool = z
    .object({
      location: z.string().describe("The name of city to get the weather for."),
    })
    .describe(
      "Get the weather of a specific location and return the temperature in Celsius."
    );
  const modelWithTools = model.withStructuredOutput(weatherTool, {
    name: "get_weather",
    includeRaw: true,
  });
  // NOTE: the prompt deliberately asks a math question, so only forced tool
  // choice (not topical relevance) can explain the tool call asserted below.
  const response = await modelWithTools.invoke(
    "What is the sum of 271623 and 281623? It is VERY important you use a calculator tool to give me the answer."
  );
  if (!("tool_calls" in response.raw)) {
    throw new Error("Tool call not found in response");
  }
  const castMessage = response.raw as AIMessage;
  expect(castMessage.tool_calls).toHaveLength(1);
});
// Multimodal invoke: the pixtral model should accept MessageContent arrays
// mixing text and image_url parts.
test("Test ChatMistralAI can invoke with MessageContent input types", async () => {
  const model = new ChatMistralAI({
    model: "pixtral-12b-2409",
  });
  const messagesListContent = [
    new SystemMessage({
      content: "List the top 5 countries in Europe with the highest GDP",
    }),
    new HumanMessage({
      content: [
        {
          type: "text",
          text: "Here is an infographic with European GPDs",
        },
        {
          type: "image_url",
          image_url: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
        },
      ],
    }),
  ];
  const response = await model.invoke(messagesListContent);
  // Fix: commented out the stray console.log to match the convention used by
  // every other test in this file (debug logs left commented out).
  // console.log("response", response);
  expect(response.content.length).toBeGreaterThan(1);
});
// Pure unit test: Mistral SDK ContentChunk[] (camelCase `imageUrl`) must map
// to LangChain MessageContentComplex[] (snake_case `image_url`).
test("Mistral ContentChunk to MessageContentComplex conversion", () => {
  const mistralMessages = [
    {
      type: "text",
      text: "Test message",
    },
    {
      type: "image_url",
      imageUrl: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
    },
    {
      type: "image_url",
      imageUrl: {
        url: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
        detail: "high",
      },
    },
    {
      type: "image_url",
      imageUrl: {
        url: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
        detail: "medium",
      },
    },
    {
      type: "image_url",
      imageUrl: {
        url: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
      },
    },
  ] as MistralAIContentChunk[];
  expect(_mistralContentChunkToMessageContentComplex(mistralMessages)).toEqual([
    {
      type: "text",
      text: "Test message",
    },
    {
      type: "image_url",
      image_url: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
    },
    {
      type: "image_url",
      image_url: {
        url: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
        detail: "high",
      },
    },
    // NOTE(review): the "medium" detail input above is expected to map to an
    // output WITHOUT a detail field, while "high" is preserved — confirm this
    // asymmetry is intended by _mistralContentChunkToMessageContentComplex.
    {
      type: "image_url",
      image_url: {
        url: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
      },
    },
    {
      type: "image_url",
      image_url: {
        url: "https://mistral.ai/images/news/pixtral-12b/gdp.png",
      },
    },
  ]);
});
// HTTP hook tests: the Mistral SDK HTTPClient exposes beforeRequest /
// requestError / response hooks. These tests verify that ChatMistralAI can
// register them (addAllHooksToHttpClient), that they fire the expected number
// of times, and that they can be removed individually or in bulk.
test("Test ChatMistralAI can register BeforeRequestHook function", async () => {
  const model = new ChatMistralAI({
    model: "mistral-tiny",
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.addAllHooksToHttpClient();
  await prompt.pipe(model).invoke({
    input: "Hello",
  });
  // console.log(count);
  // One request => the before-request hook fires exactly once.
  expect(count).toEqual(1);
});
test("Test ChatMistralAI can register RequestErrorHook function", async () => {
  // Custom fetcher that always rejects, guaranteeing the error hook fires.
  const fetcher = (): Promise<Response> =>
    Promise.reject(new Error("Intended fetcher error"));
  const customHttpClient = new HTTPClient({ fetcher });
  const model = new ChatMistralAI({
    model: "mistral-tiny",
    httpClient: customHttpClient,
    maxRetries: 0,
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const RequestErrorHook = (): void => {
    addCount();
    console.log("In request error hook");
  };
  model.requestErrorHooks = [RequestErrorHook];
  model.addAllHooksToHttpClient();
  try {
    await prompt.pipe(model).invoke({
      input: "Hello",
    });
  } catch (e: unknown) {
    // Intended error, do not rethrow
  }
  // console.log(count);
  expect(count).toEqual(1);
});
test("Test ChatMistralAI can register ResponseHook function", async () => {
  const model = new ChatMistralAI({
    model: "mistral-tiny",
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const ResponseHook = (): void => {
    addCount();
  };
  model.responseHooks = [ResponseHook];
  model.addAllHooksToHttpClient();
  await prompt.pipe(model).invoke({
    input: "Hello",
  });
  // console.log(count);
  expect(count).toEqual(1);
});
// Before-request and response hooks registered together should each fire once
// on a successful request (count == 2).
test("Test ChatMistralAI can register multiple hook functions with success", async () => {
  const model = new ChatMistralAI({
    model: "mistral-tiny",
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  const ResponseHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.responseHooks = [ResponseHook];
  model.addAllHooksToHttpClient();
  await prompt.pipe(model).invoke({
    input: "Hello",
  });
  // console.log(count);
  expect(count).toEqual(2);
});
// Before-request and request-error hooks should each fire once when the
// request fails (count == 2).
test("Test ChatMistralAI can register multiple hook functions with error", async () => {
  const fetcher = (): Promise<Response> =>
    Promise.reject(new Error("Intended fetcher error"));
  const customHttpClient = new HTTPClient({ fetcher });
  const model = new ChatMistralAI({
    model: "mistral-tiny",
    httpClient: customHttpClient,
    maxRetries: 0,
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  const RequestErrorHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.requestErrorHooks = [RequestErrorHook];
  model.addAllHooksToHttpClient();
  try {
    await prompt.pipe(model).invoke({
      input: "Hello",
    });
  } catch (e: unknown) {
    // Intended error, do not rethrow
  }
  // console.log(count);
  expect(count).toEqual(2);
});
// After removeHookFromHttpClient, a second request must not re-fire the hook
// (count stays at 1).
test("Test ChatMistralAI can remove hook", async () => {
  const model = new ChatMistralAI({
    model: "mistral-tiny",
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.addAllHooksToHttpClient();
  await prompt.pipe(model).invoke({
    input: "Hello",
  });
  // console.log(count);
  expect(count).toEqual(1);
  model.removeHookFromHttpClient(beforeRequestHook);
  await prompt.pipe(model).invoke({
    input: "Hello",
  });
  // console.log(count);
  expect(count).toEqual(1);
});
// After removeAllHooksFromHttpClient, a second request must not fire any of
// the previously registered hooks (count stays at 2).
test("Test ChatMistralAI can remove all hooks", async () => {
  const model = new ChatMistralAI({
    model: "mistral-tiny",
  });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", "You are a helpful assistant"],
    ["human", "{input}"],
  ]);
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  const ResponseHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.responseHooks = [ResponseHook];
  model.addAllHooksToHttpClient();
  await prompt.pipe(model).invoke({
    input: "Hello",
  });
  // console.log(count);
  expect(count).toEqual(2);
  model.removeAllHooksFromHttpClient();
  await prompt.pipe(model).invoke({
    input: "Hello",
  });
  // console.log(count);
  expect(count).toEqual(2);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai/src | lc_public_repos/langchainjs/libs/langchain-mistralai/src/tests/embeddings.int.test.ts | import { test } from "@jest/globals";
import { HTTPClient } from "@mistralai/mistralai/lib/http.js";
import { MistralAIEmbeddings } from "../embeddings.js";
// Embedding a single query string yields one 1024-dimensional vector.
test("Test MistralAIEmbeddings can embed query", async () => {
  const embedder = new MistralAIEmbeddings();
  // "Hello world" in French 🤓
  const vector = await embedder.embedQuery("Bonjour le monde");
  // console.log("embeddings", vector);
  expect(vector.length).toBe(1024);
});
// Embedding a batch of documents yields one 1024-dim vector per input.
test("Test MistralAIEmbeddings can embed documents", async () => {
  const embedder = new MistralAIEmbeddings();
  // "Hello world" in French 🤓
  const input = "Bonjour le monde";
  const vectors = await embedder.embedDocuments([input, input]);
  // console.log("embeddings", vectors);
  expect(vectors.length).toBe(2);
  for (const vector of vectors) {
    expect(vector.length).toBe(1024);
  }
});
// HTTP hook tests for MistralAIEmbeddings, mirroring the ChatMistralAI hook
// suite: register beforeRequest / requestError / response hooks, count how
// many times they fire, and verify individual / bulk removal.
test("Test MistralAIEmbeddings can register BeforeRequestHook function", async () => {
  const model = new MistralAIEmbeddings({
    model: "mistral-embed",
  });
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.addAllHooksToHttpClient();
  await model.embedQuery("Hello");
  // console.log(count);
  // One embedding request => the before-request hook fires exactly once.
  expect(count).toEqual(1);
});
test("Test MistralAIEmbeddings can register RequestErrorHook function", async () => {
  // Custom fetcher that always rejects, guaranteeing the error hook fires.
  const fetcher = (): Promise<Response> =>
    Promise.reject(new Error("Intended fetcher error"));
  const customHttpClient = new HTTPClient({ fetcher });
  const model = new MistralAIEmbeddings({
    model: "mistral-embed",
    httpClient: customHttpClient,
    maxRetries: 0,
  });
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const RequestErrorHook = (): void => {
    addCount();
    console.log("In request error hook");
  };
  model.requestErrorHooks = [RequestErrorHook];
  model.addAllHooksToHttpClient();
  try {
    await model.embedQuery("Hello");
  } catch (e: unknown) {
    // Intended error, do not rethrow
  }
  // console.log(count);
  expect(count).toEqual(1);
});
test("Test MistralAIEmbeddings can register ResponseHook function", async () => {
  const model = new MistralAIEmbeddings({
    model: "mistral-embed",
  });
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const ResponseHook = (): void => {
    addCount();
  };
  model.responseHooks = [ResponseHook];
  model.addAllHooksToHttpClient();
  await model.embedQuery("Hello");
  // console.log(count);
  expect(count).toEqual(1);
});
// Before-request and response hooks together should each fire once on a
// successful request (count == 2).
test("Test MistralAIEmbeddings can register multiple hook functions with success", async () => {
  const model = new MistralAIEmbeddings({
    model: "mistral-embed",
  });
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  const ResponseHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.responseHooks = [ResponseHook];
  model.addAllHooksToHttpClient();
  await model.embedQuery("Hello");
  // console.log(count);
  expect(count).toEqual(2);
});
// Before-request and request-error hooks should each fire once when the
// request fails (count == 2).
test("Test MistralAIEmbeddings can register multiple hook functions with error", async () => {
  const fetcher = (): Promise<Response> =>
    Promise.reject(new Error("Intended fetcher error"));
  const customHttpClient = new HTTPClient({ fetcher });
  const model = new MistralAIEmbeddings({
    model: "mistral-embed",
    httpClient: customHttpClient,
    maxRetries: 0,
  });
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  const RequestErrorHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.requestErrorHooks = [RequestErrorHook];
  model.addAllHooksToHttpClient();
  try {
    await model.embedQuery("Hello");
  } catch (e: unknown) {
    // Intended error, do not rethrow
  }
  // console.log(count);
  expect(count).toEqual(2);
});
// After removeHookFromHttpClient, a second request must not re-fire the hook.
test("Test MistralAIEmbeddings can remove hook", async () => {
  const model = new MistralAIEmbeddings({
    model: "mistral-embed",
  });
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.addAllHooksToHttpClient();
  await model.embedQuery("Hello");
  // console.log(count);
  expect(count).toEqual(1);
  model.removeHookFromHttpClient(beforeRequestHook);
  await model.embedQuery("Hello");
  // console.log(count);
  expect(count).toEqual(1);
});
// After removeAllHooksFromHttpClient, a second request must not fire any of
// the previously registered hooks.
test("Test MistralAIEmbeddings can remove all hooks", async () => {
  const model = new MistralAIEmbeddings({
    model: "mistral-embed",
  });
  let count = 0;
  const addCount = () => {
    count += 1;
  };
  const beforeRequestHook = (): void => {
    addCount();
  };
  const ResponseHook = (): void => {
    addCount();
  };
  model.beforeRequestHooks = [beforeRequestHook];
  model.responseHooks = [ResponseHook];
  model.addAllHooksToHttpClient();
  await model.embedQuery("Hello");
  // console.log(count);
  expect(count).toEqual(2);
  model.removeAllHooksFromHttpClient();
  await model.embedQuery("Hello");
  // console.log(count);
  expect(count).toEqual(2);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai/src | lc_public_repos/langchainjs/libs/langchain-mistralai/src/tests/chat_models.test.ts | import { ChatMistralAI } from "../chat_models.js";
import {
_isValidMistralToolCallId,
_convertToolCallIdToMistralCompatible,
_mistralContentChunkToMessageContentComplex,
} from "../utils.js";
// Unit tests for Mistral-compatible tool-call IDs. From the cases below the
// valid format appears to be exactly 9 alphanumeric characters (see
// _isValidMistralToolCallId for the authoritative rule).
describe("Mistral Tool Call ID Conversion", () => {
  test("valid and invalid Mistral tool call IDs", () => {
    // 9-char alphanumeric ID is accepted; shorter or prefixed IDs are not.
    expect(_isValidMistralToolCallId("ssAbar4Dr")).toBe(true);
    expect(_isValidMistralToolCallId("abc123")).toBe(false);
    expect(_isValidMistralToolCallId("call_JIIjI55tTipFFzpcP8re3BpM")).toBe(
      false
    );
  });
  test("tool call ID conversion", () => {
    // Expected outputs are the deterministic values produced by
    // _convertToolCallIdToMistralCompatible; an already-valid ID must pass
    // through unchanged (first entry).
    const resultMap: Record<string, string> = {
      ssAbar4Dr: "ssAbar4Dr",
      abc123: "0001yoN1K",
      call_JIIjI55tTipFFzpcP8re3BpM: "0001sqrj5",
      12345: "00003akVR",
    };
    for (const [inputId, expectedOutput] of Object.entries(resultMap)) {
      const convertedId = _convertToolCallIdToMistralCompatible(inputId);
      expect(convertedId).toBe(expectedOutput);
      // Every converted ID must itself be Mistral-compatible.
      expect(_isValidMistralToolCallId(convertedId)).toBe(true);
    }
  });
});
// Serialization snapshot: the API key must be serialized as a secret
// reference (MISTRAL_API_KEY), never as its literal value ("foo").
test("Serialization", () => {
  const model = new ChatMistralAI({
    apiKey: "foo",
  });
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","mistralai","ChatMistralAI"],"kwargs":{"mistral_api_key":{"lc":1,"type":"secret","id":["MISTRAL_API_KEY"]}}}`
  );
});
|
0 | lc_public_repos/langchainjs/libs/langchain-mistralai | lc_public_repos/langchainjs/libs/langchain-mistralai/scripts/jest-setup-after-env.js | import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";
import { afterAll, jest } from "@jest/globals";
// Wait for all pending LangChain callback handlers to settle before Jest
// tears down the environment, so no callback work leaks across test files.
afterAll(awaitAllCallbacks);
// Allow console.log to be disabled in tests
// (opt in by setting DISABLE_CONSOLE_LOGS=true, e.g. to keep CI output clean).
if (process.env.DISABLE_CONSOLE_LOGS === "true") {
  console.log = jest.fn();
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"outDir": "../dist",
"rootDir": "./src",
"target": "ES2021",
"lib": ["ES2021", "ES2022.Object", "DOM"],
"module": "ES2020",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "docs"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/LICENSE | The MIT License
Copyright (c) 2023 LangChain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/jest.config.cjs | /** @type {import('ts-jest').JestConfigWithTsJest} */
// Jest configuration for this package: TypeScript compiled via @swc/jest,
// ESM specifier mapping, and dotenv-loaded environment variables.
module.exports = {
  preset: "ts-jest/presets/default-esm",
  // Custom environment restoring the native Float32Array (see jest.env.cjs).
  testEnvironment: "./jest.env.cjs",
  modulePathIgnorePatterns: ["dist/", "docs/"],
  moduleNameMapper: {
    // Strip the ".js" suffix from relative ESM imports so the transformer can
    // resolve the underlying ".ts" sources.
    "^(\\.{1,2}/.*)\\.js$": "$1",
  },
  transform: {
    "^.+\\.tsx?$": ["@swc/jest"],
  },
  transformIgnorePatterns: [
    "/node_modules/",
    "\\.pnp\\.[^\\/]+$",
    "./scripts/jest-setup-after-env.js",
  ],
  // Load .env before tests so integration tests can read API keys.
  setupFiles: ["dotenv/config"],
  testTimeout: 20_000,
  passWithNoTests: true,
  collectCoverageFrom: ["src/**/*.ts"],
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/jest.env.cjs | const { TestEnvironment } = require("jest-environment-node");
// Node test environment whose global Float32Array is the host realm's
// constructor, so `instanceof Float32Array` checks behave as expected.
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  /**
   * @param {object} config - Jest environment configuration.
   * @param {object} context - Jest environment context.
   */
  constructor(config, context) {
    // Make `instanceof Float32Array` return true in tests
    // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
    super(config, context);
    this.global.Float32Array = Float32Array;
  }
}
module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/README.md | # @langchain/ollama
This package contains the LangChain.js integrations for Ollama via the `ollama` TypeScript SDK.
## Installation
```bash npm2yarn
npm install @langchain/ollama @langchain/core
```
To use this package you will also need Ollama installed and running locally. See the official Ollama documentation at https://ollama.com for installation instructions, then pull a model with e.g. `ollama pull llama3` before invoking it from LangChain.
## Chat Models
```typescript
import { ChatOllama } from "@langchain/ollama";
const model = new ChatOllama({
model: "llama3", // Default value.
});
const result = await model.invoke(["human", "Hello, how are you?"]);
```
## Development
To develop the `@langchain/ollama` package, you'll need to follow these instructions:
### Install dependencies
```bash
yarn install
```
### Build the package
```bash
yarn build
```
Or from the repo root:
```bash
yarn build --filter=@langchain/ollama
```
### Run tests
Test files should live within a `tests/` file in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should
end in `.int.test.ts`:
```bash
$ yarn test
$ yarn test:int
```
### Lint & Format
Run the linter & formatter to ensure your code is up to standard:
```bash
yarn lint && yarn format
```
### Adding new entrypoints
If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/.release-it.json | {
"github": {
"release": true,
"autoGenerate": true,
"tokenRef": "GITHUB_TOKEN_RELEASE"
},
"npm": {
"versionArgs": ["--workspaces-update=false"]
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/.eslintrc.cjs | module.exports = {
extends: [
"airbnb-base",
"eslint:recommended",
"prettier",
"plugin:@typescript-eslint/recommended",
],
parserOptions: {
ecmaVersion: 12,
parser: "@typescript-eslint/parser",
project: "./tsconfig.json",
sourceType: "module",
},
plugins: ["@typescript-eslint", "no-instanceof"],
ignorePatterns: [
".eslintrc.cjs",
"scripts",
"node_modules",
"dist",
"dist-cjs",
"*.js",
"*.cjs",
"*.d.ts",
],
rules: {
"no-process-env": 2,
"no-instanceof/no-instanceof": 2,
"@typescript-eslint/explicit-module-boundary-types": 0,
"@typescript-eslint/no-empty-function": 0,
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
"class-methods-use-this": 0,
"import/extensions": [2, "ignorePackages"],
"import/no-extraneous-dependencies": [
"error",
{ devDependencies: ["**/*.test.ts"] },
],
"import/no-unresolved": 0,
"import/prefer-default-export": 0,
"keyword-spacing": "error",
"max-classes-per-file": 0,
"max-len": 0,
"no-await-in-loop": 0,
"no-bitwise": 0,
"no-console": 0,
"no-restricted-syntax": 0,
"no-shadow": 0,
"no-continue": 0,
"no-void": 0,
"no-underscore-dangle": 0,
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-return-await": 0,
"consistent-return": 0,
"no-else-return": 0,
"func-names": 0,
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
},
overrides: [
{
files: ["**/*.test.ts"],
rules: {
"@typescript-eslint/no-unused-vars": "off",
},
},
],
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
 * Resolve a path relative to this config file's directory into an
 * absolute path.
 * @param {string} relativePath
 * @returns {string}
 */
function abs(relativePath) {
  return resolve(dirname(fileURLToPath(import.meta.url)), relativePath);
}
/**
 * Build configuration consumed by `@langchain/scripts` (`lc_build`).
 * `internals` lists module specifiers that are treated as internal to the
 * bundle rather than externalized entrypoints.
 */
export const config = {
  // Fix: "ollama/browser" was single-quoted, which violates this package's
  // Prettier config (`singleQuote: false`) and the file's own style.
  internals: [/node\:/, /@langchain\/core\//, "ollama/browser"],
  entrypoints: {
    index: "index",
  },
  requiresOptionalDependency: [],
  tsConfigPath: resolve("./tsconfig.json"),
  cjsSource: "./dist-cjs",
  cjsDestination: "./dist",
  abs,
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/package.json | {
"name": "@langchain/ollama",
"version": "0.1.2",
"description": "Ollama integration for LangChain.js",
"type": "module",
"engines": {
"node": ">=18"
},
"main": "./index.js",
"types": "./index.d.ts",
"repository": {
"type": "git",
"url": "git@github.com:langchain-ai/langchainjs.git"
},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-ollama/",
"scripts": {
"build": "yarn turbo:command build:internal --filter=@langchain/ollama",
"build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking",
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
"lint": "yarn lint:eslint && yarn lint:dpdm",
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"clean": "rm -rf .turbo dist/",
"prepack": "yarn build",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"format": "prettier --config .prettierrc --write \"src\"",
"format:check": "prettier --config .prettierrc --check \"src\""
},
"author": "LangChain",
"license": "MIT",
"dependencies": {
"ollama": "^0.5.9",
"uuid": "^10.0.0"
},
"peerDependencies": {
"@langchain/core": ">=0.2.21 <0.4.0"
},
"devDependencies": {
"@jest/globals": "^29.5.0",
"@langchain/core": "workspace:*",
"@langchain/scripts": ">=0.1.0 <0.2.0",
"@langchain/standard-tests": "0.0.0",
"@swc/core": "^1.3.90",
"@swc/jest": "^0.2.29",
"@tsconfig/recommended": "^1.0.3",
"@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.12.0",
"dotenv": "^16.3.1",
"dpdm": "^3.12.0",
"eslint": "^8.33.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-prettier": "^8.6.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.5.0",
"jest-environment-node": "^29.6.4",
"prettier": "^2.8.3",
"release-it": "^17.6.0",
"rollup": "^4.5.2",
"ts-jest": "^29.1.0",
"typescript": "<5.2.0",
"zod": "^3.22.4",
"zod-to-json-schema": "^3.23.0"
},
"publishConfig": {
"access": "public"
},
"exports": {
".": {
"types": {
"import": "./index.d.ts",
"require": "./index.d.cts",
"default": "./index.d.ts"
},
"import": "./index.js",
"require": "./index.cjs"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"index.cjs",
"index.js",
"index.d.ts",
"index.d.cts"
]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"declaration": false
},
"exclude": ["node_modules", "dist", "docs", "**/tests"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/turbo.json | {
"extends": ["//"],
"pipeline": {
"build": {
"outputs": ["**/dist/**"]
},
"build:internal": {
"dependsOn": ["^build:internal"]
}
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-ollama/.prettierrc | {
"$schema": "https://json.schemastore.org/prettierrc",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"arrowParens": "always",
"requirePragma": false,
"insertPragma": false,
"proseWrap": "preserve",
"htmlWhitespaceSensitivity": "css",
"vueIndentScriptAndStyle": false,
"endOfLine": "lf"
}
|
/**
 * camelCased variants of the Ollama API request `options`. They are
 * translated to the snake_cased keys the Ollama HTTP API expects before
 * being sent (see `invocationParams` in llms.ts / chat_models.ts and
 * `_convertOptions` in embeddings.ts). The semantics of the individual
 * knobs are defined by Ollama's modelfile parameter documentation and are
 * intentionally not restated here.
 */
export interface OllamaCamelCaseOptions {
  numa?: boolean;
  numCtx?: number;
  numBatch?: number;
  numGpu?: number;
  mainGpu?: number;
  lowVram?: boolean;
  f16Kv?: boolean;
  logitsAll?: boolean;
  vocabOnly?: boolean;
  useMmap?: boolean;
  useMlock?: boolean;
  embeddingOnly?: boolean;
  numThread?: number;
  numKeep?: number;
  seed?: number;
  numPredict?: number;
  topK?: number;
  topP?: number;
  tfsZ?: number;
  typicalP?: number;
  repeatLastN?: number;
  temperature?: number;
  repeatPenalty?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
  mirostat?: number;
  mirostatTau?: number;
  mirostatEta?: number;
  penalizeNewline?: boolean;
  /**
   * Maps to the API's `keep_alive` field.
   * @default "5m"
   */
  keepAlive?: string | number;
  stop?: string[];
}
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama | lc_public_repos/langchainjs/libs/langchain-ollama/src/llms.ts | import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import type { StringWithAutocomplete } from "@langchain/core/utils/types";
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
import { Ollama as OllamaClient } from "ollama/browser";
import { OllamaCamelCaseOptions } from "./types.js";
export interface OllamaCallOptions extends BaseLanguageModelCallOptions {
  /**
   * Images to include with the request; forwarded as the `images` field of
   * the Ollama generate call (the API expects base64-encoded strings —
   * see the Ollama API docs).
   */
  images?: string[];
}
export interface OllamaInput extends BaseLLMParams, OllamaCamelCaseOptions {
  /**
   * The model to use when making requests.
   * @default "llama3"
   */
  model?: string;
  /**
   * Optionally override the base URL to make requests to.
   * This should only be set if your Ollama instance is being
   * served from a non-standard location.
   * @default "http://localhost:11434"
   */
  baseUrl?: string;
  /**
   * Response format passed through to the Ollama API (e.g. "json").
   */
  format?: string;
  /**
   * Optional HTTP Headers to include in the request.
   */
  headers?: Headers;
}
/**
* Class that represents the Ollama language model. It extends the base
* LLM class and implements the OllamaInput interface.
* @example
* ```typescript
* const ollama = new Ollama({
* baseUrl: "http://api.example.com",
* model: "llama3",
* });
*
* // Streaming translation from English to German
* const stream = await ollama.stream(
* `Translate "I love programming" into German.`
* );
*
* const chunks = [];
* for await (const chunk of stream) {
* chunks.push(chunk);
* }
*
* console.log(chunks.join(""));
* ```
*/
export class Ollama extends LLM<OllamaCallOptions> implements OllamaInput {
  static lc_name() {
    return "Ollama";
  }
  // Allows LangChain to serialize/deserialize this model configuration.
  lc_serializable = true;
  // Model name requested from the Ollama server.
  model = "llama3";
  baseUrl = "http://localhost:11434";
  // How long the model stays loaded after a request (API `keep_alive`).
  keepAlive: string | number = "5m";
  // Advanced Ollama request options (camelCase here; converted to the
  // snake_cased keys the API expects in `invocationParams`). Semantics
  // are defined by the Ollama API docs and not restated here.
  embeddingOnly?: boolean;
  f16KV?: boolean;
  frequencyPenalty?: number;
  logitsAll?: boolean;
  lowVram?: boolean;
  mainGpu?: number;
  mirostat?: number;
  mirostatEta?: number;
  mirostatTau?: number;
  numBatch?: number;
  numCtx?: number;
  numGpu?: number;
  numKeep?: number;
  numPredict?: number;
  numThread?: number;
  penalizeNewline?: boolean;
  presencePenalty?: number;
  repeatLastN?: number;
  repeatPenalty?: number;
  temperature?: number;
  stop?: string[];
  tfsZ?: number;
  topK?: number;
  topP?: number;
  typicalP?: number;
  useMLock?: boolean;
  useMMap?: boolean;
  vocabOnly?: boolean;
  // Response format; "json" is autocompleted but any string is accepted.
  format?: StringWithAutocomplete<"json">;
  // Underlying ollama SDK client used for all HTTP calls.
  client: OllamaClient;
  constructor(fields?: OllamaInput & BaseLLMParams) {
    super(fields ?? {});
    this.model = fields?.model ?? this.model;
    // Normalize the base URL by stripping a single trailing slash.
    this.baseUrl = fields?.baseUrl?.endsWith("/")
      ? fields?.baseUrl.slice(0, -1)
      : fields?.baseUrl ?? this.baseUrl;
    this.client = new OllamaClient({
      host: this.baseUrl,
      headers: fields?.headers,
    });
    this.keepAlive = fields?.keepAlive ?? this.keepAlive;
    // Copy the camelCased request options; note the class fields use a
    // different capitalization (f16KV/useMLock/useMMap) than the input
    // (f16Kv/useMlock/useMmap).
    this.embeddingOnly = fields?.embeddingOnly;
    this.f16KV = fields?.f16Kv;
    this.frequencyPenalty = fields?.frequencyPenalty;
    this.logitsAll = fields?.logitsAll;
    this.lowVram = fields?.lowVram;
    this.mainGpu = fields?.mainGpu;
    this.mirostat = fields?.mirostat;
    this.mirostatEta = fields?.mirostatEta;
    this.mirostatTau = fields?.mirostatTau;
    this.numBatch = fields?.numBatch;
    this.numCtx = fields?.numCtx;
    this.numGpu = fields?.numGpu;
    this.numKeep = fields?.numKeep;
    this.numPredict = fields?.numPredict;
    this.numThread = fields?.numThread;
    this.penalizeNewline = fields?.penalizeNewline;
    this.presencePenalty = fields?.presencePenalty;
    this.repeatLastN = fields?.repeatLastN;
    this.repeatPenalty = fields?.repeatPenalty;
    this.temperature = fields?.temperature;
    this.stop = fields?.stop;
    this.tfsZ = fields?.tfsZ;
    this.topK = fields?.topK;
    this.topP = fields?.topP;
    this.typicalP = fields?.typicalP;
    this.useMLock = fields?.useMlock;
    this.useMMap = fields?.useMmap;
    this.vocabOnly = fields?.vocabOnly;
    this.format = fields?.format;
  }
  // Identifier used in tracing metadata and serialized configs.
  _llmType() {
    return "ollama";
  }
  /**
   * Build the Ollama `generate` request payload (minus the prompt),
   * translating the camelCased fields into the snake_cased keys the
   * Ollama API expects.
   */
  invocationParams(options?: this["ParsedCallOptions"]) {
    return {
      model: this.model,
      format: this.format,
      keep_alive: this.keepAlive,
      images: options?.images,
      options: {
        embedding_only: this.embeddingOnly,
        f16_kv: this.f16KV,
        frequency_penalty: this.frequencyPenalty,
        logits_all: this.logitsAll,
        low_vram: this.lowVram,
        main_gpu: this.mainGpu,
        mirostat: this.mirostat,
        mirostat_eta: this.mirostatEta,
        mirostat_tau: this.mirostatTau,
        num_batch: this.numBatch,
        num_ctx: this.numCtx,
        num_gpu: this.numGpu,
        num_keep: this.numKeep,
        num_predict: this.numPredict,
        num_thread: this.numThread,
        penalize_newline: this.penalizeNewline,
        presence_penalty: this.presencePenalty,
        repeat_last_n: this.repeatLastN,
        repeat_penalty: this.repeatPenalty,
        temperature: this.temperature,
        // Per-call stop sequences override the instance-level default.
        stop: options?.stop ?? this.stop,
        tfs_z: this.tfsZ,
        top_k: this.topK,
        top_p: this.topP,
        typical_p: this.typicalP,
        use_mlock: this.useMLock,
        use_mmap: this.useMMap,
        vocab_only: this.vocabOnly,
      },
    };
  }
  /**
   * Stream generation chunks from the Ollama `generate` endpoint.
   * Intermediate chunks carry the generated text; the final (`done`)
   * chunk carries only timing/count metadata with empty text.
   */
  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const stream = await this.caller.call(async () =>
      this.client.generate({
        ...this.invocationParams(options),
        prompt,
        stream: true,
      })
    );
    for await (const chunk of stream) {
      // The abort signal is only checked between chunks.
      if (options.signal?.aborted) {
        throw new Error("This operation was aborted");
      }
      if (!chunk.done) {
        yield new GenerationChunk({
          text: chunk.response,
          generationInfo: {
            // Spread the chunk but blank out `response`, which is already
            // carried in `text`.
            ...chunk,
            response: undefined,
          },
        });
        await runManager?.handleLLMNewToken(chunk.response ?? "");
      } else {
        yield new GenerationChunk({
          text: "",
          generationInfo: {
            model: chunk.model,
            total_duration: chunk.total_duration,
            load_duration: chunk.load_duration,
            prompt_eval_count: chunk.prompt_eval_count,
            prompt_eval_duration: chunk.prompt_eval_duration,
            eval_count: chunk.eval_count,
            eval_duration: chunk.eval_duration,
          },
        });
      }
    }
  }
  /** @ignore */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    // Non-streaming call: drain the stream and join the text chunks.
    const chunks = [];
    for await (const chunk of this._streamResponseChunks(
      prompt,
      options,
      runManager
    )) {
      chunks.push(chunk.text);
    }
    return chunks.join("");
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama | lc_public_repos/langchainjs/libs/langchain-ollama/src/index.ts | export * from "./chat_models.js";
export * from "./embeddings.js";
export * from "./types.js";
export * from "./llms.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama | lc_public_repos/langchainjs/libs/langchain-ollama/src/chat_models.ts | import {
AIMessage,
UsageMetadata,
type BaseMessage,
} from "@langchain/core/messages";
import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
type BaseChatModelParams,
BaseChatModel,
LangSmithParams,
BaseChatModelCallOptions,
BindToolsInput,
} from "@langchain/core/language_models/chat_models";
import { Ollama } from "ollama/browser";
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
import { AIMessageChunk } from "@langchain/core/messages";
import type {
ChatRequest as OllamaChatRequest,
ChatResponse as OllamaChatResponse,
Message as OllamaMessage,
Tool as OllamaTool,
} from "ollama";
import { Runnable } from "@langchain/core/runnables";
import { convertToOpenAITool } from "@langchain/core/utils/function_calling";
import { concat } from "@langchain/core/utils/stream";
import {
convertOllamaMessagesToLangChain,
convertToOllamaMessages,
} from "./utils.js";
import { OllamaCamelCaseOptions } from "./types.js";
export interface ChatOllamaCallOptions extends BaseChatModelCallOptions {
  /**
   * An array of strings to stop on.
   */
  stop?: string[];
  /**
   * Tools the model may call; converted to the OpenAI tool format before
   * being sent to Ollama (see `invocationParams`).
   */
  tools?: BindToolsInput[];
}
export interface PullModelOptions {
  /**
   * Whether or not to stream the download.
   * @default true
   */
  stream?: boolean;
  /**
   * Forwarded to the Ollama `pull` API; presumably allows pulling from
   * registries over insecure connections — see the Ollama docs.
   */
  insecure?: boolean;
  /**
   * Whether or not to log the status of the download
   * to the console.
   * @default false
   */
  logProgress?: boolean;
}
/**
* Input to chat model class.
*/
export interface ChatOllamaInput
  extends BaseChatModelParams,
    OllamaCamelCaseOptions {
  /**
   * The model to invoke. If the model does not exist, it
   * will be pulled.
   * @default "llama3"
   */
  model?: string;
  /**
   * The host URL of the Ollama server.
   * @default "http://127.0.0.1:11434"
   */
  baseUrl?: string;
  /**
   * Optional HTTP Headers to include in the request.
   */
  headers?: Headers;
  /**
   * Whether or not to check the model exists on the local machine before
   * invoking it. If set to `true`, the model will be pulled if it does not
   * exist.
   * @default false
   */
  checkOrPullModel?: boolean;
  // NOTE(review): stored on the instance but not read within this file —
  // confirm whether the base class consumes it.
  streaming?: boolean;
  /**
   * Response format passed through to the Ollama API (e.g. "json").
   */
  format?: string;
}
/**
* Ollama chat model integration.
*
* Setup:
* Install `@langchain/ollama` and the Ollama app.
*
* ```bash
* npm install @langchain/ollama
* ```
*
* ## [Constructor args](https://api.js.langchain.com/classes/_langchain_ollama.ChatOllama.html#constructor)
*
* ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_ollama.ChatOllamaCallOptions.html)
*
* Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.
* They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
*
* ```typescript
* // When calling `.bind`, call options should be passed via the first argument
* const llmWithArgsBound = llm.bind({
* stop: ["\n"],
* tools: [...],
* });
*
* // When calling `.bindTools`, call options should be passed via the second argument
* const llmWithTools = llm.bindTools(
* [...],
* {
* stop: ["\n"],
* }
* );
* ```
*
* ## Examples
*
* <details open>
* <summary><strong>Instantiate</strong></summary>
*
* ```typescript
* import { ChatOllama } from '@langchain/ollama';
*
* const llm = new ChatOllama({
* model: "llama-3.1:8b",
* temperature: 0,
* // other params...
* });
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Invoking</strong></summary>
*
* ```typescript
* const input = `Translate "I love programming" into French.`;
*
* // Models also accept a list of chat messages or a formatted prompt
* const result = await llm.invoke(input);
* console.log(result);
* ```
*
* ```txt
* AIMessage {
* "content": "The translation of \"I love programming\" into French is:\n\n\"J'adore programmer.\"",
* "additional_kwargs": {},
* "response_metadata": {
* "model": "llama3.1:8b",
* "created_at": "2024-08-12T22:12:23.09468Z",
* "done_reason": "stop",
* "done": true,
* "total_duration": 3715571291,
* "load_duration": 35244375,
* "prompt_eval_count": 19,
* "prompt_eval_duration": 3092116000,
* "eval_count": 20,
* "eval_duration": 585789000
* },
* "tool_calls": [],
* "invalid_tool_calls": [],
* "usage_metadata": {
* "input_tokens": 19,
* "output_tokens": 20,
* "total_tokens": 39
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Streaming Chunks</strong></summary>
*
* ```typescript
* for await (const chunk of await llm.stream(input)) {
* console.log(chunk);
* }
* ```
*
* ```txt
* AIMessageChunk {
* "content": "The",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": " translation",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": " of",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": " \"",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": "I",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* ...
* AIMessageChunk {
* "content": "",
* "additional_kwargs": {},
* "response_metadata": {},
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": []
* }
* AIMessageChunk {
* "content": "",
* "additional_kwargs": {},
* "response_metadata": {
* "model": "llama3.1:8b",
* "created_at": "2024-08-12T22:13:22.22423Z",
* "done_reason": "stop",
* "done": true,
* "total_duration": 8599883208,
* "load_duration": 35975875,
* "prompt_eval_count": 19,
* "prompt_eval_duration": 7918195000,
* "eval_count": 20,
* "eval_duration": 643569000
* },
* "tool_calls": [],
* "tool_call_chunks": [],
* "invalid_tool_calls": [],
* "usage_metadata": {
* "input_tokens": 19,
* "output_tokens": 20,
* "total_tokens": 39
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Bind tools</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const GetWeather = {
* name: "GetWeather",
* description: "Get the current weather in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const GetPopulation = {
* name: "GetPopulation",
* description: "Get the current population in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
* const aiMsg = await llmWithTools.invoke(
* "Which city is hotter today and which is bigger: LA or NY?"
* );
* console.log(aiMsg.tool_calls);
* ```
*
* ```txt
* [
* {
* name: 'GetWeather',
* args: { location: 'Los Angeles, CA' },
* id: '49410cad-2163-415e-bdcd-d26938a9c8c5',
* type: 'tool_call'
* },
* {
* name: 'GetPopulation',
* args: { location: 'New York, NY' },
* id: '39e230e4-63ec-4fae-9df0-21c3abe735ad',
* type: 'tool_call'
* }
* ]
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Structured Output</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const Joke = z.object({
* setup: z.string().describe("The setup of the joke"),
* punchline: z.string().describe("The punchline to the joke"),
* rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
* }).describe('Joke to tell user.');
*
* const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
* const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
* console.log(jokeResult);
* ```
*
* ```txt
* {
* punchline: 'Why did the cat join a band? Because it wanted to be the purr-cussionist!',
* rating: 8,
* setup: 'A cat walks into a music store and asks the owner...'
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Usage Metadata</strong></summary>
*
* ```typescript
* const aiMsgForMetadata = await llm.invoke(input);
* console.log(aiMsgForMetadata.usage_metadata);
* ```
*
* ```txt
* { input_tokens: 19, output_tokens: 20, total_tokens: 39 }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Response Metadata</strong></summary>
*
* ```typescript
* const aiMsgForResponseMetadata = await llm.invoke(input);
* console.log(aiMsgForResponseMetadata.response_metadata);
* ```
*
* ```txt
* {
* model: 'llama3.1:8b',
* created_at: '2024-08-12T22:17:42.274795Z',
* done_reason: 'stop',
* done: true,
* total_duration: 6767071209,
* load_duration: 31628209,
* prompt_eval_count: 19,
* prompt_eval_duration: 6124504000,
* eval_count: 20,
* eval_duration: 608785000
* }
* ```
* </details>
*
* <br />
*/
export class ChatOllama
  extends BaseChatModel<ChatOllamaCallOptions, AIMessageChunk>
  implements ChatOllamaInput
{
  // Used for tracing; must match the exported class name.
  static lc_name() {
    return "ChatOllama";
  }
  // Model name requested from the Ollama server.
  model = "llama3";
  // Advanced Ollama request options (camelCase here; converted to the
  // snake_cased keys the API expects in `invocationParams`). Semantics
  // are defined by the Ollama API docs and not restated here.
  numa?: boolean;
  numCtx?: number;
  numBatch?: number;
  numGpu?: number;
  mainGpu?: number;
  lowVram?: boolean;
  f16Kv?: boolean;
  logitsAll?: boolean;
  vocabOnly?: boolean;
  useMmap?: boolean;
  useMlock?: boolean;
  embeddingOnly?: boolean;
  numThread?: number;
  numKeep?: number;
  seed?: number;
  numPredict?: number;
  topK?: number;
  topP?: number;
  tfsZ?: number;
  typicalP?: number;
  repeatLastN?: number;
  temperature?: number;
  repeatPenalty?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
  mirostat?: number;
  mirostatTau?: number;
  mirostatEta?: number;
  penalizeNewline?: boolean;
  // NOTE(review): stored from the constructor but not read within this
  // file — confirm whether the base class consumes it.
  streaming?: boolean;
  format?: string;
  // How long the model stays loaded after a request (API `keep_alive`).
  keepAlive?: string | number = "5m";
  // Underlying ollama SDK client used for all HTTP calls.
  client: Ollama;
  checkOrPullModel = false;
  baseUrl = "http://127.0.0.1:11434";
  constructor(fields?: ChatOllamaInput) {
    super(fields ?? {});
    // NOTE(review): the client receives the raw `fields?.baseUrl` (possibly
    // undefined, in which case the SDK falls back to its own default) rather
    // than the normalized `this.baseUrl` — confirm this is intentional.
    this.client = new Ollama({
      host: fields?.baseUrl,
      headers: fields?.headers,
    });
    this.baseUrl = fields?.baseUrl ?? this.baseUrl;
    this.model = fields?.model ?? this.model;
    this.numa = fields?.numa;
    this.numCtx = fields?.numCtx;
    this.numBatch = fields?.numBatch;
    this.numGpu = fields?.numGpu;
    this.mainGpu = fields?.mainGpu;
    this.lowVram = fields?.lowVram;
    this.f16Kv = fields?.f16Kv;
    this.logitsAll = fields?.logitsAll;
    this.vocabOnly = fields?.vocabOnly;
    this.useMmap = fields?.useMmap;
    this.useMlock = fields?.useMlock;
    this.embeddingOnly = fields?.embeddingOnly;
    this.numThread = fields?.numThread;
    this.numKeep = fields?.numKeep;
    this.seed = fields?.seed;
    this.numPredict = fields?.numPredict;
    this.topK = fields?.topK;
    this.topP = fields?.topP;
    this.tfsZ = fields?.tfsZ;
    this.typicalP = fields?.typicalP;
    this.repeatLastN = fields?.repeatLastN;
    this.temperature = fields?.temperature;
    this.repeatPenalty = fields?.repeatPenalty;
    this.presencePenalty = fields?.presencePenalty;
    this.frequencyPenalty = fields?.frequencyPenalty;
    this.mirostat = fields?.mirostat;
    this.mirostatTau = fields?.mirostatTau;
    this.mirostatEta = fields?.mirostatEta;
    this.penalizeNewline = fields?.penalizeNewline;
    this.streaming = fields?.streaming;
    this.format = fields?.format;
    this.keepAlive = fields?.keepAlive ?? this.keepAlive;
    this.checkOrPullModel = fields?.checkOrPullModel ?? this.checkOrPullModel;
  }
  // Identifier used in tracing metadata and serialized configs.
  _llmType() {
    return "ollama";
  }
  /**
   * Download a model onto the local machine.
   *
   * @param {string} model The name of the model to download.
   * @param {PullModelOptions | undefined} options Options for pulling the model.
   * @returns {Promise<void>}
   */
  async pull(model: string, options?: PullModelOptions): Promise<void> {
    const { stream, insecure, logProgress } = {
      stream: true,
      ...options,
    };
    if (stream) {
      // Drain the progress stream; chunks are only surfaced when logging.
      for await (const chunk of await this.client.pull({
        model,
        insecure,
        stream,
      })) {
        if (logProgress) {
          console.log(chunk);
        }
      }
    } else {
      const response = await this.client.pull({ model, insecure });
      if (logProgress) {
        console.log(response);
      }
    }
  }
  /**
   * Bind tool definitions to the model. Tools are converted to the
   * OpenAI tool format before being attached as call options.
   */
  override bindTools(
    tools: BindToolsInput[],
    kwargs?: Partial<this["ParsedCallOptions"]>
  ): Runnable<BaseLanguageModelInput, AIMessageChunk, ChatOllamaCallOptions> {
    return this.bind({
      tools: tools.map((tool) => convertToOpenAITool(tool)),
      ...kwargs,
    });
  }
  // Standard LangSmith tracing parameters derived from the invocation params.
  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
    const params = this.invocationParams(options)
    return {
      ls_provider: "ollama",
      ls_model_name: this.model,
      ls_model_type: "chat",
      ls_temperature: params.options?.temperature ?? undefined,
      ls_max_tokens: params.options?.num_predict ?? undefined,
      ls_stop: options.stop,
    };
  }
  /**
   * Build the Ollama `chat` request payload (minus the messages),
   * converting the camelCased fields to the snake_cased keys the API
   * expects. Throws if `tool_choice` is set, which Ollama does not support.
   */
  invocationParams(
    options?: this["ParsedCallOptions"]
  ): Omit<OllamaChatRequest, "messages"> {
    if (options?.tool_choice) {
      throw new Error("Tool choice is not supported for ChatOllama.");
    }
    return {
      model: this.model,
      format: this.format,
      keep_alive: this.keepAlive,
      options: {
        numa: this.numa,
        num_ctx: this.numCtx,
        num_batch: this.numBatch,
        num_gpu: this.numGpu,
        main_gpu: this.mainGpu,
        low_vram: this.lowVram,
        f16_kv: this.f16Kv,
        logits_all: this.logitsAll,
        vocab_only: this.vocabOnly,
        use_mmap: this.useMmap,
        use_mlock: this.useMlock,
        embedding_only: this.embeddingOnly,
        num_thread: this.numThread,
        num_keep: this.numKeep,
        seed: this.seed,
        num_predict: this.numPredict,
        top_k: this.topK,
        top_p: this.topP,
        tfs_z: this.tfsZ,
        typical_p: this.typicalP,
        repeat_last_n: this.repeatLastN,
        temperature: this.temperature,
        repeat_penalty: this.repeatPenalty,
        presence_penalty: this.presencePenalty,
        frequency_penalty: this.frequencyPenalty,
        mirostat: this.mirostat,
        mirostat_tau: this.mirostatTau,
        mirostat_eta: this.mirostatEta,
        penalize_newline: this.penalizeNewline,
        stop: options?.stop,
      },
      tools: options?.tools?.length
        ? (options.tools.map((tool) =>
            convertToOpenAITool(tool)
          ) as OllamaTool[])
        : undefined,
    };
  }
  /**
   * Check if a model exists on the local machine.
   *
   * @param {string} model The name of the model to check.
   * @returns {Promise<boolean>} Whether or not the model exists.
   */
  private async checkModelExistsOnMachine(model: string): Promise<boolean> {
    const { models } = await this.client.list();
    // A bare model name matches the ":latest" tag as well.
    return !!models.find(
      (m) => m.name === model || m.name === `${model}:latest`
    );
  }
  /**
   * Generate a chat result by consuming the streaming implementation and
   * concatenating the chunks into a single AIMessage.
   */
  async _generate(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<ChatResult> {
    if (this.checkOrPullModel) {
      if (!(await this.checkModelExistsOnMachine(this.model))) {
        await this.pull(this.model, {
          logProgress: true,
        });
      }
    }
    let finalChunk: AIMessageChunk | undefined;
    for await (const chunk of this._streamResponseChunks(
      messages,
      options,
      runManager
    )) {
      if (!finalChunk) {
        finalChunk = chunk.message;
      } else {
        finalChunk = concat(finalChunk, chunk.message);
      }
    }
    // Convert from AIMessageChunk to AIMessage since `generate` expects AIMessage.
    const nonChunkMessage = new AIMessage({
      id: finalChunk?.id,
      content: finalChunk?.content ?? "",
      tool_calls: finalChunk?.tool_calls,
      response_metadata: finalChunk?.response_metadata,
      usage_metadata: finalChunk?.usage_metadata,
    });
    return {
      generations: [
        {
          text:
            typeof nonChunkMessage.content === "string"
              ? nonChunkMessage.content
              : "",
          message: nonChunkMessage,
        },
      ],
    };
  }
  /**
   * Stream chat generation chunks from the Ollama `chat` endpoint.
   * When tools are bound, the request falls back to a single non-streaming
   * call (Ollama does not support streaming with tools) and the full
   * response is emitted as one chunk. Otherwise, text chunks are yielded
   * as they arrive and a final empty chunk carries the response metadata
   * and accumulated usage counts.
   */
  async *_streamResponseChunks(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    if (this.checkOrPullModel) {
      if (!(await this.checkModelExistsOnMachine(this.model))) {
        await this.pull(this.model, {
          logProgress: true,
        });
      }
    }
    const params = this.invocationParams(options);
    // TODO: remove cast after SDK adds support for tool calls
    const ollamaMessages = convertToOllamaMessages(messages) as OllamaMessage[];
    const usageMetadata: UsageMetadata = {
      input_tokens: 0,
      output_tokens: 0,
      total_tokens: 0,
    };
    if (params.tools && params.tools.length > 0) {
      const toolResult = await this.client.chat({
        ...params,
        messages: ollamaMessages,
        stream: false, // Ollama currently does not support streaming with tools
      });
      const { message: responseMessage, ...rest } = toolResult;
      usageMetadata.input_tokens += rest.prompt_eval_count ?? 0;
      usageMetadata.output_tokens += rest.eval_count ?? 0;
      usageMetadata.total_tokens =
        usageMetadata.input_tokens + usageMetadata.output_tokens;
      yield new ChatGenerationChunk({
        text: responseMessage.content,
        message: convertOllamaMessagesToLangChain(responseMessage, {
          responseMetadata: rest,
          usageMetadata,
        }),
      });
      return runManager?.handleLLMNewToken(responseMessage.content);
    }
    const stream = await this.client.chat({
      ...params,
      messages: ollamaMessages,
      stream: true,
    });
    let lastMetadata: Omit<OllamaChatResponse, "message"> | undefined;
    for await (const chunk of stream) {
      if (options.signal?.aborted) {
        // NOTE(review): `abort()` cancels all in-flight requests on this
        // shared client, and the loop is not exited explicitly here (the
        // aborted stream is expected to terminate) — confirm intended.
        this.client.abort();
      }
      const { message: responseMessage, ...rest } = chunk;
      usageMetadata.input_tokens += rest.prompt_eval_count ?? 0;
      usageMetadata.output_tokens += rest.eval_count ?? 0;
      usageMetadata.total_tokens =
        usageMetadata.input_tokens + usageMetadata.output_tokens;
      lastMetadata = rest;
      yield new ChatGenerationChunk({
        text: responseMessage.content ?? "",
        message: convertOllamaMessagesToLangChain(responseMessage),
      });
      await runManager?.handleLLMNewToken(responseMessage.content ?? "");
    }
    // Yield the `response_metadata` as the final chunk.
    yield new ChatGenerationChunk({
      text: "",
      message: new AIMessageChunk({
        content: "",
        response_metadata: lastMetadata,
        usage_metadata: usageMetadata,
      }),
    });
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama | lc_public_repos/langchainjs/libs/langchain-ollama/src/embeddings.ts | import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings";
import { Ollama } from "ollama/browser";
import type { Options as OllamaOptions } from "ollama";
import { OllamaCamelCaseOptions } from "./types.js";
/**
* Interface for OllamaEmbeddings parameters. Extends EmbeddingsParams and
* defines additional parameters specific to the OllamaEmbeddings class.
*/
interface OllamaEmbeddingsParams extends EmbeddingsParams {
  /**
   * The Ollama model to use for embeddings.
   * @default "mxbai-embed-large"
   */
  model?: string;
  /**
   * Base URL of the Ollama server
   * @default "http://localhost:11434"
   */
  baseUrl?: string;
  /**
   * How long the model stays loaded after the request (API `keep_alive`).
   * @default "5m"
   */
  keepAlive?: string | number;
  /**
   * Whether or not to truncate the input text to fit inside the model's
   * context window.
   * @default false
   */
  truncate?: boolean;
  /**
   * Optional HTTP Headers to include in the request.
   */
  headers?: Headers;
  /**
   * Advanced Ollama API request parameters in camelCase, see
   * https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
   * for details of the available parameters.
   */
  requestOptions?: OllamaCamelCaseOptions & Partial<OllamaOptions>;
}
/**
 * Embeddings client backed by a local or remote Ollama server's `embed`
 * endpoint.
 */
export class OllamaEmbeddings extends Embeddings {
  // Embedding model requested from the Ollama server.
  model = "mxbai-embed-large";
  baseUrl = "http://localhost:11434";
  // How long the model stays loaded after a request (API `keep_alive`).
  keepAlive: string | number = "5m";
  // Snake_cased request options, produced by `_convertOptions`.
  requestOptions?: Partial<OllamaOptions>;
  // Underlying ollama SDK client used for all HTTP calls.
  client: Ollama;
  // Whether to truncate input text to fit the model's context window.
  truncate = false;
  constructor(fields?: OllamaEmbeddingsParams) {
    // Default to one request at a time; callers may override concurrency.
    super({ maxConcurrency: 1, ...fields });
    // NOTE(review): the client receives the raw `fields?.baseUrl` (possibly
    // undefined, in which case the SDK falls back to its own default) rather
    // than `this.baseUrl` — confirm this is intentional.
    this.client = new Ollama({
      host: fields?.baseUrl,
      headers: fields?.headers,
    });
    this.baseUrl = fields?.baseUrl ?? this.baseUrl;
    this.model = fields?.model ?? this.model;
    this.keepAlive = fields?.keepAlive ?? this.keepAlive;
    this.truncate = fields?.truncate ?? this.truncate;
    this.requestOptions = fields?.requestOptions
      ? this._convertOptions(fields?.requestOptions)
      : undefined;
  }
  /** convert camelCased Ollama request options like "useMMap" to
   * the snake_cased equivalent which the ollama API actually uses.
   * Used only for consistency with the llms/Ollama and chatModels/Ollama classes
   */
  _convertOptions(
    requestOptions: OllamaCamelCaseOptions
  ): Partial<OllamaOptions> {
    const snakeCasedOptions: Partial<OllamaOptions> = {};
    // Lookup table from camelCase option names to the API's snake_case keys.
    const mapping: Record<keyof OllamaCamelCaseOptions, string> = {
      embeddingOnly: "embedding_only",
      frequencyPenalty: "frequency_penalty",
      keepAlive: "keep_alive",
      logitsAll: "logits_all",
      lowVram: "low_vram",
      mainGpu: "main_gpu",
      mirostat: "mirostat",
      mirostatEta: "mirostat_eta",
      mirostatTau: "mirostat_tau",
      numBatch: "num_batch",
      numCtx: "num_ctx",
      numGpu: "num_gpu",
      numKeep: "num_keep",
      numPredict: "num_predict",
      numThread: "num_thread",
      penalizeNewline: "penalize_newline",
      presencePenalty: "presence_penalty",
      repeatLastN: "repeat_last_n",
      repeatPenalty: "repeat_penalty",
      temperature: "temperature",
      stop: "stop",
      tfsZ: "tfs_z",
      topK: "top_k",
      topP: "top_p",
      typicalP: "typical_p",
      useMlock: "use_mlock",
      useMmap: "use_mmap",
      vocabOnly: "vocab_only",
      f16Kv: "f16_kv",
      numa: "numa",
      seed: "seed",
    };
    for (const [key, value] of Object.entries(requestOptions)) {
      const snakeCasedOption = mapping[key as keyof OllamaCamelCaseOptions];
      if (snakeCasedOption) {
        snakeCasedOptions[snakeCasedOption as keyof OllamaOptions] = value;
      } else {
        // Just pass unknown options through
        snakeCasedOptions[key as keyof OllamaOptions] = value;
      }
    }
    return snakeCasedOptions;
  }
  /** Embed a list of documents; returns one embedding per input text. */
  async embedDocuments(texts: string[]): Promise<number[][]> {
    return this.embeddingWithRetry(texts);
  }
  /** Embed a single query string. */
  async embedQuery(text: string) {
    return (await this.embeddingWithRetry([text]))[0];
  }
  // Calls the Ollama `embed` endpoint through the retrying caller.
  private async embeddingWithRetry(texts: string[]): Promise<number[][]> {
    const res = await this.caller.call(() =>
      this.client.embed({
        model: this.model,
        input: texts,
        keep_alive: this.keepAlive,
        options: this.requestOptions,
        truncate: this.truncate,
      })
    );
    return res.embeddings;
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama | lc_public_repos/langchainjs/libs/langchain-ollama/src/utils.ts | import {
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
MessageContentText,
SystemMessage,
ToolMessage,
UsageMetadata,
} from "@langchain/core/messages";
import type {
Message as OllamaMessage,
ToolCall as OllamaToolCall,
} from "ollama";
import { v4 as uuidv4 } from "uuid";
/**
 * Maps a raw Ollama chat message onto a LangChain `AIMessageChunk`.
 *
 * Tool calls on the Ollama message become `tool_call_chunks`, each given a
 * freshly generated UUID since Ollama does not supply call ids.
 *
 * @param messages - A single Ollama message (name kept for compatibility).
 * @param extra - Optional response/usage metadata to attach to the chunk.
 * @returns The equivalent LangChain message chunk.
 */
export function convertOllamaMessagesToLangChain(
  messages: OllamaMessage,
  extra?: {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    responseMetadata?: Record<string, any>;
    usageMetadata?: UsageMetadata;
  }
): AIMessageChunk {
  const toolCallChunks = messages.tool_calls?.map((toolCall) => ({
    name: toolCall.function.name,
    args: JSON.stringify(toolCall.function.arguments),
    type: "tool_call_chunk" as const,
    index: 0,
    id: uuidv4(),
  }));
  return new AIMessageChunk({
    content: messages.content ?? "",
    tool_call_chunks: toolCallChunks,
    response_metadata: extra?.responseMetadata,
    usage_metadata: extra?.usageMetadata,
  });
}
/**
 * Pulls the base64 payload out of a `data:` URL.
 *
 * @param dataUrl - A string expected to look like `data:<mime>;base64,<payload>`.
 * @returns The base64 payload, or an empty string when the input does not match.
 */
function extractBase64FromDataUrl(dataUrl: string): string {
  const base64Pattern = /^data:.*?;base64,(.*)$/;
  const parsed = base64Pattern.exec(dataUrl);
  return parsed === null ? "" : parsed[1];
}
/**
 * Converts a LangChain AI message into one or more Ollama assistant messages.
 *
 * String content becomes a single assistant turn. Structured content emits
 * one assistant turn per text part, plus a trailing tool-call turn when the
 * content contains `tool_use` parts backed by `tool_calls` on the message.
 *
 * @throws If `tool_use` content is present but the message has no tool calls.
 */
function convertAMessagesToOllama(messages: AIMessage): OllamaMessage[] {
  // Plain string content maps directly to a single assistant message.
  if (typeof messages.content === "string") {
    return [
      {
        role: "assistant",
        content: messages.content,
      },
    ];
  }

  const result: OllamaMessage[] = [];
  for (const part of messages.content) {
    if (part.type === "text" && typeof part.text === "string") {
      result.push({ role: "assistant", content: part.text });
    }
  }

  const hasToolUse = messages.content.some((part) => part.type === "tool_use");
  if (hasToolUse) {
    // `tool_use` content types are accepted only when the message carries
    // the corresponding tool calls.
    if (!messages.tool_calls?.length) {
      throw new Error(
        "'tool_use' content type is not supported without tool calls."
      );
    }
    const toolCalls: OllamaToolCall[] = messages.tool_calls.map((tc) => ({
      id: tc.id,
      type: "function",
      function: {
        name: tc.name,
        arguments: tc.args,
      },
    }));
    result.push({
      role: "assistant",
      tool_calls: toolCalls,
      content: "",
    });
  }
  return result;
}
/**
 * Converts a LangChain human/generic message into Ollama user messages.
 *
 * Text parts map to plain user turns; `image_url` parts (string or
 * `{ url }` object form) become user turns carrying the base64 image data.
 *
 * @throws For any content part that is neither text nor a usable image URL.
 */
function convertHumanGenericMessagesToOllama(
  message: HumanMessage
): OllamaMessage[] {
  if (typeof message.content === "string") {
    return [
      {
        role: "user",
        content: message.content,
      },
    ];
  }
  return message.content.map((part) => {
    if (part.type === "text") {
      return {
        role: "user",
        content: part.text,
      };
    }
    if (part.type === "image_url") {
      // Accept both the bare-string and the `{ url: string }` shapes.
      const imageUrl =
        typeof part.image_url === "string"
          ? part.image_url
          : typeof part.image_url.url === "string" && part.image_url.url
          ? part.image_url.url
          : undefined;
      if (imageUrl !== undefined) {
        return {
          role: "user",
          content: "",
          images: [extractBase64FromDataUrl(imageUrl)],
        };
      }
    }
    throw new Error(`Unsupported content type: ${part.type}`);
  });
}
/**
 * Converts a LangChain system message into Ollama system messages.
 *
 * Accepts string content or content composed entirely of text parts; each
 * text part becomes its own system turn.
 *
 * @throws If any content part is not plain text.
 */
function convertSystemMessageToOllama(message: SystemMessage): OllamaMessage[] {
  const { content } = message;
  if (typeof content === "string") {
    return [
      {
        role: "system",
        content,
      },
    ];
  }
  const allText = content.every(
    (part) => part.type === "text" && typeof part.text === "string"
  );
  if (!allText) {
    throw new Error(
      `Unsupported content type(s): ${content
        .map((part) => part.type)
        .join(", ")}`
    );
  }
  return (content as MessageContentText[]).map((part) => ({
    role: "system",
    content: part.text,
  }));
}
/**
 * Converts a LangChain tool message into a single Ollama tool message.
 *
 * @throws If the tool message content is not a plain string (structured
 *   tool output is not supported by the Ollama message format here).
 */
function convertToolMessageToOllama(message: ToolMessage): OllamaMessage[] {
  const { content } = message;
  if (typeof content !== "string") {
    throw new Error("Non string tool message content is not supported");
  }
  return [
    {
      role: "tool",
      content,
    },
  ];
}
/**
 * Converts a LangChain message history into the flat list of Ollama
 * messages expected by the Ollama chat API.
 *
 * @throws For message types other than human/generic, ai, system, or tool.
 */
export function convertToOllamaMessages(
  messages: BaseMessage[]
): OllamaMessage[] {
  return messages.flatMap((msg) => {
    const messageType = msg._getType();
    switch (messageType) {
      case "human":
      case "generic":
        return convertHumanGenericMessagesToOllama(msg);
      case "ai":
        return convertAMessagesToOllama(msg);
      case "system":
        return convertSystemMessageToOllama(msg);
      case "tool":
        return convertToolMessageToOllama(msg as ToolMessage);
      default:
        throw new Error(`Unsupported message type: ${messageType}`);
    }
  });
}
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama/src | lc_public_repos/langchainjs/libs/langchain-ollama/src/tests/chat_models.standard.int.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { RunnableLambda } from "@langchain/core/runnables";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatOllama, ChatOllamaCallOptions } from "../chat_models.js";
const currentWeatherName = "get_current_weather";
const currentWeatherDescription =
"Get the current weather for a given location.";
const currentWeatherSchema = z
.object({
location: z
.string()
.describe("The city to get the weather for, e.g. San Francisco"),
})
.describe(currentWeatherDescription);
// The function calling tests can be flaky due to the model not invoking a tool.
// If the tool calling tests fail because a tool was not called, retry them.
// If they fail for another reason, there is an actual issue.
class ChatOllamaStandardIntegrationTests extends ChatModelIntegrationTests<
ChatOllamaCallOptions,
AIMessageChunk
> {
constructor() {
super({
Cls: ChatOllama,
chatModelHasToolCalling: true,
chatModelHasStructuredOutput: true,
constructorArgs: {
model: "llama3-groq-tool-use",
},
});
}
/**
* Overriding base method because Ollama requires a different
* prompting method to reliably invoke tools.
*/
async testWithStructuredOutput() {
if (!this.chatModelHasStructuredOutput) {
console.log("Test requires withStructuredOutput. Skipping...");
return;
}
const model = new this.Cls(this.constructorArgs);
if (!model.withStructuredOutput) {
throw new Error(
"withStructuredOutput undefined. Cannot test tool message histories."
);
}
const modelWithTools = model.withStructuredOutput(currentWeatherSchema, {
name: currentWeatherName,
});
const result = await modelWithTools.invoke(
"What's the weather like today in San Francisco? Use the 'get_current_weather' tool to respond."
);
expect(result.location).toBeDefined();
expect(typeof result.location).toBe("string");
}
/**
* Overriding base method because Ollama requires a different
* prompting method to reliably invoke tools.
*/
async testBindToolsWithRunnableToolLike() {
const model = new ChatOllama(this.constructorArgs);
const runnableLike = RunnableLambda.from((_) => {
// no-op
}).asTool({
name: currentWeatherName,
description: currentWeatherDescription,
schema: currentWeatherSchema,
});
const modelWithTools = model.bindTools([runnableLike]);
const result = await modelWithTools.invoke(
"What's the weather like today in San Francisco? Use the 'get_current_weather' tool to respond."
);
expect(result.tool_calls).toHaveLength(1);
if (!result.tool_calls) {
throw new Error("result.tool_calls is undefined");
}
const { tool_calls } = result;
expect(tool_calls[0].name).toBe(currentWeatherName);
}
/**
* Overriding base method because Ollama requires a different
* prompting method to reliably invoke tools.
*/
async testBindToolsWithOpenAIFormattedTools() {
const model = new ChatOllama(this.constructorArgs);
const modelWithTools = model.bindTools([
{
type: "function",
function: {
name: currentWeatherName,
description: currentWeatherDescription,
parameters: zodToJsonSchema(currentWeatherSchema),
},
},
]);
const result = await modelWithTools.invoke(
"What's the weather like today in San Francisco? Use the 'get_current_weather' tool to respond."
);
expect(result.tool_calls).toHaveLength(1);
if (!result.tool_calls) {
throw new Error("result.tool_calls is undefined");
}
const { tool_calls } = result;
expect(tool_calls[0].name).toBe(currentWeatherName);
}
/**
* Overriding base method because Ollama requires a different
* prompting method to reliably invoke tools.
*/
async testWithStructuredOutputIncludeRaw() {
const model = new ChatOllama(this.constructorArgs);
const modelWithTools = model.withStructuredOutput(currentWeatherSchema, {
includeRaw: true,
name: currentWeatherName,
});
const result = await modelWithTools.invoke(
"What's the weather like today in San Francisco? Use the 'get_current_weather' tool to respond."
);
expect(result.raw).toBeInstanceOf(this.invokeResponseType);
expect(result.parsed.location).toBeDefined();
expect(typeof result.parsed.location).toBe("string");
}
}
const testClass = new ChatOllamaStandardIntegrationTests();
test("ChatOllamaStandardIntegrationTests", async () => {
const testResults = await testClass.runTests();
expect(testResults).toBe(true);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama/src | lc_public_repos/langchainjs/libs/langchain-ollama/src/tests/llms.int.test.ts | import { test, expect } from "@jest/globals";
import * as fs from "node:fs/promises";
import { fileURLToPath } from "node:url";
import * as path from "node:path";
import { PromptTemplate } from "@langchain/core/prompts";
import {
BytesOutputParser,
StringOutputParser,
} from "@langchain/core/output_parsers";
import { Ollama } from "../llms.js";
// Smoke test: a bare invoke against a local Ollama server should resolve.
test("test call", async () => {
  const ollama = new Ollama({});
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const result = await ollama.invoke(
    "What is a good name for a company that makes colorful socks?"
  );
  // console.log({ result });
});
test("test call with callback", async () => {
const ollama = new Ollama();
const tokens: string[] = [];
const result = await ollama.invoke(
"What is a good name for a company that makes colorful socks?",
{
callbacks: [
{
handleLLMNewToken(token) {
tokens.push(token);
},
},
],
}
);
expect(tokens.length).toBeGreaterThan(1);
expect(result).toEqual(tokens.join(""));
});
test("test streaming call", async () => {
const ollama = new Ollama();
const stream = await ollama.stream(
`Translate "I love programming" into German.`
);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
// console.log(chunks.join(""));
expect(chunks.length).toBeGreaterThan(1);
});
test("should abort the request", async () => {
const ollama = new Ollama();
const controller = new AbortController();
await expect(() => {
const ret = ollama.invoke("Respond with an extremely verbose response", {
signal: controller.signal,
});
controller.abort();
return ret;
}).rejects.toThrow("This operation was aborted");
});
test("should stream through with a bytes output parser", async () => {
const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.
User: {input}
AI:`;
const prompt = PromptTemplate.fromTemplate(TEMPLATE);
const ollama = new Ollama({
model: "llama3",
});
const outputParser = new BytesOutputParser();
const chain = prompt.pipe(ollama).pipe(outputParser);
const stream = await chain.stream({
input: `Translate "I love programming" into German.`,
});
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
// console.log(chunks.join(""));
expect(chunks.length).toBeGreaterThan(1);
});
test("JSON mode", async () => {
const TEMPLATE = `You are a pirate named Patchy. All responses must be in pirate dialect and in JSON format, with a property named "response" followed by the value.
User: {input}
AI:`;
// Infer the input variables from the template
const prompt = PromptTemplate.fromTemplate(TEMPLATE);
const ollama = new Ollama({
model: "llama3",
format: "json",
});
const outputParser = new StringOutputParser();
const chain = prompt.pipe(ollama).pipe(outputParser);
const res = await chain.invoke({
input: `Translate "I love programming" into German.`,
});
// console.log(res);
expect(JSON.parse(res).response).toBeDefined();
});
// Multimodal smoke test: sends a local JPEG (base64-encoded) to the llava model.
test("Test Ollama with an image", async () => {
  const __filename = fileURLToPath(import.meta.url);
  const __dirname = path.dirname(__filename);
  const imageData = await fs.readFile(path.join(__dirname, "/data/hotdog.jpg"));
  const model = new Ollama({
    model: "llava",
  }).bind({
    images: [imageData.toString("base64")],
  });
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("What's in this image?");
  // console.log({ res });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama/src | lc_public_repos/langchainjs/libs/langchain-ollama/src/tests/chat_models.standard.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelUnitTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatOllama, ChatOllamaCallOptions } from "../chat_models.js";
/**
 * Runs the shared LangChain standard unit-test suite against ChatOllama.
 */
class ChatOllamaStandardUnitTests extends ChatModelUnitTests<
  ChatOllamaCallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: ChatOllama,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {
        model: "llama3-groq-tool-use",
      },
    });
  }

  // ChatOllama talks to a local server, so the API-key init check is skipped.
  testChatModelInitApiKey() {
    this.skipTestMessage(
      "testChatModelInitApiKey",
      "ChatOllama",
      "API key is not required for ChatOllama"
    );
  }
}
// Execute the whole standard suite as a single Jest test.
const testClass = new ChatOllamaStandardUnitTests();

test("ChatOllamaStandardUnitTests", () => {
  const testResults = testClass.runTests();
  expect(testResults).toBe(true);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama/src | lc_public_repos/langchainjs/libs/langchain-ollama/src/tests/chat_models.int.test.ts | import { test, expect } from "@jest/globals";
import * as fs from "node:fs/promises";
import { fileURLToPath } from "node:url";
import * as path from "node:path";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
import { PromptTemplate } from "@langchain/core/prompts";
import {
BytesOutputParser,
StringOutputParser,
} from "@langchain/core/output_parsers";
import { ChatOllama } from "../chat_models.js";
test("test invoke", async () => {
const ollama = new ChatOllama({
maxRetries: 1,
});
const result = await ollama.invoke([
"human",
"What is a good name for a company that makes colorful socks?",
]);
expect(result).toBeDefined();
expect(typeof result.content).toBe("string");
expect(result.content.length).toBeGreaterThan(1);
});
test("test call with callback", async () => {
const ollama = new ChatOllama({
maxRetries: 1,
});
const tokens: string[] = [];
const result = await ollama.invoke(
"What is a good name for a company that makes colorful socks?",
{
callbacks: [
{
handleLLMNewToken(token: string) {
tokens.push(token);
},
},
],
}
);
expect(tokens.length).toBeGreaterThan(1);
expect(result.content).toEqual(tokens.join(""));
});
test("test streaming call", async () => {
const ollama = new ChatOllama({
maxRetries: 1,
});
const stream = await ollama.stream(
`Translate "I love programming" into German.`
);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(1);
});
test("should abort the request", async () => {
const ollama = new ChatOllama({
maxRetries: 1,
});
const controller = new AbortController();
await expect(() => {
const ret = ollama.invoke("Respond with an extremely verbose response", {
signal: controller.signal,
});
controller.abort();
return ret;
}).rejects.toThrow("This operation was aborted");
});
test("Test multiple messages", async () => {
const model = new ChatOllama({
maxRetries: 1,
});
const res = await model.invoke([
new HumanMessage({ content: "My name is Jonas" }),
]);
expect(res).toBeDefined();
expect(res.content).toBeDefined();
const res2 = await model.invoke([
new HumanMessage("My name is Jonas"),
new AIMessage(
"Hello Jonas! It's nice to meet you. Is there anything I can help you with?"
),
new HumanMessage("What did I say my name was?"),
]);
expect(res2).toBeDefined();
expect(res2.content).toBeDefined();
});
test("should stream through with a bytes output parser", async () => {
const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.
User: {input}
AI:`;
// Infer the input variables from the template
const prompt = PromptTemplate.fromTemplate(TEMPLATE);
const ollama = new ChatOllama({
maxRetries: 1,
});
const outputParser = new BytesOutputParser();
const chain = prompt.pipe(ollama).pipe(outputParser);
const stream = await chain.stream({
input: `Translate "I love programming" into German.`,
});
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(1);
});
test("JSON mode", async () => {
const TEMPLATE = `You are a pirate named Patchy. All responses must be in pirate dialect and in JSON format, with a property named "response" followed by the value.
User: {input}
AI:`;
// Infer the input variables from the template
const prompt = PromptTemplate.fromTemplate(TEMPLATE);
const ollama = new ChatOllama({
model: "llama3",
format: "json",
maxRetries: 1,
});
const outputParser = new StringOutputParser();
const chain = prompt.pipe(ollama).pipe(outputParser);
const res = await chain.invoke({
input: `Translate "I love programming" into German.`,
});
expect(JSON.parse(res).response).toBeDefined();
});
test.skip("Test ChatOllama with an image", async () => {
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const imageData = await fs.readFile(path.join(__dirname, "/data/hotdog.jpg"));
const chat = new ChatOllama({
model: "llava",
maxRetries: 1,
});
const res = await chat.invoke([
new HumanMessage({
content: [
{
type: "text",
text: "What is in this image?",
},
{
type: "image_url",
image_url: `data:image/jpeg;base64,${imageData.toString("base64")}`,
},
],
}),
]);
expect(res).toBeDefined();
expect(res.content).toBeDefined();
});
test("test max tokens (numPredict)", async () => {
const ollama = new ChatOllama({
numPredict: 10,
maxRetries: 1,
}).pipe(new StringOutputParser());
const stream = await ollama.stream(
"explain quantum physics to me in as many words as possible"
);
let numTokens = 0;
let response = "";
for await (const s of stream) {
numTokens += 1;
response += s;
}
// Ollama doesn't always stream back the exact number of tokens, so we
// check for a number which is slightly above the `numPredict`.
expect(numTokens).toBeLessThanOrEqual(12);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama/src | lc_public_repos/langchainjs/libs/langchain-ollama/src/tests/embeddings.int.test.ts | import { test, expect } from "@jest/globals";
import { OllamaEmbeddings } from "../embeddings.js";
test("Test OllamaEmbeddings.embedQuery", async () => {
const embeddings = new OllamaEmbeddings();
const res = await embeddings.embedQuery("Hello world");
expect(res).toHaveLength(1024);
expect(typeof res[0]).toBe("number");
});
test("Test OllamaEmbeddings.embedDocuments", async () => {
const embeddings = new OllamaEmbeddings();
const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]);
expect(res).toHaveLength(2);
expect(res[0]).toHaveLength(1024);
expect(typeof res[0][0]).toBe("number");
expect(res[1]).toHaveLength(1024);
expect(typeof res[1][0]).toBe("number");
});
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama/src | lc_public_repos/langchainjs/libs/langchain-ollama/src/tests/chat_models-tools.int.test.ts | import {
HumanMessage,
AIMessage,
ToolMessage,
AIMessageChunk,
} from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { concat } from "@langchain/core/utils/stream";
import { ChatOllama } from "../chat_models.js";
const messageHistory = [
new HumanMessage("What's the weather like today in Paris?"),
new AIMessage({
content: "",
tool_calls: [
{
id: "89a1e453-0bce-4de3-a456-c54bed09c520",
name: "get_current_weather",
args: {
location: "Paris, France",
},
},
],
}),
new ToolMessage({
tool_call_id: "89a1e453-0bce-4de3-a456-c54bed09c520",
content: "22",
}),
new AIMessage("The weather in Paris is 22 degrees."),
new HumanMessage(
"What's the weather like today in San Francisco? Ensure you use the 'get_current_weather' tool."
),
];
const weatherTool = tool((_) => "Da weather is weatherin", {
name: "get_current_weather",
description: "Get the current weather in a given location",
schema: z.object({
location: z.string().describe("The city and state, e.g. San Francisco, CA"),
}),
});
test("Ollama can call tools", async () => {
const model = new ChatOllama({
model: "llama3-groq-tool-use",
maxRetries: 1,
}).bindTools([weatherTool]);
const result = await model.invoke(messageHistory);
expect(result).toBeDefined();
expect(result.tool_calls?.[0]).toBeDefined();
if (!result.tool_calls?.[0]) return;
expect(result.tool_calls[0].name).toBe("get_current_weather");
expect(result.tool_calls[0].id).toBeDefined();
expect(result.tool_calls[0].id).not.toBe("");
});
test("Ollama can stream tools", async () => {
const model = new ChatOllama({
model: "llama3-groq-tool-use",
maxRetries: 1,
}).bindTools([weatherTool]);
let finalChunk: AIMessageChunk | undefined;
for await (const chunk of await model.stream(messageHistory)) {
finalChunk = !finalChunk ? chunk : concat(finalChunk, chunk);
}
expect(finalChunk).toBeDefined();
if (!finalChunk) return;
expect(finalChunk.tool_calls?.[0]).toBeDefined();
if (!finalChunk.tool_calls?.[0]) return;
expect(finalChunk.tool_calls[0].name).toBe("get_current_weather");
expect(finalChunk.tool_calls[0].id).toBeDefined();
expect(finalChunk.tool_calls[0].id).not.toBe("");
});
test("Ollama can call withStructuredOutput", async () => {
const model = new ChatOllama({
model: "llama3-groq-tool-use",
maxRetries: 1,
}).withStructuredOutput(weatherTool.schema, {
name: weatherTool.name,
});
const result = await model.invoke(messageHistory);
expect(result).toBeDefined();
expect(result.location).toBeDefined();
expect(result.location).not.toBe("");
});
test("Ollama can call withStructuredOutput includeRaw", async () => {
const model = new ChatOllama({
model: "llama3-groq-tool-use",
maxRetries: 1,
}).withStructuredOutput(weatherTool.schema, {
name: weatherTool.name,
includeRaw: true,
});
const result = await model.invoke(messageHistory);
expect(result).toBeDefined();
expect(result.parsed.location).toBeDefined();
expect(result.parsed.location).not.toBe("");
expect((result.raw as AIMessage).tool_calls?.[0]).toBeDefined();
expect((result.raw as AIMessage).tool_calls?.[0].id).toBeDefined();
expect((result.raw as AIMessage).tool_calls?.[0].id).not.toBe("");
});
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama/src | lc_public_repos/langchainjs/libs/langchain-ollama/src/tests/embeddings.test.ts | import { test, expect } from "@jest/globals";
import { OllamaEmbeddings } from "../embeddings.js";
test("Test OllamaEmbeddings allows passthrough of request options", async () => {
const embeddings = new OllamaEmbeddings({
requestOptions: {
num_ctx: 1234,
numPredict: 4321,
},
});
expect(embeddings.requestOptions).toEqual({
num_ctx: 1234,
num_predict: 4321,
});
});
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama/src | lc_public_repos/langchainjs/libs/langchain-ollama/src/tests/chat_models.test.ts | import { test } from "@jest/globals";
test("Test chat model", async () => {
// Your test here
});
|
0 | lc_public_repos/langchainjs/libs/langchain-ollama | lc_public_repos/langchainjs/libs/langchain-ollama/scripts/jest-setup-after-env.js | import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";
afterAll(awaitAllCallbacks);
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"outDir": "../dist",
"rootDir": "./src",
"target": "ES2021",
"lib": ["ES2021", "ES2022.Object", "DOM"],
"module": "ES2020",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "docs"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/LICENSE | The MIT License
Copyright (c) 2023 LangChain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/jest.config.cjs | /** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: "ts-jest/presets/default-esm",
testEnvironment: "./jest.env.cjs",
modulePathIgnorePatterns: ["dist/", "docs/"],
moduleNameMapper: {
"^(\\.{1,2}/.*)\\.js$": "$1",
},
transform: {
"^.+\\.tsx?$": ["@swc/jest"],
},
transformIgnorePatterns: [
"/node_modules/",
"\\.pnp\\.[^\\/]+$",
"./scripts/jest-setup-after-env.js",
],
setupFiles: ["dotenv/config"],
testTimeout: 20_000,
passWithNoTests: true,
collectCoverageFrom: ["src/**/*.ts"],
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/jest.env.cjs | const { TestEnvironment } = require("jest-environment-node");
/**
 * Jest node environment that injects the host's Float32Array constructor
 * into the test sandbox.
 */
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(config, context) {
    // Make `instanceof Float32Array` return true in tests
    // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
    super(config, context);
    this.global.Float32Array = Float32Array;
  }
}

module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/README.md | # @langchain/azure-cosmosdb
This package contains the [Azure CosmosDB](https://learn.microsoft.com/azure/cosmos-db/) vector store integrations.
Learn more about how to use this package in the LangChain documentation:
- [Azure CosmosDB for NoSQL](https://js.langchain.com/docs/integrations/vector_stores/azure_cosmosdb_nosql)
- [Azure CosmosDB for MongoDB vCore](https://js.langchain.com/docs/integrations/vector_stores/azure_cosmosdb_mongodb)
## Installation
```bash npm2yarn
npm install @langchain/azure-cosmosdb @langchain/core
```
This package, along with the main LangChain package, depends on [`@langchain/core`](https://npmjs.com/package/@langchain/core/).
If you are using this package with other LangChain packages, you should make sure that all of the packages depend on the same instance of @langchain/core.
You can do so by adding appropriate fields to your project's `package.json` like this:
```json
{
"name": "your-project",
"version": "0.0.0",
"dependencies": {
"@langchain/core": "^0.3.0",
"@langchain/azure-cosmosdb": "^0.2.5"
},
"resolutions": {
"@langchain/core": "0.3.0"
},
"overrides": {
"@langchain/core": "0.3.0"
},
"pnpm": {
"overrides": {
"@langchain/core": "0.3.0"
}
}
}
```
The field you need depends on the package manager you're using, but we recommend adding a field for the common `yarn`, `npm`, and `pnpm` to maximize compatibility.
## Usage
```typescript
import { AzureCosmosDBNoSQLVectorStore } from "@langchain/azure-cosmosdb";
const store = await AzureCosmosDBNoSQLVectorStore.fromDocuments(
["Hello, World!"],
new OpenAIEmbeddings(),
{
databaseName: "langchain",
containerName: "documents",
}
);
const resultDocuments = await store.similaritySearch("hello");
console.log(resultDocuments[0].pageContent);
```
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/.release-it.json | {
"github": {
"release": true,
"autoGenerate": true,
"tokenRef": "GITHUB_TOKEN_RELEASE"
},
"npm": {
"versionArgs": ["--workspaces-update=false"]
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/.eslintrc.cjs | module.exports = {
extends: [
"airbnb-base",
"eslint:recommended",
"prettier",
"plugin:@typescript-eslint/recommended",
],
parserOptions: {
ecmaVersion: 12,
parser: "@typescript-eslint/parser",
project: "./tsconfig.json",
sourceType: "module",
},
plugins: ["@typescript-eslint", "no-instanceof"],
ignorePatterns: [
".eslintrc.cjs",
"scripts",
"node_modules",
"dist",
"dist-cjs",
"*.js",
"*.cjs",
"*.d.ts",
],
rules: {
"no-process-env": 2,
"no-instanceof/no-instanceof": 2,
"@typescript-eslint/explicit-module-boundary-types": 0,
"@typescript-eslint/no-empty-function": 0,
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
"class-methods-use-this": 0,
"import/extensions": [2, "ignorePackages"],
"import/no-extraneous-dependencies": [
"error",
{ devDependencies: ["**/*.test.ts"] },
],
"import/no-unresolved": 0,
"import/prefer-default-export": 0,
"keyword-spacing": "error",
"max-classes-per-file": 0,
"max-len": 0,
"no-await-in-loop": 0,
"no-bitwise": 0,
"no-console": 0,
"no-restricted-syntax": 0,
"no-shadow": 0,
"no-continue": 0,
"no-void": 0,
"no-underscore-dangle": 0,
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-return-await": 0,
"consistent-return": 0,
"no-else-return": 0,
"func-names": 0,
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
},
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
 * Resolves a path relative to this config file's own directory.
 * @param {string} relativePath - Path relative to the config file.
 * @returns {string} The absolute, resolved path.
 */
function abs(relativePath) {
  const configDir = dirname(fileURLToPath(import.meta.url));
  return resolve(configDir, relativePath);
}
export const config = {
  // Module patterns treated as internal by the build tooling — presumably
  // matched against import paths that should not be bundled; confirm against
  // @langchain/scripts.
  internals: [/node\:/, /@langchain\/core\//],
  // Package entrypoints: export name -> source module (src/index.ts).
  entrypoints: {
    index: "index",
  },
  // No entrypoints depend on optional peer dependencies.
  requiresOptionalDependency: [],
  tsConfigPath: resolve("./tsconfig.json"),
  // CommonJS build output is produced in dist-cjs, then copied into dist.
  cjsSource: "./dist-cjs",
  cjsDestination: "./dist",
  abs,
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/.env.example | # Azure CosmosDB for NoSQL connection string
AZURE_COSMOSDB_NOSQL_CONNECTION_STRING=
# Azure CosmosDB for NoSQL endpoint (if you're using managed identity)
AZURE_COSMOSDB_NOSQL_ENDPOINT=
# Azure CosmosDB for MongoDB vCore connection string
AZURE_COSMOSDB_MONGODB_CONNECTION_STRING=
# If you're using Azure OpenAI API, you'll need to set these variables
AZURE_OPENAI_API_KEY=
AZURE_OPENAI_API_INSTANCE_NAME=
AZURE_OPENAI_API_DEPLOYMENT_NAME=
AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=
AZURE_OPENAI_API_VERSION=
# Or you can use the OpenAI API directly
OPENAI_API_KEY=
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/package.json | {
"name": "@langchain/azure-cosmosdb",
"version": "0.2.4",
"description": "Azure CosmosDB integration for LangChain.js",
"type": "module",
"engines": {
"node": ">=18"
},
"main": "./index.js",
"types": "./index.d.ts",
"repository": {
"type": "git",
"url": "git@github.com:langchain-ai/langchainjs.git"
},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-azure-cosmosdb/",
"scripts": {
"build": "yarn turbo:command build:internal --filter=@langchain/azure-cosmosdb",
"build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking",
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
"lint": "yarn lint:eslint && yarn lint:dpdm",
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"clean": "rm -rf dist/ .turbo",
"prepack": "yarn build",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"format": "prettier --config .prettierrc --write \"src\"",
"format:check": "prettier --config .prettierrc --check \"src\""
},
"author": "LangChain",
"license": "MIT",
"dependencies": {
"@azure/cosmos": "^4.2.0",
"@azure/identity": "^4.5.0",
"mongodb": "^6.10.0"
},
"peerDependencies": {
"@langchain/core": ">=0.2.21 <0.4.0"
},
"devDependencies": {
"@jest/globals": "^29.5.0",
"@langchain/core": "workspace:*",
"@langchain/openai": "workspace:^",
"@langchain/scripts": ">=0.1.0 <0.2.0",
"@swc/core": "^1.3.90",
"@swc/jest": "^0.2.29",
"@tsconfig/recommended": "^1.0.3",
"@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.12.0",
"dotenv": "^16.4.5",
"dpdm": "^3.12.0",
"eslint": "^8.33.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-prettier": "^8.6.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.5.0",
"jest-environment-node": "^29.6.4",
"prettier": "^2.8.3",
"release-it": "^15.10.1",
"rollup": "^4.5.2",
"ts-jest": "^29.1.0",
"typescript": "<5.2.0"
},
"publishConfig": {
"access": "public"
},
"exports": {
".": {
"types": {
"import": "./index.d.ts",
"require": "./index.d.cts",
"default": "./index.d.ts"
},
"import": "./index.js",
"require": "./index.cjs"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"index.cjs",
"index.js",
"index.d.ts",
"index.d.cts"
]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"declaration": false
},
"exclude": ["node_modules", "dist", "docs", "**/tests"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/.prettierrc | {
"$schema": "https://json.schemastore.org/prettierrc",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"arrowParens": "always",
"requirePragma": false,
"insertPragma": false,
"proseWrap": "preserve",
"htmlWhitespaceSensitivity": "css",
"vueIndentScriptAndStyle": false,
"endOfLine": "lf"
}
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/chat_histories.ts | import {
Container,
CosmosClient,
CosmosClientOptions,
ErrorResponse,
} from "@azure/cosmos";
import { DefaultAzureCredential, TokenCredential } from "@azure/identity";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
BaseMessage,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
// Telemetry suffix identifying this integration in Cosmos DB request logs.
const USER_AGENT_SUFFIX = "langchainjs-cdbnosql-chathistory-javascript";
// Defaults used when the constructor input omits database/container names.
const DEFAULT_DATABASE_NAME = "chatHistoryDB";
const DEFAULT_CONTAINER_NAME = "chatHistoryContainer";

/**
 * Lightweight type for listing chat sessions.
 */
export type ChatSession = {
  // Session identifier (also the Cosmos DB item id).
  id: string;
  // Arbitrary user-defined context stored alongside the session.
  context: Record<string, unknown>;
};

/**
 * Type for the input to the `AzureCosmosDBNoSQLChatMessageHistory` constructor.
 */
export interface AzureCosmosDBNoSQLChatMessageHistoryInput {
  /** Identifier of the chat session to read and write. */
  sessionId: string;
  /** Session owner; used as the container partition key. Defaults to "anonymous". */
  userId?: string;
  /** Pre-configured CosmosClient; takes precedence over connection settings. */
  client?: CosmosClient;
  /** Falls back to the AZURE_COSMOSDB_NOSQL_CONNECTION_STRING env variable. */
  connectionString?: string;
  /** Falls back to the AZURE_COSMOSDB_NOSQL_ENDPOINT env variable. */
  endpoint?: string;
  /** Defaults to "chatHistoryDB". */
  databaseName?: string;
  /** Defaults to "chatHistoryContainer". */
  containerName?: string;
  /** Azure AD credentials used with `endpoint`; defaults to DefaultAzureCredential. */
  credentials?: TokenCredential;
  /** Default TTL (seconds) applied to the container when it is created. */
  ttl?: number;
}
/**
* Class for storing chat message history with Cosmos DB NoSQL. It extends the
* BaseListChatMessageHistory class and provides methods to get, add, and
* clear messages.
*
* @example
* ```typescript
* const model = new ChatOpenAI({
* model: "gpt-3.5-turbo",
* temperature: 0,
* });
* const prompt = ChatPromptTemplate.fromMessages([
* [
* "system",
* "You are a helpful assistant. Answer all questions to the best of your ability.",
* ],
* new MessagesPlaceholder("chat_history"),
* ["human", "{input}"],
* ]);
*
* const chain = prompt.pipe(model).pipe(new StringOutputParser());
* const chainWithHistory = new RunnableWithMessageHistory({
* runnable: chain,
* inputMessagesKey: "input",
* historyMessagesKey: "chat_history",
* getMessageHistory: async (sessionId) => {
* const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory({
* sessionId: sessionId,
* userId: "user-id",
* databaseName: "DATABASE_NAME",
* containerName: "CONTAINER_NAME",
* })
* return chatHistory;
* },
* });
* await chainWithHistory.invoke(
* { input: "What did I just say my name was?" },
* { configurable: { sessionId: "session-id" } }
* );
* ```
*/
export class AzureCosmsosDBNoSQLChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "azurecosmosdb"];

  // Cosmos DB container holding one document per chat session;
  // populated by `initializeContainer()`.
  private container: Container;

  private sessionId: string;

  private databaseName: string;

  private containerName: string;

  private client: CosmosClient;

  // Partition key value; "anonymous" when no userId was provided.
  private userId: string;

  // Optional default TTL (seconds) applied to the container on creation.
  private ttl: number | undefined;

  private messageList: BaseMessage[] = [];

  // Memoized container-initialization promise so concurrent callers share
  // a single createIfNotExists round trip.
  private initPromise?: Promise<void>;

  private context: Record<string, unknown> = {};

  constructor(chatHistoryInput: AzureCosmosDBNoSQLChatMessageHistoryInput) {
    super();
    this.sessionId = chatHistoryInput.sessionId;
    this.databaseName = chatHistoryInput.databaseName ?? DEFAULT_DATABASE_NAME;
    this.containerName =
      chatHistoryInput.containerName ?? DEFAULT_CONTAINER_NAME;
    this.userId = chatHistoryInput.userId ?? "anonymous";
    this.ttl = chatHistoryInput.ttl;
    this.client = this.initializeClient(chatHistoryInput);
  }

  /**
   * Builds (or reuses) the CosmosClient, preferring in order: an explicit
   * client, a connection string, then an endpoint with Azure AD credentials
   * (DefaultAzureCredential when none are supplied).
   * @throws Error when no client, connection string, or endpoint is available.
   */
  private initializeClient(
    input: AzureCosmosDBNoSQLChatMessageHistoryInput
  ): CosmosClient {
    const connectionString =
      input.connectionString ??
      getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_CONNECTION_STRING");
    const endpoint =
      input.endpoint ?? getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_ENDPOINT");

    if (!input.client && !connectionString && !endpoint) {
      throw new Error(
        "CosmosClient, connection string, or endpoint must be provided."
      );
    }

    if (input.client) {
      return input.client;
    }

    if (connectionString) {
      // Connection string format: "AccountEndpoint=<url>;AccountKey=<base64>".
      // Take everything after the FIRST "=" of each part: account keys are
      // base64 and may end with "=" padding, which a plain split("=")[1]
      // would silently truncate, producing an invalid key.
      const [endpointPart, keyPart] = connectionString.split(";");
      const accountEndpoint = endpointPart.substring(
        endpointPart.indexOf("=") + 1
      );
      const key = keyPart.substring(keyPart.indexOf("=") + 1);
      return new CosmosClient({
        endpoint: accountEndpoint,
        key,
        userAgentSuffix: USER_AGENT_SUFFIX,
      });
    } else {
      return new CosmosClient({
        endpoint,
        aadCredentials: input.credentials ?? new DefaultAzureCredential(),
        userAgentSuffix: USER_AGENT_SUFFIX,
      } as CosmosClientOptions);
    }
  }

  /**
   * Lazily creates the database and container if needed (idempotent; the
   * promise is memoized). Rejections are logged and rethrown, and the
   * rejected promise stays memoized, so later calls surface the same error.
   */
  private async initializeContainer(): Promise<void> {
    if (!this.initPromise) {
      this.initPromise = (async () => {
        const { database } = await this.client.databases.createIfNotExists({
          id: this.databaseName,
        });
        const { container } = await database.containers.createIfNotExists({
          id: this.containerName,
          partitionKey: "/userId",
          defaultTtl: this.ttl,
        });
        this.container = container;
      })().catch((error) => {
        console.error("Error initializing Cosmos DB container:", error);
        throw error;
      });
    }
    return this.initPromise;
  }

  /**
   * Loads the messages for this session from Cosmos DB.
   * @returns All stored messages, or an empty list for a new session.
   */
  async getMessages(): Promise<BaseMessage[]> {
    await this.initializeContainer();
    const document = await this.container
      .item(this.sessionId, this.userId)
      .read();
    const messages = document.resource?.messages || [];
    this.messageList = mapStoredMessagesToChatMessages(messages);
    return this.messageList;
  }

  /**
   * Appends a message to the session, upserting the whole session document
   * (messages + context) back to Cosmos DB.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    await this.initializeContainer();
    // Re-read first so concurrent writers are less likely to drop messages.
    this.messageList = await this.getMessages();
    this.messageList.push(message);
    const messages = mapChatMessagesToStoredMessages(this.messageList);
    const context = await this.getContext();
    await this.container.items.upsert({
      id: this.sessionId,
      userId: this.userId,
      context,
      messages,
    });
  }

  /**
   * Deletes this session's document.
   * NOTE(review): deleting a non-existent session will throw a 404 from the
   * SDK — confirm whether callers expect clear() to be a no-op in that case.
   */
  async clear(): Promise<void> {
    this.messageList = [];
    await this.initializeContainer();
    await this.container.item(this.sessionId, this.userId).delete();
  }

  /** Deletes every session document belonging to this user. */
  async clearAllSessions() {
    await this.initializeContainer();
    const query = {
      query: "SELECT c.id FROM c WHERE c.userId = @userId",
      parameters: [{ name: "@userId", value: this.userId }],
    };
    const { resources: userSessions } = await this.container.items
      .query(query)
      .fetchAll();
    for (const userSession of userSessions) {
      await this.container.item(userSession.id, this.userId).delete();
    }
  }

  /**
   * Lists all sessions (id + context) belonging to this user.
   */
  async getAllSessions(): Promise<ChatSession[]> {
    await this.initializeContainer();
    const query = {
      query: "SELECT c.id, c.context FROM c WHERE c.userId = @userId",
      parameters: [{ name: "@userId", value: this.userId }],
    };
    const { resources: userSessions } = await this.container.items
      .query(query)
      .fetchAll();
    return userSessions ?? [];
  }

  /**
   * Returns the stored context for this session, falling back to the
   * in-memory context when the document does not exist yet.
   */
  async getContext(): Promise<Record<string, unknown>> {
    // Ensure the container exists before reading: previously this method
    // threw a TypeError when called before any other method had run
    // initializeContainer().
    await this.initializeContainer();
    const document = await this.container
      .item(this.sessionId, this.userId)
      .read();
    this.context = document.resource?.context || this.context;
    return this.context;
  }

  /**
   * Replaces the session context. If the session document does not exist yet
   * (404), the context is kept in memory and persisted with the first
   * message instead.
   */
  async setContext(context: Record<string, unknown>): Promise<void> {
    await this.initializeContainer();
    this.context = context || {};
    try {
      await this.container
        .item(this.sessionId, this.userId)
        .patch([{ op: "replace", path: "/context", value: this.context }]);
    } catch (_error: unknown) {
      const error = _error as ErrorResponse;
      // If document does not exist yet, context will be set when adding the first message
      if (error?.code !== 404) {
        throw error;
      }
    }
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_mongodb.ts | import {
ObjectId,
Collection,
Document as MongoDBDocument,
MongoClient,
Db,
Filter,
} from "mongodb";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import {
MaxMarginalRelevanceSearchOptions,
VectorStore,
} from "@langchain/core/vectorstores";
import { Document, DocumentInterface } from "@langchain/core/documents";
import { maximalMarginalRelevance } from "@langchain/core/utils/math";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
/** Azure Cosmos DB for MongoDB vCore Similarity type. */
export const AzureCosmosDBMongoDBSimilarityType = {
  /** Cosine similarity */
  COS: "COS",
  /** Inner product */
  IP: "IP",
  /** Euclidean distance */
  L2: "L2",
} as const;

/** Azure Cosmos DB for MongoDB vCore Similarity type (union of the values above). */
export type AzureCosmosDBMongoDBSimilarityType =
  (typeof AzureCosmosDBMongoDBSimilarityType)[keyof typeof AzureCosmosDBMongoDBSimilarityType];
/** Azure Cosmos DB for MongoDB vCore Index Options. */
export type AzureCosmosDBMongoDBIndexOptions = {
  /** Skips automatic index creation. */
  readonly skipCreate?: boolean;
  /** Index kind; defaults to "ivf" when unset. */
  readonly indexType?: "ivf" | "hnsw" | "diskann";
  /** Number of clusters that the inverted file (IVF) index uses to group the vector data. Defaults to 100. */
  readonly numLists?: number;
  /** Number of dimensions for vector similarity. */
  readonly dimensions?: number;
  /** Similarity metric to use with the IVF index. */
  readonly similarity?: AzureCosmosDBMongoDBSimilarityType;
  /** The max number of connections per layer with the HNSW index. Defaults to 16. */
  readonly m?: number;
  /** The size of the dynamic candidate list for constructing the graph with the HNSW index. Defaults to 200. */
  readonly efConstruction?: number;
  /** Max number of neighbors with the DiskANN index. Defaults to 40. */
  readonly maxDegree?: number;
  /** L value for index building with the DiskANN index. Defaults to 50. */
  readonly lBuild?: number;
  /** L value for index searching with the DiskANN index. Defaults to 40. */
  readonly lSearch?: number;
};

/** Azure Cosmos DB for MongoDB vCore delete Parameters. */
export type AzureCosmosDBMongoDBDeleteParams = {
  /** List of IDs for the documents to be removed. */
  readonly ids?: string | string[];
  /** MongoDB filter object or list of IDs for the documents to be removed. */
  readonly filter?: Filter<MongoDBDocument>;
};

/** Configuration options for the `AzureCosmosDBMongoDBVectorStore` constructor. */
export interface AzureCosmosDBMongoDBConfig {
  /** Pre-configured MongoClient; takes precedence over `connectionString`. */
  readonly client?: MongoClient;
  /** Falls back to the AZURE_COSMOSDB_MONGODB_CONNECTION_STRING env variable. */
  readonly connectionString?: string;
  /** Defaults to "documentsDB". */
  readonly databaseName?: string;
  /** Defaults to "documents". */
  readonly collectionName?: string;
  /** Defaults to "vectorSearchIndex". */
  readonly indexName?: string;
  /** Document field storing the page content. Defaults to "textContent". */
  readonly textKey?: string;
  /** Document field storing the embedding vector. Defaults to "vectorContent". */
  readonly embeddingKey?: string;
  readonly indexOptions?: AzureCosmosDBMongoDBIndexOptions;
}
/**
 * Azure Cosmos DB for MongoDB vCore vector store.
 * To use this, you should have both:
 * - the `mongodb` NPM package installed
 * - a connection string associated with a MongoDB VCore Cluster
 *
 * You do not need to create a database or collection, it will be created
 * automatically.
 *
 * You also need an index on the collection, which is by default created
 * automatically using the `createIndex` method.
 */
export class AzureCosmosDBMongoDBVectorStore extends VectorStore {
  // Maps the constructor's secret field to its environment variable name
  // for LangChain serialization.
  get lc_secrets(): { [key: string]: string } {
    return {
      connectionString: "AZURE_COSMOSDB_MONGODB_CONNECTION_STRING",
    };
  }

  // Resolves once the MongoClient is connected; assigned inside `init()`.
  private connectPromise: Promise<void>;

  // Memoized promise created by the first call to `initialize()`.
  private initPromise?: Promise<void>;

  // Only set when this instance created the client itself (see `close()`).
  private readonly client: MongoClient | undefined;

  private database: Db;

  private collection: Collection<MongoDBDocument>;

  readonly indexName: string;

  readonly textKey: string;

  readonly embeddingKey: string;

  private readonly indexOptions: AzureCosmosDBMongoDBIndexOptions;

  /**
   * Initializes the AzureCosmosDBMongoDBVectorStore.
   * Connect the client to the database and create the container, creating them if needed.
   * @returns A promise that resolves when the AzureCosmosDBMongoDBVectorStore has been initialized.
   */
  initialize: () => Promise<void>;

  _vectorstoreType(): string {
    return "azure_cosmosdb_mongodb";
  }

  constructor(
    embeddings: EmbeddingsInterface,
    dbConfig: AzureCosmosDBMongoDBConfig
  ) {
    super(embeddings, dbConfig);

    const connectionString =
      dbConfig.connectionString ??
      getEnvironmentVariable("AZURE_COSMOSDB_MONGODB_CONNECTION_STRING");

    if (!dbConfig.client && !connectionString) {
      throw new Error(
        "AzureCosmosDBMongoDBVectorStore client or connection string must be set."
      );
    }

    if (!dbConfig.client) {
      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
      this.client = new MongoClient(connectionString!, {
        appName: "langchainjs",
      });
    }

    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    const client = dbConfig.client || this.client!;
    const databaseName = dbConfig.databaseName ?? "documentsDB";
    const collectionName = dbConfig.collectionName ?? "documents";

    this.indexName = dbConfig.indexName ?? "vectorSearchIndex";
    this.textKey = dbConfig.textKey ?? "textContent";
    this.embeddingKey = dbConfig.embeddingKey ?? "vectorContent";
    this.indexOptions = dbConfig.indexOptions ?? {};

    // Deferring initialization to the first call to `initialize`
    this.initialize = () => {
      if (this.initPromise === undefined) {
        this.initPromise = this.init(
          client,
          databaseName,
          collectionName
        ).catch((error) => {
          // NOTE(review): this swallows initialization failures — the
          // memoized promise resolves even on error, so later operations can
          // fail on an undefined collection with a less useful message.
          // Consider rethrowing; confirm the intended contract.
          console.error(
            "Error during AzureCosmosDBMongoDBVectorStore initialization:",
            error
          );
        });
      }
      return this.initPromise;
    };
  }

  /**
   * Checks if the specified index name during instance construction exists
   * on the collection.
   * @returns A promise that resolves to a boolean indicating if the index exists.
   */
  async checkIndexExists(): Promise<boolean> {
    await this.initialize();
    const indexes = await this.collection.listIndexes().toArray();
    return indexes.some((index) => index.name === this.indexName);
  }

  /**
   * Deletes the index specified during instance construction if it exists.
   * @returns A promise that resolves when the index has been deleted.
   */
  async deleteIndex(): Promise<void> {
    await this.initialize();
    if (await this.checkIndexExists()) {
      await this.collection.dropIndex(this.indexName);
    }
  }

  /**
   * Creates an index on the collection with the specified index name during
   * instance construction.
   *
   * Setting the numLists index option correctly is important for achieving
   * good accuracy and performance.
   * Since the vector store uses IVF as the indexing strategy, you should
   * create the index only after you have loaded a large enough sample
   * documents to ensure that the centroids for the respective buckets are
   * fairly distributed.
   *
   * We recommend that numLists is set to documentCount/1000 for up to
   * 1 million documents and to sqrt(documentCount) for more than 1 million
   * documents.
   * As the number of items in your database grows, you should tune numLists
   * to be larger in order to achieve good latency performance for vector
   * search.
   *
   * If you're experimenting with a new scenario or creating a small demo,
   * you can start with numLists set to 1 to perform a brute-force search
   * across all vectors.
   * This should provide you with the most accurate results from the vector
   * search, however be aware that the search speed and latency will be slow.
   * After your initial setup, you should go ahead and tune the numLists
   * index option using the above guidance.
   * (numLists, m, efConstruction, maxDegree, lBuild and lSearch are all
   * taken from the indexOptions passed to the constructor.)
   * @param indexType Index Type for Mongo vCore index.
   * @param dimensions Number of dimensions for vector similarity.
   * The maximum number of supported dimensions is 2000.
   * If no number is provided, it will be determined automatically by
   * embedding a short text.
   * @param similarity Similarity metric to use with the IVF index.
   * Possible options are:
   * - CosmosDBSimilarityType.COS (cosine distance)
   * - CosmosDBSimilarityType.L2 (Euclidean distance)
   * - CosmosDBSimilarityType.IP (inner product)
   * @returns A promise that resolves when the index has been created.
   */
  async createIndex(
    dimensions: number | undefined = undefined,
    indexType: "ivf" | "hnsw" | "diskann" = "ivf",
    similarity: AzureCosmosDBMongoDBSimilarityType = AzureCosmosDBMongoDBSimilarityType.COS
  ): Promise<void> {
    // NOTE(review): `connectPromise` is only assigned inside `init()`;
    // calling createIndex() before initialize() awaits `undefined` (which
    // resolves immediately) and may then hit an undefined `this.database`.
    // Confirm callers always initialize first.
    await this.connectPromise;

    // Determine the vector dimensionality by embedding a probe string when
    // the caller did not specify it.
    let vectorLength = dimensions;
    if (vectorLength === undefined) {
      const queryEmbedding = await this.embeddings.embedQuery("test");
      vectorLength = queryEmbedding.length;
    }

    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const cosmosSearchOptions: any = {
      kind: "",
      similarity,
      dimensions: vectorLength,
    };

    if (indexType === "hnsw") {
      cosmosSearchOptions.kind = "vector-hnsw";
      cosmosSearchOptions.m = this.indexOptions.m ?? 16;
      cosmosSearchOptions.efConstruction =
        this.indexOptions.efConstruction ?? 200;
    } else if (indexType === "diskann") {
      cosmosSearchOptions.kind = "vector-diskann";
      cosmosSearchOptions.maxDegree = this.indexOptions.maxDegree ?? 40;
      cosmosSearchOptions.lBuild = this.indexOptions.lBuild ?? 50;
      cosmosSearchOptions.lSearch = this.indexOptions.lSearch ?? 40;
      /** Default to IVF index */
    } else {
      cosmosSearchOptions.kind = "vector-ivf";
      cosmosSearchOptions.numLists = this.indexOptions.numLists ?? 100;
    }

    // Issued as a raw database command: cosmosSearch index kinds are a
    // Cosmos DB vCore extension, not part of the standard MongoDB API.
    const createIndexCommands = {
      createIndexes: this.collection.collectionName,
      indexes: [
        {
          name: this.indexName,
          key: { [this.embeddingKey]: "cosmosSearch" },
          cosmosSearchOptions,
        },
      ],
    };

    await this.database.command(createIndexCommands);
  }

  /**
   * Removes specified documents from the AzureCosmosDBMongoDBVectorStore.
   * If no IDs or filter are specified, all documents will be removed.
   * @param params Parameters for the delete operation, or a plain list of IDs.
   * @returns A promise that resolves when the documents have been removed.
   */
  async delete(
    params: AzureCosmosDBMongoDBDeleteParams | string[] = {}
  ): Promise<void> {
    await this.initialize();

    // Accept either a bare ID array or a { ids, filter } object.
    let ids: string | string[] | undefined;
    let filter: AzureCosmosDBMongoDBDeleteParams["filter"];
    if (Array.isArray(params)) {
      ids = params;
    } else {
      ids = params.ids;
      filter = params.filter;
    }
    const idsArray = Array.isArray(ids) ? ids : [ids];
    const deleteIds = ids && idsArray.length > 0 ? idsArray : undefined;
    let deleteFilter = filter ?? {};

    // When IDs are given, constrain the filter to those documents.
    if (deleteIds) {
      const objectIds = deleteIds.map((id) => new ObjectId(id));
      deleteFilter = { _id: { $in: objectIds }, ...deleteFilter };
    }

    await this.collection.deleteMany(deleteFilter);
  }

  /**
   * Closes any newly instantiated Azure Cosmos DB client.
   * If the client was passed in the constructor, it will not be closed.
   * @returns A promise that resolves when any newly instantiated Azure
   * Cosmos DB client been closed.
   */
  async close(): Promise<void> {
    if (this.client) {
      await this.client.close();
    }
  }

  /**
   * Method for adding vectors to the AzureCosmosDBMongoDBVectorStore.
   * @param vectors Vectors to be added.
   * @param documents Corresponding documents to be added.
   * @returns A promise that resolves to the added documents IDs.
   */
  async addVectors(
    vectors: number[][],
    documents: DocumentInterface[]
  ): Promise<string[]> {
    // One MongoDB document per input document: text under textKey, embedding
    // under embeddingKey, metadata fields spread at the top level.
    const docs = vectors.map((embedding, idx) => ({
      [this.textKey]: documents[idx].pageContent,
      [this.embeddingKey]: embedding,
      ...documents[idx].metadata,
    }));
    await this.initialize();
    const result = await this.collection.insertMany(docs);
    return Object.values(result.insertedIds).map((id) => String(id));
  }

  /**
   * Method for adding documents to the AzureCosmosDBMongoDBVectorStore. It first converts
   * the documents to texts and then adds them as vectors.
   * @param documents The documents to add.
   * @returns A promise that resolves to the added documents IDs.
   */
  async addDocuments(documents: DocumentInterface[]): Promise<string[]> {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents
    );
  }

  /**
   * Method that performs a similarity search on the vectors stored in the
   * collection. It returns a list of documents and their corresponding
   * similarity scores.
   * @param queryVector Query vector for the similarity search.
   * @param k=4 Number of nearest neighbors to return.
   * @param indexType Index kind to target; only affects DiskANN (adds lSearch).
   * @returns Promise that resolves to a list of documents and their corresponding similarity scores.
   */
  async similaritySearchVectorWithScore(
    queryVector: number[],
    k: number,
    indexType?: "ivf" | "hnsw" | "diskann"
  ): Promise<[Document, number][]> {
    await this.initialize();

    // $search/cosmosSearch is a Cosmos DB vCore aggregation extension.
    const pipeline = [
      {
        $search: {
          cosmosSearch: {
            vector: queryVector,
            path: this.embeddingKey,
            k: k ?? 4,
            ...(indexType === "diskann"
              ? { lSearch: this.indexOptions.lSearch ?? 40 }
              : {}),
          },
          returnStoredSource: true,
        },
      },
      {
        $project: {
          similarityScore: { $meta: "searchScore" },
          document: "$$ROOT",
        },
      },
    ];
    const results = await this.collection
      .aggregate(pipeline)
      .map<[Document, number]>((result) => {
        const { similarityScore: score, document } = result;
        const text = document[this.textKey];
        return [new Document({ pageContent: text, metadata: document }), score];
      });

    return results.toArray();
  }

  /**
   * Return documents selected using the maximal marginal relevance.
   * Maximal marginal relevance optimizes for similarity to the query AND
   * diversity among selected documents.
   * @param query Text to look up documents similar to.
   * @param options.k Number of documents to return.
   * @param options.fetchK=20 Number of documents to fetch before passing to
   * the MMR algorithm.
   * @param options.lambda=0.5 Number between 0 and 1 that determines the
   * degree of diversity among the results, where 0 corresponds to maximum
   * diversity and 1 to minimum diversity.
   * @returns List of documents selected by maximal marginal relevance.
   */
  async maxMarginalRelevanceSearch(
    query: string,
    options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>
  ): Promise<Document[]>;

  async maxMarginalRelevanceSearch(
    query: string,
    options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>,
    indexType: "ivf" | "hnsw" | "diskann"
  ): Promise<Document[]>;

  async maxMarginalRelevanceSearch(
    query: string,
    options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>,
    indexType?: "ivf" | "hnsw" | "diskann"
  ): Promise<Document[]> {
    const { k, fetchK = 20, lambda = 0.5 } = options;

    const queryEmbedding = await this.embeddings.embedQuery(query);

    // Over-fetch fetchK candidates, then re-rank down to k with MMR.
    const docs = await this.similaritySearchVectorWithScore(
      queryEmbedding,
      fetchK,
      indexType
    );
    // Works because addVectors stores the embedding as a metadata field.
    const embeddingList = docs.map((doc) => doc[0].metadata[this.embeddingKey]);

    // Re-rank the results using MMR
    const mmrIndexes = maximalMarginalRelevance(
      queryEmbedding,
      embeddingList,
      lambda,
      k
    );

    const mmrDocs = mmrIndexes.map((index) => docs[index][0]);
    return mmrDocs;
  }

  /**
   * Initializes the AzureCosmosDBMongoDBVectorStore by connecting to the database.
   * @param client The MongoClient to use for connecting to the database.
   * @param databaseName The name of the database to use.
   * @param collectionName The name of the collection to use.
   * @returns A promise that resolves when the AzureCosmosDBMongoDBVectorStore has been initialized.
   */
  private async init(
    client: MongoClient,
    databaseName: string,
    collectionName: string
  ): Promise<void> {
    this.connectPromise = (async () => {
      await client.connect();
      this.database = client.db(databaseName);
      this.collection = this.database.collection(collectionName);
    })();

    // Unless skipCreate is set, create the index
    // This operation is no-op if the index already exists
    if (!this.indexOptions.skipCreate) {
      const indexType = this.indexOptions.indexType || "ivf";
      await this.createIndex(
        this.indexOptions.dimensions,
        indexType,
        this.indexOptions.similarity
      );
    }
  }

  /**
   * Static method to create an instance of AzureCosmosDBMongoDBVectorStore from a
   * list of texts. It first converts the texts to vectors and then adds
   * them to the collection.
   * @param texts List of texts to be converted to vectors.
   * @param metadatas Metadata for the texts.
   * @param embeddings Embeddings to be used for conversion.
   * @param dbConfig Database configuration for Azure Cosmos DB for MongoDB vCore.
   * @returns Promise that resolves to a new instance of AzureCosmosDBMongoDBVectorStore.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig: AzureCosmosDBMongoDBConfig
  ): Promise<AzureCosmosDBMongoDBVectorStore> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // A single metadata object is shared across all texts; an array is
      // zipped element-wise.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return AzureCosmosDBMongoDBVectorStore.fromDocuments(
      docs,
      embeddings,
      dbConfig
    );
  }

  /**
   * Static method to create an instance of AzureCosmosDBMongoDBVectorStore from a
   * list of documents. It first converts the documents to vectors and then
   * adds them to the collection.
   * @param docs List of documents to be converted to vectors.
   * @param embeddings Embeddings to be used for conversion.
   * @param dbConfig Database configuration for Azure Cosmos DB for MongoDB vCore.
   * @returns Promise that resolves to a new instance of AzureCosmosDBMongoDBVectorStore.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig: AzureCosmosDBMongoDBConfig
  ): Promise<AzureCosmosDBMongoDBVectorStore> {
    const instance = new this(embeddings, dbConfig);
    await instance.addDocuments(docs);
    return instance;
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/caches.ts | import {
BaseCache,
deserializeStoredGeneration,
getCacheKey,
serializeGeneration,
} from "@langchain/core/caches";
import { Generation } from "@langchain/core/outputs";
import { Document } from "@langchain/core/documents";
import { EmbeddingsInterface } from "@langchain/core/embeddings";
import { CosmosClient, CosmosClientOptions } from "@azure/cosmos";
import { DefaultAzureCredential } from "@azure/identity";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import {
AzureCosmosDBNoSQLConfig,
AzureCosmosDBNoSQLVectorStore,
} from "./azure_cosmosdb_nosql.js";
// Telemetry suffix identifying this integration in Cosmos DB request logs.
const USER_AGENT_SUFFIX = "langchainjs-cdbnosql-semanticcache-javascript";
// Container used when the config does not specify one.
const DEFAULT_CONTAINER_NAME = "semanticCacheContainer";
/**
* Represents a Semantic Cache that uses CosmosDB NoSQL backend as the underlying
* storage system.
*
* @example
* ```typescript
* const embeddings = new OpenAIEmbeddings();
* const cache = new AzureCosmosDBNoSQLSemanticCache(embeddings, {
* databaseName: DATABASE_NAME,
* containerName: CONTAINER_NAME
* });
* const model = new ChatOpenAI({cache});
*
* // Invoke the model to perform an action
* const response = await model.invoke("Do something random!");
* console.log(response);
* ```
*/
export class AzureCosmosDBNoSQLSemanticCache extends BaseCache {
  private embeddings: EmbeddingsInterface;

  private config: AzureCosmosDBNoSQLConfig;

  // Minimum (or maximum, for euclidean distance) score for a cache hit.
  private similarityScoreThreshold: number;

  // One vector store per LLM key; created lazily in `getLlmCache`.
  private cacheDict: { [key: string]: AzureCosmosDBNoSQLVectorStore } = {};

  // Distance function of the first configured vector embedding; controls
  // how the threshold comparison is oriented in `lookup`.
  private vectorDistanceFunction: string;

  constructor(
    embeddings: EmbeddingsInterface,
    dbConfig: AzureCosmosDBNoSQLConfig,
    similarityScoreThreshold: number = 0.6
  ) {
    super();
    let client: CosmosClient;

    const connectionString =
      dbConfig.connectionString ??
      getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_CONNECTION_STRING");

    const endpoint =
      dbConfig.endpoint ??
      getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_ENDPOINT");

    if (!dbConfig.client && !connectionString && !endpoint) {
      throw new Error(
        "AzureCosmosDBNoSQLSemanticCache client, connection string or endpoint must be set."
      );
    }

    if (!dbConfig.client) {
      if (connectionString) {
        // Connection string format: "AccountEndpoint=<url>;AccountKey=<base64>".
        // Take everything after the FIRST "=" of each part: account keys are
        // base64 and may end with "=" padding, which a plain split("=")
        // would silently truncate, producing an invalid key.
        const [endpointPart, keyPart] = connectionString.split(";");
        const accountEndpoint = endpointPart.substring(
          endpointPart.indexOf("=") + 1
        );
        const key = keyPart.substring(keyPart.indexOf("=") + 1);
        client = new CosmosClient({
          endpoint: accountEndpoint,
          key,
          userAgentSuffix: USER_AGENT_SUFFIX,
        });
      } else {
        // Use managed identity
        client = new CosmosClient({
          endpoint,
          aadCredentials: dbConfig.credentials ?? new DefaultAzureCredential(),
          userAgentSuffix: USER_AGENT_SUFFIX,
        } as CosmosClientOptions);
      }
    } else {
      client = dbConfig.client;
    }

    this.vectorDistanceFunction =
      dbConfig.vectorEmbeddingPolicy?.vectorEmbeddings[0].distanceFunction ??
      "cosine";

    this.config = {
      ...dbConfig,
      client,
      databaseName: dbConfig.databaseName,
      containerName: dbConfig.containerName ?? DEFAULT_CONTAINER_NAME,
    };
    this.embeddings = embeddings;
    this.similarityScoreThreshold = similarityScoreThreshold;
  }

  /**
   * Returns the vector store for the given LLM key, creating it on first use.
   */
  private getLlmCache(llmKey: string) {
    const key = getCacheKey(llmKey);
    if (!this.cacheDict[key]) {
      this.cacheDict[key] = new AzureCosmosDBNoSQLVectorStore(
        this.embeddings,
        this.config
      );
    }
    return this.cacheDict[key];
  }

  /**
   * Retrieves data from the cache.
   *
   * @param prompt The prompt for lookup.
   * @param llmKey The LLM key used to construct the cache key.
   * @returns An array of Generations if found, null otherwise.
   */
  public async lookup(prompt: string, llmKey: string) {
    const llmCache = this.getLlmCache(llmKey);

    const results = await llmCache.similaritySearchWithScore(prompt, 1);
    if (!results.length) return null;

    const generations = results
      .flatMap(([document, score]) => {
        // For euclidean distance, smaller is more similar; for other
        // (similarity-style) functions, larger is more similar.
        const isSimilar =
          (this.vectorDistanceFunction === "euclidean" &&
            score <= this.similarityScoreThreshold) ||
          (this.vectorDistanceFunction !== "euclidean" &&
            score >= this.similarityScoreThreshold);

        if (!isSimilar) return undefined;

        return document.metadata.return_value.map((gen: string) =>
          deserializeStoredGeneration(JSON.parse(gen))
        );
      })
      .filter((gen) => gen !== undefined);

    return generations.length > 0 ? generations : null;
  }

  /**
   * Updates the cache with new data.
   *
   * @param prompt The prompt for update.
   * @param llmKey The LLM key used to construct the cache key.
   * @param returnValue The generations to be stored in the cache.
   */
  public async update(
    prompt: string,
    llmKey: string,
    returnValue: Generation[]
  ) {
    const serializedGenerations = returnValue.map((generation) =>
      JSON.stringify(serializeGeneration(generation))
    );
    const llmCache = this.getLlmCache(llmKey);

    const metadata = {
      llm_string: llmKey,
      prompt,
      return_value: serializedGenerations,
    };
    const doc = new Document({
      pageContent: prompt,
      metadata,
    });
    await llmCache.addDocuments([doc]);
  }

  /**
   * Deletes the semantic cache for a given llmKey.
   * @param llmKey The LLM key whose cached entries should be removed.
   */
  public async clear(llmKey: string) {
    const key = getCacheKey(llmKey);
    if (this.cacheDict[key]) {
      await this.cacheDict[key].delete();
    }
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts | import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import {
MaxMarginalRelevanceSearchOptions,
VectorStore,
} from "@langchain/core/vectorstores";
import { Document, DocumentInterface } from "@langchain/core/documents";
import { maximalMarginalRelevance } from "@langchain/core/utils/math";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import {
Container,
ContainerRequest,
CosmosClient,
CosmosClientOptions,
DatabaseRequest,
IndexingPolicy,
SqlParameter,
SqlQuerySpec,
VectorEmbedding,
VectorEmbeddingPolicy,
VectorIndex,
} from "@azure/cosmos";
import { DefaultAzureCredential, TokenCredential } from "@azure/identity";
/** Azure Cosmos DB for NoSQL query filter. */
export type AzureCosmosDBNoSQLQueryFilter = string | SqlQuerySpec;

/** Azure Cosmos DB for NoSQL search filter type. */
export type AzureCosmosDBNoSQLFilterType = {
  /**
   * SQL filter clause to add to the vector search query.
   * @example 'WHERE c.category = "cars" LIMIT 10 OFFSET 0'
   */
  filterClause?: AzureCosmosDBNoSQLQueryFilter;
  /** Determines whether or not to include the embeddings in the search results. */
  includeEmbeddings?: boolean;
};

/** Azure Cosmos DB for NoSQL Delete Parameters. */
export type AzureCosmosDBNoSqlDeleteParams = {
  /** List of IDs for the documents to be removed. */
  readonly ids?: string | string[];
  /** SQL query to select the documents to be removed. */
  readonly filter?: AzureCosmosDBNoSQLQueryFilter;
};

/** Azure Cosmos DB for NoSQL database creation options. */
export type AzureCosmosDBNoSqlCreateDatabaseOptions = Partial<
  Omit<DatabaseRequest, "id">
>;

/** Azure Cosmos DB for NoSQL container creation options. */
export type AzureCosmosDBNoSqlCreateContainerOptions = Partial<
  Omit<ContainerRequest, "id" | "vectorEmbeddingPolicy" | "indexingPolicy">
>;

/**
 * Initialization options for the Azure CosmosDB for NoSQL database and container.
 *
 * Note that if you provide multiple vector embeddings in the vectorEmbeddingPolicy,
 * the first one will be used for creating documents and searching.
 */
export interface AzureCosmosDBNoSQLInitOptions {
  readonly vectorEmbeddingPolicy?: VectorEmbeddingPolicy;
  readonly indexingPolicy?: IndexingPolicy;
  readonly createContainerOptions?: AzureCosmosDBNoSqlCreateContainerOptions;
  readonly createDatabaseOptions?: AzureCosmosDBNoSqlCreateDatabaseOptions;
}

/**
 * Configuration options for the `AzureCosmosDBNoSQLVectorStore` constructor.
 */
export interface AzureCosmosDBNoSQLConfig
  extends AzureCosmosDBNoSQLInitOptions {
  /** Existing CosmosClient to reuse; takes precedence over connection options. */
  readonly client?: CosmosClient;
  /** Connection string; falls back to env `AZURE_COSMOSDB_NOSQL_CONNECTION_STRING`. */
  readonly connectionString?: string;
  /** Account endpoint used with credential auth; falls back to env `AZURE_COSMOSDB_NOSQL_ENDPOINT`. */
  readonly endpoint?: string;
  /** Token credential used with `endpoint`; defaults to `DefaultAzureCredential`. */
  readonly credentials?: TokenCredential;
  readonly databaseName?: string;
  readonly containerName?: string;
  /** Document property holding the page content (default: "text"). */
  readonly textKey?: string;
  /** Document property holding the metadata (default: "metadata"). */
  readonly metadataKey?: string;
}

// Suffix appended to the CosmosClient user agent for telemetry attribution.
const USER_AGENT_SUFFIX = "langchainjs-cdbnosql-vectorstore-javascript";
/**
 * Azure Cosmos DB for NoSQL vector store.
 * To use this, you should have both:
 * - the `@azure/cosmos` NPM package installed
 * - a connection string associated with a NoSQL instance
 *
 * You do not need to create a database or container, it will be created
 * automatically.
 */
export class AzureCosmosDBNoSQLVectorStore extends VectorStore {
  declare FilterType: AzureCosmosDBNoSQLFilterType;

  get lc_secrets(): { [key: string]: string } {
    return {
      connectionString: "AZURE_COSMOSDB_NOSQL_CONNECTION_STRING",
    };
  }

  // Memoized promise from the first call to `initialize()`; all later calls
  // return this same promise.
  private initPromise?: Promise<void>;

  private readonly client: CosmosClient;

  // Set by `init()` once the database and container have been created.
  private container: Container;

  // Document property holding the page content (default: "text").
  private readonly textKey: string;

  // Document property holding the metadata (default: "metadata").
  private readonly metadataKey: string;

  // Document property holding the embedding vector, derived from the first
  // entry of the vector embedding policy (default: "vector").
  private embeddingKey: string;

  /**
   * Initializes the AzureCosmosDBNoSQLVectorStore.
   * Connect the client to the database and create the container, creating them if needed.
   * @returns A promise that resolves when the AzureCosmosDBNoSQLVectorStore has been initialized.
   */
  initialize: () => Promise<void>;

  _vectorstoreType(): string {
    return "azure_cosmosdb_nosql";
  }

  constructor(
    embeddings: EmbeddingsInterface,
    dbConfig: AzureCosmosDBNoSQLConfig
  ) {
    super(embeddings, dbConfig);
    const connectionString =
      dbConfig.connectionString ??
      getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_CONNECTION_STRING");
    const endpoint =
      dbConfig.endpoint ??
      getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_ENDPOINT");
    if (!dbConfig.client && !connectionString && !endpoint) {
      throw new Error(
        "AzureCosmosDBNoSQLVectorStore client, connection string or endpoint must be set."
      );
    }
    if (!dbConfig.client) {
      if (connectionString) {
        // Connection string format: "AccountEndpoint=<uri>;AccountKey=<key>;"
        // The destructured `endpoint`/`key` are re-split below to drop the
        // "AccountEndpoint="/"AccountKey=" prefixes.
        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
        let [endpoint, key] = connectionString!.split(";");
        [, endpoint] = endpoint.split("=");
        [, key] = key.split("=");
        this.client = new CosmosClient({
          endpoint,
          key,
          userAgentSuffix: USER_AGENT_SUFFIX,
        });
      } else {
        // Use managed identity
        this.client = new CosmosClient({
          endpoint,
          aadCredentials: dbConfig.credentials ?? new DefaultAzureCredential(),
          userAgentSuffix: USER_AGENT_SUFFIX,
        } as CosmosClientOptions);
      }
    }
    const client = dbConfig.client || this.client;
    const databaseName = dbConfig.databaseName ?? "vectorSearchDB";
    const containerName = dbConfig.containerName ?? "vectorSearchContainer";
    this.textKey = dbConfig.textKey ?? "text";
    this.metadataKey = dbConfig.metadataKey ?? "metadata";
    const vectorEmbeddingPolicy = dbConfig.vectorEmbeddingPolicy ?? {
      vectorEmbeddings: [],
    };
    const indexingPolicy = dbConfig.indexingPolicy ?? {
      indexingMode: "consistent",
      automatic: true,
      includedPaths: [{ path: "/*" }],
      excludedPaths: [{ path: "/_etag/?" }],
    };
    // Provide a default embedding policy when the caller supplied none.
    if (vectorEmbeddingPolicy.vectorEmbeddings.length === 0) {
      vectorEmbeddingPolicy.vectorEmbeddings = [
        {
          path: "/vector",
          dataType: "float32",
          distanceFunction: "cosine",
          // Will be determined automatically during initialization
          dimensions: 0,
        } as VectorEmbedding,
      ];
    }
    if (!indexingPolicy.vectorIndexes?.length) {
      indexingPolicy.vectorIndexes = [
        {
          path: "/vector",
          type: "quantizedFlat",
        } as VectorIndex,
      ];
    }
    // Strip the leading "/" from the JSON path to get the property name.
    this.embeddingKey = vectorEmbeddingPolicy.vectorEmbeddings[0].path.slice(1);
    if (!this.embeddingKey) {
      throw new Error(
        "AzureCosmosDBNoSQLVectorStore requires a valid vectorEmbeddings path"
      );
    }
    // Deferring initialization to the first call to `initialize`
    this.initialize = () => {
      if (this.initPromise === undefined) {
        this.initPromise = this.init(client, databaseName, containerName, {
          vectorEmbeddingPolicy,
          indexingPolicy,
          createContainerOptions: dbConfig.createContainerOptions,
          createDatabaseOptions: dbConfig.createDatabaseOptions,
        }).catch((error) => {
          // NOTE(review): initialization failures are logged but swallowed,
          // so `initPromise` resolves and `this.container` stays undefined
          // for later operations — consider rethrowing here.
          console.error(
            "Error during AzureCosmosDBNoSQLVectorStore initialization:",
            error
          );
        });
      }
      return this.initPromise;
    };
  }

  /**
   * Removes specified documents from the AzureCosmosDBNoSQLVectorStore.
   * If no IDs or filter are specified, all documents will be removed.
   * @param params Parameters for the delete operation.
   * @returns A promise that resolves when the documents have been removed.
   */
  async delete(params: AzureCosmosDBNoSqlDeleteParams = {}): Promise<void> {
    await this.initialize();
    if (params.ids && params.filter) {
      throw new Error(
        `AzureCosmosDBNoSQLVectorStore delete requires either "ids" or "filter" to be set in the params object, not both`
      );
    }
    let ids: string[];
    let query: AzureCosmosDBNoSQLQueryFilter | undefined = params.filter;
    // Delete all documents
    if (!params.ids && !params.filter) {
      query = "SELECT c.id FROM c";
    }
    if (query) {
      // Resolve the filter query into the concrete list of IDs to delete.
      const { resources } = await this.container.items.query(query).fetchAll();
      ids = resources.map((item) => item.id);
    } else {
      ids = (Array.isArray(params.ids) ? params.ids : [params.ids]) as string[];
    }
    if (ids.length === 0) {
      return;
    }
    await Promise.all(ids.map((id) => this.container.item(id).delete()));
  }

  /**
   * Method for adding vectors to the AzureCosmosDBNoSQLVectorStore.
   * @param vectors Vectors to be added.
   * @param documents Corresponding documents to be added.
   * @returns A promise that resolves to the added documents IDs.
   */
  async addVectors(
    vectors: number[][],
    documents: DocumentInterface[]
  ): Promise<string[]> {
    await this.initialize();
    // Build one Cosmos item per document; an explicit document `id` is
    // forwarded, otherwise Cosmos assigns one on creation.
    const docs = vectors.map((embedding, idx) => ({
      [this.textKey]: documents[idx].pageContent,
      [this.embeddingKey]: embedding,
      [this.metadataKey]: documents[idx].metadata,
      ...(documents[idx].id ? { id: documents[idx].id } : {}),
    }));
    const ids: string[] = [];
    const results = await Promise.all(
      docs.map((doc) => this.container.items.create(doc))
    );
    for (const result of results) {
      ids.push(result.resource?.id ?? "error: could not create item");
    }
    return ids;
  }

  /**
   * Method for adding documents to the AzureCosmosDBNoSQLVectorStore. It first converts
   * the documents to texts and then adds them as vectors.
   * @param documents The documents to add.
   * @returns A promise that resolves to the added documents IDs.
   */
  async addDocuments(documents: DocumentInterface[]): Promise<string[]> {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents
    );
  }

  /**
   * Performs a similarity search on the vectors stored in the container.
   * @param query Query text for the similarity search.
   * @param k=4 Number of nearest neighbors to return.
   * @param filter Optional filter options for the documents.
   * @returns Promise that resolves to a list of documents.
   */
  async similaritySearch(
    query: string,
    k = 4,
    filter: this["FilterType"] | undefined = undefined
  ): Promise<Document[]> {
    const results = await this.similaritySearchWithScore(query, k, filter);
    return results.map((result) => result[0]);
  }

  /**
   * Performs a similarity search on the vectors stored in the container.
   * @param queryVector Query vector for the similarity search.
   * @param k=4 Number of nearest neighbors to return.
   * @param filter Optional filter options for the documents.
   * @returns Promise that resolves to a list of documents and their corresponding similarity scores.
   */
  async similaritySearchVectorWithScore(
    queryVector: number[],
    k = 4,
    filter: this["FilterType"] | undefined = undefined
  ): Promise<[Document, number][]> {
    await this.initialize();
    let filterClause = "";
    let filterClauseParams: SqlParameter[] = [];
    if (filter?.filterClause) {
      if (typeof filter.filterClause === "string") {
        filterClause = `${filter.filterClause} `;
      } else {
        filterClause = `${filter.filterClause.query} `;
        filterClauseParams = filter.filterClause.parameters ?? [];
      }
    }
    const embeddings = filter?.includeEmbeddings
      ? `c[@embeddingKey] AS vector, `
      : "";
    // Property names and the query vector are passed as SQL parameters;
    // only the caller-supplied filter clause is interpolated as-is.
    const query = `SELECT TOP @k c.id, ${embeddings}c[@textKey] AS text, c[@metadataKey] AS metadata, VectorDistance(c[@embeddingKey], @vector) AS similarityScore FROM c ${filterClause}ORDER BY VectorDistance(c[@embeddingKey], @vector)`;
    const { resources: items } = await this.container.items
      .query(
        {
          query,
          parameters: [
            ...filterClauseParams,
            { name: "@k", value: k },
            { name: "@textKey", value: this.textKey },
            { name: "@metadataKey", value: this.metadataKey },
            { name: "@embeddingKey", value: this.embeddingKey },
            { name: "@vector", value: queryVector },
          ],
        },
        { maxItemCount: k }
      )
      .fetchAll();
    const docsAndScores = items.map(
      (item) =>
        [
          new Document({
            id: item.id,
            pageContent: item.text,
            metadata: {
              ...(item.metadata ?? {}),
              // Surface the raw embedding only when explicitly requested.
              ...(filter?.includeEmbeddings
                ? { [this.embeddingKey]: item.vector }
                : {}),
            },
          }),
          item.similarityScore,
        ] as [Document, number]
    );
    return docsAndScores;
  }

  /**
   * Return documents selected using the maximal marginal relevance.
   * Maximal marginal relevance optimizes for similarity to the query AND
   * diversity among selected documents.
   * @param query Text to look up documents similar to.
   * @param options.k Number of documents to return.
   * @param options.fetchK=20 Number of documents to fetch before passing to
   *     the MMR algorithm.
   * @param options.lambda=0.5 Number between 0 and 1 that determines the
   *     degree of diversity among the results, where 0 corresponds to maximum
   *     diversity and 1 to minimum diversity.
   * @returns List of documents selected by maximal marginal relevance.
   */
  async maxMarginalRelevanceSearch(
    query: string,
    options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>
  ): Promise<Document[]> {
    const { k, fetchK = 20, lambda = 0.5 } = options;
    const includeEmbeddingsFlag = options.filter?.includeEmbeddings || false;
    const queryEmbedding = await this.embeddings.embedQuery(query);
    // Embeddings are always fetched here because MMR re-ranking needs them,
    // regardless of whether the caller asked for them.
    const docs = await this.similaritySearchVectorWithScore(
      queryEmbedding,
      fetchK,
      {
        ...options.filter,
        includeEmbeddings: true,
      }
    );
    const embeddingList = docs.map((doc) => doc[0].metadata[this.embeddingKey]);
    // Re-rank the results using MMR
    const mmrIndexes = maximalMarginalRelevance(
      queryEmbedding,
      embeddingList,
      lambda,
      k
    );
    return mmrIndexes.map((index) => {
      const doc = docs[index][0];
      // Remove embeddings if they were not requested originally
      if (!includeEmbeddingsFlag) {
        delete doc.metadata[this.embeddingKey];
      }
      return doc;
    });
  }

  /**
   * Initializes the AzureCosmosDBNoSQLVectorStore by connecting to the database.
   * @param client The CosmosClient to use for connecting to the database.
   * @param databaseName The name of the database to use.
   * @param containerName The name of the collection to use.
   * @param initOptions Initialization options for the database and container.
   * @returns A promise that resolves when the AzureCosmosDBNoSQLVectorStore has been initialized.
   */
  private async init(
    client: CosmosClient,
    databaseName: string,
    containerName: string,
    initOptions: AzureCosmosDBNoSQLInitOptions
  ): Promise<void> {
    // Determine vector dimensions if not provided
    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    const vectorEmbeddingPolicy = initOptions.vectorEmbeddingPolicy!;
    const needDimensions = vectorEmbeddingPolicy.vectorEmbeddings.some(
      (v) => !v.dimensions
    );
    if (needDimensions) {
      // Probe the embeddings model once to learn the vector length.
      const queryEmbedding = await this.embeddings.embedQuery("test");
      for (const v of vectorEmbeddingPolicy.vectorEmbeddings) {
        if (!v.dimensions) {
          v.dimensions = queryEmbedding.length;
        }
      }
    }
    const { database } = await client.databases.createIfNotExists({
      ...(initOptions?.createDatabaseOptions ?? {}),
      id: databaseName,
    });
    const { container } = await database.containers.createIfNotExists({
      ...(initOptions?.createContainerOptions ?? {}),
      indexingPolicy: initOptions?.indexingPolicy,
      vectorEmbeddingPolicy,
      id: containerName,
    });
    this.container = container;
  }

  /**
   * Static method to create an instance of AzureCosmosDBNoSQLVectorStore from a
   * list of texts. It first converts the texts to vectors and then adds
   * them to the collection.
   * @param texts List of texts to be converted to vectors.
   * @param metadatas Metadata for the texts.
   * @param embeddings Embeddings to be used for conversion.
   * @param dbConfig Database configuration for Azure Cosmos DB for NoSQL.
   * @returns Promise that resolves to a new instance of AzureCosmosDBNoSQLVectorStore.
   */
  static async fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig: AzureCosmosDBNoSQLConfig
  ): Promise<AzureCosmosDBNoSQLVectorStore> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      // A single metadata object is shared by every text.
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return AzureCosmosDBNoSQLVectorStore.fromDocuments(
      docs,
      embeddings,
      dbConfig
    );
  }

  /**
   * Static method to create an instance of AzureCosmosDBNoSQLVectorStore from a
   * list of documents. It first converts the documents to vectors and then
   * adds them to the collection.
   * @param docs List of documents to be converted to vectors.
   * @param embeddings Embeddings to be used for conversion.
   * @param dbConfig Database configuration for Azure Cosmos DB for NoSQL.
   * @returns Promise that resolves to a new instance of AzureCosmosDBNoSQLVectorStore.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig: AzureCosmosDBNoSQLConfig
  ): Promise<AzureCosmosDBNoSQLVectorStore> {
    const instance = new this(embeddings, dbConfig);
    await instance.addDocuments(docs);
    return instance;
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/index.ts | export * from "./azure_cosmosdb_mongodb.js";
export * from "./azure_cosmosdb_nosql.js";
export * from "./caches.js";
export * from "./chat_histories.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/tests/azure_cosmosdb_mongodb.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import { jest, test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { AzureCosmosDBMongoDBVectorStore } from "../azure_cosmosdb_mongodb.js";
// Mock mongodb client
// Builds a stubbed mongodb client exposing only the surface used by
// AzureCosmosDBMongoDBVectorStore. `db()` and `collection()` always hand
// back the same memoized objects, so call counts accumulate across uses.
const createMockClient = () => {
  const collection = {
    listIndexes: jest.fn().mockReturnValue({
      toArray: jest.fn().mockReturnValue([{ name: "vectorSearchIndex" }]),
    }),
    dropIndex: jest.fn(),
    deleteMany: jest.fn(),
    insertMany: jest.fn().mockImplementation((docs: any) => ({
      insertedIds: docs.map((_: any, i: any) => `id${i}`),
    })),
    aggregate: jest.fn().mockReturnValue({
      map: jest.fn().mockReturnValue({
        toArray: jest
          .fn()
          .mockReturnValue([
            [new Document({ pageContent: "test", metadata: { a: 1 } }), 0.5],
          ]),
      }),
    }),
  };
  const db = {
    collectionName: "documents",
    collection: jest.fn<any>().mockReturnValue(collection),
    command: jest.fn(),
  };
  return {
    db: jest.fn<any>().mockReturnValue(db),
    connect: jest.fn(),
    close: jest.fn(),
  };
};
// Spy on the embeddings call so tests can assert how often documents were
// embedded; reset between tests to keep call counts independent.
const embedMock = jest.spyOn(FakeEmbeddings.prototype, "embedDocuments");
beforeEach(() => {
  embedMock.mockClear();
});
// Verifies the add-documents and similarity-search round trip against the mock.
test("AzureCosmosDBMongoDBVectorStore works", async () => {
  const client = createMockClient();
  const embeddings = new FakeEmbeddings();
  const store = new AzureCosmosDBMongoDBVectorStore(embeddings, {
    client: client as any,
  });
  expect(store).toBeDefined();
  await store.addDocuments([
    {
      pageContent: "test",
      metadata: { a: 1 },
    },
  ]);
  const mockCollection = client.db().collection();
  expect(mockCollection.insertMany).toHaveBeenCalledTimes(1);
  expect(embedMock).toHaveBeenCalledTimes(1);
  const results = await store.similaritySearch("test", 1);
  expect(mockCollection.aggregate).toHaveBeenCalledTimes(1);
  expect(results).toHaveLength(1);
});

// Index lifecycle: existence check goes through listIndexes, deletion
// through dropIndex.
test("AzureCosmosDBMongoDBVectorStore manages its index", async () => {
  const client = createMockClient();
  const embeddings = new FakeEmbeddings();
  const store = new AzureCosmosDBMongoDBVectorStore(embeddings, {
    client: client as any,
  });
  const indexExists = await store.checkIndexExists();
  const mockDb = client.db();
  const mockCollection = mockDb.collection();
  expect(mockDb.command).toHaveBeenCalledTimes(1);
  expect(mockCollection.listIndexes).toHaveBeenCalledTimes(1);
  expect(indexExists).toBe(true);
  await store.deleteIndex();
  expect(mockCollection.dropIndex).toHaveBeenCalledTimes(1);
});

// Delete semantics: no args -> delete everything; `ids` -> delete by _id;
// `filter` -> delete by query.
test("AzureCosmosDBMongoDBVectorStore deletes documents", async () => {
  const client = createMockClient();
  const embeddings = new FakeEmbeddings();
  const store = new AzureCosmosDBMongoDBVectorStore(embeddings, {
    client: client as any,
  });
  await store.delete();
  const mockCollection = client.db().collection();
  expect(mockCollection.deleteMany).toHaveBeenCalledTimes(1);
  expect(mockCollection.deleteMany).toHaveBeenCalledWith({});
  await store.delete({
    ids: ["123456789012345678901234", "123456789012345678901235"],
  });
  expect(mockCollection.deleteMany).toHaveBeenCalledTimes(2);
  expect(mockCollection.deleteMany.mock.calls[1][0]).toMatchObject({ _id: {} });
  await store.delete({ filter: { a: 1 } });
  expect(mockCollection.deleteMany).toHaveBeenCalledTimes(3);
  expect(mockCollection.deleteMany.mock.calls[2][0]).toMatchObject({ a: 1 });
});

// addVectors should store pre-computed vectors without invoking the
// embeddings model.
test("AzureCosmosDBMongoDBVectorStore adds vectors", async () => {
  const client = createMockClient();
  const embeddings = new FakeEmbeddings();
  const store = new AzureCosmosDBMongoDBVectorStore(embeddings, {
    client: client as any,
  });
  await store.addVectors(
    [[1, 2, 5]],
    [
      {
        pageContent: "test",
        metadata: { a: 1 },
      },
    ]
  );
  const mockCollection = client.db().collection();
  expect(embedMock).toHaveBeenCalledTimes(0);
  expect(mockCollection.insertMany).toHaveBeenCalledTimes(1);
});

// fromTexts should embed once and insert one record per text (FakeEmbeddings
// always returns [0.1, 0.2, 0.3, 0.4]).
test("AzureCosmosDBMongoDBVectorStore initializes from texts", async () => {
  const client = createMockClient();
  const embeddings = new FakeEmbeddings();
  const store = await AzureCosmosDBMongoDBVectorStore.fromTexts(
    ["test", "hello", "world"],
    {},
    embeddings,
    { client: client as any }
  );
  expect(store).toBeDefined();
  const mockCollection = client.db().collection();
  expect(mockCollection.insertMany).toHaveBeenCalledTimes(1);
  expect(mockCollection.insertMany).toHaveBeenCalledWith([
    {
      textContent: "test",
      vectorContent: [0.1, 0.2, 0.3, 0.4],
    },
    {
      textContent: "hello",
      vectorContent: [0.1, 0.2, 0.3, 0.4],
    },
    {
      textContent: "world",
      vectorContent: [0.1, 0.2, 0.3, 0.4],
    },
  ]);
  expect(embedMock).toHaveBeenCalledTimes(1);
});

// Same as above but starting from Document instances.
test("AzureCosmosDBMongoDBVectorStore initializes from documents", async () => {
  const client = createMockClient();
  const embeddings = new FakeEmbeddings();
  const store = await AzureCosmosDBMongoDBVectorStore.fromDocuments(
    [
      new Document({ pageContent: "house" }),
      new Document({ pageContent: "pool" }),
    ],
    embeddings,
    { client: client as any }
  );
  expect(store).toBeDefined();
  const mockCollection = client.db().collection();
  expect(mockCollection.insertMany).toHaveBeenCalledTimes(1);
  expect(mockCollection.insertMany).toHaveBeenCalledWith([
    {
      textContent: "house",
      vectorContent: [0.1, 0.2, 0.3, 0.4],
    },
    {
      textContent: "pool",
      vectorContent: [0.1, 0.2, 0.3, 0.4],
    },
  ]);
  expect(embedMock).toHaveBeenCalledTimes(1);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/tests/azure_cosmosdb_nosql.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import { jest, test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { AzureCosmosDBNoSQLVectorStore } from "../azure_cosmosdb_nosql.js";
// Spy on the embeddings call so tests can assert how often documents were
// embedded; reset between tests (see beforeEach below).
const embedMock = jest.spyOn(FakeEmbeddings.prototype, "embedDocuments");

// Builds a stubbed CosmosClient where `createIfNotExists`, `query`, `item`,
// etc. return the enclosing object (via `mockReturnThis` and
// self-referencing getters), so chained calls such as
// `client.databases.createIfNotExists().database.containers...` all resolve
// against the same mock.
const createMockClient = () => {
  // Auto-incrementing id for created items; `fetchAll` returns one resource
  // per item created so far.
  let id = 0;
  const client = {
    databases: {
      createIfNotExists: jest.fn().mockReturnThis(),
      get database() {
        return this;
      },
      containers: {
        createIfNotExists: jest.fn().mockReturnThis(),
        get container() {
          return this;
        },
        items: {
          create: jest.fn().mockImplementation((doc: any) => ({
            // eslint-disable-next-line no-plusplus
            resource: { id: doc.id ?? `${id++}` },
          })),
          query: jest.fn().mockReturnThis(),
          fetchAll: jest.fn().mockImplementation(() => ({
            resources: Array(id)
              .fill(0)
              .map((_, i) => ({ id: i })),
          })),
        },
        item: jest.fn().mockReturnThis(),
        delete: jest.fn(),
      },
    },
  };
  return client;
};
// Generates `n` simple test documents with predictable content and metadata
// ("hello 0" / "doc-0", "hello 1" / "doc-1", ...).
const createDocuments = (n: number) =>
  Array.from({ length: n }, (_, index) => ({
    pageContent: `hello ${index}`,
    metadata: {
      source: `doc-${index}`,
      attributes: [],
    },
  }));
// Keep embedding call counts independent across tests.
beforeEach(() => {
  embedMock.mockClear();
});

// addVectors should create one Cosmos item per provided vector/document pair.
test("AzureCosmosDBNoSQLVectorStore addVectors should store documents", async () => {
  const embeddings = new FakeEmbeddings();
  const client = createMockClient();
  const store = new AzureCosmosDBNoSQLVectorStore(embeddings, {
    client: client as any,
  });
  expect(store).toBeDefined();
  const documents = createDocuments(1500);
  const vectors: number[][] = [];
  for (const doc of documents) {
    vectors.push(await embeddings.embedQuery(doc.pageContent));
  }
  await store.addVectors(vectors, documents);
  expect(client.databases.containers.items.create).toHaveBeenCalledTimes(1500);
});

// addDocuments should embed all documents in a single batch call, then
// create one item per document.
test("AzureCosmosDBNoSQLVectorStore addDocuments should embed and store documents", async () => {
  const embeddings = new FakeEmbeddings();
  const client = createMockClient();
  const store = new AzureCosmosDBNoSQLVectorStore(embeddings, {
    client: client as any,
  });
  expect(store).toBeDefined();
  const documents = createDocuments(1500);
  await store.addDocuments(documents);
  expect(embedMock).toHaveBeenCalledTimes(1);
  expect(client.databases.containers.items.create).toHaveBeenCalledTimes(1500);
});

// Explicit document IDs must be forwarded to Cosmos and echoed back.
test("AzureCosmosDBNoSQLVectorStore addDocuments should use specified IDs", async () => {
  const embeddings = new FakeEmbeddings();
  const client = createMockClient();
  const store = new AzureCosmosDBNoSQLVectorStore(embeddings, {
    client: client as any,
  });
  expect(store).toBeDefined();
  const result = await store.addDocuments([
    {
      pageContent: "hello",
      metadata: {
        source: "test",
        attributes: [],
      },
      id: "id1",
    },
    {
      pageContent: "hello2",
      metadata: {
        source: "test",
        attributes: [],
      },
      id: "id2",
    },
  ]);
  expect(client.databases.containers.items.create).toHaveBeenCalledTimes(2);
  expect(result).toEqual(["id1", "id2"]);
});

// Delete semantics: no args -> delete everything (10 stored docs); `ids` ->
// delete exactly those; `filter` -> delete whatever the query returns
// (the mock reports all 10 created items).
test("AzureCosmosDBNoSQLVectorStore deletes documents", async () => {
  const embeddings = new FakeEmbeddings();
  const client = createMockClient();
  const store = new AzureCosmosDBNoSQLVectorStore(embeddings, {
    client: client as any,
  });
  const documents = createDocuments(10);
  await store.addDocuments(documents);
  await store.delete();
  expect(client.databases.containers.delete).toHaveBeenCalledTimes(10);
  await store.delete({ ids: ["0", "1"] });
  expect(client.databases.containers.delete).toHaveBeenCalledTimes(12);
  await store.delete({ filter: "SELECT * FROM c" });
  expect(client.databases.containers.delete).toHaveBeenCalledTimes(22);
});

// fromTexts should embed once and create one item per text (FakeEmbeddings
// always returns [0.1, 0.2, 0.3, 0.4]).
test("AzureCosmosDBNoSQLVectorStore initializes from texts", async () => {
  const embeddings = new FakeEmbeddings();
  const client = createMockClient();
  const store = await AzureCosmosDBNoSQLVectorStore.fromTexts(
    ["test", "hello", "world"],
    {},
    embeddings,
    { client: client as any }
  );
  expect(store).toBeDefined();
  expect(client.databases.containers.items.create).toHaveBeenCalledTimes(3);
  expect(client.databases.containers.items.create.mock.calls).toEqual([
    [
      {
        text: "test",
        vector: [0.1, 0.2, 0.3, 0.4],
        metadata: {},
      },
    ],
    [
      {
        text: "hello",
        vector: [0.1, 0.2, 0.3, 0.4],
        metadata: {},
      },
    ],
    [
      {
        text: "world",
        vector: [0.1, 0.2, 0.3, 0.4],
        metadata: {},
      },
    ],
  ]);
  expect(embedMock).toHaveBeenCalledTimes(1);
});

// Same as above but starting from Document instances.
test("AzureCosmosDBNoSQLVectorStore initializes from documents", async () => {
  const embeddings = new FakeEmbeddings();
  const client = createMockClient();
  const store = await AzureCosmosDBNoSQLVectorStore.fromDocuments(
    [
      new Document({ pageContent: "house" }),
      new Document({ pageContent: "pool" }),
    ],
    embeddings,
    { client: client as any }
  );
  expect(store).toBeDefined();
  expect(client.databases.containers.items.create).toHaveBeenCalledTimes(2);
  expect(client.databases.containers.items.create.mock.calls).toEqual([
    [
      {
        text: "house",
        vector: [0.1, 0.2, 0.3, 0.4],
        metadata: {},
      },
    ],
    [
      {
        text: "pool",
        vector: [0.1, 0.2, 0.3, 0.4],
        metadata: {},
      },
    ],
  ]);
  expect(embedMock).toHaveBeenCalledTimes(1);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/tests/azure_cosmosdb_nosql.int.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings } from "@langchain/openai";
import { CosmosClient } from "@azure/cosmos";
import { DefaultAzureCredential } from "@azure/identity";
import { AzureCosmosDBNoSQLVectorStore } from "../azure_cosmosdb_nosql.js";
// Database and container used by these integration tests; the database is
// dropped before each test run (see beforeEach below).
const DATABASE_NAME = "langchainTestDB";
const CONTAINER_NAME = "testContainer";
/*
* To run this test, you need have an Azure Cosmos DB for NoSQL instance
* running. You can deploy a free version on Azure Portal without any cost,
* following this guide:
* https://learn.microsoft.com/azure/cosmos-db/nosql/vector-search
*
* You do not need to create a database or collection, it will be created
* automatically by the test.
*
* Once you have the instance running, you need to set the following environment
* variables before running the test:
* - AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT
* - AZURE_OPENAI_API_KEY
* - AZURE_OPENAI_API_INSTANCE_NAME
* - AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME
* - AZURE_OPENAI_API_VERSION
*
* A regular OpenAI key can also be used instead of Azure OpenAI.
*/
describe("AzureCosmosDBNoSQLVectorStore", () => {
beforeEach(async () => {
// Note: when using Azure OpenAI, you have to also set these variables
// in addition to the API key:
// - AZURE_OPENAI_API_INSTANCE_NAME
// - AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME
// - AZURE_OPENAI_API_VERSION
expect(
process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_API_KEY
).toBeDefined();
let client: CosmosClient;
if (process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING) {
client = new CosmosClient(
process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING
);
} else if (process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT) {
client = new CosmosClient({
endpoint: process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT,
aadCredentials: new DefaultAzureCredential(),
});
} else {
throw new Error(
"Please set the environment variable AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT"
);
}
// Make sure the database does not exists
try {
await client.database(DATABASE_NAME).delete();
} catch {
// Ignore error if the database does not exist
}
});
test("performs similarity search", async () => {
const vectorStore = new AzureCosmosDBNoSQLVectorStore(
new OpenAIEmbeddings(),
{
databaseName: DATABASE_NAME,
containerName: CONTAINER_NAME,
}
);
expect(vectorStore).toBeDefined();
await vectorStore.addDocuments([
{ pageContent: "This book is about politics", metadata: { a: 1 } },
{ pageContent: "Cats sleeps a lot.", metadata: { b: 1 } },
{ pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
{ pageContent: "The house is open", metadata: { d: 1, e: 2 } },
]);
const results = await vectorStore.similaritySearch("sandwich", 1);
expect(results.length).toEqual(1);
expect(results).toMatchObject([
{ pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
]);
const retriever = vectorStore.asRetriever({});
const docs = await retriever.invoke("house");
expect(docs).toBeDefined();
expect(docs[0]).toMatchObject({
pageContent: "The house is open",
metadata: { d: 1, e: 2 },
});
});
test("performs max marginal relevance search", async () => {
const texts = ["foo", "foo", "fox"];
const vectorStore = await AzureCosmosDBNoSQLVectorStore.fromTexts(
texts,
{},
new OpenAIEmbeddings(),
{
databaseName: DATABASE_NAME,
containerName: CONTAINER_NAME,
}
);
const output = await vectorStore.maxMarginalRelevanceSearch("foo", {
k: 10,
fetchK: 20,
lambda: 0.1,
});
expect(output).toHaveLength(texts.length);
const actual = output.map((doc) => doc.pageContent);
const expected = ["foo", "fox", "foo"];
expect(actual).toEqual(expected);
const standardRetriever = await vectorStore.asRetriever();
const standardRetrieverOutput = await standardRetriever.invoke("foo");
expect(output).toHaveLength(texts.length);
const standardRetrieverActual = standardRetrieverOutput.map(
(doc) => doc.pageContent
);
const standardRetrieverExpected = ["foo", "foo", "fox"];
expect(standardRetrieverActual).toEqual(standardRetrieverExpected);
const retriever = await vectorStore.asRetriever({
searchType: "mmr",
searchKwargs: {
fetchK: 20,
lambda: 0.1,
},
});
const retrieverOutput = await retriever.invoke("foo");
expect(output).toHaveLength(texts.length);
const retrieverActual = retrieverOutput.map((doc) => doc.pageContent);
const retrieverExpected = ["foo", "fox", "foo"];
expect(retrieverActual).toEqual(retrieverExpected);
const similarity = await vectorStore.similaritySearchWithScore("foo", 1);
expect(similarity.length).toBe(1);
});
  test("performs similarity search with filter", async () => {
    const vectorStore = new AzureCosmosDBNoSQLVectorStore(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        containerName: CONTAINER_NAME,
      }
    );
    expect(vectorStore).toBeDefined();
    await vectorStore.addDocuments([
      { pageContent: "This book is about politics", metadata: { a: 1 } },
      { pageContent: "Cats sleeps a lot.", metadata: { b: 1 } },
      { pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
      { pageContent: "The house is open", metadata: { d: 1, e: 2 } },
    ]);
    // The filter clause restricts candidates to documents with metadata.d = 1,
    // so the "house" document wins even though "sandwich" is the query.
    const results = await vectorStore.similaritySearch("sandwich", 1, {
      filterClause: "WHERE c.metadata.d = 1",
    });
    expect(results.length).toEqual(1);
    expect(results).toMatchObject([
      { pageContent: "The house is open", metadata: { d: 1, e: 2 } },
    ]);
  });
  test("performs similarity search including vectors in the results", async () => {
    const vectorStore = new AzureCosmosDBNoSQLVectorStore(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        containerName: CONTAINER_NAME,
      }
    );
    expect(vectorStore).toBeDefined();
    await vectorStore.addDocuments([
      { pageContent: "This book is about politics", metadata: { a: 1 } },
      { pageContent: "Cats sleeps a lot.", metadata: { b: 1 } },
      { pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
      { pageContent: "The house is open", metadata: { d: 1, e: 2 } },
    ]);
    // With includeEmbeddings, the raw embedding vector is surfaced on the
    // result document's metadata under the `vector` key.
    const results: Document[] = await vectorStore.similaritySearch(
      "sandwich",
      1,
      { includeEmbeddings: true }
    );
    expect(results.length).toEqual(1);
    expect(results).toMatchObject([
      { pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
    ]);
    expect(results[0].metadata.vector).toBeDefined();
  });
test("deletes documents by id", async () => {
const vectorStore = new AzureCosmosDBNoSQLVectorStore(
new OpenAIEmbeddings(),
{
databaseName: DATABASE_NAME,
containerName: CONTAINER_NAME,
}
);
const ids = await vectorStore.addDocuments([
{ pageContent: "This book is about politics", metadata: { a: 1 } },
{
pageContent: "The is the house of parliament",
metadata: { d: 1, e: 2 },
},
]);
// Delete document matching specified ids
await vectorStore.delete({ ids: ids.slice(0, 1) });
const results = await vectorStore.similaritySearch("politics", 10);
expect(results.length).toEqual(1);
expect(results[0].pageContent).toEqual("The is the house of parliament");
});
test("deletes documents by filter", async () => {
const vectorStore = new AzureCosmosDBNoSQLVectorStore(
new OpenAIEmbeddings(),
{
databaseName: DATABASE_NAME,
containerName: CONTAINER_NAME,
}
);
await vectorStore.addDocuments([
{ pageContent: "This book is about politics", metadata: { a: 1 } },
{
pageContent: "The is the house of parliament",
metadata: { d: 1, e: 2 },
},
]);
// Delete document matching the filter
await vectorStore.delete({
filter: {
query: "SELECT * FROM c WHERE c.metadata.a = @value",
parameters: [{ name: "@value", value: 1 }],
},
});
const results = await vectorStore.similaritySearch("politics", 10);
expect(results.length).toEqual(1);
expect(results[0].pageContent).toEqual("The is the house of parliament");
});
test("deletes all documents", async () => {
const vectorStore = new AzureCosmosDBNoSQLVectorStore(
new OpenAIEmbeddings(),
{
databaseName: DATABASE_NAME,
containerName: CONTAINER_NAME,
}
);
const documents = Array.from({ length: 101 }, (_, i) => ({
pageContent: `Document ${i}`,
metadata: { a: i },
}));
await vectorStore.addDocuments(documents);
// Delete all documents
await vectorStore.delete();
const results = await vectorStore.similaritySearch("document", 10);
expect(results.length).toEqual(0);
});
  test("connect using managed identity", async () => {
    // First initialize using a regular connection string
    // to create the database and container, as managed identity
    // with RBAC does not have permission to create them.
    const vectorStoreCS = new AzureCosmosDBNoSQLVectorStore(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        containerName: CONTAINER_NAME,
      }
    );
    await vectorStoreCS.addDocuments([{ pageContent: "init", metadata: {} }]);
    // Temporarily blank out the connection string so the next store instance
    // is forced down the endpoint + managed identity code path. The original
    // value is restored at the end of the test.
    const connectionString = process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING;
    if (connectionString) {
      // Remove the connection string to test managed identity
      process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING = "";
    }
    expect(process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING).toBeFalsy();
    expect(process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT).toBeDefined();
    const vectorStore = new AzureCosmosDBNoSQLVectorStore(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        containerName: CONTAINER_NAME,
      }
    );
    expect(vectorStore).toBeDefined();
    await vectorStore.addDocuments([
      { pageContent: "This book is about politics", metadata: { a: 1 } },
      { pageContent: "Cats sleeps a lot.", metadata: { b: 1 } },
      { pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
      { pageContent: "The house is open", metadata: { d: 1, e: 2 } },
    ]);
    const results = await vectorStore.similaritySearch("sandwich", 1);
    expect(results.length).toEqual(1);
    expect(results).toMatchObject([
      { pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
    ]);
    if (connectionString) {
      // Restore the connection string
      process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING = connectionString;
    }
  });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/tests/caches.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import { jest } from "@jest/globals";
import { FakeEmbeddings, FakeLLM } from "@langchain/core/utils/testing";
import { AzureCosmosDBNoSQLSemanticCache } from "../index.js";
// Create the mock Cosmos DB client
// Create the mock Cosmos DB client.
// The shape mirrors the fluent chain used by the cache:
// client.databases.createIfNotExists().database.containers
//   .createIfNotExists().container.items / .item().delete()
// The `database`/`container` getters return `this` so every step of the
// chain resolves back to the same object, letting jest spies observe calls.
const createMockClient = () => {
  // Monotonic id assigned to created items that don't provide one.
  let id = 0;
  const client = {
    databases: {
      createIfNotExists: jest.fn().mockReturnThis(),
      get database() {
        return this;
      },
      containers: {
        createIfNotExists: jest.fn().mockReturnThis(),
        get container() {
          return this;
        },
        items: {
          create: jest.fn().mockImplementation((doc: any) => ({
            // eslint-disable-next-line no-plusplus
            resource: { id: doc.id ?? `${id++}` },
          })),
          query: jest.fn().mockReturnThis(),
          fetchAll: jest.fn().mockImplementation(() => ({
            resources: [
              {
                metadata: {
                  return_value: ['{"text": "fizz"}'], // Simulate stored serialized generation
                },
                similarityScore: 0.8,
              },
            ],
          })),
        },
        item: jest.fn().mockReturnThis(),
        delete: jest.fn(),
      },
    },
  };
  return client;
};
describe("AzureCosmosDBNoSQLSemanticCache", () => {
it("should store, retrieve, and clear cache", async () => {
const client = createMockClient();
const embeddings = new FakeEmbeddings();
const cache = new AzureCosmosDBNoSQLSemanticCache(embeddings, {
client: client as any,
});
expect(cache).toBeDefined();
const llm = new FakeLLM({});
const llmString = JSON.stringify(llm._identifyingParams());
await cache.update("foo", llmString, [{ text: "fizz" }]);
expect(client.databases.containers.items.create).toHaveBeenCalled();
const result = await cache.lookup("foo", llmString);
expect(result).toEqual([{ text: "fizz" }]);
expect(client.databases.containers.items.query).toHaveBeenCalled();
await cache.clear(llmString);
expect(client.databases.containers.delete).toHaveBeenCalled();
});
});
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts | /* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-explicit-any */
import {
CosmosClient,
IndexingMode,
VectorEmbedding,
VectorEmbeddingPolicy,
} from "@azure/cosmos";
import { DefaultAzureCredential } from "@azure/identity";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { AzureCosmosDBNoSQLSemanticCache } from "../caches.js";
const DATABASE_NAME = "langchainTestCacheDB";
const CONTAINER_NAME = "testContainer";
/**
 * Builds a Cosmos DB container indexing policy that consistently indexes
 * every path except the etag, with a vector index of the given type
 * attached to the `/embedding` path.
 */
function indexingPolicy(indexType: any) {
  const embeddingIndex = { path: "/embedding", type: indexType };
  return {
    indexingMode: IndexingMode.consistent,
    includedPaths: [{ path: "/*" }],
    excludedPaths: [{ path: '/"_etag"/?' }],
    vectorIndexes: [embeddingIndex],
  };
}
/**
 * Builds a vector embedding policy for the `/embedding` path using the
 * given distance function and embedding dimensionality.
 */
function vectorEmbeddingPolicy(
  distanceFunction: "euclidean" | "cosine" | "dotproduct",
  dimension: number
): VectorEmbeddingPolicy {
  const embedding = {
    path: "/embedding",
    dataType: "float32",
    distanceFunction,
    dimensions: dimension,
  } as VectorEmbedding;
  return { vectorEmbeddings: [embedding] };
}
/**
 * Creates an AzureCosmosDBNoSQLSemanticCache configured with the given
 * vector index type and distance function, authenticating via connection
 * string or endpoint (managed identity) depending on environment variables.
 *
 * @param indexType - Cosmos DB vector index type (e.g. "flat", "quantizedFlat").
 * @param distanceFunction - "euclidean" | "cosine" | "dotproduct".
 * @param similarityThreshold - Optional minimum similarity for cache hits.
 * @throws If neither AZURE_COSMOSDB_NOSQL_CONNECTION_STRING nor
 *   AZURE_COSMOSDB_NOSQL_ENDPOINT is set.
 */
async function initializeCache(
  indexType: any,
  distanceFunction: any,
  similarityThreshold?: number
): Promise<AzureCosmosDBNoSQLSemanticCache> {
  let cache: AzureCosmosDBNoSQLSemanticCache;
  const embeddingModel = new OpenAIEmbeddings();
  // Probe the embedding model once to learn its output dimensionality,
  // then clamp it: "flat" indexes support at most 505 dimensions while
  // other index types go up to 4096.
  const testEmbedding = await embeddingModel.embedDocuments(["sample text"]);
  const dimension = Math.min(
    testEmbedding[0].length,
    indexType === "flat" ? 505 : 4096
  );
  if (process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING) {
    cache = new AzureCosmosDBNoSQLSemanticCache(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        containerName: CONTAINER_NAME,
        connectionString: process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING,
        indexingPolicy: indexingPolicy(indexType),
        vectorEmbeddingPolicy: vectorEmbeddingPolicy(
          distanceFunction,
          dimension
        ),
      },
      similarityThreshold
    );
  } else if (process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT) {
    cache = new AzureCosmosDBNoSQLSemanticCache(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        containerName: CONTAINER_NAME,
        endpoint: process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT,
        indexingPolicy: indexingPolicy(indexType),
        vectorEmbeddingPolicy: vectorEmbeddingPolicy(
          distanceFunction,
          dimension
        ),
      },
      similarityThreshold
    );
  } else {
    throw new Error(
      "Please set the environment variable AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT"
    );
  }
  return cache;
}
/*
* To run this test, you need have an Azure Cosmos DB for NoSQL instance
* running. You can deploy a free version on Azure Portal without any cost,
* following this guide:
* https://learn.microsoft.com/azure/cosmos-db/nosql/vector-search
*
* You do not need to create a database or collection, it will be created
* automatically by the test.
*
* Once you have the instance running, you need to set the following environment
* variables before running the test:
* - AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT
* - AZURE_OPENAI_API_KEY
* - AZURE_OPENAI_API_INSTANCE_NAME
* - AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME
* - AZURE_OPENAI_API_VERSION
*/
describe("Azure CosmosDB NoSQL Semantic Cache", () => {
  beforeEach(async () => {
    // Build the Cosmos DB client either from a connection string or from
    // an endpoint with Azure AD (managed identity) credentials.
    let client: CosmosClient;
    if (process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING) {
      client = new CosmosClient(
        process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING
      );
    } else if (process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT) {
      client = new CosmosClient({
        endpoint: process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT,
        aadCredentials: new DefaultAzureCredential(),
      });
    } else {
      throw new Error(
        "Please set the environment variable AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT"
      );
    }
    // Make sure the database does not exist so each test starts clean;
    // the cache re-creates it on first use.
    try {
      await client.database(DATABASE_NAME).delete();
    } catch {
      // Ignore error if the database does not exist
    }
  });
it("test AzureCosmosDBNoSqlSemanticCache with cosine quantizedFlat", async () => {
const cache = await initializeCache("quantizedFlat", "cosine");
const model = new ChatOpenAI({ cache });
const llmString = JSON.stringify(model._identifyingParams);
await cache.update("foo", llmString, [{ text: "fizz" }]);
let cacheOutput = await cache.lookup("foo", llmString);
expect(cacheOutput).toEqual([{ text: "fizz" }]);
cacheOutput = await cache.lookup("bar", llmString);
expect(cacheOutput).toEqual(null);
await cache.clear(llmString);
});
it("test AzureCosmosDBNoSqlSemanticCache with cosine flat", async () => {
const cache = await initializeCache("flat", "cosine");
const model = new ChatOpenAI({ cache });
const llmString = JSON.stringify(model._identifyingParams);
await cache.update("foo", llmString, [{ text: "fizz" }]);
let cacheOutput = await cache.lookup("foo", llmString);
expect(cacheOutput).toEqual([{ text: "fizz" }]);
cacheOutput = await cache.lookup("bar", llmString);
expect(cacheOutput).toEqual(null);
await cache.clear(llmString);
});
it("test AzureCosmosDBNoSqlSemanticCache with dotProduct quantizedFlat", async () => {
const cache = await initializeCache("quantizedFlat", "dotproduct");
const model = new ChatOpenAI({ cache });
const llmString = JSON.stringify(model._identifyingParams);
await cache.update("foo", llmString, [{ text: "fizz" }]);
let cacheOutput = await cache.lookup("foo", llmString);
expect(cacheOutput).toEqual([{ text: "fizz" }]);
cacheOutput = await cache.lookup("bar", llmString);
expect(cacheOutput).toEqual(null);
await cache.clear(llmString);
});
it("test AzureCosmosDBNoSqlSemanticCache with dotProduct flat", async () => {
const cache = await initializeCache("flat", "cosine");
const model = new ChatOpenAI({ cache });
const llmString = JSON.stringify(model._identifyingParams);
await cache.update("foo", llmString, [{ text: "fizz" }]);
let cacheOutput = await cache.lookup("foo", llmString);
expect(cacheOutput).toEqual([{ text: "fizz" }]);
cacheOutput = await cache.lookup("bar", llmString);
expect(cacheOutput).toEqual(null);
await cache.clear(llmString);
});
  it("test AzureCosmosDBNoSqlSemanticCache with euclidean quantizedFlat", async () => {
    const cache = await initializeCache("quantizedFlat", "euclidean");
    const model = new ChatOpenAI({ cache });
    const llmString = JSON.stringify(model._identifyingParams);
    // Exact prompt match should hit the cache; unrelated prompt should miss.
    await cache.update("foo", llmString, [{ text: "fizz" }]);
    let cacheOutput = await cache.lookup("foo", llmString);
    expect(cacheOutput).toEqual([{ text: "fizz" }]);
    cacheOutput = await cache.lookup("bar", llmString);
    expect(cacheOutput).toEqual(null);
    await cache.clear(llmString);
  });
  it("test AzureCosmosDBNoSqlSemanticCache with euclidean flat", async () => {
    const cache = await initializeCache("flat", "euclidean");
    const model = new ChatOpenAI({ cache });
    const llmString = JSON.stringify(model._identifyingParams);
    // Exact prompt match should hit the cache; unrelated prompt should miss.
    await cache.update("foo", llmString, [{ text: "fizz" }]);
    let cacheOutput = await cache.lookup("foo", llmString);
    expect(cacheOutput).toEqual([{ text: "fizz" }]);
    cacheOutput = await cache.lookup("bar", llmString);
    expect(cacheOutput).toEqual(null);
    await cache.clear(llmString);
  });
  it("test AzureCosmosDBNoSqlSemanticCache response according to similarity score", async () => {
    // Uses the default similarity threshold configured by the cache.
    const cache = await initializeCache("quantizedFlat", "cosine");
    const model = new ChatOpenAI({ cache });
    const response1 = await model.invoke(
      "Where is the headquarter of Microsoft?"
    );
    console.log(response1.content);
    // Observed similarity score of ~0.56, below the 0.6 threshold, so the
    // cache returns null and the model generates a fresh result.
    // NOTE(review): these scores depend on the embedding model and may
    // drift across model versions — confirm if this test becomes flaky.
    const response2 = await model.invoke(
      "List all Microsoft offices in India."
    );
    expect(response2.content).not.toEqual(response1.content);
    console.log(response2.content);
    // Observed similarity score of ~0.63 > 0.6, so this is a cache hit and
    // the cached response1 content is returned.
    const response3 = await model.invoke("Tell me something about Microsoft");
    expect(response3.content).toEqual(response1.content);
    console.log(response3.content);
  });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/tests/azure_cosmosdb_mongodb.int.test.ts | /* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { MongoClient } from "mongodb";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings } from "@langchain/openai";
import { AzureCosmosDBMongoDBVectorStore } from "../azure_cosmosdb_mongodb.js";
const DATABASE_NAME = "langchain";
const COLLECTION_NAME = "test";
const INDEX_NAME = "vectorSearchIndex";
/*
* To run this test, you need have an Azure Cosmos DB for vCore instance
* running. You can deploy a free version on Azure Portal without any cost,
* following this guide:
* https://learn.microsoft.com/azure/cosmos-db/mongodb/vcore/quickstart-portal
*
* You do not need to create a database or collection, it will be created
* automatically by the test.
*
* Once you have the instance running, you need to set the following environment
* variables before running the test:
* - AZURE_COSMOSDB_MONGODB_CONNECTION_STRING
* - AZURE_OPENAI_API_KEY
* - AZURE_OPENAI_API_INSTANCE_NAME
* - AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME
* - AZURE_OPENAI_API_VERSION
*
* A regular OpenAI key can also be used instead of Azure OpenAI.
*/
describe("AzureCosmosDBMongoDBVectorStore", () => {
  beforeEach(async () => {
    expect(process.env.AZURE_COSMOSDB_MONGODB_CONNECTION_STRING).toBeDefined();
    // Note: when using Azure OpenAI, you have to also set these variables
    // in addition to the API key:
    // - AZURE_OPENAI_API_INSTANCE_NAME
    // - AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME
    // - AZURE_OPENAI_API_VERSION
    expect(
      process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_API_KEY
    ).toBeDefined();
    const client = new MongoClient(
      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
      process.env.AZURE_COSMOSDB_MONGODB_CONNECTION_STRING!
    );
    await client.connect();
    const db = client.db(DATABASE_NAME);
    const collection = await db.createCollection(COLLECTION_NAME);
    // Make sure the database is empty
    await collection.deleteMany({});
    // Delete any existing index so each test re-creates it with its own
    // index options.
    try {
      await collection.dropIndex(INDEX_NAME);
    } catch {
      // Ignore error if the index does not exist
    }
    await client.close();
  });
test("performs similarity search", async () => {
const vectorStore = new AzureCosmosDBMongoDBVectorStore(
new OpenAIEmbeddings(),
{
databaseName: DATABASE_NAME,
collectionName: COLLECTION_NAME,
indexName: INDEX_NAME,
indexOptions: {
numLists: 1,
},
}
);
expect(vectorStore).toBeDefined();
await vectorStore.addDocuments([
{ pageContent: "This book is about politics", metadata: { a: 1 } },
{ pageContent: "Cats sleeps a lot.", metadata: { b: 1 } },
{ pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
{ pageContent: "The house is open", metadata: { d: 1, e: 2 } },
]);
const results: Document[] = await vectorStore.similaritySearch(
"sandwich",
1
);
expect(results.length).toEqual(1);
expect(results).toMatchObject([
{ pageContent: "Sandwiches taste good.", metadata: { c: 1 } },
]);
const retriever = vectorStore.asRetriever({});
const docs = await retriever.getRelevantDocuments("house");
expect(docs).toBeDefined();
expect(docs[0]).toMatchObject({
pageContent: "The house is open",
metadata: { d: 1, e: 2 },
});
await vectorStore.close();
});
test("performs max marginal relevance search", async () => {
const texts = ["foo", "foo", "fox"];
const vectorStore = await AzureCosmosDBMongoDBVectorStore.fromTexts(
texts,
{},
new OpenAIEmbeddings(),
{
databaseName: DATABASE_NAME,
collectionName: COLLECTION_NAME,
indexName: INDEX_NAME,
indexOptions: {
numLists: 1,
},
}
);
const output = await vectorStore.maxMarginalRelevanceSearch("foo", {
k: 10,
fetchK: 20,
lambda: 0.1,
});
expect(output).toHaveLength(texts.length);
const actual = output.map((doc) => doc.pageContent);
const expected = ["foo", "fox", "foo"];
expect(actual).toEqual(expected);
const standardRetriever = await vectorStore.asRetriever();
const standardRetrieverOutput =
await standardRetriever.getRelevantDocuments("foo");
expect(output).toHaveLength(texts.length);
const standardRetrieverActual = standardRetrieverOutput.map(
(doc) => doc.pageContent
);
const standardRetrieverExpected = ["foo", "foo", "fox"];
expect(standardRetrieverActual).toEqual(standardRetrieverExpected);
const retriever = await vectorStore.asRetriever({
searchType: "mmr",
searchKwargs: {
fetchK: 20,
lambda: 0.1,
},
});
const retrieverOutput = await retriever.getRelevantDocuments("foo");
expect(output).toHaveLength(texts.length);
const retrieverActual = retrieverOutput.map((doc) => doc.pageContent);
const retrieverExpected = ["foo", "fox", "foo"];
expect(retrieverActual).toEqual(retrieverExpected);
const similarity = await vectorStore.similaritySearchWithScore("foo", 1);
expect(similarity.length).toBe(1);
await vectorStore.close();
});
  test("deletes documents by id", async () => {
    const vectorStore = new AzureCosmosDBMongoDBVectorStore(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        collectionName: COLLECTION_NAME,
        indexName: INDEX_NAME,
        indexOptions: {
          numLists: 1,
        },
      }
    );
    const ids = await vectorStore.addDocuments([
      { pageContent: "This book is about politics", metadata: { a: 1 } },
      {
        pageContent: "The is the house of parliament",
        metadata: { d: 1, e: 2 },
      },
    ]);
    // Delete document matching specified ids (only the first document).
    await vectorStore.delete({ ids: ids.slice(0, 1) });
    const results = await vectorStore.similaritySearch("politics", 10);
    expect(results.length).toEqual(1);
    expect(results[0].pageContent).toEqual("The is the house of parliament");
    await vectorStore.close();
  });
  test("deletes documents by filter", async () => {
    const vectorStore = new AzureCosmosDBMongoDBVectorStore(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        collectionName: COLLECTION_NAME,
        indexName: INDEX_NAME,
        indexOptions: {
          numLists: 1,
        },
      }
    );
    await vectorStore.addDocuments([
      { pageContent: "This book is about politics", metadata: { a: 1 } },
      {
        pageContent: "The is the house of parliament",
        metadata: { d: 1, e: 2 },
      },
    ]);
    // Delete document matching the MongoDB-style filter on metadata.
    await vectorStore.delete({ filter: { a: 1 } });
    const results = await vectorStore.similaritySearch("politics", 10);
    expect(results.length).toEqual(1);
    expect(results[0].pageContent).toEqual("The is the house of parliament");
    await vectorStore.close();
  });
  test("deletes all documents", async () => {
    const vectorStore = new AzureCosmosDBMongoDBVectorStore(
      new OpenAIEmbeddings(),
      {
        databaseName: DATABASE_NAME,
        collectionName: COLLECTION_NAME,
        indexName: INDEX_NAME,
        indexOptions: {
          numLists: 1,
        },
      }
    );
    await vectorStore.addDocuments([
      { pageContent: "This book is about politics", metadata: { a: 1 } },
      {
        pageContent: "The is the house of parliament",
        metadata: { d: 1, e: 2 },
      },
    ]);
    // Delete all documents: delete() with no arguments clears the collection.
    await vectorStore.delete();
    const results = await vectorStore.similaritySearch("politics", 10);
    expect(results.length).toEqual(0);
    await vectorStore.close();
  });
});
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts | /* eslint-disable no-promise-executor-return */
/* eslint-disable no-process-env */
import { expect } from "@jest/globals";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { CosmosClient } from "@azure/cosmos";
import { DefaultAzureCredential } from "@azure/identity";
import { ObjectId } from "mongodb";
import { AzureCosmsosDBNoSQLChatMessageHistory } from "../chat_histories.js";
const DATABASE_NAME = "langchainTestDB";
const CONTAINER_NAME = "testContainer";
/*
* To run this test, you need have an Azure Cosmos DB for NoSQL instance
* running. You can deploy a free version on Azure Portal without any cost,
* following this guide:
* https://learn.microsoft.com/azure/cosmos-db/nosql/vector-search
*
* You do not need to create a database or collection, it will be created
* automatically by the test.
*
* Once you have the instance running, you need to set the following environment
* variables before running the test:
* - AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT
*/
beforeEach(async () => {
  // Build the Cosmos DB client either from a connection string or from
  // an endpoint with Azure AD (managed identity) credentials.
  let client: CosmosClient;
  if (process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING) {
    client = new CosmosClient(
      process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING
    );
  } else if (process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT) {
    client = new CosmosClient({
      endpoint: process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT,
      aadCredentials: new DefaultAzureCredential(),
    });
  } else {
    throw new Error(
      "Please set the environment variable AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT"
    );
  }
  // Drop both databases used by the tests below so each run starts clean.
  try {
    await client.database(DATABASE_NAME).delete();
  } catch {
    // Ignore error if the database does not exist
  }
  try {
    await client.database("DbWithTTL").delete();
  } catch {
    // Ignore error if the database does not exist
  }
});
test("Test CosmosDB History Store", async () => {
  // Each test gets a unique session/user so runs don't interfere.
  const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory({
    sessionId: new ObjectId().toString(),
    userId: new ObjectId().toString(),
    databaseName: DATABASE_NAME,
    containerName: CONTAINER_NAME,
  });

  // A fresh session starts with no messages.
  const initialMessages = await chatHistory.getMessages();
  expect(initialMessages).toStrictEqual([]);

  await chatHistory.addUserMessage("Who is the best vocalist?");
  await chatHistory.addAIMessage("Ozzy Osbourne");

  const storedMessages = await chatHistory.getMessages();
  expect(storedMessages).toEqual([
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ]);
});
test("Test clear CosmosDB history Store", async () => {
  const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory({
    sessionId: new ObjectId().toString(),
    userId: new ObjectId().toString(),
    databaseName: DATABASE_NAME,
    containerName: CONTAINER_NAME,
  });

  await chatHistory.addUserMessage("Who is the best vocalist?");
  await chatHistory.addAIMessage("Ozzy Osbourne");
  const storedMessages = await chatHistory.getMessages();
  expect(storedMessages).toEqual([
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ]);

  // Clearing the session removes every stored message.
  await chatHistory.clear();
  const clearedMessages = await chatHistory.getMessages();
  expect(clearedMessages).toStrictEqual([]);
});
test("Test CosmosDB history with a TTL", async () => {
  const input = {
    sessionId: new ObjectId().toString(),
    userId: new ObjectId().toString(),
    databaseName: "DbWithTTL",
    // Messages expire 5 seconds after being written.
    ttl: 5,
  };
  const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory(input);
  await chatHistory.addUserMessage("Who is the best vocalist?");
  await chatHistory.addAIMessage("Ozzy Osbourne");
  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];
  const resultWithHistory = await chatHistory.getMessages();
  expect(resultWithHistory).toEqual(expectedMessages);
  // Wait past the 5-second TTL, then verify the messages were expired.
  await new Promise((resolve) => setTimeout(resolve, 6000));
  const expiredResult = await chatHistory.getMessages();
  expect(expiredResult).toStrictEqual([]);
});
test("Test clear all sessions for a user", async () => {
  // Two distinct sessions belonging to the same user.
  const input1 = {
    sessionId: new Date().toISOString(),
    userId: "user1",
    databaseName: "DbWithTTL",
    ttl: 5,
  };
  const chatHistory1 = new AzureCosmsosDBNoSQLChatMessageHistory(input1);
  await chatHistory1.addUserMessage("Who is the best vocalist?");
  await chatHistory1.addAIMessage("Ozzy Osbourne");
  const input2 = {
    sessionId: new Date().toISOString(),
    userId: "user1",
    databaseName: "DbWithTTL",
    ttl: 5,
  };
  const chatHistory2 = new AzureCosmsosDBNoSQLChatMessageHistory(input2);
  await chatHistory2.addUserMessage("Who is the best vocalist?");
  await chatHistory2.addAIMessage("Ozzy Osbourne");
  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];
  const result1 = await chatHistory1.getMessages();
  expect(result1).toEqual(expectedMessages);
  // Fixed: verify the second session's messages (this previously read
  // chatHistory1 again, so chatHistory2 was never checked before clearing).
  const result2 = await chatHistory2.getMessages();
  expect(result2).toEqual(expectedMessages);
  // Clearing all sessions for the user must wipe both histories.
  await chatHistory1.clearAllSessions();
  const deletedResult1 = await chatHistory1.getMessages();
  const deletedResult2 = await chatHistory2.getMessages();
  expect(deletedResult1).toStrictEqual([]);
  expect(deletedResult2).toStrictEqual([]);
});
test("Test set context and get all sessions for a user", async () => {
  const session1 = {
    userId: "user1",
    databaseName: DATABASE_NAME,
    containerName: CONTAINER_NAME,
    sessionId: new ObjectId().toString(),
  };
  const context1 = { title: "Best vocalist" };
  const chatHistory1 = new AzureCosmsosDBNoSQLChatMessageHistory(session1);
  await chatHistory1.setContext(context1);
  await chatHistory1.addUserMessage("Who is the best vocalist?");
  await chatHistory1.addAIMessage("Ozzy Osbourne");
  // Second session for the same user, with its own context.
  const chatHistory2 = new AzureCosmsosDBNoSQLChatMessageHistory({
    ...session1,
    sessionId: new ObjectId().toString(),
  });
  const context2 = { title: "Best guitarist" };
  await chatHistory2.addUserMessage("Who is the best guitarist?");
  await chatHistory2.addAIMessage("Jimi Hendrix");
  await chatHistory2.setContext(context2);
  // getAllSessions returns every session for the user.
  // NOTE(review): the assertions assume sessions come back in creation
  // order — confirm the ordering guarantee of getAllSessions.
  const sessions = await chatHistory1.getAllSessions();
  expect(sessions.length).toBe(2);
  expect(sessions[0].context).toEqual(context1);
  expect(sessions[1].context).toEqual(context2);
});
|
0 | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb | lc_public_repos/langchainjs/libs/langchain-azure-cosmosdb/scripts/jest-setup-after-env.js | import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";
import { afterAll, jest } from "@jest/globals";
afterAll(awaitAllCallbacks);
// Allow console.log to be disabled in tests
if (process.env.DISABLE_CONSOLE_LOGS === "true") {
console.log = jest.fn();
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"outDir": "../dist",
"rootDir": "./src",
"target": "ES2021",
"lib": ["ES2021", "ES2022.Object", "DOM"],
"module": "ES2020",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "docs"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/LICENSE | The MIT License
Copyright (c) 2023 LangChain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/jest.config.cjs | /** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: "ts-jest/presets/default-esm",
testEnvironment: "./jest.env.cjs",
modulePathIgnorePatterns: ["dist/", "docs/"],
moduleNameMapper: {
"^(\\.{1,2}/.*)\\.js$": "$1",
},
transform: {
"^.+\\.tsx?$": ["@swc/jest"],
},
transformIgnorePatterns: [
"/node_modules/",
"\\.pnp\\.[^\\/]+$",
"./scripts/jest-setup-after-env.js",
],
setupFiles: ["dotenv/config"],
testTimeout: 20_000,
passWithNoTests: true,
collectCoverageFrom: ["src/**/*.ts"],
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/jest.env.cjs | const { TestEnvironment } = require("jest-environment-node");
// Custom Jest node environment that re-exposes the host realm's Float32Array
// inside the test sandbox, so `instanceof Float32Array` checks succeed across
// realm boundaries.
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(config, context) {
    // Make `instanceof Float32Array` return true in tests
    // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
    super(config, context);
    this.global.Float32Array = Float32Array;
  }
}
module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/README.md | # @langchain/yandex
This package contains the LangChain.js integrations for YandexGPT through their [Foundation Models REST API](https://cloud.yandex.ru/en/docs/yandexgpt/api-ref/v1/).
## Installation
```bash npm2yarn
npm install @langchain/yandex @langchain/core
```
## Setup your environment
First, you should [create a service account](https://cloud.yandex.com/en/docs/iam/operations/sa/create) with the `ai.languageModels.user` role.
Next, you have two authentication options:
- [IAM token](https://cloud.yandex.com/en/docs/iam/operations/iam-token/create-for-sa).
You can specify the token in a constructor parameter as `iam_token` or in an environment variable `YC_IAM_TOKEN`.
- [API key](https://cloud.yandex.com/en/docs/iam/operations/api-key/create)
You can specify the key in a constructor parameter as `api_key` or in an environment variable `YC_API_KEY`.
## Chat Models and LLM Models
This package contains the `ChatYandexGPT` and `YandexGPT` classes for working with the YandexGPT series of models.
To specify the model you can use `model_uri` parameter, see [the documentation](https://cloud.yandex.com/en/docs/yandexgpt/concepts/models#yandexgpt-generation) for more details.
By default, the latest version of `yandexgpt-lite` is used from the folder specified in the parameter `folder_id` or `YC_FOLDER_ID` environment variable.
### Examples
```typescript
import { ChatYandexGPT } from "@langchain/yandex";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
const chat = new ChatYandexGPT();
const response = await chat.invoke([
new SystemMessage(
"You are a helpful assistant that translates English to French."
),
new HumanMessage("I love programming."),
]);
```
```typescript
import { YandexGPT } from "@langchain/yandex";
const model = new YandexGPT();
const res = await model.invoke([`Translate "I love programming" into French.`]);
```
## Embeddings
This package also adds support for YandexGPT embeddings models.
To specify the model you can use `model_uri` parameter, see [the documentation](https://cloud.yandex.com/en/docs/yandexgpt/concepts/models#yandexgpt-embeddings) for more details.
By default, the latest version of `text-search-query` embeddings model is used from the folder specified in the parameter `folder_id` or `YC_FOLDER_ID` environment variable.
### Example
```typescript
import { YandexGPTEmbeddings } from "@langchain/yandex";
const model = new YandexGPTEmbeddings({});
/* Embed queries */
const res = await model.embedQuery(
"This is a test document."
);
/* Embed documents */
const documentRes = await model.embedDocuments(["This is a test document."]);
```
## Development
To develop the yandex package, you'll need to follow these instructions:
### Install dependencies
```bash
yarn install
```
### Build the package
```bash
yarn build
```
Or from the repo root:
```bash
yarn build --filter=@langchain/yandex
```
### Run tests
Test files should live within a `tests/` folder in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should
end in `.int.test.ts`:
```bash
$ yarn test:int
```
### Lint & Format
Run the linter & formatter to ensure your code is up to standard:
```bash
yarn lint && yarn format
```
### Adding new entrypoints
If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/.eslintrc.cjs | module.exports = {
extends: [
"airbnb-base",
"eslint:recommended",
"prettier",
"plugin:@typescript-eslint/recommended",
],
parserOptions: {
ecmaVersion: 12,
parser: "@typescript-eslint/parser",
project: "./tsconfig.json",
sourceType: "module",
},
plugins: ["@typescript-eslint", "no-instanceof"],
ignorePatterns: [
".eslintrc.cjs",
"scripts",
"node_modules",
"dist",
"dist-cjs",
"*.js",
"*.cjs",
"*.d.ts",
],
rules: {
"no-process-env": 2,
"no-instanceof/no-instanceof": 2,
"@typescript-eslint/explicit-module-boundary-types": 0,
"@typescript-eslint/no-empty-function": 0,
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
"class-methods-use-this": 0,
"import/extensions": [2, "ignorePackages"],
"import/no-extraneous-dependencies": [
"error",
{ devDependencies: ["**/*.test.ts"] },
],
"import/no-unresolved": 0,
"import/prefer-default-export": 0,
"keyword-spacing": "error",
"max-classes-per-file": 0,
"max-len": 0,
"no-await-in-loop": 0,
"no-bitwise": 0,
"no-console": 0,
"no-restricted-syntax": 0,
"no-shadow": 0,
"no-continue": 0,
"no-void": 0,
"no-underscore-dangle": 0,
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-return-await": 0,
"consistent-return": 0,
"no-else-return": 0,
"func-names": 0,
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
},
overrides: [
{
files: ['**/*.test.ts'],
rules: {
'@typescript-eslint/no-unused-vars': 'off'
}
}
]
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
* @param {string} relativePath
* @returns {string}
*/
function abs(relativePath) {
  // Directory containing this config file, derived from the module URL.
  const packageRoot = dirname(fileURLToPath(import.meta.url));
  return resolve(packageRoot, relativePath);
}
export const config = {
internals: [/node\:/, /@langchain\/core\//],
entrypoints: {
chat_models: "chat_models",
embeddings: "embeddings",
index: "index",
llms: "llms",
},
tsConfigPath: resolve("./tsconfig.json"),
cjsSource: "./dist-cjs",
cjsDestination: "./dist",
abs,
} |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/package.json | {
"name": "@langchain/yandex",
"version": "0.1.0",
"description": "Yandex integration for LangChain.js",
"type": "module",
"engines": {
"node": ">=18"
},
"main": "./index.js",
"types": "./index.d.ts",
"repository": {
"type": "git",
"url": "git@github.com:langchain-ai/langchainjs.git"
},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-yandex/",
"scripts": {
"build": "yarn turbo:command build:internal --filter=@langchain/yandex",
"build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking",
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
"lint": "yarn lint:eslint && yarn lint:dpdm",
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"clean": "rm -rf .turbo dist/",
"prepack": "yarn build",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"format": "prettier --config .prettierrc --write \"src\"",
"format:check": "prettier --config .prettierrc --check \"src\""
},
"author": "LangChain",
"license": "MIT",
"peerDependencies": {
"@langchain/core": ">=0.2.21 <0.4.0"
},
"devDependencies": {
"@jest/globals": "^29.5.0",
"@langchain/core": "workspace:*",
"@langchain/scripts": ">=0.1.0 <0.2.0",
"@swc/core": "^1.3.90",
"@swc/jest": "^0.2.29",
"@tsconfig/recommended": "^1.0.3",
"@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.12.0",
"dotenv": "^16.3.1",
"dpdm": "^3.12.0",
"eslint": "^8.33.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-prettier": "^8.6.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.5.0",
"jest-environment-node": "^29.6.4",
"prettier": "^2.8.3",
"rollup": "^4.5.2",
"ts-jest": "^29.1.0",
"typescript": "<5.2.0"
},
"publishConfig": {
"access": "public"
},
"exports": {
"./chat_models": {
"types": {
"import": "./chat_models.d.ts",
"require": "./chat_models.d.cts",
"default": "./chat_models.d.ts"
},
"import": "./chat_models.js",
"require": "./chat_models.cjs"
},
"./embeddings": {
"types": {
"import": "./embeddings.d.ts",
"require": "./embeddings.d.cts",
"default": "./embeddings.d.ts"
},
"import": "./embeddings.js",
"require": "./embeddings.cjs"
},
".": {
"types": {
"import": "./index.d.ts",
"require": "./index.d.cts",
"default": "./index.d.ts"
},
"import": "./index.js",
"require": "./index.cjs"
},
"./llms": {
"types": {
"import": "./llms.d.ts",
"require": "./llms.d.cts",
"default": "./llms.d.ts"
},
"import": "./llms.js",
"require": "./llms.cjs"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"chat_models.cjs",
"chat_models.js",
"chat_models.d.ts",
"chat_models.d.cts",
"embeddings.cjs",
"embeddings.js",
"embeddings.d.ts",
"embeddings.d.cts",
"index.cjs",
"index.js",
"index.d.ts",
"index.d.cts",
"llms.cjs",
"llms.js",
"llms.d.ts",
"llms.d.cts"
]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"declaration": false
},
"exclude": ["node_modules", "dist", "docs", "**/tests"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/turbo.json | {
"extends": ["//"],
"pipeline": {
"build": {
"outputs": ["**/dist/**"]
},
"build:internal": {
"dependsOn": ["^build:internal"]
}
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-yandex/.prettierrc | {
"$schema": "https://json.schemastore.org/prettierrc",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"arrowParens": "always",
"requirePragma": false,
"insertPragma": false,
"proseWrap": "preserve",
"htmlWhitespaceSensitivity": "css",
"vueIndentScriptAndStyle": false,
"endOfLine": "lf"
}
|
0 | lc_public_repos/langchainjs/libs/langchain-yandex | lc_public_repos/langchainjs/libs/langchain-yandex/src/llms.ts | import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
const apiUrl =
"https://llm.api.cloud.yandex.net/foundationModels/v1/completion";
export interface YandexGPTInputs extends BaseLLMParams {
  /**
   * What sampling temperature to use.
   * Should be a double number between 0 (inclusive) and 1 (inclusive).
   */
  temperature?: number;
  /**
   * Maximum limit on the total number of tokens
   * used for both the input prompt and the generated response.
   */
  maxTokens?: number;
  /** Model name to use; used together with `modelVersion` and `folderID` to build the default model URI. */
  model?: string;
  /** Model version to use; used to build the default model URI. */
  modelVersion?: string;
  /** Full model URI to use. When set, `model`, `modelVersion` and `folderID` are not used to build a URI. */
  modelURI?: string;
  /**
   * Yandex Cloud Folder ID
   */
  folderID?: string;
  /**
   * Yandex Cloud Api Key for service account
   * with the `ai.languageModels.user` role.
   */
  apiKey?: string;
  /**
   * Yandex Cloud IAM token for service or user account
   * with the `ai.languageModels.user` role.
   */
  iamToken?: string;
}
/**
 * LLM wrapper for the YandexGPT text-completion REST API
 * (`foundationModels/v1/completion`).
 *
 * Authenticates with either an API key (`apiKey` / `YC_API_KEY`) or an
 * IAM token (`iamToken` / `YC_IAM_TOKEN`). The target model is identified
 * by `modelURI`; when it is not supplied, a default URI is built from
 * `folderID`, `model` and `modelVersion`.
 */
export class YandexGPT extends LLM implements YandexGPTInputs {
  lc_serializable = true;

  static lc_name() {
    return "YandexGPT";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "YC_API_KEY",
      iamToken: "YC_IAM_TOKEN",
      folderID: "YC_FOLDER_ID",
    };
  }

  /** Sampling temperature, between 0 and 1 inclusive. */
  temperature = 0.6;

  /** Maximum total tokens for prompt plus completion. */
  maxTokens = 1700;

  /** Model name, used to build the default model URI. */
  model = "yandexgpt-lite";

  /** Model version, used to build the default model URI. */
  modelVersion = "latest";

  modelURI?: string;

  apiKey?: string;

  iamToken?: string;

  folderID?: string;

  constructor(fields?: YandexGPTInputs) {
    super(fields ?? {});
    const apiKey = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
    const iamToken = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
    const folderID = fields?.folderID ?? getEnvironmentVariable("YC_FOLDER_ID");
    if (apiKey === undefined && iamToken === undefined) {
      throw new Error(
        "Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field."
      );
    }
    this.modelURI = fields?.modelURI;
    this.apiKey = apiKey;
    this.iamToken = iamToken;
    this.folderID = folderID;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.temperature = fields?.temperature ?? this.temperature;
    this.model = fields?.model ?? this.model;
    this.modelVersion = fields?.modelVersion ?? this.modelVersion;
    if (this.modelURI === undefined && folderID === undefined) {
      throw new Error(
        "Please set the YC_FOLDER_ID environment variable or pass Yandex GPT model URI to the constructor as the modelURI field."
      );
    }
    if (!this.modelURI) {
      // Default URI of the form gpt://<folder>/<model>/<version>.
      this.modelURI = `gpt://${this.folderID}/${this.model}/${this.modelVersion}`;
    }
  }

  _llmType() {
    return "yandexgpt";
  }

  /** @ignore */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    return this.caller.callWithOptions({ signal: options.signal }, async () => {
      const headers = {
        "Content-Type": "application/json",
        Authorization: "",
        "x-folder-id": "",
      };
      if (this.apiKey !== undefined) {
        headers.Authorization = `Api-Key ${this.apiKey}`;
      } else {
        headers.Authorization = `Bearer ${this.iamToken}`;
        // With an IAM token, the target folder must be passed explicitly.
        if (this.folderID !== undefined) {
          headers["x-folder-id"] = this.folderID;
        }
      }
      const bodyData = {
        modelUri: this.modelURI,
        completionOptions: {
          temperature: this.temperature,
          maxTokens: this.maxTokens,
        },
        messages: [{ role: "user", text: prompt }],
      };
      let response: Response;
      try {
        response = await fetch(apiUrl, {
          method: "POST",
          headers,
          body: JSON.stringify(bodyData),
        });
      } catch (error) {
        // Network-level failure (DNS, connection reset, abort, ...).
        throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${error}`);
      }
      // Throw HTTP errors directly instead of catching and re-wrapping them;
      // the previous code produced a doubled "Failed to fetch ... Failed to
      // fetch ..." message for non-2xx responses.
      if (!response.ok) {
        throw new Error(
          `Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`
        );
      }
      const responseData = await response.json();
      return responseData.result.alternatives[0].message.text;
    });
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-yandex | lc_public_repos/langchainjs/libs/langchain-yandex/src/index.ts | export * from "./chat_models.js";
export * from "./llms.js";
export * from "./embeddings.js";
|
0 | lc_public_repos/langchainjs/libs/langchain-yandex | lc_public_repos/langchainjs/libs/langchain-yandex/src/chat_models.ts | import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { AIMessage, BaseMessage } from "@langchain/core/messages";
import { ChatResult, ChatGeneration } from "@langchain/core/outputs";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { YandexGPTInputs } from "./llms.js";
const apiUrl =
"https://llm.api.cloud.yandex.net/foundationModels/v1/completion";
interface ParsedMessage {
role: string;
text: string;
}
/**
 * Convert LangChain chat messages into the role/text pairs expected by the
 * YandexGPT completion API. Only human, AI and system messages are kept;
 * non-string content is rejected.
 */
function _parseChatHistory(history: BaseMessage[]): ParsedMessage[] {
  // Map LangChain message types onto YandexGPT roles.
  const roleByType: Record<string, string> = {
    human: "user",
    ai: "assistant",
    system: "system",
  };
  const parsed: ParsedMessage[] = [];
  for (const message of history) {
    const { content } = message;
    if (typeof content !== "string") {
      throw new Error(
        "ChatYandexGPT does not support non-string message content."
      );
    }
    const role = roleByType[message._getType()];
    if (role !== undefined) {
      parsed.push({ role, text: content });
    }
  }
  return parsed;
}
/**
* @example
* ```typescript
* const chat = new ChatYandexGPT({});
* // The assistant is set to translate English to French.
* const res = await chat.invoke([
* new SystemMessage(
* "You are a helpful assistant that translates English to French."
* ),
* new HumanMessage("I love programming."),
* ]);
* ```
*/
export class ChatYandexGPT extends BaseChatModel {
  apiKey?: string;

  iamToken?: string;

  /** Sampling temperature, between 0 and 1 inclusive. */
  temperature = 0.6;

  /** Maximum total tokens for the prompt plus the generated reply. */
  maxTokens = 1700;

  /** Model name, used to build the default model URI. */
  model = "yandexgpt-lite";

  /** Model version, used to build the default model URI. */
  modelVersion = "latest";

  modelURI?: string;

  folderID?: string;

  constructor(fields?: YandexGPTInputs) {
    super(fields ?? {});
    const apiKey = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
    const iamToken = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
    const folderID = fields?.folderID ?? getEnvironmentVariable("YC_FOLDER_ID");
    if (apiKey === undefined && iamToken === undefined) {
      throw new Error(
        "Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field."
      );
    }
    this.modelURI = fields?.modelURI;
    this.apiKey = apiKey;
    this.iamToken = iamToken;
    this.folderID = folderID;
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.temperature = fields?.temperature ?? this.temperature;
    this.model = fields?.model ?? this.model;
    this.modelVersion = fields?.modelVersion ?? this.modelVersion;
    if (this.modelURI === undefined && folderID === undefined) {
      throw new Error(
        "Please set the YC_FOLDER_ID environment variable or pass Yandex GPT model URI to the constructor as the modelURI field."
      );
    }
    if (!this.modelURI) {
      // Default URI of the form gpt://<folder>/<model>/<version>.
      this.modelURI = `gpt://${this.folderID}/${this.model}/${this.modelVersion}`;
    }
  }

  _llmType() {
    return "yandexgpt";
  }

  _combineLLMOutput?() {
    return {};
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "YC_API_KEY",
      iamToken: "YC_IAM_TOKEN",
      folderID: "YC_FOLDER_ID",
    };
  }

  /** @ignore */
  async _generate(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"],
    _runManager?: CallbackManagerForLLMRun | undefined
  ): Promise<ChatResult> {
    const messageHistory = _parseChatHistory(messages);
    const headers = {
      "Content-Type": "application/json",
      Authorization: "",
      "x-folder-id": "",
    };
    if (this.apiKey !== undefined) {
      headers.Authorization = `Api-Key ${this.apiKey}`;
    } else {
      headers.Authorization = `Bearer ${this.iamToken}`;
    }
    // Send the folder header whenever the folder is known. Previously it was
    // only set in the API-key branch, so IAM-token auth never sent it —
    // inconsistent with YandexGPT and YandexGPTEmbeddings, which both pass it
    // with the IAM token.
    if (this.folderID !== undefined) {
      headers["x-folder-id"] = this.folderID;
    }
    const bodyData = {
      modelUri: this.modelURI,
      completionOptions: {
        temperature: this.temperature,
        maxTokens: this.maxTokens,
      },
      messages: messageHistory,
    };
    const response = await fetch(apiUrl, {
      method: "POST",
      headers,
      body: JSON.stringify(bodyData),
      signal: options?.signal,
    });
    if (!response.ok) {
      throw new Error(
        `Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`
      );
    }
    const responseData = await response.json();
    const { result } = responseData;
    // Use the first returned alternative as the generation.
    const { text } = result.alternatives[0].message;
    const { totalTokens } = result.usage;
    const generations: ChatGeneration[] = [
      { text, message: new AIMessage(text) },
    ];
    return {
      generations,
      llmOutput: { totalTokens },
    };
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-yandex | lc_public_repos/langchainjs/libs/langchain-yandex/src/embeddings.ts | import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings";
const apiUrl =
"https://llm.api.cloud.yandex.net/foundationModels/v1/textEmbedding";
export interface YandexGPTEmbeddingsParams extends EmbeddingsParams {
  /** Model name to use; used together with `modelVersion` and `folderID` to build the default model URI. */
  model?: string;
  /** Model version to use; used to build the default model URI. */
  modelVersion?: string;
  /** Full model URI to use. When set, `model`, `modelVersion` and `folderID` are not used to build a URI. */
  modelURI?: string;
  /** Yandex Cloud Folder ID. */
  folderID?: string;
  /**
   * Yandex Cloud Api Key for service account
   * with the `ai.languageModels.user` role.
   */
  apiKey?: string;
  /**
   * Yandex Cloud IAM token for service or user account
   * with the `ai.languageModels.user` role.
   */
  iamToken?: string;
}
/**
* Class for generating embeddings using the YandexGPT Foundation models API. Extends the
 * Embeddings class and implements YandexGPTEmbeddingsParams.
*/
export class YandexGPTEmbeddings
  extends Embeddings
  implements YandexGPTEmbeddingsParams
{
  /** Embedding model name, used to build the default model URI. */
  model = "text-search-query";

  /** Model version, used to build the default model URI. */
  modelVersion = "latest";

  modelURI?: string;

  apiKey?: string;

  iamToken?: string;

  folderID?: string;

  constructor(fields?: YandexGPTEmbeddingsParams) {
    super(fields ?? {});
    const apiKey = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
    const iamToken = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
    const folderID = fields?.folderID ?? getEnvironmentVariable("YC_FOLDER_ID");
    if (apiKey === undefined && iamToken === undefined) {
      throw new Error(
        "Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field."
      );
    }
    this.modelURI = fields?.modelURI;
    this.apiKey = apiKey;
    this.iamToken = iamToken;
    this.folderID = folderID;
    this.model = fields?.model ?? this.model;
    this.modelVersion = fields?.modelVersion ?? this.modelVersion;
    if (this.modelURI === undefined && folderID === undefined) {
      throw new Error(
        "Please set the YC_FOLDER_ID environment variable or pass Yandex GPT model URI to the constructor as the modelURI field."
      );
    }
    if (!this.modelURI) {
      // Default URI of the form emb://<folder>/<model>/<version>.
      this.modelURI = `emb://${this.folderID}/${this.model}/${this.modelVersion}`;
    }
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "YC_API_KEY",
      iamToken: "YC_IAM_TOKEN",
      folderID: "YC_FOLDER_ID",
    };
  }

  /**
   * Method to generate embeddings for an array of documents.
   * @param texts Array of documents to generate embeddings for.
   * @returns Promise that resolves to a 2D array of embeddings for each document.
   */
  async embedDocuments(texts: string[]): Promise<number[][]> {
    return this.embeddingWithRetry(texts);
  }

  /**
   * Method to generate an embedding for a single document. Calls the
   * embedDocuments method with the document as the input.
   * @param text Document to generate an embedding for.
   * @returns Promise that resolves to an embedding for the document.
   */
  async embedQuery(text: string): Promise<number[]> {
    const data = await this.embedDocuments([text]);
    return data[0];
  }

  /**
   * Private method to make a request to the YandexGPT API to generate
   * embeddings. Handles the retry logic and returns the embeddings from the API.
   * Each document is embedded with its own request to the textEmbedding endpoint.
   * @param {string[]} texts Array of documents to generate embeddings for.
   * @returns {Promise<number[][]>} Promise that resolves to a 2D array of embeddings for each document.
   */
  private async embeddingWithRetry(texts: string[]): Promise<number[][]> {
    return this.caller.call(async () => {
      const headers = {
        "Content-Type": "application/json",
        Authorization: "",
        "x-folder-id": "",
      };
      if (this.apiKey !== undefined) {
        headers.Authorization = `Api-Key ${this.apiKey}`;
      } else {
        headers.Authorization = `Bearer ${this.iamToken}`;
        // With an IAM token, the target folder must be passed explicitly.
        if (this.folderID !== undefined) {
          headers["x-folder-id"] = this.folderID;
        }
      }
      const embeddings: number[][] = [];
      for (const text of texts) {
        const bodyData = {
          modelUri: this.modelURI,
          text,
        };
        let response: Response;
        try {
          response = await fetch(apiUrl, {
            method: "POST",
            headers,
            body: JSON.stringify(bodyData),
          });
        } catch (error) {
          // Network-level failure (DNS, connection reset, abort, ...).
          throw new Error(
            `Failed to fetch ${apiUrl} from YandexGPT: ${error}`
          );
        }
        // Throw HTTP errors directly instead of catching and re-wrapping
        // them; the previous code produced a doubled error message for
        // non-2xx responses.
        if (!response.ok) {
          throw new Error(
            `Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`
          );
        }
        const responseData = await response.json();
        embeddings.push(responseData.embedding);
      }
      return embeddings;
    });
  }
}
|
0 | lc_public_repos/langchainjs/libs/langchain-yandex/src | lc_public_repos/langchainjs/libs/langchain-yandex/src/tests/chat_models.int.test.ts | import { test } from "@jest/globals";
import { ChatYandexGPT } from "../chat_models.js";
test("Test YandexGPT generation", async () => {
const model = new ChatYandexGPT({});
const res = await model?.generate([
[["human", `Translate "I love programming" into Korean.`]],
]);
expect(res).toBeTruthy();
});
|
0 | lc_public_repos/langchainjs/libs/langchain-yandex/src | lc_public_repos/langchainjs/libs/langchain-yandex/src/tests/embeddings.int.test.ts | import { test, expect } from "@jest/globals";
import { YandexGPTEmbeddings } from "../embeddings.js";
test("Test YandexGPTEmbeddings.embedQuery", async () => {
const embeddings = new YandexGPTEmbeddings({
maxRetries: 1,
});
const res = await embeddings.embedQuery("Hello world");
expect(typeof res[0]).toBe("number");
});
test("Test YandexGPTEmbeddings.embedDocuments", async () => {
const embeddings = new YandexGPTEmbeddings({
maxRetries: 1,
});
const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]);
expect(res).toHaveLength(2);
res.forEach((r) => {
expect(typeof r[0]).toBe("number");
});
});
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"outDir": "../dist",
"rootDir": "./src",
"target": "ES2021",
"lib": ["ES2021", "ES2022.Object", "DOM"],
"module": "ES2020",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "docs"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/LICENSE | The MIT License
Copyright (c) 2023 LangChain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. |
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/jest.config.cjs | /** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: "ts-jest/presets/default-esm",
testEnvironment: "./jest.env.cjs",
modulePathIgnorePatterns: ["dist/", "docs/"],
moduleNameMapper: {
"^(\\.{1,2}/.*)\\.js$": "$1",
},
transform: {
"^.+\\.tsx?$": ["@swc/jest"],
},
transformIgnorePatterns: [
"/node_modules/",
"\\.pnp\\.[^\\/]+$",
"./scripts/jest-setup-after-env.js",
],
setupFiles: ["dotenv/config"],
testTimeout: 20_000,
passWithNoTests: true,
collectCoverageFrom: ["src/**/*.ts"],
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/jest.env.cjs | const { TestEnvironment } = require("jest-environment-node");
// Custom Jest node environment that re-exposes the host realm's Float32Array
// inside the test sandbox, so `instanceof Float32Array` checks succeed across
// realm boundaries.
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(config, context) {
    // Make `instanceof Float32Array` return true in tests
    // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
    super(config, context);
    this.global.Float32Array = Float32Array;
  }
}
module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/README.md | # @langchain/cloudflare
This package contains the LangChain.js integrations for Cloudflare through their SDK.
## Installation
```bash npm2yarn
npm install @langchain/cloudflare @langchain/core
```
## Development
To develop the Cloudflare package, you'll need to follow these instructions:
### Install dependencies
```bash
yarn install
```
### Build the package
```bash
yarn build
```
Or from the repo root:
```bash
yarn build --filter=@langchain/cloudflare
```
### Run tests
Test files should live within a `tests/` folder in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should
end in `.int.test.ts`:
```bash
$ yarn test
$ yarn test:int
```
### Lint & Format
Run the linter & formatter to ensure your code is up to standard:
```bash
yarn lint && yarn format
```
### Adding new entrypoints
If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/.release-it.json | {
"github": {
"release": true,
"autoGenerate": true,
"tokenRef": "GITHUB_TOKEN_RELEASE"
},
"npm": {
"versionArgs": [
"--workspaces-update=false"
]
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/.eslintrc.cjs | module.exports = {
extends: [
"airbnb-base",
"eslint:recommended",
"prettier",
"plugin:@typescript-eslint/recommended",
],
parserOptions: {
ecmaVersion: 12,
parser: "@typescript-eslint/parser",
project: "./tsconfig.json",
sourceType: "module",
},
plugins: ["@typescript-eslint", "no-instanceof"],
ignorePatterns: [
".eslintrc.cjs",
"scripts",
"node_modules",
"dist",
"dist-cjs",
"*.js",
"*.cjs",
"*.d.ts",
],
rules: {
"no-process-env": 2,
"no-instanceof/no-instanceof": 2,
"@typescript-eslint/explicit-module-boundary-types": 0,
"@typescript-eslint/no-empty-function": 0,
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
"class-methods-use-this": 0,
"import/extensions": [2, "ignorePackages"],
"import/no-extraneous-dependencies": [
"error",
{ devDependencies: ["**/*.test.ts"] },
],
"import/no-unresolved": 0,
"import/prefer-default-export": 0,
"keyword-spacing": "error",
"max-classes-per-file": 0,
"max-len": 0,
"no-await-in-loop": 0,
"no-bitwise": 0,
"no-console": 0,
"no-restricted-syntax": 0,
"no-shadow": 0,
"no-continue": 0,
"no-void": 0,
"no-underscore-dangle": 0,
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-return-await": 0,
"consistent-return": 0,
"no-else-return": 0,
"func-names": 0,
"no-lonely-if": 0,
"prefer-rest-params": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
},
overrides: [
{
files: ['**/*.test.ts'],
rules: {
'@typescript-eslint/no-unused-vars': 'off'
}
}
]
};
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
* @param {string} relativePath
* @returns {string}
*/
function abs(relativePath) {
  // Directory containing this config file, derived from the module URL.
  const packageRoot = dirname(fileURLToPath(import.meta.url));
  return resolve(packageRoot, relativePath);
}
/**
 * Build configuration consumed by `@langchain/scripts` (`lc_build`).
 * - `internals`: module patterns treated as internal (not bundled/externalized).
 * - `entrypoints`: map of published entrypoint name -> source module.
 * - `cjsSource`/`cjsDestination`: where the CJS build is emitted and copied.
 * - `abs`: helper to resolve paths relative to this config file.
 */
export const config = {
  internals: [/node\:/, /@langchain\/core\//, /@langchain\/langgraph\/web/],
  entrypoints: {
    // Trailing comma + semicolon added to match the repo's Prettier settings
    // (`trailingComma: "es5"`, `semi: true`) and the sibling package configs.
    index: "index",
  },
  tsConfigPath: resolve("./tsconfig.json"),
  cjsSource: "./dist-cjs",
  cjsDestination: "./dist",
  abs,
};
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/package.json | {
"name": "@langchain/cloudflare",
"version": "0.1.0",
"description": "Cloudflare integration for LangChain.js",
"type": "module",
"engines": {
"node": ">=18"
},
"main": "./index.js",
"types": "./index.d.ts",
"repository": {
"type": "git",
"url": "git@github.com:langchain-ai/langchainjs.git"
},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-cloudflare/",
"scripts": {
"build": "yarn turbo:command build:internal --filter=@langchain/cloudflare",
"build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking",
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
"lint": "yarn lint:eslint && yarn lint:dpdm",
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"clean": "rm -rf .turbo dist/",
"prepack": "yarn build",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"test:standard": "yarn test:standard:unit && yarn test:standard:int",
"format": "prettier --config .prettierrc --write \"src\"",
"format:check": "prettier --config .prettierrc --check \"src\""
},
"author": "LangChain",
"license": "MIT",
"dependencies": {
"@langchain/core": ">=0.2.21 <0.4.0",
"uuid": "^10.0.0"
},
"devDependencies": {
"@cloudflare/workers-types": "^4.20240909.0",
"@jest/globals": "^29.5.0",
"@langchain/core": "workspace:*",
"@langchain/scripts": ">=0.1.0 <0.2.0",
"@langchain/standard-tests": "0.0.0",
"@swc/core": "^1.3.90",
"@swc/jest": "^0.2.29",
"@tsconfig/recommended": "^1.0.3",
"@types/uuid": "^9",
"@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.12.0",
"dotenv": "^16.3.1",
"dpdm": "^3.12.0",
"eslint": "^8.33.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-prettier": "^8.6.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.5.0",
"jest-environment-node": "^29.6.4",
"prettier": "^2.8.3",
"release-it": "^17.6.0",
"rollup": "^4.5.2",
"ts-jest": "^29.1.0",
"typescript": "<5.2.0"
},
"peerDependencies": {
"@langchain/core": ">=0.2.21 <0.4.0"
},
"publishConfig": {
"access": "public"
},
"exports": {
".": {
"types": {
"import": "./index.d.ts",
"require": "./index.d.cts",
"default": "./index.d.ts"
},
"import": "./index.js",
"require": "./index.cjs"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"index.cjs",
"index.js",
"index.d.ts",
"index.d.cts"
]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"declaration": false
},
"exclude": ["node_modules", "dist", "docs", "**/tests"]
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/turbo.json | {
"extends": ["//"],
"pipeline": {
"build": {
"outputs": ["**/dist/**"]
},
"build:internal": {
"dependsOn": ["^build:internal"]
}
}
}
|
0 | lc_public_repos/langchainjs/libs | lc_public_repos/langchainjs/libs/langchain-cloudflare/.prettierrc | {
"$schema": "https://json.schemastore.org/prettierrc",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"arrowParens": "always",
"requirePragma": false,
"insertPragma": false,
"proseWrap": "preserve",
"htmlWhitespaceSensitivity": "css",
"vueIndentScriptAndStyle": false,
"endOfLine": "lf"
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.