index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/bedrock/web.ts
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import { EventStreamCodec } from "@smithy/eventstream-codec";
import { fromUtf8, toUtf8 } from "@smithy/util-utf8";
import { Sha256 } from "@aws-crypto/sha256-js";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
  type BaseChatModelParams,
  BaseChatModel,
  LangSmithParams,
  BaseChatModelCallOptions,
  BindToolsInput,
} from "@langchain/core/language_models/chat_models";
import {
  BaseLanguageModelInput,
  isOpenAITool,
} from "@langchain/core/language_models/base";
import { Runnable } from "@langchain/core/runnables";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import {
  AIMessageChunk,
  BaseMessage,
  AIMessage,
  ChatMessage,
  BaseMessageChunk,
  isAIMessage,
} from "@langchain/core/messages";
import {
  ChatGeneration,
  ChatGenerationChunk,
  ChatResult,
} from "@langchain/core/outputs";
import {
  isLangChainTool,
  isStructuredTool,
} from "@langchain/core/utils/function_calling";
import { zodToJsonSchema } from "zod-to-json-schema";
import type { SerializedFields } from "../../load/map_keys.js";
import {
  BaseBedrockInput,
  BedrockLLMInputOutputAdapter,
  type CredentialType,
} from "../../utils/bedrock/index.js";
import {
  _toolsInParams,
  isAnthropicTool,
} from "../../utils/bedrock/anthropic.js";

type AnthropicTool = Record<string, unknown>;

type BedrockChatToolType = BindToolsInput | AnthropicTool;

// Region prefixes used to recognize cross-region inference model ids
// (e.g. "us.anthropic.claude-..."), see isInferenceModel/getModelProvider.
const AWS_REGIONS = [
  "us",
  "sa",
  "me",
  "il",
  "eu",
  "cn",
  "ca",
  "ap",
  "af",
  "us-gov",
];

// Model providers this integration knows how to format input/output for.
const ALLOWED_MODEL_PROVIDERS = [
  "ai21",
  "anthropic",
  "amazon",
  "cohere",
  "meta",
  "mistral",
];

// The AWS event-stream message prelude starts with a 4-byte big-endian
// total-message-length field; used by _readChunks to frame messages.
const PRELUDE_TOTAL_LENGTH_BYTES = 4;

/**
 * Renders one message as legacy Anthropic (Claude v1/v2) prompt text.
 *
 * Human, system, and function messages are prefixed with `humanPrompt`
 * (system content is additionally wrapped in `<admin>` tags); AI messages
 * are prefixed with `aiPrompt`; generic `ChatMessage`s are rendered as
 * `\n\nRole: content` with the role capitalized.
 *
 * @param message The message to render.
 * @param humanPrompt Prefix for human/system/function messages.
 * @param aiPrompt Prefix for AI messages.
 * @returns The rendered prompt fragment.
 * @throws Error if the message type is not recognized.
 */
function convertOneMessageToText(
  message: BaseMessage,
  humanPrompt: string,
  aiPrompt: string
): string {
  if (message._getType() === "human") {
    return `${humanPrompt} ${message.content}`;
  } else if (message._getType() === "ai") {
    return `${aiPrompt} ${message.content}`;
  } else if (message._getType() === "system") {
    return `${humanPrompt} <admin>${message.content}</admin>`;
  } else if (message._getType() === "function") {
    return `${humanPrompt} ${message.content}`;
  } else if (ChatMessage.isInstance(message)) {
    // Fixed: the original emitted the literal text "{message.content}"
    // because the template interpolation was missing its "$".
    return `\n\n${
      message.role[0].toUpperCase() + message.role.slice(1)
    }: ${message.content}`;
  }
  throw new Error(`Unknown role: ${message._getType()}`);
}

/**
 * Converts a list of messages into a single legacy Anthropic prompt string.
 * If the conversation does not end with an AI message, an empty AI turn is
 * appended so the prompt ends with the assistant prefix.
 */
export function convertMessagesToPromptAnthropic(
  messages: BaseMessage[],
  humanPrompt = "\n\nHuman:",
  aiPrompt = "\n\nAssistant:"
): string {
  const messagesCopy = [...messages];

  if (
    messagesCopy.length === 0 ||
    messagesCopy[messagesCopy.length - 1]._getType() !== "ai"
  ) {
    messagesCopy.push(new AIMessage({ content: "" }));
  }

  return messagesCopy
    .map((message) => convertOneMessageToText(message, humanPrompt, aiPrompt))
    .join("");
}

/**
 * Function that converts an array of messages into a single string prompt
 * that can be used as input for a chat model. It delegates the conversion
 * logic to the appropriate provider-specific function.
 * @param messages Array of messages to be converted.
 * @param provider The model provider name; only "anthropic" is supported.
 * @returns A string prompt that can be used as input for a chat model.
 */
export function convertMessagesToPrompt(
  messages: BaseMessage[],
  provider: string
): string {
  if (provider === "anthropic") {
    return convertMessagesToPromptAnthropic(messages);
  }
  throw new Error(`Provider ${provider} does not support chat.`);
}

// Normalizes the heterogeneous tool formats accepted by bindTools/call
// options (LangChain tools, OpenAI tools, Anthropic tools) into the
// Anthropic tool shape ({ name, description, input_schema }).
// All supplied tools must be of a single format; mixing formats throws.
function formatTools(tools: BedrockChatCallOptions["tools"]): AnthropicTool[] {
  if (!tools || !tools.length) {
    return [];
  }
  if (tools.every(isLangChainTool)) {
    // LangChain tools carry a zod schema; convert it to JSON Schema.
    return tools.map((tc) => ({
      name: tc.name,
      description: tc.description,
      input_schema: zodToJsonSchema(tc.schema),
    }));
  }
  if (tools.every(isOpenAITool)) {
    // OpenAI tools already carry JSON Schema under function.parameters.
    return tools.map((tc) => ({
      name: tc.function.name,
      description: tc.function.description,
      input_schema: tc.function.parameters,
    }));
  }
  if (tools.every(isAnthropicTool)) {
    // Already in the target format; pass through unchanged.
    return tools;
  }
  if (
    tools.some(isStructuredTool) ||
    tools.some(isOpenAITool) ||
    tools.some(isAnthropicTool)
  ) {
    // Some tools were recognized but not all of the same kind.
    throw new Error(
      "All tools passed to BedrockChat must be of the same type."
    );
  }
  throw new Error("Invalid tool format received.");
}

export interface BedrockChatCallOptions extends BaseChatModelCallOptions {
  tools?: BedrockChatToolType[];
}

export interface BedrockChatFields
  extends Partial<BaseBedrockInput>,
    BaseChatModelParams {}

/**
 * AWS Bedrock chat model integration.
 *
 * Setup:
 * Install `@langchain/community` and set the following environment variables:
 *
 * ```bash
 * npm install @langchain/community
 * export AWS_REGION="your-aws-region"
 * export AWS_SECRET_ACCESS_KEY="your-aws-secret-access-key"
 * export AWS_ACCESS_KEY_ID="your-aws-access-key-id"
 * ```
 *
 * ## [Constructor args](/classes/langchain_community_chat_models_bedrock.BedrockChat.html#constructor)
 *
 * ## [Runtime args](/interfaces/langchain_community_chat_models_bedrock_web.BedrockChatCallOptions.html)
 *
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
* They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below: * * ```typescript * // When calling `.bind`, call options should be passed via the first argument * const llmWithArgsBound = llm.bind({ * stop: ["\n"], * tools: [...], * }); * * // When calling `.bindTools`, call options should be passed via the second argument * const llmWithTools = llm.bindTools( * [...], * { * stop: ["stop on this token!"], * } * ); * ``` * * ## Examples * * <details open> * <summary><strong>Instantiate</strong></summary> * * ```typescript * import { BedrockChat } from '@langchain/community/chat_models/bedrock/web'; * * const llm = new BedrockChat({ * region: process.env.AWS_REGION, * maxRetries: 0, * model: "anthropic.claude-3-5-sonnet-20240620-v1:0", * temperature: 0, * maxTokens: undefined, * // other params... * }); * * // You can also pass credentials in explicitly: * const llmWithCredentials = new BedrockChat({ * region: process.env.BEDROCK_AWS_REGION, * model: "anthropic.claude-3-5-sonnet-20240620-v1:0", * credentials: { * secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, * accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, * }, * }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Invoking</strong></summary> * * ```typescript * const input = `Translate "I love programming" into French.`; * * // Models also accept a list of chat messages or a formatted prompt * const result = await llm.invoke(input); * console.log(result); * ``` * * ```txt * AIMessage { * "content": "Here's the translation to French:\n\nJ'adore la programmation.", * "additional_kwargs": { * "id": "msg_bdrk_01HCZHa2mKbMZeTeHjLDd286" * }, * "response_metadata": { * "type": "message", * "role": "assistant", * "model": "claude-3-5-sonnet-20240620", * "stop_reason": "end_turn", * "stop_sequence": null, * "usage": { * "input_tokens": 25, * "output_tokens": 19 * } * }, * "tool_calls": [], * "invalid_tool_calls": [] * } * ``` * </details> * * <br 
/> * * <details> * <summary><strong>Streaming Chunks</strong></summary> * * ```typescript * for await (const chunk of await llm.stream(input)) { * console.log(chunk); * } * ``` * * ```txt * AIMessageChunk { * "content": "", * "additional_kwargs": { * "id": "msg_bdrk_01RhFuGR9uJ2bj5GbdAma4y6" * }, * "response_metadata": { * "type": "message", * "role": "assistant", * "model": "claude-3-5-sonnet-20240620", * "stop_reason": null, * "stop_sequence": null * }, * } * AIMessageChunk { * "content": "J", * } * AIMessageChunk { * "content": "'adore la", * } * AIMessageChunk { * "content": " programmation.", * } * AIMessageChunk { * "content": "", * "additional_kwargs": { * "stop_reason": "end_turn", * "stop_sequence": null * }, * } * AIMessageChunk { * "content": "", * "response_metadata": { * "amazon-bedrock-invocationMetrics": { * "inputTokenCount": 25, * "outputTokenCount": 11, * "invocationLatency": 659, * "firstByteLatency": 506 * } * }, * "usage_metadata": { * "input_tokens": 25, * "output_tokens": 11, * "total_tokens": 36 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Aggregate Streamed Chunks</strong></summary> * * ```typescript * import { AIMessageChunk } from '@langchain/core/messages'; * import { concat } from '@langchain/core/utils/stream'; * * const stream = await llm.stream(input); * let full: AIMessageChunk | undefined; * for await (const chunk of stream) { * full = !full ? 
chunk : concat(full, chunk); * } * console.log(full); * ``` * * ```txt * AIMessageChunk { * "content": "J'adore la programmation.", * "additional_kwargs": { * "id": "msg_bdrk_017b6PuBybA51P5LZ9K6gZHm", * "stop_reason": "end_turn", * "stop_sequence": null * }, * "response_metadata": { * "type": "message", * "role": "assistant", * "model": "claude-3-5-sonnet-20240620", * "stop_reason": null, * "stop_sequence": null, * "amazon-bedrock-invocationMetrics": { * "inputTokenCount": 25, * "outputTokenCount": 11, * "invocationLatency": 1181, * "firstByteLatency": 1177 * } * }, * "usage_metadata": { * "input_tokens": 25, * "output_tokens": 11, * "total_tokens": 36 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Bind tools</strong></summary> * * ```typescript * import { z } from 'zod'; * import { AIMessage } from '@langchain/core/messages'; * * const GetWeather = { * name: "GetWeather", * description: "Get the current weather in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const GetPopulation = { * name: "GetPopulation", * description: "Get the current population in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]); * const aiMsg: AIMessage = await llmWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?" 
* ); * console.log(aiMsg.tool_calls); * ``` * * ```txt * [ * { * name: 'GetWeather', * args: { location: 'Los Angeles, CA' }, * id: 'toolu_bdrk_01R2daqwHR931r4baVNzbe38', * type: 'tool_call' * }, * { * name: 'GetWeather', * args: { location: 'New York, NY' }, * id: 'toolu_bdrk_01WDadwNc7PGqVZvCN7Dr7eD', * type: 'tool_call' * }, * { * name: 'GetPopulation', * args: { location: 'Los Angeles, CA' }, * id: 'toolu_bdrk_014b8zLkpAgpxrPfewKinJFc', * type: 'tool_call' * }, * { * name: 'GetPopulation', * args: { location: 'New York, NY' }, * id: 'toolu_bdrk_01Tt8K2MUP15kNuMDFCLEFKN', * type: 'tool_call' * } * ] * ``` * </details> * * <br /> * * <details> * <summary><strong>Structured Output</strong></summary> * * ```typescript * const Joke = z.object({ * setup: z.string().describe("The setup of the joke"), * punchline: z.string().describe("The punchline to the joke"), * rating: z.number().optional().describe("How funny the joke is, from 1 to 10") * }).describe('Joke to tell user.'); * * const structuredLlm = llm.withStructuredOutput(Joke); * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); * console.log(jokeResult); * ``` * * ```txt * { * setup: "Why don't cats play poker in the jungle?", * punchline: 'Too many cheetahs!' 
* } * ``` * </details> * * <br /> * * <details> * <summary><strong>Response Metadata</strong></summary> * * ```typescript * const aiMsgForResponseMetadata = await llm.invoke(input); * console.log(aiMsgForResponseMetadata.response_metadata); * ``` * * ```txt * "response_metadata": { * "type": "message", * "role": "assistant", * "model": "claude-3-5-sonnet-20240620", * "stop_reason": "end_turn", * "stop_sequence": null, * "usage": { * "input_tokens": 25, * "output_tokens": 19 * } * } * ``` * </details> */ export class BedrockChat extends BaseChatModel<BedrockChatCallOptions, AIMessageChunk> implements BaseBedrockInput { model = "amazon.titan-tg1-large"; modelProvider: string; region: string; credentials: CredentialType; temperature?: number | undefined = undefined; maxTokens?: number | undefined = undefined; fetchFn: typeof fetch; endpointHost?: string; /** @deprecated Use as a call option using .bind() instead. */ stopSequences?: string[]; modelKwargs?: Record<string, unknown>; codec: EventStreamCodec = new EventStreamCodec(toUtf8, fromUtf8); streaming = false; usesMessagesApi = false; lc_serializable = true; trace?: "ENABLED" | "DISABLED"; guardrailIdentifier = ""; guardrailVersion = ""; guardrailConfig?: { tagSuffix: string; streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS"; }; get lc_aliases(): Record<string, string> { return { model: "model_id", region: "region_name", }; } get lc_secrets(): { [key: string]: string } | undefined { return { "credentials.accessKeyId": "AWS_ACCESS_KEY_ID", "credentials.secretAccessKey": "AWS_SECRET_ACCESS_KEY", "credentials.sessionToken": "AWS_SECRET_ACCESS_KEY", awsAccessKeyId: "AWS_ACCESS_KEY_ID", awsSecretAccessKey: "AWS_SECRET_ACCESS_KEY", awsSessionToken: "AWS_SESSION_TOKEN", }; } get lc_attributes(): SerializedFields | undefined { return { region: this.region }; } _identifyingParams(): Record<string, string> { return { model: this.model, }; } _llmType() { return "bedrock"; } static lc_name() { return "BedrockChat"; } 
constructor(fields?: BedrockChatFields) {
  // Credentials may come from the fields or from standard AWS env vars.
  const awsAccessKeyId =
    fields?.awsAccessKeyId ?? getEnvironmentVariable("AWS_ACCESS_KEY_ID");
  const awsSecretAccessKey =
    fields?.awsSecretAccessKey ??
    getEnvironmentVariable("AWS_SECRET_ACCESS_KEY");
  const awsSessionToken =
    fields?.awsSessionToken ?? getEnvironmentVariable("AWS_SESSION_TOKEN");

  let credentials = fields?.credentials;
  if (credentials === undefined) {
    if (awsAccessKeyId === undefined || awsSecretAccessKey === undefined) {
      throw new Error(
        "Please set your AWS credentials in the 'credentials' field or set env vars AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY, and optionally AWS_SESSION_TOKEN."
      );
    }
    credentials = {
      accessKeyId: awsAccessKeyId,
      secretAccessKey: awsSecretAccessKey,
      sessionToken: awsSessionToken,
    };
  }

  // Merge the resolved key material back into fields so the base class
  // (and serialization) sees the effective values.
  // eslint-disable-next-line no-param-reassign
  fields = { ...fields, awsAccessKeyId, awsSecretAccessKey, awsSessionToken };

  super(fields);

  this.model = fields?.model ?? this.model;
  this.modelProvider = getModelProvider(this.model);

  if (!ALLOWED_MODEL_PROVIDERS.includes(this.modelProvider)) {
    throw new Error(
      `Unknown model provider: '${this.modelProvider}', only these are supported: ${ALLOWED_MODEL_PROVIDERS}`
    );
  }

  const region =
    fields?.region ?? getEnvironmentVariable("AWS_DEFAULT_REGION");
  if (!region) {
    throw new Error(
      "Please set the AWS_DEFAULT_REGION environment variable or pass it to the constructor as the region field."
    );
  }
  this.region = region;

  this.credentials = credentials;
  this.temperature = fields?.temperature ?? this.temperature;
  this.maxTokens = fields?.maxTokens ?? this.maxTokens;
  // Bind fetch to globalThis so it keeps its expected receiver when called.
  this.fetchFn = fields?.fetchFn ?? fetch.bind(globalThis);
  this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
  this.stopSequences = fields?.stopSequences;
  this.modelKwargs = fields?.modelKwargs;
  this.streaming = fields?.streaming ?? this.streaming;
  this.usesMessagesApi = canUseMessagesApi(this.model);
  this.trace = fields?.trace ?? this.trace;
  this.guardrailVersion = fields?.guardrailVersion ?? this.guardrailVersion;
  this.guardrailIdentifier =
    fields?.guardrailIdentifier ?? this.guardrailIdentifier;
  this.guardrailConfig = fields?.guardrailConfig;
}

/**
 * Builds the provider-agnostic invocation parameters from instance
 * configuration plus per-call options. Rejects `tool_choice`, which
 * this integration does not support.
 */
override invocationParams(options?: this["ParsedCallOptions"]) {
  if (options?.tool_choice) {
    throw new Error(
      "'tool_choice' call option is not supported by BedrockChat."
    );
  }
  return {
    tools: options?.tools ? formatTools(options.tools) : undefined,
    temperature: this.temperature,
    max_tokens: this.maxTokens,
    stop: options?.stop ?? this.stopSequences,
    modelKwargs: this.modelKwargs,
    guardrailConfig: this.guardrailConfig,
  };
}

// LangSmith tracing metadata for this run.
getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
  const params = this.invocationParams(options);
  return {
    ls_provider: "bedrock",
    ls_model_name: this.model,
    ls_model_type: "chat",
    ls_temperature: params.temperature ?? undefined,
    ls_max_tokens: params.max_tokens ?? undefined,
    ls_stop: options.stop,
  };
}

/**
 * Generates a chat result. When `this.streaming` is set, the streamed
 * chunks are concatenated into a single final generation; otherwise a
 * plain (non-streaming) request is made.
 */
async _generate(
  messages: BaseMessage[],
  options: Partial<this["ParsedCallOptions"]>,
  runManager?: CallbackManagerForLLMRun
): Promise<ChatResult> {
  if (this.streaming) {
    const stream = this._streamResponseChunks(messages, options, runManager);
    let finalResult: ChatGenerationChunk | undefined;
    for await (const chunk of stream) {
      if (finalResult === undefined) {
        finalResult = chunk;
      } else {
        finalResult = finalResult.concat(chunk);
      }
    }
    if (finalResult === undefined) {
      throw new Error(
        "Could not parse final output from Bedrock streaming call."
      );
    }
    return {
      generations: [finalResult],
      llmOutput: finalResult.generationInfo,
    };
  }
  return this._generateNonStreaming(messages, options, runManager);
}

// Single-shot (non-streaming) invocation against the "invoke" endpoint.
async _generateNonStreaming(
  messages: BaseMessage[],
  options: Partial<this["ParsedCallOptions"]>,
  _runManager?: CallbackManagerForLLMRun
): Promise<ChatResult> {
  const service = "bedrock-runtime";
  const endpointHost =
    this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
  const provider = this.modelProvider;
  const response = await this._signedFetch(messages, options, {
    bedrockMethod: "invoke",
    endpointHost,
    provider,
  });
  const json = await response.json();
  if (!response.ok) {
    throw new Error(
      `Error ${response.status}: ${json.message ?? JSON.stringify(json)}`
    );
  }
  if (this.usesMessagesApi) {
    const outputGeneration =
      BedrockLLMInputOutputAdapter.prepareMessagesOutput(provider, json);
    if (outputGeneration === undefined) {
      throw new Error("Failed to parse output generation.");
    }
    return {
      generations: [outputGeneration],
      llmOutput: outputGeneration.generationInfo,
    };
  } else {
    const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
    return { generations: [{ text, message: new AIMessage(text) }] };
  }
}

/**
 * Builds the provider-specific request body, signs the request with
 * AWS Signature V4 and performs the HTTP call via `this.fetchFn`.
 * Guardrail trace headers are attached only when trace, identifier and
 * version are all set.
 */
async _signedFetch(
  messages: BaseMessage[],
  options: this["ParsedCallOptions"],
  fields: {
    bedrockMethod: "invoke" | "invoke-with-response-stream";
    endpointHost: string;
    provider: string;
  }
) {
  const { bedrockMethod, endpointHost, provider } = fields;
  const { max_tokens, temperature, stop, modelKwargs, guardrailConfig, tools } =
    this.invocationParams(options);
  // Messages-API models take structured messages; legacy models take a
  // single Anthropic-style prompt string.
  const inputBody = this.usesMessagesApi
    ? BedrockLLMInputOutputAdapter.prepareMessagesInput(
        provider,
        messages,
        max_tokens,
        temperature,
        stop,
        modelKwargs,
        guardrailConfig,
        tools
      )
    : BedrockLLMInputOutputAdapter.prepareInput(
        provider,
        convertMessagesToPromptAnthropic(messages),
        max_tokens,
        temperature,
        stop,
        modelKwargs,
        fields.bedrockMethod,
        guardrailConfig
      );

  const url = new URL(
    `https://${endpointHost}/model/${this.model}/${bedrockMethod}`
  );

  const request = new HttpRequest({
    hostname: url.hostname,
    path: url.pathname,
    protocol: url.protocol,
    method: "POST", // method must be uppercase
    body: JSON.stringify(inputBody),
    query: Object.fromEntries(url.searchParams.entries()),
    headers: {
      // host is required by AWS Signature V4: https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
      host: url.host,
      accept: "application/json",
      "content-type": "application/json",
      ...(this.trace &&
        this.guardrailIdentifier &&
        this.guardrailVersion && {
          "X-Amzn-Bedrock-Trace": this.trace,
          "X-Amzn-Bedrock-GuardrailIdentifier": this.guardrailIdentifier,
          "X-Amzn-Bedrock-GuardrailVersion": this.guardrailVersion,
        }),
    },
  });

  const signer = new SignatureV4({
    credentials: this.credentials,
    service: "bedrock",
    region: this.region,
    sha256: Sha256,
  });

  const signedRequest = await signer.sign(request);

  // Send request to AWS using the low-level fetch API
  const response = await this.caller.callWithOptions(
    { signal: options.signal },
    async () =>
      this.fetchFn(url, {
        headers: signedRequest.headers,
        body: signedRequest.body,
        method: signedRequest.method,
      })
  );
  return response;
}

/**
 * Streams generation chunks. Providers that support streaming use the
 * "invoke-with-response-stream" endpoint and are decoded from the AWS
 * event-stream wire format; all other providers fall back to a single
 * non-streamed response yielded as one chunk.
 */
async *_streamResponseChunks(
  messages: BaseMessage[],
  options: this["ParsedCallOptions"],
  runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
  const provider = this.modelProvider;
  const service = "bedrock-runtime";

  const endpointHost =
    this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;

  const bedrockMethod =
    provider === "anthropic" ||
    provider === "cohere" ||
    provider === "meta" ||
    provider === "mistral"
      ? "invoke-with-response-stream"
      : "invoke";

  const response = await this._signedFetch(messages, options, {
    bedrockMethod,
    endpointHost,
    provider,
  });

  if (response.status < 200 || response.status >= 300) {
    throw Error(
      `Failed to access underlying url '${endpointHost}': got ${
        response.status
      } ${response.statusText}: ${await response.text()}`
    );
  }

  if (
    provider === "anthropic" ||
    provider === "cohere" ||
    provider === "meta" ||
    provider === "mistral"
  ) {
    const toolsInParams = _toolsInParams(options);
    const reader = response.body?.getReader();
    const decoder = new TextDecoder();
    // _readChunks re-frames the raw byte stream into whole event-stream
    // messages before they are decoded by the codec.
    for await (const chunk of this._readChunks(reader)) {
      const event = this.codec.decode(chunk);
      if (
        (event.headers[":event-type"] !== undefined &&
          event.headers[":event-type"].value !== "chunk") ||
        event.headers[":content-type"].value !== "application/json"
      ) {
        throw Error(`Failed to get event chunk: got ${chunk}`);
      }
      const body = JSON.parse(decoder.decode(event.body));
      if (body.message) {
        throw new Error(body.message);
      }
      if (body.bytes !== undefined) {
        // Payload is base64-encoded JSON.
        const chunkResult = JSON.parse(
          decoder.decode(
            Uint8Array.from(atob(body.bytes), (m) => m.codePointAt(0) ?? 0)
          )
        );
        if (this.usesMessagesApi) {
          const chunk = BedrockLLMInputOutputAdapter.prepareMessagesOutput(
            provider,
            chunkResult,
            {
              // Content should _ONLY_ be coerced if tools are not in params
              // If they are, we need content to be of type MessageTypeComplex
              // so the tools can be passed through.
              coerceContentToString: !toolsInParams,
            }
          );
          if (chunk === undefined) {
            continue;
          }
          if (
            provider === "anthropic" &&
            chunk.generationInfo?.usage !== undefined
          ) {
            // Avoid bad aggregation in chunks, rely on final Bedrock data
            delete chunk.generationInfo.usage;
          }
          const finalMetrics =
            chunk.generationInfo?.["amazon-bedrock-invocationMetrics"];
          if (
            finalMetrics != null &&
            typeof finalMetrics === "object" &&
            isAIMessage(chunk.message)
          ) {
            chunk.message.usage_metadata = {
              input_tokens: finalMetrics.inputTokenCount,
              output_tokens: finalMetrics.outputTokenCount,
              total_tokens:
                finalMetrics.inputTokenCount + finalMetrics.outputTokenCount,
            };
          }
          if (isChatGenerationChunk(chunk)) {
            yield chunk;
          }
          // eslint-disable-next-line no-void
          void runManager?.handleLLMNewToken(chunk.text);
        } else {
          const text = BedrockLLMInputOutputAdapter.prepareOutput(
            provider,
            chunkResult
          );
          yield new ChatGenerationChunk({
            text,
            message: new AIMessageChunk({ content: text }),
          });
          // eslint-disable-next-line no-void
          void runManager?.handleLLMNewToken(text);
        }
      }
    }
  } else {
    // Provider does not stream: yield the entire response as one chunk.
    const json = await response.json();
    const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
    yield new ChatGenerationChunk({
      text,
      message: new AIMessageChunk({ content: text }),
    });
    // eslint-disable-next-line no-void
    void runManager?.handleLLMNewToken(text);
  }
}

/**
 * Wraps a ReadableStream reader in an async iterable that yields one
 * complete event-stream message per iteration. Each message's total
 * length is read from the 4-byte big-endian prelude, and partial reads
 * are buffered until a full message is available.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
_readChunks(reader: any) {
  // Concatenates two byte buffers into a freshly allocated one.
  function _concatChunks(a: Uint8Array, b: Uint8Array) {
    const newBuffer = new Uint8Array(a.length + b.length);
    newBuffer.set(a);
    newBuffer.set(b, a.length);
    return newBuffer;
  }

  // Reads the total message length from the prelude; 0 when the buffer
  // does not yet contain a complete prelude.
  function getMessageLength(buffer: Uint8Array) {
    if (buffer.byteLength < PRELUDE_TOTAL_LENGTH_BYTES) return 0;
    const view = new DataView(
      buffer.buffer,
      buffer.byteOffset,
      buffer.byteLength
    );
    return view.getUint32(0, false);
  }

  return {
    async *[Symbol.asyncIterator]() {
      let readResult = await reader.read();
      let buffer: Uint8Array = new Uint8Array(0);
      while (!readResult.done) {
        const chunk: Uint8Array = readResult.value;
        buffer = _concatChunks(buffer, chunk);
        let messageLength = getMessageLength(buffer);
        // Drain every complete message currently in the buffer.
        while (
          buffer.byteLength >= PRELUDE_TOTAL_LENGTH_BYTES &&
          buffer.byteLength >= messageLength
        ) {
          yield buffer.slice(0, messageLength);
          buffer = buffer.slice(messageLength);
          messageLength = getMessageLength(buffer);
        }
        readResult = await reader.read();
      }
    },
  };
}

_combineLLMOutput() {
  return {};
}

/**
 * Binds Anthropic-format tools to the model. Only Anthropic models are
 * supported; other providers throw.
 */
override bindTools(
  tools: BedrockChatToolType[],
  _kwargs?: Partial<this["ParsedCallOptions"]>
): Runnable<
  BaseLanguageModelInput,
  BaseMessageChunk,
  this["ParsedCallOptions"]
> {
  const provider = this.modelProvider;
  if (provider !== "anthropic") {
    throw new Error(
      "Currently, tool calling through Bedrock is only supported for Anthropic models."
    );
  }
  return this.bind({
    tools: formatTools(tools),
  });
}
}

// Duck-types a generation as a chunk by the presence of a concat method.
function isChatGenerationChunk(
  x?: ChatGenerationChunk | ChatGeneration
): x is ChatGenerationChunk {
  return (
    x !== undefined && typeof (x as ChatGenerationChunk).concat === "function"
  );
}

// Whether the given model id speaks the "messages" API: Anthropic models
// except claude-v2/claude-instant-v1, plus Cohere command-r variants.
function canUseMessagesApi(model: string): boolean {
  const modelProviderName = getModelProvider(model);

  if (
    modelProviderName === "anthropic" &&
    !model.includes("claude-v2") &&
    !model.includes("claude-instant-v1")
  ) {
    return true;
  }

  if (modelProviderName === "cohere") {
    if (model.includes("command-r-v1")) {
      return true;
    }
    if (model.includes("command-r-plus-v1")) {
      return true;
    }
  }

  return false;
}

// True when the model id is region-prefixed (cross-region inference),
// e.g. "us.anthropic....".
function isInferenceModel(modelId: string): boolean {
  const parts = modelId.split(".");
  return AWS_REGIONS.some((region) => parts[0] === region);
}

// Extracts the provider segment from the model id, skipping an optional
// leading region prefix.
function getModelProvider(modelId: string): string {
  const parts = modelId.split(".");
  if (isInferenceModel(modelId)) {
    return parts[1];
  } else {
    return parts[0];
  }
}

/**
 * @deprecated Use `BedrockChat` instead.
 */
export const ChatBedrock = BedrockChat;
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/bedrock/index.ts
import {
  defaultProvider,
  DefaultProviderInit,
} from "@aws-sdk/credential-provider-node";
import type { BaseChatModelParams } from "@langchain/core/language_models/chat_models";
import { BaseBedrockInput } from "../../utils/bedrock/index.js";
import { BedrockChat as BaseBedrockChat } from "./web.js";

// Constructor fields: Bedrock model options, base chat-model params, and the
// AWS default-credential-provider options (profile, role assumption, etc.).
export interface BedrockChatFields
  extends Partial<BaseBedrockInput>,
    BaseChatModelParams,
    Partial<DefaultProviderInit> {}

/**
 * AWS Bedrock chat model integration.
 *
 * Setup:
 * Install `@langchain/community` and set the following environment variables:
 *
 * ```bash
 * npm install @langchain/community
 * export AWS_REGION="your-aws-region"
 * export AWS_SECRET_ACCESS_KEY="your-aws-secret-access-key"
 * export AWS_ACCESS_KEY_ID="your-aws-access-key-id"
 * ```
 *
 * ## [Constructor args](/classes/langchain_community_chat_models_bedrock.BedrockChat.html#constructor)
 *
 * ## [Runtime args](/interfaces/langchain_community_chat_models_bedrock_web.BedrockChatCallOptions.html)
 *
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
 * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
 *
 * ```typescript
 * // When calling `.bind`, call options should be passed via the first argument
 * const llmWithArgsBound = llm.bind({
 *   stop: ["\n"],
 *   tools: [...],
 * });
 *
 * // When calling `.bindTools`, call options should be passed via the second argument
 * const llmWithTools = llm.bindTools(
 *   [...],
 *   {
 *     stop: ["stop on this token!"],
 *   }
 * );
 * ```
 *
 * ## Examples
 *
 * <details open>
 * <summary><strong>Instantiate</strong></summary>
 *
 * ```typescript
 * import { BedrockChat } from '@langchain/community/chat_models/bedrock';
 *
 * const llm = new BedrockChat({
 *   region: process.env.BEDROCK_AWS_REGION,
 *   maxRetries: 0,
 *   model: "anthropic.claude-3-5-sonnet-20240620-v1:0",
 *   temperature: 0,
 *   maxTokens: undefined,
 *   // other params...
* }); * * // You can also pass credentials in explicitly: * const llmWithCredentials = new BedrockChat({ * region: process.env.BEDROCK_AWS_REGION, * model: "anthropic.claude-3-5-sonnet-20240620-v1:0", * credentials: { * secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, * accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, * }, * }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Invoking</strong></summary> * * ```typescript * const messages = [ * { * type: "system" as const, * content: "You are a helpful translator. Translate the user sentence to French.", * }, * { * type: "human" as const, * content: "I love programming.", * }, * ]; * const result = await llm.invoke(messages); * console.log(result); * ``` * * ```txt * AIMessage { * "content": "Here's the translation to French:\n\nJ'adore la programmation.", * "additional_kwargs": { * "id": "msg_bdrk_01HCZHa2mKbMZeTeHjLDd286" * }, * "response_metadata": { * "type": "message", * "role": "assistant", * "model": "claude-3-5-sonnet-20240620", * "stop_reason": "end_turn", * "stop_sequence": null, * "usage": { * "input_tokens": 25, * "output_tokens": 19 * } * }, * "tool_calls": [], * "invalid_tool_calls": [] * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Streaming Chunks</strong></summary> * * ```typescript * for await (const chunk of await llm.stream(messages)) { * console.log(chunk); * } * ``` * * ```txt * AIMessageChunk { * "content": "", * "additional_kwargs": { * "id": "msg_bdrk_01RhFuGR9uJ2bj5GbdAma4y6" * }, * "response_metadata": { * "type": "message", * "role": "assistant", * "model": "claude-3-5-sonnet-20240620", * "stop_reason": null, * "stop_sequence": null * }, * } * AIMessageChunk { * "content": "J", * } * AIMessageChunk { * "content": "'adore la", * } * AIMessageChunk { * "content": " programmation.", * } * AIMessageChunk { * "content": "", * "additional_kwargs": { * "stop_reason": "end_turn", * "stop_sequence": null * }, * } * AIMessageChunk { * "content": 
"", * "response_metadata": { * "amazon-bedrock-invocationMetrics": { * "inputTokenCount": 25, * "outputTokenCount": 11, * "invocationLatency": 659, * "firstByteLatency": 506 * } * }, * "usage_metadata": { * "input_tokens": 25, * "output_tokens": 11, * "total_tokens": 36 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Aggregate Streamed Chunks</strong></summary> * * ```typescript * import { AIMessageChunk } from '@langchain/core/messages'; * import { concat } from '@langchain/core/utils/stream'; * * const stream = await llm.stream(messages); * let full: AIMessageChunk | undefined; * for await (const chunk of stream) { * full = !full ? chunk : concat(full, chunk); * } * console.log(full); * ``` * * ```txt * AIMessageChunk { * "content": "J'adore la programmation.", * "additional_kwargs": { * "id": "msg_bdrk_017b6PuBybA51P5LZ9K6gZHm", * "stop_reason": "end_turn", * "stop_sequence": null * }, * "response_metadata": { * "type": "message", * "role": "assistant", * "model": "claude-3-5-sonnet-20240620", * "stop_reason": null, * "stop_sequence": null, * "amazon-bedrock-invocationMetrics": { * "inputTokenCount": 25, * "outputTokenCount": 11, * "invocationLatency": 1181, * "firstByteLatency": 1177 * } * }, * "usage_metadata": { * "input_tokens": 25, * "output_tokens": 11, * "total_tokens": 36 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Bind tools</strong></summary> * * ```typescript * import { z } from 'zod'; * import { AIMessage } from '@langchain/core/messages'; * * const GetWeather = { * name: "GetWeather", * description: "Get the current weather in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const GetPopulation = { * name: "GetPopulation", * description: "Get the current population in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. 
San Francisco, CA") * }), * } * * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]); * const aiMsg: AIMessage = await llmWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?" * ); * console.log(aiMsg.tool_calls); * ``` * * ```txt * [ * { * name: 'GetWeather', * args: { location: 'Los Angeles, CA' }, * id: 'toolu_bdrk_01R2daqwHR931r4baVNzbe38', * type: 'tool_call' * }, * { * name: 'GetWeather', * args: { location: 'New York, NY' }, * id: 'toolu_bdrk_01WDadwNc7PGqVZvCN7Dr7eD', * type: 'tool_call' * }, * { * name: 'GetPopulation', * args: { location: 'Los Angeles, CA' }, * id: 'toolu_bdrk_014b8zLkpAgpxrPfewKinJFc', * type: 'tool_call' * }, * { * name: 'GetPopulation', * args: { location: 'New York, NY' }, * id: 'toolu_bdrk_01Tt8K2MUP15kNuMDFCLEFKN', * type: 'tool_call' * } * ] * ``` * </details> * * <br /> * * <details> * <summary><strong>Structured Output</strong></summary> * * ```typescript * const Joke = z.object({ * setup: z.string().describe("The setup of the joke"), * punchline: z.string().describe("The punchline to the joke"), * rating: z.number().optional().describe("How funny the joke is, from 1 to 10") * }).describe('Joke to tell user.'); * * const structuredLlm = llm.withStructuredOutput(Joke); * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); * console.log(jokeResult); * ``` * * ```txt * { * setup: "Why don't cats play poker in the jungle?", * punchline: 'Too many cheetahs!' 
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Response Metadata</strong></summary>
 *
 * ```typescript
 * const aiMsgForResponseMetadata = await llm.invoke(messages);
 * console.log(aiMsgForResponseMetadata.response_metadata);
 * ```
 *
 * ```txt
 * "response_metadata": {
 *   "type": "message",
 *   "role": "assistant",
 *   "model": "claude-3-5-sonnet-20240620",
 *   "stop_reason": "end_turn",
 *   "stop_sequence": null,
 *   "usage": {
 *     "input_tokens": 25,
 *     "output_tokens": 19
 *   }
 * }
 * ```
 * </details>
 */
export class BedrockChat extends BaseBedrockChat {
  static lc_name() {
    return "BedrockChat";
  }

  constructor(fields?: BedrockChatFields) {
    // Split the AWS credential-provider options off from the rest of the
    // fields; everything else is forwarded to the web BedrockChat base class.
    const {
      profile,
      filepath,
      configFilepath,
      ignoreCache,
      mfaCodeProvider,
      roleAssumer,
      roleArn,
      webIdentityTokenFile,
      roleAssumerWithWebIdentity,
      ...rest
    } = fields ?? {};
    super({
      ...rest,
      // Explicitly passed credentials win; otherwise resolve them through the
      // Node default provider chain (env vars, shared config, SSO, IMDS, ...).
      credentials:
        rest?.credentials ??
        defaultProvider({
          profile,
          filepath,
          configFilepath,
          ignoreCache,
          mfaCodeProvider,
          roleAssumer,
          roleArn,
          webIdentityTokenFile,
          roleAssumerWithWebIdentity,
        }),
    });
  }
}

export {
  convertMessagesToPromptAnthropic,
  convertMessagesToPrompt,
} from "./web.js";

/**
 * @deprecated Use `BedrockChat` instead.
 */
export const ChatBedrock = BedrockChat;
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatbaiduwenxin.int.test.ts
import { test, expect } from "@jest/globals"; import { SystemMessage, HumanMessage } from "@langchain/core/messages"; import { ChatBaiduWenxin } from "../baiduwenxin.js"; interface TestConfig { modelName: string | undefined; config: { description?: string; temperature?: number; topP?: number; penaltyScore?: number; streaming?: boolean; callbacks?: Array<{ nrNewTokens?: number; streamedCompletion?: string; handleLLMNewToken?: (token: string) => Promise<void>; }>; }; system?: string; message?: string; shouldThrow?: boolean; } test.skip("Test chat.stream work fine", async () => { const chat = new ChatBaiduWenxin({ modelName: "ERNIE-Bot", }); const stream = await chat.stream( `Translate "I love programming" into Chinese.` ); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(0); }); const runTest = async ({ modelName, config, system = "", message = "Hello!", shouldThrow = false, }: TestConfig) => { const description = `Test ChatBaiduWenxin ${modelName || "default model"} ${ config.description || "" }`.trim(); let nrNewTokens = 0; let streamedCompletion = ""; if (config.streaming) { // eslint-disable-next-line no-param-reassign config.callbacks = [ { async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }, ]; } test.skip(description, async () => { const chat = new ChatBaiduWenxin({ modelName, ...config, }); const messages = []; if (system) { messages.push(new SystemMessage(system)); } messages.push(new HumanMessage(message)); if (shouldThrow) { await expect(chat.invoke(messages)).rejects.toThrow(); return; } const res = await chat.invoke(messages); // console.log({ res }); if (config.streaming) { expect(nrNewTokens > 0).toBe(true); expect(res.text).toBe(streamedCompletion); } }); }; const testConfigs: TestConfig[] = [ { modelName: undefined, config: {} }, { modelName: "ERNIE-Bot", config: {} }, { modelName: "ERNIE-Bot", config: { description: "with temperature", 
temperature: 1 }, }, { modelName: "ERNIE-Bot", config: { description: "with topP", topP: 1 } }, { modelName: "ERNIE-Bot", config: { description: "with penaltyScore", penaltyScore: 1 }, }, { modelName: "ERNIE-Bot", config: { description: "in streaming mode", streaming: true, }, message: "您好,请讲个长笑话", }, { modelName: "ERNIE-Bot", config: { description: "illegal input should throw an error", temperature: 0, }, shouldThrow: true, }, { modelName: "ERNIE-Bot", config: { description: "illegal input in streaming mode should throw an error", streaming: true, temperature: 0, }, shouldThrow: true, }, { modelName: "ERNIE-Lite-8K", config: {} }, { modelName: "ERNIE-Lite-8K", config: { description: "in streaming mode", streaming: true, }, message: "您好,请讲个长笑话", }, { modelName: "ERNIE-Lite-8K", config: { description: "with system message", }, system: "你是一个说文言文的人", }, { modelName: "ERNIE-Bot-4", config: {}, }, { modelName: "ERNIE-Speed-8K", config: {}, }, { modelName: "ERNIE-Speed-128K", config: {}, }, ]; testConfigs.forEach((testConfig) => { // eslint-disable-next-line no-void void runTest(testConfig); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatbedrock.standard.int.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { BaseChatModelCallOptions } from "@langchain/core/language_models/chat_models";
import { BedrockChat } from "../bedrock/index.js";

/**
 * Standard integration-test harness for BedrockChat, configured against an
 * Anthropic Claude 3 Sonnet model with credentials taken from the
 * environment. Several standard tests are skipped or relaxed below; each
 * skip carries its reason.
 */
class BedrockChatStandardIntegrationTests extends ChatModelIntegrationTests<
  BaseChatModelCallOptions,
  AIMessageChunk
> {
  constructor() {
    const region = process.env.BEDROCK_AWS_REGION ?? "us-east-1";
    super({
      Cls: BedrockChat,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      supportsParallelToolCalls: true,
      constructorArgs: {
        region,
        model: "anthropic.claude-3-sonnet-20240229-v1:0",
        credentials: {
          secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,
          accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID,
        },
      },
    });
  }

  async testUsageMetadataStreaming() {
    this.skipTestMessage(
      "testUsageMetadataStreaming",
      "BedrockChat",
      "Streaming tokens is not currently supported."
    );
  }

  async testUsageMetadata() {
    this.skipTestMessage(
      "testUsageMetadata",
      "BedrockChat",
      "Usage metadata tokens is not currently supported."
    );
  }

  async testStreamTokensWithToolCalls() {
    this.skipTestMessage(
      "testStreamTokensWithToolCalls",
      "BedrockChat",
      "Flaky test with Bedrock not consistently returning tool calls. TODO: Fix prompting."
    );
  }

  async testModelCanUseToolUseAIMessageWithStreaming() {
    this.skipTestMessage(
      "testModelCanUseToolUseAIMessageWithStreaming",
      "BedrockChat",
      "Flaky test with Bedrock not consistently returning tool calls. TODO: Fix prompting."
    );
  }

  async testParallelToolCalling() {
    // Anthropic is very flaky when calling multiple tools in parallel.
    // Because of this, we pass `true` as the second arg to only verify
    // it can handle parallel tools in the message history.
    await super.testParallelToolCalling(undefined, true);
  }
}

const testClass = new BedrockChatStandardIntegrationTests();

test("BedrockChatStandardIntegrationTests", async () => {
  const testResults = await testClass.runTests();
  expect(testResults).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatbedrock.test.ts
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */

import { BedrockChat } from "../bedrock/web.js";

// Verifies the model id shows up in the identifying params.
test("Test Bedrock identifying params", async () => {
  const region = "us-west-2";
  const model = "anthropic.claude-3-sonnet-20240229-v1:0";

  const bedrock = new BedrockChat({
    maxTokens: 20,
    region,
    model,
    maxRetries: 0,
    trace: "ENABLED",
    guardrailIdentifier: "define",
    guardrailVersion: "DRAFT",
    credentials: {
      accessKeyId: "unused",
      secretAccessKey: "unused",
      sessionToken: "unused",
    },
  });

  expect(bedrock._identifyingParams()).toMatchObject({
    model,
  });
});

// Serialization should redact explicit credentials as secret placeholders.
// NOTE(review): `sessionToken` serializes with secret id AWS_SECRET_ACCESS_KEY
// (not a session-token env var) — looks intentional in the class's secret
// map, but verify against the lc_secrets definition in web.ts.
test("Test Bedrock serialization", async () => {
  // Clear env credentials so serialization reflects only the explicit ones.
  delete process.env.AWS_ACCESS_KEY_ID;
  delete process.env.AWS_SECRET_ACCESS_KEY;
  const bedrock = new BedrockChat({
    region: "us-west-2",
    model: "anthropic.claude-3-sonnet-20240229-v1:0",
    credentials: {
      accessKeyId: "unused",
      secretAccessKey: "unused",
      sessionToken: "unused",
    },
  });

  expect(JSON.stringify(bedrock)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","bedrock","BedrockChat"],"kwargs":{"region_name":"us-west-2","model_id":"anthropic.claude-3-sonnet-20240229-v1:0","credentials":{"accessKeyId":{"lc":1,"type":"secret","id":["AWS_ACCESS_KEY_ID"]},"secretAccessKey":{"lc":1,"type":"secret","id":["AWS_SECRET_ACCESS_KEY"]},"sessionToken":{"lc":1,"type":"secret","id":["AWS_SECRET_ACCESS_KEY"]}}}}`
  );
});

// With env credentials present, serialization also records the env-derived
// aws_access_key_id / aws_secret_access_key secrets.
test("Test Bedrock serialization from environment variables", async () => {
  process.env.AWS_ACCESS_KEY_ID = "foo";
  process.env.AWS_SECRET_ACCESS_KEY = "bar";
  const bedrock = new BedrockChat({
    region: "us-west-2",
    model: "anthropic.claude-3-sonnet-20240229-v1:0",
  });

  expect(JSON.stringify(bedrock)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","bedrock","BedrockChat"],"kwargs":{"region_name":"us-west-2","model_id":"anthropic.claude-3-sonnet-20240229-v1:0","aws_access_key_id":{"lc":1,"type":"secret","id":["AWS_ACCESS_KEY_ID"]},"aws_secret_access_key":{"lc":1,"type":"secret","id":["AWS_SECRET_ACCESS_KEY"]},"credentials":{"accessKeyId":{"lc":1,"type":"secret","id":["AWS_ACCESS_KEY_ID"]},"secretAccessKey":{"lc":1,"type":"secret","id":["AWS_SECRET_ACCESS_KEY"]}}}}`
  );
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatollama.standard.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelUnitTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatOllama, ChatOllamaCallOptions } from "../ollama.js";

/**
 * Standard unit-test harness for ChatOllama. Tool calling and structured
 * output are disabled, and the API-key test is skipped because Ollama is a
 * local model server that needs no key.
 */
class ChatOllamaStandardUnitTests extends ChatModelUnitTests<
  ChatOllamaCallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: ChatOllama,
      chatModelHasToolCalling: false,
      chatModelHasStructuredOutput: false,
      constructorArgs: {},
    });
  }

  testChatModelInitApiKey() {
    this.skipTestMessage(
      "testChatModelInitApiKey",
      "ChatOllama",
      "API key not required."
    );
  }
}

const suite = new ChatOllamaStandardUnitTests();

test("ChatOllamaStandardUnitTests", () => {
  expect(suite.runTests()).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatpremai.int.test.ts
import { describe, test, expect } from "@jest/globals"; import { ChatMessage, HumanMessage } from "@langchain/core/messages"; import { ChatPrem } from "../premai.js"; describe.skip("ChatPrem", () => { test("invoke", async () => { const chat = new ChatPrem(); const message = new HumanMessage("What color is the sky?"); const res = await chat.invoke([message]); expect(res.content.length).toBeGreaterThan(10); }); test("generate", async () => { const chat = new ChatPrem(); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message]]); // console.log(JSON.stringify(res, null, 2)); expect(res.generations[0][0].text.length).toBeGreaterThan(10); }); test("custom messages", async () => { const chat = new ChatPrem(); const res = await chat.invoke([new ChatMessage("Hello!", "user")]); // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); test("custom messages in streaming mode", async () => { const chat = new ChatPrem({ streaming: true }); const res = await chat.invoke([new ChatMessage("Hello!", "user")]); // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); test("streaming", async () => { const chat = new ChatPrem(); const message = new HumanMessage("What color is the sky?"); const stream = await chat.stream([message]); let iters = 0; let finalRes = ""; for await (const chunk of stream) { iters += 1; finalRes += chunk.content; } // console.log({ finalRes, iters }); expect(iters).toBeGreaterThan(1); }); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatfireworks.standard.int.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatFireworks, ChatFireworksCallOptions } from "../fireworks.js";

/**
 * Standard integration-test harness for ChatFireworks against the
 * firefunction-v2 model. The list-content tool-message test is skipped
 * because it is not implemented for this provider.
 */
class ChatFireworksStandardIntegrationTests extends ChatModelIntegrationTests<
  ChatFireworksCallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: ChatFireworks,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {
        model: "accounts/fireworks/models/firefunction-v2",
      },
    });
  }

  async testToolMessageHistoriesListContent() {
    this.skipTestMessage(
      "testToolMessageHistoriesListContent",
      "ChatFireworks",
      "Not implemented."
    );
  }
}

const suite = new ChatFireworksStandardIntegrationTests();

test("ChatFireworksStandardIntegrationTests", async () => {
  expect(await suite.runTests()).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatzhipuai.int.test.ts
import { test, expect } from "@jest/globals";
import { SystemMessage, HumanMessage } from "@langchain/core/messages";
import { ChatZhipuAI } from "../zhipuai.js";

// One table-driven test case for the ChatZhipuAI integration suite.
interface TestConfig {
  modelName: string | undefined;
  config: {
    description?: string;
    temperature?: number;
    topP?: number;
    streaming?: boolean;
    callbacks?: Array<{
      nrNewTokens?: number;
      streamedCompletion?: string;
      handleLLMNewToken?: (token: string) => Promise<void>;
    }>;
  };
  system?: string;
  message?: string;
  shouldThrow?: boolean;
}

test.skip("Test chat.stream work fine", async () => {
  const chat = new ChatZhipuAI({
    modelName: "glm-3-turbo",
  });
  const stream = await chat.stream(
    `Translate "I love programming" into Chinese.`
  );
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  // console.log(chunks);
  expect(chunks.length).toBeGreaterThan(0);
});

// Registers one (skipped) integration test for the given case. Streaming
// cases attach a token callback through a copy of the config (`passedConfig`),
// which is handed to `invoke` as call options, so the original case object is
// never mutated.
const runTest = async ({
  modelName,
  config,
  system = "",
  message = "Hello!",
  shouldThrow = false,
}: TestConfig) => {
  const description = `Test ChatZhipuAI ${modelName || "default model"} ${
    config.description || ""
  }`.trim();

  let nrNewTokens = 0;
  let streamedCompletion = "";
  const passedConfig = { ...config };
  if (passedConfig.streaming) {
    passedConfig.callbacks = [
      {
        async handleLLMNewToken(token: string) {
          nrNewTokens += 1;
          streamedCompletion += token;
        },
      },
    ];
  }

  test.skip(description, async () => {
    const chat = new ChatZhipuAI({
      modelName,
      ...config,
    });

    const messages = [];
    if (system) {
      messages.push(new SystemMessage(system));
    }
    messages.push(new HumanMessage(message));

    if (shouldThrow) {
      await expect(chat.invoke(messages)).rejects.toThrow();
      return;
    }

    // Callbacks flow in here as per-call options.
    const res = await chat.invoke(messages, passedConfig);
    // console.log({ res });

    if (passedConfig.streaming) {
      expect(nrNewTokens > 0).toBe(true);
      expect(res.content).toBe(streamedCompletion);
    }
  });
};

const testConfigs: TestConfig[] = [
  { modelName: undefined, config: {} },
  { modelName: "glm-3-turbo", config: {} },
  {
    modelName: "glm-3-turbo",
    config: { description: "with temperature", temperature: 1 },
  },
  { modelName: "glm-3-turbo", config: { description: "with topP", topP: 1 } },
  {
    modelName: "glm-3-turbo",
    config: { description: "with repetitionPenalty" },
  },
  {
    modelName: "glm-3-turbo",
    config: {
      description: "in streaming mode",
      streaming: true,
    },
    message: "您好,请讲个长笑话",
  },
  {
    modelName: "glm-3-turbo",
    config: {
      description: "illegal input should throw an error",
      temperature: 0,
    },
    shouldThrow: true,
  },
  {
    modelName: "glm-3-turbo",
    config: {
      description: "illegal input in streaming mode should throw an error",
      streaming: true,
      temperature: 0,
    },
    shouldThrow: true,
  },
  { modelName: "glm-4", config: {} },
  {
    modelName: "glm-4",
    config: {
      description: "in streaming mode",
      streaming: true,
    },
    message: "您好,请讲个长笑话",
  },
  {
    modelName: "glm-4",
    config: {
      description: "with system message",
    },
    system: "你是一个说文言文的人",
  },
  {
    modelName: "glm-4",
    config: {},
  },
];

testConfigs.forEach((testConfig) => {
  void runTest(testConfig);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatalitongyi.int.test.ts
import { test, expect } from "@jest/globals"; import { SystemMessage, HumanMessage } from "@langchain/core/messages"; import { ChatAlibabaTongyi } from "../alibaba_tongyi.js"; interface TestConfig { modelName: string | undefined; config: { description?: string; temperature?: number; topP?: number; repetitionPenalty?: number; streaming?: boolean; callbacks?: Array<{ nrNewTokens?: number; streamedCompletion?: string; handleLLMNewToken?: (token: string) => Promise<void>; }>; }; system?: string; message?: string; shouldThrow?: boolean; } const runTest = async ({ modelName, config, system = "", message = "Hello!", shouldThrow = false, }: TestConfig) => { const description = `Test ChatAlibabaTongyi ${modelName || "default model"} ${ config.description || "" }`.trim(); let nrNewTokens = 0; let streamedCompletion = ""; const passedConfig = { ...config }; if (passedConfig.streaming) { passedConfig.callbacks = [ { async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }, ]; } test.skip(description, async () => { const chat = new ChatAlibabaTongyi({ modelName, ...config, }); const messages = []; if (system) { messages.push(new SystemMessage(system)); } messages.push(new HumanMessage(message)); if (shouldThrow) { await expect(chat.invoke(messages)).rejects.toThrow(); return; } const res = await chat.invoke(messages); // console.log({ res }); // test streaming call const stream = await chat.stream( `Translate "I love programming" into Chinese.` ); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(0); if (passedConfig.streaming) { expect(nrNewTokens > 0).toBe(true); expect(res.text).toBe(streamedCompletion); } }); }; const testConfigs: TestConfig[] = [ { modelName: undefined, config: {} }, { modelName: "qwen-turbo", config: {} }, { modelName: "qwen-turbo", config: { description: "with temperature", temperature: 1 }, }, { modelName: "qwen-turbo", config: { description: "with 
topP", topP: 1 } }, { modelName: "qwen-turbo", config: { description: "with repetitionPenalty", repetitionPenalty: 1 }, }, { modelName: "qwen-turbo", config: { description: "in streaming mode", streaming: true, }, message: "您好,请讲个长笑话", }, { modelName: "qwen-turbo", config: { description: "illegal input should throw an error", temperature: 0, }, shouldThrow: true, }, { modelName: "qwen-turbo", config: { description: "illegal input in streaming mode should throw an error", streaming: true, temperature: 0, }, shouldThrow: true, }, { modelName: "qwen-plus", config: {} }, { modelName: "qwen-plus", config: { description: "in streaming mode", streaming: true, }, message: "您好,请讲个长笑话", }, { modelName: "qwen-plus", config: { description: "with system message", }, system: "你是一个说文言文的人", }, { modelName: "qwen-turbo-max", config: {}, }, ]; testConfigs.forEach((testConfig) => { void runTest(testConfig); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chattogetherai.standard.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { ChatModelUnitTests } from "@langchain/standard-tests"; import { AIMessageChunk } from "@langchain/core/messages"; import { ChatTogetherAI, ChatTogetherAICallOptions } from "../togetherai.js"; class ChatTogetherAIStandardUnitTests extends ChatModelUnitTests< ChatTogetherAICallOptions, AIMessageChunk > { constructor() { super({ Cls: ChatTogetherAI, chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: {}, }); process.env.TOGETHER_AI_API_KEY = "test"; } testChatModelInitApiKey() { // Unset the API key env var here so this test can properly check // the API key class arg. process.env.TOGETHER_AI_API_KEY = ""; super.testChatModelInitApiKey(); // Re-set the API key env var here so other tests can run properly. process.env.TOGETHER_AI_API_KEY = "test"; } } const testClass = new ChatTogetherAIStandardUnitTests(); test("ChatTogetherAIStandardUnitTests", () => { const testResults = testClass.runTests(); expect(testResults).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatfireworks.int.test.ts
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { describe, test } from "@jest/globals";
import { ChatMessage, HumanMessage } from "@langchain/core/messages";
import {
  PromptTemplate,
  ChatPromptTemplate,
  AIMessagePromptTemplate,
  HumanMessagePromptTemplate,
  SystemMessagePromptTemplate,
} from "@langchain/core/prompts";
import { ChatFireworks } from "../fireworks.js";

// Live integration tests for ChatFireworks (skipped by default).
describe.skip("ChatFireworks", () => {
  test("call", async () => {
    const chat = new ChatFireworks();
    const message = new HumanMessage("Hello!");
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const res = await chat.invoke([message]);
    // console.log({ res });
  });

  test("generate", async () => {
    const chat = new ChatFireworks();
    const message = new HumanMessage("Hello!");
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const res = await chat.generate([[message]]);
    // console.log(JSON.stringify(res, null, 2));
  });

  test("custom messages", async () => {
    const chat = new ChatFireworks();
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const res = await chat.invoke([new ChatMessage("Hello!", "user")]);
    // console.log(JSON.stringify(res, null, 2));
  });

  test("prompt templates", async () => {
    const chat = new ChatFireworks();

    // Render a system + human prompt pair and run it through the model.
    const systemPrompt = PromptTemplate.fromTemplate(
      "You are a helpful assistant who must always respond like a {job}."
    );

    const chatPrompt = ChatPromptTemplate.fromMessages([
      new SystemMessagePromptTemplate(systemPrompt),
      HumanMessagePromptTemplate.fromTemplate("{text}"),
    ]);

    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const responseA = await chat.generatePrompt([
      await chatPrompt.formatPromptValue({
        job: "pirate",
        text: "What would be a good company name a company that makes colorful socks?",
      }),
    ]);
    // console.log(responseA.generations);
  });

  test("longer chain of messages", async () => {
    const chat = new ChatFireworks();

    const chatPrompt = ChatPromptTemplate.fromMessages([
      HumanMessagePromptTemplate.fromTemplate(`Hi, my name is Joe!`),
      AIMessagePromptTemplate.fromTemplate(`Nice to meet you, Joe!`),
      HumanMessagePromptTemplate.fromTemplate("{text}"),
    ]);
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const responseA = await chat.generatePrompt([
      await chatPrompt.formatPromptValue({
        text: "What did I just say my name was?",
      }),
    ]);
    // console.log(responseA.generations);
  });

  test("Tool calling", async () => {
    // JSON-schema description of the tool argument, derived from zod.
    const zodSchema = z
      .object({
        location: z
          .string()
          .describe("The name of city to get the weather for."),
      })
      .describe(
        "Get the weather of a specific location and return the temperature in Celsius."
      );
    const chat = new ChatFireworks({
      modelName: "accounts/fireworks/models/firefunction-v1",
      temperature: 0,
    }).bind({
      tools: [
        {
          type: "function",
          function: {
            name: "get_current_weather",
            description: "Get the current weather in a given location",
            parameters: zodToJsonSchema(zodSchema),
          },
        },
      ],
    });
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const result = await chat.invoke("What is the current weather in SF?");
    // console.log(result);
  });
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatnovita.int.test.ts
import { describe, test } from "@jest/globals"; import { ChatMessage, HumanMessage } from "@langchain/core/messages"; import { PromptTemplate, ChatPromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; import { ChatNovitaAI } from "../novita.js"; describe("ChatNovitaAI", () => { test("invoke", async () => { const chat = new ChatNovitaAI(); const message = new HumanMessage("Hello!"); const res = await chat.invoke([message]); expect(res.content.length).toBeGreaterThan(10); }); test("generate", async () => { const chat = new ChatNovitaAI(); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message]]); expect(res.generations[0][0].text.length).toBeGreaterThan(10); }); test("custom messages", async () => { const chat = new ChatNovitaAI(); const res = await chat.invoke([new ChatMessage("Hello!", "user")]); expect(res.content.length).toBeGreaterThan(2); }); test("chaining", async () => { const chat = new ChatNovitaAI(); const prompt = ChatPromptTemplate.fromMessages([ [ "system", "You are a helpful assistant that translates {input_language} to {output_language}.", ], ["human", "{input}"], ]); const chain = prompt.pipe(chat); const response = await chain.invoke({ input_language: "English", output_language: "German", input: "I love programming.", }); expect(response.content.length).toBeGreaterThan(10); }); test("prompt templates", async () => { const chat = new ChatNovitaAI(); // PaLM doesn't support translation yet const systemPrompt = PromptTemplate.fromTemplate( "You are a helpful assistant who must always respond like a {job}." 
); const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), HumanMessagePromptTemplate.fromTemplate("{text}"), ]); const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ job: "pirate", text: "What would be a good company name a company that makes colorful socks?", }), ]); expect(responseA.generations[0][0].text.length).toBeGreaterThan(10); }); test("longer chain of messages", async () => { const chat = new ChatNovitaAI(); const chatPrompt = ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate(`Hi, my name is Joe!`), AIMessagePromptTemplate.fromTemplate(`Nice to meet you, Joe!`), HumanMessagePromptTemplate.fromTemplate("{text}"), ]); const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "What did I just say my name was?", }), ]); expect(responseA.generations[0][0].text.length).toBeGreaterThan(10); }); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatollama.int.test.ts
import { test } from "@jest/globals"; import * as fs from "node:fs/promises"; import { fileURLToPath } from "node:url"; import * as path from "node:path"; import { AIMessage, HumanMessage } from "@langchain/core/messages"; import { PromptTemplate } from "@langchain/core/prompts"; import { BytesOutputParser, StringOutputParser, } from "@langchain/core/output_parsers"; import { ChatOllama } from "../ollama.js"; test.skip("test call", async () => { const ollama = new ChatOllama({}); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const result = await ollama.invoke( "What is a good name for a company that makes colorful socks?" ); // console.log({ result }); }); test.skip("test call with callback", async () => { const ollama = new ChatOllama({ baseUrl: "http://localhost:11434", }); const tokens: string[] = []; const result = await ollama.invoke( "What is a good name for a company that makes colorful socks?", { callbacks: [ { handleLLMNewToken(token: string) { tokens.push(token); }, }, ], } ); expect(tokens.length).toBeGreaterThan(1); expect(result).toEqual(tokens.join("")); }); test.skip("test streaming call", async () => { const ollama = new ChatOllama({ baseUrl: "http://localhost:11434", }); const stream = await ollama.stream( `Translate "I love programming" into German.` ); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); }); test.skip("should abort the request", async () => { const ollama = new ChatOllama({ baseUrl: "http://localhost:11434", }); const controller = new AbortController(); await expect(() => { const ret = ollama.invoke("Respond with an extremely verbose response", { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow("This operation was aborted"); }); test.skip("Test multiple messages", async () => { const model = new ChatOllama({ baseUrl: "http://localhost:11434" }); // 
@eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke([ new HumanMessage({ content: "My name is Jonas" }), ]); // console.log({ res }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res2 = await model.invoke([ new HumanMessage("My name is Jonas"), new AIMessage( "Hello Jonas! It's nice to meet you. Is there anything I can help you with?" ), new HumanMessage("What did I say my name was?"), ]); // console.log({ res2 }); }); test.skip("should stream through with a bytes output parser", async () => { const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect. User: {input} AI:`; // Infer the input variables from the template const prompt = PromptTemplate.fromTemplate(TEMPLATE); const ollama = new ChatOllama({ model: "llama2", baseUrl: "http://127.0.0.1:11434", }); const outputParser = new BytesOutputParser(); const chain = prompt.pipe(ollama).pipe(outputParser); const stream = await chain.stream({ input: `Translate "I love programming" into German.`, }); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } // console.log(chunks.join("")); expect(chunks.length).toBeGreaterThan(1); }); test.skip("JSON mode", async () => { const TEMPLATE = `You are a pirate named Patchy. All responses must be in pirate dialect and in JSON format, with a property named "response" followed by the value. 
User: {input} AI:`; // Infer the input variables from the template const prompt = PromptTemplate.fromTemplate(TEMPLATE); const ollama = new ChatOllama({ model: "llama2", baseUrl: "http://127.0.0.1:11434", format: "json", }); const outputParser = new StringOutputParser(); const chain = prompt.pipe(ollama).pipe(outputParser); const res = await chain.invoke({ input: `Translate "I love programming" into German.`, }); expect(JSON.parse(res).response).toBeDefined(); }); test.skip("Test ChatOllama with an image", async () => { const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); const imageData = await fs.readFile(path.join(__dirname, "/data/hotdog.jpg")); const chat = new ChatOllama({ model: "llava", baseUrl: "http://127.0.0.1:11434", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([ new HumanMessage({ content: [ { type: "text", text: "What is in this image?", }, { type: "image_url", image_url: `data:image/jpeg;base64,${imageData.toString("base64")}`, }, ], }), ]); // console.log({ res }); }); test.skip("test max tokens (numPredict)", async () => { const ollama = new ChatOllama({ numPredict: 10, }).pipe(new StringOutputParser()); const stream = await ollama.stream( "explain quantum physics to me in as many words as possible" ); let numTokens = 0; let response = ""; for await (const s of stream) { numTokens += 1; response += s; } // console.log({ numTokens, response }); // Ollama doesn't always stream back the exact number of tokens, so we // check for a number which is slightly above the `numPredict`. expect(numTokens).toBeLessThanOrEqual(12); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/minimax.int.test.ts
import { expect, test } from "@jest/globals"; import { ChatMessage, HumanMessage, SystemMessage, } from "@langchain/core/messages"; import { LLMResult } from "@langchain/core/outputs"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; import { ChatMinimax } from "../minimax.js"; test.skip("Test ChatMinimax", async () => { const chat = new ChatMinimax({ modelName: "abab5.5-chat", botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], }); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([message]); // console.log({ res }); }); test.skip("Test ChatMinimax with SystemChatMessage", async () => { const chat = new ChatMinimax(); const system_message = new SystemMessage("You are to chat with a user."); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([system_message, message]); // console.log({ res }); }); test.skip("Test ChatMinimax Generate", async () => { const chat = new ChatMinimax({ botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], }); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message], [message]]); expect(res.generations.length).toBe(2); for (const generation of res.generations) { expect(generation.length).toBe(1); for (const message of generation) { // console.log(message.text); expect(typeof message.text).toBe("string"); } } // console.log({ res }); }); test.skip("Test ChatMinimax Generate throws when one of the calls fails", async () => { const chat = new ChatMinimax({ botSetting: [ { bot_name: "MM Assistant", content: 
"MM Assistant is an AI Assistant developed by minimax.", }, ], }); const message = new HumanMessage("Hello!"); await expect(() => chat.generate([[message], [message]], { signal: AbortSignal.timeout(10), }) ).rejects.toThrow("TimeoutError: The operation was aborted due to timeout"); }); test.skip("Test ChatMinimax tokenUsage", async () => { let tokenUsage = { totalTokens: 0, }; const model = new ChatMinimax({ botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); const message = new HumanMessage("Hello"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke([message]); // console.log({ res }); expect(tokenUsage.totalTokens).toBeGreaterThan(0); }); test.skip("Test ChatMinimax tokenUsage with a batch", async () => { let tokenUsage = { totalTokens: 0, }; const model = new ChatMinimax({ temperature: 0.01, botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.generate([ [new HumanMessage("Hello")], [new HumanMessage("Hi")], ]); // console.log({ tokenUsage }); // console.log(res); expect(tokenUsage.totalTokens).toBeGreaterThan(0); }); test.skip("Test ChatMinimax in streaming mode", async () => { let nrNewTokens = 0; let streamedCompletion = ""; const model = new ChatMinimax({ streaming: true, tokensToGenerate: 10, botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], callbacks: [ { async handleLLMNewToken(token: string) { 
nrNewTokens += 1; streamedCompletion += token; }, }, ], }); const message = new HumanMessage("Hello!"); const result = await model.invoke([message]); // console.log(result); expect(nrNewTokens > 0).toBe(true); expect(result.content).toBe(streamedCompletion); }, 10000); test.skip("OpenAI Chat, docs, prompt templates", async () => { const chat = new ChatMinimax({ temperature: 0.01, tokensToGenerate: 10, }); const systemPrompt = PromptTemplate.fromTemplate( "You are a helpful assistant that translates {input_language} to {output_language}." ); const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), HumanMessagePromptTemplate.fromTemplate("{text}"), ]); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", output_language: "French", text: "I love programming.", }), ]); // console.log(responseA.generations); }, 5000); test.skip("Test OpenAI with signal in call options", async () => { const model = new ChatMinimax({ tokensToGenerate: 5 }); const controller = new AbortController(); await expect(() => { const ret = model.invoke([new HumanMessage("Print hello world")], { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test.skip("Test OpenAI with specific roles in ChatMessage", async () => { const chat = new ChatMinimax({ tokensToGenerate: 10 }); const system_message = new ChatMessage( "You are to chat with a user.", "system" ); const user_message = new ChatMessage("Hello!", "user"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([system_message, user_message]); // console.log({ res }); }); test.skip("Function calling ", async () => { const weatherFunction = { name: "get_weather", description: " Get weather information.", parameters: { type: "object", properties: 
{ location: { type: "string", description: " The location to get the weather", }, }, required: ["location"], }, }; const model = new ChatMinimax({ botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], }).bind({ functions: [weatherFunction], }); const result = await model.invoke([ new HumanMessage({ content: " What is the weather like in NewYork tomorrow?", name: "I", }), ]); // console.log(result); expect(result.additional_kwargs.function_call?.name).toBe("get_weather"); }); test.skip("Test ChatMinimax Function calling ", async () => { const weatherFunction = { name: "get_weather", description: " Get weather information.", parameters: { type: "object", properties: { location: { type: "string", description: " The location to get the weather", }, }, required: ["location"], }, }; const model = new ChatMinimax({ botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], }).bind({ functions: [weatherFunction], }); const result = await model.invoke([ new HumanMessage({ content: " What is the weather like in NewYork tomorrow?", name: "I", }), ]); // console.log(result); expect(result.additional_kwargs.function_call?.name).toBe("get_weather"); }); test.skip("Test ChatMinimax Glyph", async () => { const model = new ChatMinimax({ modelName: "abab5.5-chat", botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], }).bind({ replyConstraints: { sender_type: "BOT", sender_name: "MM Assistant", glyph: { type: "raw", raw_glyph: "The translated text:{{gen 'content'}}", }, }, }); const messagesTemplate = ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate( " Please help me translate the following sentence in English: {text}" ), ]); const messages = await messagesTemplate.formatMessages({ text: "你好" }); const result = await model.invoke(messages); // console.log(result); 
expect(result.content).toMatch(/The translated text:.*/); }); test.skip("Test ChatMinimax Plugins", async () => { const model = new ChatMinimax({ modelName: "abab5.5-chat", botSetting: [ { bot_name: "MM Assistant", content: "MM Assistant is an AI Assistant developed by minimax.", }, ], }).bind({ plugins: ["plugin_web_search"], }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const result = await model.invoke([ new HumanMessage({ content: " What is the weather like in NewYork tomorrow?", }), ]); // console.log(result); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/webllm.int.test.ts
// TODO: Fix for Node environments — the @mlc-ai/web-llm engine is
// browser-only, so the whole mock-based suite below is commented out and the
// remaining test is an intentional no-op placeholder that keeps the suite
// registered until the mock can run under Node.
// import { ChatWebLLM, WebLLMInputs } from "../webllm.js";
// import * as webllm from "@mlc-ai/web-llm";

// jest.mock("@mlc-ai/web-llm", () => ({
//   Engine: jest.fn().mockImplementation(() => ({
//     reload: jest.fn().mockResolvedValue(undefined),
//     setInitProgressCallback: jest.fn(),
//     chat: {
//       completions: {
//         create: jest.fn().mockImplementation(() => {
//           const messages = [
//             {
//               choices: [
//                 {
//                   delta: { content: "Hello" },
//                   logprobs: null,
//                   finish_reason: "complete",
//                 },
//               ],
//             },
//             {
//               choices: [
//                 {
//                   delta: { content: "How are you?" },
//                   logprobs: null,
//                   finish_reason: "complete",
//                 },
//               ],
//             },
//           ];
//           return (async function* () {
//             for (let msg of messages) {
//               yield msg;
//             }
//           })();
//         }),
//       },
//     },
//   })),
// }));

describe("ChatWebLLM Integration Tests", () => {
  // let chatWebLLM: ChatWebLLM;
  // let modelRecord = { model_id: "test-model" };

  // beforeEach(() => {
  //   const inputs: WebLLMInputs = {
  //     modelRecord: modelRecord,
  //     appConfig: {},
  //     chatOpts: {},
  //   };
  //   chatWebLLM = new ChatWebLLM(inputs);
  // });

  // Placeholder: body is fully commented out pending Node support (see TODO).
  test("ChatWebLLM initializes and processes messages correctly", async () => {
    // const options = {}; // Adjust options as necessary
    // const response = await chatWebLLM.invoke("Hello", options);
    // expect(response).toBe("Hello");
    // expect(webllm.Engine).toHaveBeenCalled();
    // expect(webllm.Engine().chat.completions.create).toHaveBeenCalledWith({
    //   stream: true,
    //   messages: [{ role: "user", content: "Hello" }],
    //   stop: null,
    //   logprobs: true,
    // });
  });
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/ibm.test.ts
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-explicit-any */
// Unit tests for the ChatWatsonx constructor and authentication helper.
// No network calls are made: a fake IAM credential object is passed in.
import WatsonxAiMlVml_v1 from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js";
import { ChatWatsonx, ChatWatsonxInput, WatsonxCallParams } from "../ibm.js";
import { authenticateAndSetInstance } from "../../utils/ibm.js";

// Fake credentials so constructors succeed without a real API key.
const fakeAuthProp = {
  watsonxAIAuthType: "iam",
  watsonxAIApikey: "fake_key",
};

/**
 * Identity helper used purely to give a string key the `keyof T` type
 * expected by `checkProperty` below.
 */
export function getKey<K>(key: K): K {
  return key;
}

/**
 * Asserts that every property in `testProps` (recursing into nested objects)
 * was copied onto `instance`, and — when `notExTestProps` is given — that
 * none of those unknown properties leaked onto the instance.
 */
export const testProperties = (
  instance: ChatWatsonx,
  testProps: ChatWatsonxInput,
  notExTestProps?: { [key: string]: any }
) => {
  // `existing = true` checks presence/equality; `false` checks absence.
  const checkProperty = <T extends { [key: string]: any }>(
    testProps: T,
    instance: T,
    existing = true
  ) => {
    Object.keys(testProps).forEach((key) => {
      const keys = getKey<keyof T>(key);
      type Type = Pick<T, typeof keys>;

      // Recurse into nested option objects; compare leaves with toBe.
      if (typeof testProps[key as keyof T] === "object")
        checkProperty<Type>(testProps[key as keyof T], instance[key], existing);
      else {
        if (existing)
          expect(instance[key as keyof T]).toBe(testProps[key as keyof T]);
        else if (instance) expect(instance[key as keyof T]).toBeUndefined();
      }
    });
  };
  checkProperty<typeof testProps>(testProps, instance);
  if (notExTestProps)
    checkProperty<typeof notExTestProps>(notExTestProps, instance, false);
};

describe("LLM unit tests", () => {
  describe("Positive tests", () => {
    // The auth helper should return a configured SDK client instance.
    test("Test authentication function", () => {
      const instance = authenticateAndSetInstance({
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        ...fakeAuthProp,
      });
      expect(instance).toBeInstanceOf(WatsonxAiMlVml_v1);
    });

    // Constructor copies the core identification properties verbatim.
    test("Test basic properties after init", async () => {
      const testProps = {
        model: "mistralai/mistral-large",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      const instance = new ChatWatsonx({ ...testProps, ...fakeAuthProp });

      testProperties(instance, testProps);
    });

    // The expected public/internal methods exist on a constructed instance.
    test("Test methods after init", () => {
      const testProps: ChatWatsonxInput = {
        model: "mistralai/mistral-large",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      const instance = new ChatWatsonx({
        ...testProps,
        ...fakeAuthProp,
      });
      expect(instance.getNumTokens).toBeDefined();
      expect(instance._generate).toBeDefined();
      expect(instance._streamResponseChunks).toBeDefined();
      expect(instance.invocationParams).toBeDefined();
    });

    // Call parameters (sampling, retry, concurrency) are stored as given.
    test("Test properties after init", async () => {
      const testProps: WatsonxCallParams & ChatWatsonxInput = {
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
        model: "ibm/granite-13b-chat-v2",
        maxTokens: 100,
        temperature: 0.1,
        timeLimit: 10000,
        topP: 1,
        maxRetries: 3,
        maxConcurrency: 3,
      };
      const instance = new ChatWatsonx({ ...testProps, ...fakeAuthProp });

      testProperties(instance, testProps);
    });
  });

  describe("Negative tests", () => {
    // Missing projectId/spaceId must be rejected at construction time.
    test("Missing id", async () => {
      const testProps: ChatWatsonxInput = {
        model: "mistralai/mistral-large",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
      };
      expect(
        () =>
          new ChatWatsonx({
            ...testProps,
            ...fakeAuthProp,
          })
      ).toThrowError();
    });

    // Each required constructor property missing in turn must throw.
    test("Missing other props", async () => {
      // @ts-expect-error Intentionally passing not enough parameters
      const testPropsProjectId: ChatWatsonxInput = {
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      expect(
        () =>
          new ChatWatsonx({
            ...testPropsProjectId,
            ...fakeAuthProp,
          })
      ).toThrowError();
      // @ts-expect-error Intentionally passing not enough parameters
      const testPropsServiceUrl: ChatWatsonxInput = {
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
      };
      expect(
        () =>
          new ChatWatsonx({
            ...testPropsServiceUrl,
            ...fakeAuthProp,
          })
      ).toThrowError();
      const testPropsVersion = {
        version: "2024-05-31",
      };
      expect(
        () =>
          new ChatWatsonx({
            // @ts-expect-error Intentionally passing wrong type of an object
            testPropsVersion,
          })
      ).toThrowError();
    });

    // Supplying both projectId and spaceId is ambiguous and must throw.
    test("Passing more than one id", async () => {
      const testProps: ChatWatsonxInput = {
        model: "mistralai/mistral-large",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
        spaceId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      expect(
        () =>
          new ChatWatsonx({
            ...testProps,
            ...fakeAuthProp,
          })
      ).toThrowError();
    });

    // Unknown properties are silently dropped, not copied onto the instance.
    test("Not existing property passed", async () => {
      const testProps = {
        model: "mistralai/mistral-large",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
        projectId: process.env.WATSONX_AI_PROJECT_ID || "testString",
      };
      const notExTestProps = {
        notExisting: 12,
        notExObj: {
          notExProp: 12,
        },
      };
      const instance = new ChatWatsonx({
        ...testProps,
        ...notExTestProps,
        ...fakeAuthProp,
      });
      testProperties(instance, testProps, notExTestProps);
    });
  });
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatfireworks-agent.int.test.ts
// Integration test for using ChatFireworks as the LLM of a tool-calling
// agent. The entire body is commented out (see TODO below) and the remaining
// test is a deliberate no-op placeholder so the file still registers a suite.
// import { test, expect } from "@jest/globals";
// import { ChatPromptTemplate } from "@langchain/core/prompts";
// import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
// import { TavilySearchResults } from "../../tools/tavily_search.js";
// import { Calculator } from "../../tools/calculator.js";
// import { ChatFireworks } from "../fireworks.js";

// const tool = new TavilySearchResults({ maxResults: 1 });
// tool.description = tool.description += " You can also use this tool to check the current weather.";
// const tools = [tool, new Calculator()];

// TODO: This test breaks CI build due to dependencies. Figure out a way around it.
test("createToolCallingAgent works", async () => {
  // const prompt = ChatPromptTemplate.fromMessages([
  //   ["system", "You are a helpful assistant. Use tools as often as possible"],
  //   ["placeholder", "{chat_history}"],
  //   ["human", "{input}"],
  //   ["placeholder", "{agent_scratchpad}"],
  // ]);
  // const llm = new ChatFireworks({
  //   model: "accounts/fireworks/models/firefunction-v1",
  //   temperature: 0,
  // });
  // const agent = await createToolCallingAgent({
  //   llm,
  //   tools,
  //   prompt,
  // });
  // const agentExecutor = new AgentExecutor({
  //   agent,
  //   tools,
  // });
  // const input = "What is the current weather in SF?";
  // const result = await agentExecutor.invoke({
  //   input,
  // });
  // console.log(result);
  // expect(result.input).toBe(input);
  // expect(typeof result.output).toBe("string");
  // // Length greater than 10 because any less than that would warrant
  // // an investigation into why such a short generation was returned.
  // expect(result.output.length).toBeGreaterThan(10);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chattogetherai.standard.int.test.ts
/* eslint-disable no-process-env */
// Runs the shared chat-model integration test suite against ChatTogetherAI.
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatTogetherAI, ChatTogetherAICallOptions } from "../togetherai.js";

/**
 * Standard-tests harness for ChatTogetherAI. The model supports both tool
 * calling and structured output, so those capability flags are enabled;
 * no extra constructor arguments are required.
 */
class ChatTogetherAIStandardIntegrationTests extends ChatModelIntegrationTests<
  ChatTogetherAICallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: ChatTogetherAI,
      constructorArgs: {},
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
    });
  }
}

const integrationSuite = new ChatTogetherAIStandardIntegrationTests();

// The suite reports an overall pass/fail boolean; assert it passed.
test("ChatTogetherAIStandardIntegrationTests", async () => {
  const allTestsPassed = await integrationSuite.runTests();
  expect(allTestsPassed).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/ibm.standard.test.ts
/* eslint-disable no-process-env */
// Runs the shared chat-model unit test suite against ChatWatsonx.
import { test, expect } from "@jest/globals";
import { ChatModelUnitTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { LangSmithParams } from "@langchain/core/language_models/chat_models";
import {
  ChatWatsonx,
  ChatWatsonxInput,
  WatsonxCallOptionsChat,
  WatsonxCallParams,
} from "../ibm.js";
import { WatsonxAuth } from "../../types/ibm.js";

/**
 * Standard-tests harness for ChatWatsonx. Constructor arguments fall back to
 * dummy strings when the watsonx.ai environment variables are absent so the
 * unit suite can run without real credentials.
 */
class ChatWatsonxStandardTests extends ChatModelUnitTests<
  WatsonxCallOptionsChat,
  AIMessageChunk,
  ChatWatsonxInput &
    WatsonxAuth &
    Partial<Omit<WatsonxCallParams, "tool_choice">>
> {
  constructor() {
    super({
      Cls: ChatWatsonx,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {
        model: "mistralai/mistral-large",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString",
        projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString",
        watsonxAIAuthType: "iam",
        watsonxAIApikey: "testString",
      },
    });
  }

  // Override the default LangSmith expectations: watsonx reports fixed
  // placeholder values and has no stop-sequence support.
  expectedLsParams(): Partial<LangSmithParams> {
    console.warn(
      "ChatWatsonx does not support stop sequences. Overwrite params."
    );
    const expected: Partial<LangSmithParams> = {
      ls_provider: "watsonx",
      ls_model_name: "string",
      ls_model_type: "chat",
      ls_temperature: 0,
      ls_max_tokens: 0,
    };
    return expected;
  }
}

const unitSuite = new ChatWatsonxStandardTests();

// Unit-suite runTests() is synchronous and returns an overall pass flag.
test("ChatWatsonxStandardTests", () => {
  const allTestsPassed = unitSuite.runTests();
  expect(allTestsPassed).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatfriendli.int.test.ts
import { test } from "@jest/globals"; import { HumanMessage } from "@langchain/core/messages"; import { ChatFriendli } from "../friendli.js"; describe.skip("ChatFriendli", () => { test("call", async () => { const chatFriendli = new ChatFriendli({ maxTokens: 20 }); const message = new HumanMessage("1 + 1 = "); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chatFriendli.invoke([message]); // console.log({ res }); }); test("generate", async () => { const chatFriendli = new ChatFriendli({ maxTokens: 20 }); const message = new HumanMessage("1 + 1 = "); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chatFriendli.generate([[message]]); // console.log(JSON.stringify(res, null, 2)); }); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts
// libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts /* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test, expect } from "@jest/globals"; import { HumanMessage } from "@langchain/core/messages"; import { AgentExecutor, createToolCallingAgent } from "langchain/agents"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { concat } from "@langchain/core/utils/stream"; import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { ChatOpenAI } from "@langchain/openai"; import { BedrockChat as BedrockChatWeb } from "../bedrock/web.js"; import { TavilySearchResults } from "../../tools/tavily_search.js"; void testChatModel( "Test Bedrock chat model Generating search queries: Command-r", "us-west-2", "cohere.command-r-v1:0", "Who is more popular: Nsync or Backstreet Boys?", { search_queries_only: true, } ); void testChatModel( "Test Bedrock chat model: Command-r", "us-west-2", "cohere.command-r-v1:0", "What is your name?", {} ); void testChatModel( "Test Bedrock chat model: Command-r", "us-west-2", "cohere.command-r-v1:0", "What are the characteristics of the emperor penguin?", { documents: [ { title: "Tall penguins", snippet: "Emperor penguins are the tallest." }, { title: "Penguin habitats", snippet: "Emperor penguins only live in Antarctica.", }, ], } ); void testChatStreamingModel( "Test Bedrock chat model streaming: Command-r", "us-west-2", "cohere.command-r-v1:0", "What is your name and something about yourself?", {} ); void testChatStreamingModel( "Test Bedrock chat model streaming: Command-r", "us-west-2", "cohere.command-r-v1:0", "What are the characteristics of the emperor penguin?", { documents: [ { title: "Tall penguins", snippet: "Emperor penguins are the tallest." 
}, { title: "Penguin habitats", snippet: "Emperor penguins only live in Antarctica.", }, ], } ); void testChatHandleLLMNewToken( "Test Bedrock chat model HandleLLMNewToken: Command-r", "us-west-2", "cohere.command-r-v1:0", "What is your name and something about yourself?" ); void testChatModel( "Test Bedrock chat model: Mistral-7b-instruct", "us-west-2", "mistral.mistral-7b-instruct-v0:2", "What is your name?", {} ); void testChatStreamingModel( "Test Bedrock chat model streaming: Mistral-7b-instruct", "us-west-2", "mistral.mistral-7b-instruct-v0:2", "What is your name and something about yourself?", {} ); void testChatHandleLLMNewToken( "Test Bedrock chat model HandleLLMNewToken: Mistral-7b-instruct", "us-west-2", "mistral.mistral-7b-instruct-v0:2", "What is your name and something about yourself?" ); void testChatModel( "Test Bedrock chat model: Claude-3", "us-west-2", "anthropic.claude-3-sonnet-20240229-v1:0", "What is your name?", {} // "ENABLED", // "<your-guardrail-id>", // "DRAFT", // { tagSuffix: "test", streamProcessingMode: "SYNCHRONOUS" } ); void testChatStreamingModel( "Test Bedrock chat model streaming: Claude-3", "us-west-2", "anthropic.claude-3-sonnet-20240229-v1:0", "What is your name and something about yourself?", {} // "ENABLED", // "<your-guardrail-id>", // "DRAFT", // { tagSuffix: "test", streamProcessingMode: "SYNCHRONOUS" } ); void testChatHandleLLMNewToken( "Test Bedrock chat model HandleLLMNewToken: Claude-3", "us-west-2", "anthropic.claude-3-sonnet-20240229-v1:0", "What is your name and something about yourself?" 
// "ENABLED", // "<your-guardrail-id>", // "DRAFT", // { tagSuffix: "test", streamProcessingMode: "SYNCHRONOUS" } ); /** * Tests a BedrockChat model * @param title The name of the test to run * @param defaultRegion The AWS region to default back to if not set via environment * @param model The model string to test * @param message The prompt test to send to the LLM * @param modelKwargs Optional guardrail configuration * @param trace Optional trace setting * @param guardrailIdentifier Optional guardrail identifier * @param guardrailVersion Optional guardrail version * @param guardrailConfig Optional guardrail configuration */ async function testChatModel( title: string, defaultRegion: string, model: string, message: string, modelKwargs?: Record<string, unknown>, trace?: "ENABLED" | "DISABLED", guardrailIdentifier?: string, guardrailVersion?: string, guardrailConfig?: { tagSuffix: string; streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS"; } ) { test(title, async () => { const region = process.env.BEDROCK_AWS_REGION ?? 
defaultRegion; const bedrock = new BedrockChatWeb({ maxTokens: 200, region, model, maxRetries: 0, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, // sessionToken: process.env.BEDROCK_AWS_SESSION_TOKEN, }, modelKwargs, ...(trace && guardrailIdentifier && guardrailVersion && { trace, guardrailIdentifier, guardrailVersion, guardrailConfig, }), }); const res = await bedrock.invoke([new HumanMessage(message)]); // console.log(res, res.content); expect(res).toBeDefined(); if (trace && guardrailIdentifier && guardrailVersion) { expect(bedrock.trace).toBe(trace); expect(bedrock.guardrailIdentifier).toBe(guardrailIdentifier); expect(bedrock.guardrailVersion).toBe(guardrailVersion); expect(bedrock.guardrailConfig).toEqual(guardrailConfig); } }); } /** * Tests a BedrockChat model with a streaming response * @param title The name of the test to run * @param defaultRegion The AWS region to default back to if not set via environment * @param model The model string to test * @param message The prompt test to send to the LLM * @param modelKwargs Optional guardrail configuration * @param trace Optional trace setting * @param guardrailIdentifier Optional guardrail identifier * @param guardrailVersion Optional guardrail version * @param guardrailConfig Optional guardrail configuration */ async function testChatStreamingModel( title: string, defaultRegion: string, model: string, message: string, modelKwargs?: Record<string, unknown>, trace?: "ENABLED" | "DISABLED", guardrailIdentifier?: string, guardrailVersion?: string, guardrailConfig?: { tagSuffix: string; streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS"; } ) { test(title, async () => { const region = process.env.BEDROCK_AWS_REGION ?? 
defaultRegion; const bedrock = new BedrockChatWeb({ maxTokens: 200, region, model, maxRetries: 0, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, // sessionToken: process.env.BEDROCK_AWS_SESSION_TOKEN, }, modelKwargs, ...(trace && guardrailIdentifier && guardrailVersion && { trace, guardrailIdentifier, guardrailVersion, guardrailConfig, }), }); const stream = await bedrock.stream([ new HumanMessage({ content: message, }), ]); const chunks = []; for await (const chunk of stream) { // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); }); } /** * Tests a BedrockChat model with a streaming response using a new token callback * @param title The name of the test to run * @param defaultRegion The AWS region to default back to if not set via environment * @param model The model string to test * @param message The prompt test to send to the LLM * @param trace Optional trace setting * @param guardrailIdentifier Optional guardrail identifier * @param guardrailVersion Optional guardrail version * @param guardrailConfig Optional guardrail configuration */ async function testChatHandleLLMNewToken( title: string, defaultRegion: string, model: string, message: string, trace?: "ENABLED" | "DISABLED", guardrailIdentifier?: string, guardrailVersion?: string, guardrailConfig?: { tagSuffix: string; streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS"; } ) { test(title, async () => { const region = process.env.BEDROCK_AWS_REGION ?? 
defaultRegion; const tokens: string[] = []; const bedrock = new BedrockChatWeb({ maxTokens: 200, region, model, maxRetries: 0, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, // sessionToken: process.env.BEDROCK_AWS_SESSION_TOKEN, }, streaming: true, callbacks: [ { handleLLMNewToken: (token) => { tokens.push(token); }, handleLLMEnd(_output) { // console.log(output); }, }, ], ...(trace && guardrailIdentifier && guardrailVersion && { trace, guardrailIdentifier, guardrailVersion, guardrailConfig, }), }); const stream = await bedrock.invoke([new HumanMessage(message)]); expect(tokens.length).toBeGreaterThan(1); expect(stream.content).toEqual(tokens.join("")); }); } test.skip("Tool calling agent with Anthropic", async () => { const tools = [new TavilySearchResults({ maxResults: 1 })]; const region = process.env.BEDROCK_AWS_REGION; const bedrock = new BedrockChatWeb({ maxTokens: 200, region, model: "anthropic.claude-3-sonnet-20240229-v1:0", maxRetries: 0, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, }, }); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["placeholder", "{chat_history}"], ["human", "{input}"], ["placeholder", "{agent_scratchpad}"], ]); const agent = await createToolCallingAgent({ llm: bedrock, tools, prompt, }); const agentExecutor = new AgentExecutor({ agent, tools, }); const input = "what is the current weather in SF?"; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const result = await agentExecutor.invoke({ input, }); // console.log(result); }); test.skip.each([ "amazon.titan-text-express-v1", // These models should be supported in the future // "amazon.titan-text-lite-v1", // "amazon.titan-text-agile-v1", ])("Test Bedrock base chat model: %s", async (model) => { const region = 
process.env.BEDROCK_AWS_REGION ?? "us-west-2"; const bedrock = new BedrockChatWeb({ region, model, maxRetries: 0, modelKwargs: {}, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, // sessionToken: process.env.BEDROCK_AWS_SESSION_TOKEN, }, }); const res = await bedrock.invoke([new HumanMessage("What is your name?")]); // console.log(res); expect(res.content.length).toBeGreaterThan(1); }); test.skip("withStructuredOutput", async () => { const weatherTool = z .object({ city: z.string().describe("The city to get the weather for"), state: z.string().describe("The state to get the weather for").optional(), }) .describe("Get the weather for a city"); const model = new BedrockChatWeb({ region: process.env.BEDROCK_AWS_REGION, model: "anthropic.claude-3-sonnet-20240229-v1:0", maxRetries: 0, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, }, }); const modelWithTools = model.withStructuredOutput(weatherTool, { name: "weather", }); const response = await modelWithTools.invoke( "Whats the weather like in san francisco?" 
); expect(response.city.toLowerCase()).toBe("san francisco"); }); test.skip(".bind tools", async () => { const weatherTool = z .object({ city: z.string().describe("The city to get the weather for"), state: z.string().describe("The state to get the weather for").optional(), }) .describe("Get the weather for a city"); const model = new BedrockChatWeb({ region: process.env.BEDROCK_AWS_REGION, model: "anthropic.claude-3-sonnet-20240229-v1:0", maxRetries: 0, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, }, }); const modelWithTools = model.bind({ tools: [ { name: "weather_tool", description: weatherTool.description, input_schema: zodToJsonSchema(weatherTool), }, ], }); const response = await modelWithTools.invoke( "Whats the weather like in san francisco?" ); // console.log(response); if (!response.tool_calls?.[0]) { throw new Error("No tool calls found in response"); } const { tool_calls } = response; expect(tool_calls[0].name.toLowerCase()).toBe("weather_tool"); }); test.skip(".bindTools with openai tool format", async () => { const weatherTool = z .object({ city: z.string().describe("The city to get the weather for"), state: z.string().describe("The state to get the weather for").optional(), }) .describe("Get the weather for a city"); const model = new BedrockChatWeb({ region: process.env.BEDROCK_AWS_REGION, model: "anthropic.claude-3-sonnet-20240229-v1:0", maxRetries: 0, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, }, }); const modelWithTools = model.bind({ tools: [ { type: "function", function: { name: "weather_tool", description: weatherTool.description, parameters: zodToJsonSchema(weatherTool), }, }, ], }); const response = await modelWithTools.invoke( "Whats the weather like in san francisco?" 
); // console.log(response); if (!response.tool_calls?.[0]) { throw new Error("No tool calls found in response"); } const { tool_calls } = response; expect(tool_calls[0].name.toLowerCase()).toBe("weather_tool"); }); test("Streaming tool calls with Anthropic", async () => { const weatherTool = z .object({ city: z.string().describe("The city to get the weather for"), state: z.string().describe("The state to get the weather for").optional(), }) .describe("Get the weather for a city"); const model = new BedrockChatWeb({ region: process.env.BEDROCK_AWS_REGION, model: "anthropic.claude-3-sonnet-20240229-v1:0", maxRetries: 0, credentials: { secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, }, }); const modelWithTools = model.bind({ tools: [ { name: "weather_tool", description: weatherTool.description, input_schema: zodToJsonSchema(weatherTool), }, ], }); const stream = await modelWithTools.stream( "Whats the weather like in san francisco?" ); let finalChunk; for await (const chunk of stream) { if (finalChunk !== undefined) { finalChunk = concat(finalChunk, chunk); } else { finalChunk = chunk; } } if (finalChunk?.tool_calls?.[0] === undefined) { throw new Error("No tool calls found in response"); } expect(finalChunk?.tool_calls?.[0].name).toBe("weather_tool"); expect(finalChunk?.tool_calls?.[0].args?.city).toBeDefined(); }); test("withStructuredOutput result should be compatible with OpenAI typing", async () => { const testSchema = z.object({ thinking_process: z .string() .describe( "Think before generating variants and put your reasoning here." ), variants: z .array( z.object({ name: z.string(), value: z.string(), }) ) .describe("Variants of the input"), }); const _prepareClient = () => { if (Math.random() > 0.5) { return new ChatOpenAI(); } return new BedrockChatWeb(); }; _prepareClient().withStructuredOutput(testSchema); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatbedrock.standard.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelUnitTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { BaseChatModelCallOptions } from "@langchain/core/language_models/chat_models";
import { BedrockChat } from "../bedrock/index.js";

/**
 * Standard unit-test harness for BedrockChat. Dummy AWS credentials are
 * written into the environment so the client can be constructed without
 * real keys.
 */
class BedrockChatStandardUnitTests extends ChatModelUnitTests<
  BaseChatModelCallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: BedrockChat,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {
        model: "anthropic.claude-3-sonnet-20240229-v1:0",
      },
    });
    process.env.BEDROCK_AWS_SECRET_ACCESS_KEY = "test";
    process.env.BEDROCK_AWS_ACCESS_KEY_ID = "test";
    process.env.BEDROCK_AWS_SESSION_TOKEN = "test";
    process.env.AWS_DEFAULT_REGION = "us-east-1";
  }

  testChatModelInitApiKey() {
    // BedrockChat authenticates with multiple credentials rather than a
    // single API key, so the generic single-key init test is skipped.
    this.skipTestMessage(
      "testChatModelInitApiKey",
      "BedrockChat",
      this.multipleApiKeysRequiredMessage
    );
  }
}

const bedrockUnitTests = new BedrockChatStandardUnitTests();

test("BedrockChatStandardUnitTests", () => {
  expect(bedrockUnitTests.runTests()).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatarcjet.test.ts
/* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { BaseMessage, MessageType } from "@langchain/core/messages";
import { ChatGeneration, ChatResult } from "@langchain/core/outputs";
import { ArcjetRedact } from "../arcjet.js";

/**
 * Minimal chat-model stub: hands the incoming (already-redacted) messages
 * to an optional callback and returns whatever generations it produces.
 * The callback doubles as the place where tests assert on redacted input.
 */
class MockChatModel extends BaseChatModel {
  callback?: (input: BaseMessage[]) => ChatGeneration[];

  constructor(callback?: (input: BaseMessage[]) => ChatGeneration[]) {
    super({});
    this.callback = callback;
  }

  _llmType(): string {
    return "mock_chat_model";
  }

  async _generate(
    messages: BaseMessage[],
    _options: this["ParsedCallOptions"],
    _runManager?: CallbackManagerForLLMRun | undefined
  ): Promise<ChatResult> {
    return {
      generations: this.callback ? this.callback(messages) : [],
    };
  }
}

/** Simple human-typed message used to build mock generations. */
class GenericMessage extends BaseMessage {
  constructor(text: string) {
    super(text);
  }

  _getType(): MessageType {
    return "human";
  }
}

test("It passes messages through correctly", async () => {
  const generationA = {
    message: new GenericMessage("this is the output"),
    text: "this is the output",
  };
  const callback = (input: BaseMessage[]) => {
    expect(input[0].content).toEqual("this is the input");
    expect(input[1].content).toEqual("this is a second input");
    return [generationA];
  };
  const mockLLM = new MockChatModel(callback);
  const options = {
    chatModel: mockLLM,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke([
    "this is the input",
    "this is a second input",
  ]);
  expect(output.content).toEqual("this is the output");
});

test("It passes messages through correctly in the streaming interface", async () => {
  const generationA = {
    message: new GenericMessage("this is the output"),
    text: "this is the output",
  };
  const callback = (input: BaseMessage[]) => {
    expect(input[0].content).toEqual("this is the input");
    expect(input[1].content).toEqual("this is a second input");
    return [generationA];
  };
  const mockLLM = new MockChatModel(callback);
  const options = {
    chatModel: mockLLM,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const stream = await arcjetRedact.stream([
    "this is the input",
    "this is a second input",
  ]);
  const output = await stream.next();
  expect(output.value.content).toEqual("this is the output");
});

test("It redacts built in entities across multiple messages and unredacts them in the response", async () => {
  const generationA = {
    message: new GenericMessage(
      "Your email is <Redacted email #1> and your card number is <Redacted credit card number #2>"
    ),
    text: "Your email is <Redacted email #1> and your card number is <Redacted credit card number #2>",
  };
  const callback = (input: BaseMessage[]) => {
    expect(input[0].content).toEqual("my email address is <Redacted email #1>");
    expect(input[1].content).toEqual(
      "my card number is <Redacted credit card number #2>"
    );
    return [generationA];
  };
  const mockLLM = new MockChatModel(callback);
  const options = {
    chatModel: mockLLM,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.stream([
    "my email address is test@example.com",
    "my card number is 4242424242424242",
  ]);
  const first = await output.next();
  expect(first.value.content).toEqual(
    "Your email is test@example.com and your card number is 4242424242424242"
  );
});

// Renamed from "it redacts and unredacts correctly": the invoke-based test
// below used the identical title, which made failure reports ambiguous.
test("it redacts and unredacts correctly in the streaming interface", async () => {
  const generationA = {
    message: new GenericMessage(
      "Your email is <Redacted email #1> and your card number is <Redacted credit card number #2>"
    ),
    text: "Your email is <Redacted email #1> and your card number is <Redacted credit card number #2>",
  };
  const callback = (input: BaseMessage[]) => {
    expect(input[0].content).toEqual("my email address is <Redacted email #1>");
    expect(input[1].content).toEqual(
      "my card number is <Redacted credit card number #2>"
    );
    return [generationA];
  };
  const mockLLM = new MockChatModel(callback);
  const options = {
    chatModel: mockLLM,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.stream([
    "my email address is test@example.com",
    "my card number is 4242424242424242",
  ]);
  const first = await output.next();
  expect(first.value.content).toEqual(
    "Your email is test@example.com and your card number is 4242424242424242"
  );
});

test("it redacts and unredacts correctly", async () => {
  const generationA = {
    message: new GenericMessage(
      "Your email is <Redacted email #1> and your card number is <Redacted credit card number #2>"
    ),
    text: "Your email is <Redacted email #1> and your card number is <Redacted credit card number #2>",
  };
  const callback = (input: BaseMessage[]) => {
    expect(input[0].content).toEqual("my email address is <Redacted email #1>");
    expect(input[1].content).toEqual(
      "my card number is <Redacted credit card number #2>"
    );
    return [generationA];
  };
  const mockLLM = new MockChatModel(callback);
  const options = {
    chatModel: mockLLM,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke([
    "my email address is test@example.com",
    "my card number is 4242424242424242",
  ]);
  expect(output.content).toEqual(
    "Your email is test@example.com and your card number is 4242424242424242"
  );
});

test("it handles custom detect functions correctly", async () => {
  const generationA = {
    message: new GenericMessage("custom <Redacted custom-entity #1>"),
    // Fixed typo: this previously said "#1" in message but "#0" in text;
    // the placeholder index must match the message content.
    text: "custom <Redacted custom-entity #1>",
  };
  const callback = (input: BaseMessage[]) => {
    expect(input[0].content).toEqual("custom <Redacted custom-entity #1>");
    return [generationA];
  };
  const mockLLM = new MockChatModel(callback);
  const customDetector = (tokens: string[]) => {
    return tokens.map((t) =>
      t === "my-custom-string-to-be-detected" ? "custom-entity" : undefined
    );
  };
  const options = {
    chatModel: mockLLM,
    entities: ["custom-entity" as const],
    detect: customDetector,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke([
    "custom my-custom-string-to-be-detected",
  ]);
  expect(output.content).toEqual("custom my-custom-string-to-be-detected");
});

test("it handles custom replace functions correctly", async () => {
  const generationA = {
    message: new GenericMessage(
      "custom is <Redacted custom-entity #1> email is redacted@example.com"
    ),
    text: "custom is <Redacted custom-entity #1> email is redacted@example.com",
  };
  const callback = (input: BaseMessage[]) => {
    expect(input[0].content).toEqual(
      "custom <Redacted custom-entity #1> email redacted@example.com"
    );
    return [generationA];
  };
  const mockLLM = new MockChatModel(callback);
  const customDetector = (tokens: string[]) => {
    return tokens.map((t) =>
      t === "my-custom-string-to-be-detected" ? "custom-entity" : undefined
    );
  };
  // The custom replacer supplies the literal stand-in for detected emails.
  const customReplacer = (detected: string) => {
    return detected === "email" ? "redacted@example.com" : undefined;
  };
  const options = {
    chatModel: mockLLM,
    entities: ["custom-entity" as const, "email" as const],
    detect: customDetector,
    replace: customReplacer,
  };
  const arcjetRedact = new ArcjetRedact(options);
  const output = await arcjetRedact.invoke([
    "custom my-custom-string-to-be-detected email test@example.com",
  ]);
  expect(output.content).toEqual(
    "custom is my-custom-string-to-be-detected email is test@example.com"
  );
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chattogetherai.int.test.ts
import { describe, test } from "@jest/globals";
import { ChatMessage, HumanMessage } from "@langchain/core/messages";
import {
  PromptTemplate,
  ChatPromptTemplate,
  AIMessagePromptTemplate,
  HumanMessagePromptTemplate,
  SystemMessagePromptTemplate,
} from "@langchain/core/prompts";
import { formatToOpenAITool } from "@langchain/openai";
import { StructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import { ChatTogetherAI } from "../togetherai.js";

// Integration tests for ChatTogetherAI — these call the live TogetherAI API
// and assume an API key is available in the environment.
describe("ChatTogetherAI", () => {
  test("invoke", async () => {
    const chat = new ChatTogetherAI();
    const message = new HumanMessage("Hello!");
    const res = await chat.invoke([message]);
    // console.log({ res });
    expect(res.content.length).toBeGreaterThan(10);
  });

  test("generate", async () => {
    const chat = new ChatTogetherAI();
    const message = new HumanMessage("Hello!");
    const res = await chat.generate([[message]]);
    // console.log(JSON.stringify(res, null, 2));
    expect(res.generations[0][0].text.length).toBeGreaterThan(10);
  });

  test("custom messages", async () => {
    const chat = new ChatTogetherAI();
    const res = await chat.invoke([new ChatMessage("Hello!", "user")]);
    // console.log({ res });
    expect(res.content.length).toBeGreaterThan(10);
  });

  test("prompt templates", async () => {
    const chat = new ChatTogetherAI();

    // System prompt sets a persona the model should adopt in its reply.
    const systemPrompt = PromptTemplate.fromTemplate(
      "You are a helpful assistant who must always respond like a {job}."
    );

    const chatPrompt = ChatPromptTemplate.fromMessages([
      new SystemMessagePromptTemplate(systemPrompt),
      HumanMessagePromptTemplate.fromTemplate("{text}"),
    ]);

    const responseA = await chat.generatePrompt([
      await chatPrompt.formatPromptValue({
        job: "pirate",
        text: "What would be a good company name a company that makes colorful socks?",
      }),
    ]);

    // console.log(responseA.generations);
    expect(responseA.generations[0][0].text.length).toBeGreaterThan(10);
  });

  test("longer chain of messages", async () => {
    const chat = new ChatTogetherAI();

    // Multi-turn history: the model should recall the name from turn one.
    const chatPrompt = ChatPromptTemplate.fromMessages([
      HumanMessagePromptTemplate.fromTemplate(`Hi, my name is Joe!`),
      AIMessagePromptTemplate.fromTemplate(`Nice to meet you, Joe!`),
      HumanMessagePromptTemplate.fromTemplate("{text}"),
    ]);

    const responseA = await chat.generatePrompt([
      await chatPrompt.formatPromptValue({
        text: "What did I just say my name was?",
      }),
    ]);

    // console.log(responseA.generations);
    expect(responseA.generations[0][0].text.length).toBeGreaterThan(10);
  });

  test("JSON mode", async () => {
    // JSON-schema constrained output via TogetherAI's response_format option.
    const responseSchema = {
      type: "object",
      properties: {
        orderedArray: {
          type: "array",
          items: {
            type: "number",
          },
        },
      },
      required: ["orderedArray"],
    };
    const chat = new ChatTogetherAI().bind({
      response_format: {
        type: "json_object",
        schema: responseSchema,
      },
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You are a helpful assistant who responds in JSON."],
      ["human", "Please list this output in order of DESC [1, 4, 2, 8]."],
    ]);
    const res = await prompt.pipe(chat).invoke({});
    // console.log({ res });
    expect(typeof res.content).toEqual("string");
    expect(JSON.parse(res.content as string)).toMatchObject({
      orderedArray: expect.any(Array),
    });
  });

  test("Tool calls", async () => {
    // Structured tool converted to the OpenAI tool format, then forced via
    // tool_choice so the model must emit a tool call.
    class CalculatorTool extends StructuredTool {
      name = "Calculator";

      schema = z.object({
        a: z.number(),
        b: z.number(),
      });

      description = "A simple calculator tool.";

      constructor() {
        super();
      }

      async _call(input: { a: number; b: number }) {
        return JSON.stringify({ total: input.a + input.b });
      }
    }
    const tool = formatToOpenAITool(new CalculatorTool());
    const chat = new ChatTogetherAI().bind({
      tools: [tool],
      tool_choice: tool,
    });
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You are a helpful assistant."],
      ["human", "What is 1273926 times 27251?"],
    ]);
    const res = await prompt.pipe(chat).invoke({});
    // console.log({ res });
    expect(res.additional_kwargs.tool_calls?.length).toBeGreaterThan(0);
    expect(
      JSON.parse(res.additional_kwargs.tool_calls?.[0].function.arguments ?? "")
    ).toMatchObject({ a: expect.any(Number), b: expect.any(Number) });
  });
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatmoonshot.int.test.ts
import { test, expect } from "@jest/globals"; import { SystemMessage, HumanMessage } from "@langchain/core/messages"; import { ChatMoonshot } from "../moonshot.js"; interface TestConfig { modelName: string | undefined; config: { description?: string; temperature?: number; topP?: number; streaming?: boolean; callbacks?: Array<{ nrNewTokens?: number; streamedCompletion?: string; handleLLMNewToken?: (token: string) => Promise<void>; }>; }; system?: string; message?: string; shouldThrow?: boolean; } const runTest = async ({ modelName, config, system = "", message = "Hello!", shouldThrow = false, }: TestConfig) => { const description = `Test ChatMoonshot ${modelName || "default model"} ${ config.description || "" }`.trim(); let nrNewTokens = 0; let streamedCompletion = ""; const passedConfig = { ...config }; if (passedConfig.streaming) { passedConfig.callbacks = [ { async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }, ]; } test.skip(description, async () => { const chat = new ChatMoonshot({ modelName, ...config, }); const messages = []; if (system) { messages.push(new SystemMessage(system)); } messages.push(new HumanMessage(message)); if (shouldThrow) { await expect(chat.invoke(messages)).rejects.toThrow(); return; } const res = await chat.invoke(messages, passedConfig); // console.log({ res }); if (passedConfig.streaming) { expect(nrNewTokens > 0).toBe(true); expect(res.content).toBe(streamedCompletion); } }); }; const testConfigs: TestConfig[] = [ { modelName: undefined, config: {} }, { modelName: "moonshot-v1-8k", config: {} }, { modelName: "moonshot-v1-8k", config: { description: "with temperature", temperature: 1 }, }, { modelName: "moonshot-v1-8k", config: { description: "with topP", topP: 1 }, }, { modelName: "moonshot-v1-8k", config: { description: "with repetitionPenalty" }, }, { modelName: "moonshot-v1-8k", config: { description: "in streaming mode", streaming: true, }, message: "您好,请讲个长笑话", }, { modelName: 
"moonshot-v1-8k", config: { description: "illegal input should throw an error", temperature: 0, }, shouldThrow: true, }, { modelName: "moonshot-v1-8k", config: { description: "illegal input in streaming mode should throw an error", streaming: true, temperature: 0, }, shouldThrow: true, }, { modelName: "moonshot-v1-128k", config: {} }, { modelName: "moonshot-v1-128k", config: { description: "in streaming mode", streaming: true, }, message: "您好,请讲个长笑话", }, { modelName: "moonshot-v1-128k", config: { description: "with system message", }, system: "你是一个说文言文的人", }, { modelName: "moonshot-v1-128k", config: {}, }, ]; testConfigs.forEach((testConfig) => { void runTest(testConfig); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatfireworks.standard.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { ChatModelUnitTests } from "@langchain/standard-tests"; import { AIMessageChunk } from "@langchain/core/messages"; import { ChatFireworks, ChatFireworksCallOptions } from "../fireworks.js"; class ChatFireworksStandardUnitTests extends ChatModelUnitTests< ChatFireworksCallOptions, AIMessageChunk > { constructor() { super({ Cls: ChatFireworks, chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: {}, }); process.env.FIREWORKS_API_KEY = "test"; } testChatModelInitApiKey() { // Unset the API key env var here so this test can properly check // the API key class arg. process.env.FIREWORKS_API_KEY = ""; super.testChatModelInitApiKey(); // Re-set the API key env var here so other tests can run properly. process.env.FIREWORKS_API_KEY = "test"; } } const testClass = new ChatFireworksStandardUnitTests(); test("ChatFireworksStandardUnitTests", () => { const testResults = testClass.runTests(); expect(testResults).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import {
  ChatWatsonx,
  ChatWatsonxInput,
  WatsonxCallOptionsChat,
  WatsonxCallParams,
} from "../ibm.js";
import { WatsonxAuth } from "../../types/ibm.js";

/**
 * Standard integration-test harness for ChatWatsonx. A real
 * `WATSONX_AI_APIKEY` is required; service URL and project id fall back to
 * placeholder strings when not provided.
 */
class ChatWatsonxStandardIntegrationTests extends ChatModelIntegrationTests<
  WatsonxCallOptionsChat,
  AIMessageChunk,
  ChatWatsonxInput &
    WatsonxAuth &
    Partial<Omit<WatsonxCallParams, "tool_choice">>
> {
  constructor() {
    // Fail fast when credentials are missing instead of timing out later.
    if (!process.env.WATSONX_AI_APIKEY) {
      throw new Error("Cannot run tests. Api key not provided");
    }
    super({
      Cls: ChatWatsonx,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {
        model: "meta-llama/llama-3-1-70b-instruct",
        version: "2024-05-31",
        serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString",
        projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString",
        temperature: 0,
      },
    });
  }
}

const watsonxIntegrationTests = new ChatWatsonxStandardIntegrationTests();

test("ChatWatsonxStandardIntegrationTests", async () => {
  expect(await watsonxIntegrationTests.runTests()).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts
import { test } from "@jest/globals";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { HumanMessage } from "@langchain/core/messages";
import { ChatDeepInfra } from "../deepinfra.js";

// Integration tests for ChatDeepInfra — these call the live DeepInfra API
// and assume an API key is available in the environment.
describe("ChatDeepInfra", () => {
  test("call", async () => {
    const deepInfraChat = new ChatDeepInfra({ maxTokens: 20 });
    const message = new HumanMessage("1 + 1 = ");
    // Fixed malformed directive (was "// @eslint-disable-next-line/…").
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const res = await deepInfraChat.invoke([message]);
    // console.log({ res });
  });

  test("generate", async () => {
    const deepInfraChat = new ChatDeepInfra({ maxTokens: 20 });
    const message = new HumanMessage("1 + 1 = ");
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const res = await deepInfraChat.generate([[message]]);
    // console.log(JSON.stringify(res, null, 2));
  });

  test("Tool calling", async () => {
    // Tool definition in the OpenAI function-tool format, derived from a
    // zod schema via zodToJsonSchema.
    const zodSchema = z
      .object({
        location: z
          .string()
          .describe("The name of city to get the weather for."),
      })
      .describe(
        "Get the weather of a specific location and return the temperature in Celsius."
      );
    const deepInfraChat = new ChatDeepInfra().bind({
      tools: [
        {
          type: "function",
          function: {
            name: "get_current_weather",
            description: "Get the current weather in a given location",
            parameters: zodToJsonSchema(zodSchema),
          },
        },
      ],
    });
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    const res = await deepInfraChat.invoke(
      "What is the current weather in SF?"
    );
    // console.log({ res });
  });
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chattencenthunyuan.int.test.ts
// Integration tests for ChatTencentHunyuan. Tests are registered dynamically
// from a config table via runTest(); all cases are currently `test.skip`-ed,
// so they only run when re-enabled with live Tencent credentials.
// NOTE(review): runTest mutates config.callbacks to count streamed tokens and
// capture the streamed completion for comparison with the final response.
import { test, expect } from "@jest/globals"; import { BaseMessage, SystemMessage, HumanMessage, } from "@langchain/core/messages"; import { ChatTencentHunyuan } from "../tencent_hunyuan/index.js"; interface TestConfig { model: string | undefined; config: { description?: string; temperature?: number; topP?: number; streaming?: boolean; callbacks?: Array<{ nrNewTokens?: number; streamedCompletion?: string; handleLLMNewToken?: (token: string) => Promise<void>; }>; }; messages?: BaseMessage[]; shouldThrow?: boolean; } const runTest = async ({ model, config, messages = [new HumanMessage("Hello!")], shouldThrow = false, }: TestConfig) => { const description = `Test ChatTencentHunyuan ${model || "default model"} ${ config.description || "" }`.trim(); let nrNewTokens = 0; let streamedCompletion = ""; if (config.streaming) { // eslint-disable-next-line no-param-reassign
config.callbacks = [ { async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }, ]; } test.skip(description, async () => { const chat = new ChatTencentHunyuan({ model, ...config, }); if (shouldThrow) { await expect(chat.invoke(messages)).rejects.toThrow(); return; } const res = await chat.invoke(messages); // console.log({ res });
if (config.streaming) { expect(nrNewTokens > 0).toBe(true); expect(res.text).toBe(streamedCompletion); } }); }; const testConfigs: TestConfig[] = [ { model: undefined, config: {} }, { model: "hunyuan-lite", config: {} }, { model: "hunyuan-lite", config: { description: "with temperature", temperature: 1 }, }, { model: "hunyuan-lite", config: { description: "with topP", topP: 1 } }, { model: "hunyuan-lite", config: { description: "with penaltyScore" }, }, { model: "hunyuan-lite", config: { description: "in streaming mode", streaming: true, }, messages: [new HumanMessage("您好,请讲个长笑话")], }, { model: "hunyuan-lite", config: { description: "illegal input should throw an error", temperature: 0, }, shouldThrow: true, }, { model: "hunyuan-lite", config: {
// Matrix of model/config combinations, including cases expected to throw
// (temperature: 0 is treated as illegal input by the Hunyuan API here —
// TODO confirm against the current API contract).
description: "illegal input in streaming mode should throw an error", streaming: true, temperature: 0, }, shouldThrow: true, }, { model: "hunyuan-pro", config: {} }, { model: "hunyuan-pro", config: { description: "in streaming mode", streaming: true, }, messages: [new HumanMessage("您好,请讲个长笑话")], }, { model: "hunyuan-pro", config: { description: "with system message", }, messages: [ new SystemMessage("你是一个说文言文的人"), new HumanMessage("Hello!"), ], }, { model: "hunyuan-standard", config: {}, }, { model: "hunyuan-lite", config: {}, }, { model: "hunyuan-standard-256K", config: {}, }, ]; testConfigs.forEach((testConfig) => { // eslint-disable-next-line no-void
void runTest(testConfig); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatiflytekxinghuo.int.test.ts
import { HumanMessage } from "@langchain/core/messages";
import { ChatIflytekXinghuo } from "../iflytek_xinghuo/index.js";

// Smoke test for the iFlytek Xinghuo chat model; skipped by default because
// it needs real appid/key/secret credentials to reach the service.
test.skip("Iflytek Xinghuo Call", async () => {
  const xinghuo = new ChatIflytekXinghuo({
    iflytekAppid: "",
    iflytekApiKey: "",
    iflytekApiSecret: "",
  });
  await xinghuo.invoke([new HumanMessage("Nice to meet you!")]);
});
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts
/* eslint-disable @typescript-eslint/no-non-null-assertion */ import { test } from "@jest/globals"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { SystemMessage, AIMessage, HumanMessage, } from "@langchain/core/messages"; import { ChatLlamaCpp } from "../llama_cpp.js"; const llamaPath = getEnvironmentVariable("LLAMA_PATH")!; test.skip("Test predict", async () => { const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const response = await llamaCpp.invoke("Where do Llamas come from?"); // console.log({ response }); }); test.skip("Test call", async () => { const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const response = await llamaCpp.invoke([ new HumanMessage({ content: "My name is Nigel." }), ]); // console.log({ response }); }); test.skip("Test multiple messages", async () => { const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const response = await llamaCpp.invoke([ new HumanMessage("My name is Nigel."), new AIMessage( "Hello Nigel! It is great to meet you, how can I help you today?" ), new HumanMessage("What did I say my name was?"), ]); // console.log({ response }); }); test.skip("Test system message", async () => { const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const response = await llamaCpp.invoke([ new SystemMessage( "You are a pirate, responses must be very verbose and in pirate dialect, add 'Arr, m'hearty!' to each sentence." 
), new HumanMessage("Tell me where Llamas come from?"), ]); // console.log({ response }); }); test.skip("test streaming call", async () => { const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath, temperature: 0.7, }); const stream = await llamaCpp.stream( "Tell me a short story about a happy Llama." ); const chunks = []; for await (const chunk of stream) { chunks.push(chunk.content); // console.log(chunk.content); } expect(chunks.length).toBeGreaterThan(1); }); test.skip("test multi-mesage streaming call", async () => { const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath, temperature: 0.7, }); const stream = await llamaCpp.stream([ new SystemMessage( "You are a pirate, responses must be very verbose and in pirate dialect." ), new HumanMessage("Tell me about Llamas?"), ]); const chunks = []; for await (const chunk of stream) { chunks.push(chunk.content); // console.log(chunk.content); } expect(chunks.length).toBeGreaterThan(1); }); test.skip("test multi-mesage streaming call and abort after 5s", async () => { const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath, temperature: 0.7, }); const controller = new AbortController(); setTimeout(() => { controller.abort(); }, 5000); const chunks: string[] = []; try { await llamaCpp.invoke( [ new SystemMessage( "You are a pirate, responses must be very verbose and in pirate dialect." ), new HumanMessage("Tell me about Llamas?"), ], { signal: controller.signal, callbacks: [ { handleLLMNewToken(token) { // console.log(token); chunks.push(token); }, }, ], } ); } catch (err) { if ((err as Error).message === "AbortError") { expect(chunks.length).toBeGreaterThan(0); } else { throw err; } } });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts
// Integration tests for ChatWatsonx (IBM watsonx.ai chat model).
// NOTE(review): requires WATSONX_AI_SERVICE_URL / WATSONX_AI_PROJECT_ID env
// vars; "testString" placeholders only keep construction from throwing —
// actual calls need live credentials.
/* eslint-disable no-process-env */ import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, } from "@langchain/core/messages"; import { z } from "zod"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { LLMResult } from "@langchain/core/outputs"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { tool } from "@langchain/core/tools"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { ChatWatsonx } from "../ibm.js"; describe("Tests for chat", () => { describe("Test ChatWatsonx invoke and generate", () => { test("Basic invoke", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const res = await service.invoke("Print hello world"); expect(res).toBeInstanceOf(AIMessage); }); test("Basic generate", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const message = new HumanMessage("Hello"); const res = await service.generate([[message], [message]]); expect(res.generations.length).toBe(2); }); test("Invoke with system message", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
"testString", }); const messages = [ new SystemMessage("Translate the following from English into Italian"), new HumanMessage("hi!"), ]; const res = await service.invoke(messages); expect(res).toBeInstanceOf(AIMessage); }); test("Invoke with output parser", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const parser = new StringOutputParser(); const messages = [ new SystemMessage("Translate the following from English into Italian"), new HumanMessage("hi!"), ]; const res = await service.invoke(messages); const parsed = await parser.invoke(res); expect(typeof parsed).toBe("string"); }); test("Invoke with prompt", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const systemTemplate = "Translate the following into {language}:"; const promptTemplate = ChatPromptTemplate.fromMessages([ ["system", systemTemplate], ["user", "{text}"], ]); const llmChain = promptTemplate.pipe(service); const res = await llmChain.invoke({ language: "italian", text: "hi" }); expect(res).toBeInstanceOf(AIMessage); }); test("Invoke with chat conversation", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const res = await service.invoke([ { role: "user", content: "Hi! I'm Bob" }, { role: "assistant", content: "Hello Bob! How can I assist you today?", }, { role: "user", content: "What's my name?"
}, ]); expect(res).toBeInstanceOf(AIMessage); }); test("Token usage", async () => { let tokenUsage = { completion_tokens: 0, prompt_tokens: 0, totalTokens: 0, }; const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); const message = new HumanMessage("Hello"); await service.invoke([message]); expect(tokenUsage.prompt_tokens).toBeGreaterThan(0); }); test("Timeout", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); await expect(() => service.invoke("Print hello world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Controller options", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const controller = new AbortController(); await expect(() => { const res = service.invoke("Print hello world", { signal: controller.signal, }); controller.abort(); return res; }).rejects.toThrow(); }); }); describe("Test ChatWatsonx invoke and generate with stream mode", () => { test("Basic invoke", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
// Streaming-mode variants of the invoke/generate suite above.
"testString", }); const res = await service.invoke("Print hello world"); expect(res).toBeInstanceOf(AIMessage); }); test("Basic generate", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const message = new HumanMessage("Hello"); const res = await service.generate([[message], [message]]); expect(res.generations.length).toBe(2); }); test("Generate with n>1", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", n: 3, }); const message = new HumanMessage("Print hello world"); const res = await service.generate([[message]]); for (const generation of res.generations) { expect(generation.length).toBe(3); for (const gen of generation) { expect(typeof gen.text).toBe("string"); } } }); test("Generate with n>1 token count", async () => { process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; let tokenUsage = { input_tokens: 0, output_tokens: 0, total_tokens: 0, }; const generationsStreamed = [ ["", ""], ["", ""], ]; let tokenUsed = 0; const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
"testString", n: 2, maxTokens: 5, streaming: true, callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { const usage = output.llmOutput?.tokenUsage; tokenUsage = { input_tokens: usage.input_tokens + tokenUsage.input_tokens, output_tokens: usage.output_tokens + tokenUsage.output_tokens, total_tokens: usage.total_tokens + tokenUsage.total_tokens, }; }, async handleLLMNewToken(token: string, idx: NewTokenIndices) { const { prompt, completion } = idx; generationsStreamed[prompt][completion] += token; tokenUsed += 1; }, }), }); const message = new HumanMessage("Print hello world"); const res = await service.generate([[message], [message]]); for (const generation of res.generations) { expect(generation.length).toBe(2); for (const gen of generation) { expect(typeof gen.text).toBe("string"); } } expect(tokenUsed).toBe(tokenUsage.output_tokens); expect(res.generations.map((g) => g.map((gg) => gg.text))).toEqual( generationsStreamed ); }); test("Invoke with system message", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const messages = [ new SystemMessage("Translate the following from English into Italian"), new HumanMessage("hi!"), ]; const res = await service.invoke(messages); expect(res).toBeInstanceOf(AIMessage); }); test("Invoke with output parser", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
"testString", }); const parser = new StringOutputParser(); const messages = [ new SystemMessage("Translate the following from English into Italian"), new HumanMessage("hi!"), ]; const res = await service.invoke(messages); const parsed = await parser.invoke(res); expect(typeof parsed).toBe("string"); }); test("Invoke with prompt", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const systemTemplate = "Translate the following into {language}:"; const promptTemplate = ChatPromptTemplate.fromMessages([ ["system", systemTemplate], ["user", "{text}"], ]); const llmChain = promptTemplate.pipe(service); const res = await llmChain.invoke({ language: "italian", text: "hi" }); expect(res).toBeInstanceOf(AIMessage); }); test("Invoke with chat conversation", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const res = await service.invoke([ { role: "user", content: "Hi! I'm Bob" }, { role: "assistant", content: "Hello Bob! How can I assist you today?", }, { role: "user", content: "What's my name?" }, ]); expect(res).toBeInstanceOf(AIMessage); }); test("Token usage", async () => { let tokenUsage = { completion_tokens: 0, prompt_tokens: 0, totalTokens: 0, }; const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
"testString", callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); const message = new HumanMessage("Hello"); await service.invoke([message]); expect(tokenUsage.prompt_tokens).toBeGreaterThan(0); }); test("Timeout", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); await expect(() => service.invoke("Print hello world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Controller options", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const controller = new AbortController(); await expect(() => { const res = service.invoke("Print hello world", { signal: controller.signal, }); controller.abort(); return res; }).rejects.toThrow(); }); }); describe("Test ChatWatsonx stream", () => { test("Basic stream", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["human", "{input}"], ]); const res = await prompt.pipe(service).stream({ input: "Print hello world.", }); const chunks = []; for await (const chunk of res) { chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); expect(chunks.join("").length).toBeGreaterThan(1); }); test("Timeout", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ??
"testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); await expect(() => service.stream("Print hello world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Controller options", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const controller = new AbortController(); await expect(async () => { const res = await service.stream("Print hello world", { signal: controller.signal, }); let hasEntered = false; for await (const chunk of res) { hasEntered = true; expect(chunk).toBeDefined(); controller.abort(); } expect(hasEntered).toBe(true); }).rejects.toThrow(); }); test("Token count and response equality", async () => { let generation = ""; const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { generation = output.generations[0][0].text; }, }), }); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["human", "{input}"], ]); const res = await prompt.pipe(service).stream({ input: "Print hello world", }); let tokenCount = 0; const chunks = []; for await (const chunk of res) { tokenCount += 1; chunks.push(chunk.content); } expect(tokenCount).toBeGreaterThan(1); expect(chunks.join("")).toBe(generation); }); test("Token count usage_metadata", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
"testString", }); let res: AIMessageChunk | null = null; let outputCount = 0; const stream = await service.stream("Why is the sky blue? Be concise."); for await (const chunk of stream) { res = chunk; outputCount += 1; } expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; } expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); expect(res.usage_metadata.output_tokens).toBe(outputCount); expect(res.usage_metadata.total_tokens).toBe( res.usage_metadata.input_tokens + res.usage_metadata.output_tokens ); }); }); describe("Test tool usage", () => { test("Passing tool to chat model", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const calculatorSchema = z.object({ operation: z .enum(["add", "subtract", "multiply", "divide"]) .describe("The type of operation to execute."), number1: z.number().describe("The first number to operate on."), number2: z.number().describe("The second number to operate on."), }); const calculatorTool = tool( async ({ operation, number1, number2, }: { operation: string; number1: number; number2: number; }) => { // Functions must return strings
if (operation === "add") { return `${number1 + number2}`; } else if (operation === "subtract") { return `${number1 - number2}`; } else if (operation === "multiply") { return `${number1 * number2}`; } else if (operation === "divide") { return `${number1 / number2}`; } else { throw new Error("Invalid operation."); } }, { name: "calculator", description: "Can perform mathematical operations.", schema: calculatorSchema, } ); const llmWithTools = service.bindTools([calculatorTool]); const res = await llmWithTools.invoke("What is 3 * 12"); expect(res).toBeInstanceOf(AIMessage); expect(res.tool_calls?.[0].name).toBe("calculator"); expect(typeof res.tool_calls?.[0].args?.operation).toBe("string");
expect(typeof res.tool_calls?.[0].args?.number1).toBe("number"); expect(typeof res.tool_calls?.[0].args?.number2).toBe("number"); expect(res.response_metadata.finish_reason).toBe("tool_calls"); }); test("Passing tool to chat model extended", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const calculatorSchema = z.object({ operation: z .enum(["add", "subtract", "multiply", "divide"]) .describe("The type of operation to execute."), number1: z.number().describe("The first number to operate on."), number2: z.number().describe("The second number to operate on."), }); const calculatorTool = tool( async ({ operation, number1, number2, }: { operation: string; number1: number; number2: number; }) => { // Functions must return strings
if (operation === "add") { return `${number1 + number2}`; } else if (operation === "subtract") { return `${number1 - number2}`; } else if (operation === "multiply") { return `${number1 * number2}`; } else if (operation === "divide") { return `${number1 / number2}`; } else { throw new Error("Invalid operation."); } }, { name: "calculator", description: "Can perform mathematical operations.", schema: calculatorSchema, } ); const llmWithTools = service.bindTools([calculatorTool]); const res = await llmWithTools.invoke( "What is 3 * 12? Also, what is 11 + 49?"
); expect(res).toBeInstanceOf(AIMessage); expect(res.tool_calls).toBeDefined(); if (!res.tool_calls) return; expect(res.tool_calls.length).toBe(2); for (const tool_call of res.tool_calls) { expect(tool_call.name).toBe("calculator"); expect(typeof tool_call.args?.operation).toBe("string"); expect(typeof tool_call.args?.number1).toBe("number"); expect(typeof tool_call.args?.number2).toBe("number"); } }); test("Binding model-specific formats", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const modelWithTools = service.bind({ tools: [ { type: "function", function: { name: "calculator", description: "Can perform mathematical operations.", parameters: { type: "object", properties: { operation: { type: "string", description: "The type of operation to execute.", enum: ["add", "subtract", "multiply", "divide"], }, number1: { type: "number", description: "First integer" }, number2: { type: "number", description: "Second integer" }, }, required: ["number1", "number2"], }, }, }, ], }); const res = await modelWithTools.invoke("What is 32 * 122"); expect(res).toBeInstanceOf(AIMessage); expect(res.tool_calls?.[0].name).toBe("calculator"); expect(typeof res.tool_calls?.[0].args?.operation).toBe("string"); expect(typeof res.tool_calls?.[0].args?.number1).toBe("number"); expect(typeof res.tool_calls?.[0].args?.number2).toBe("number"); expect(res.response_metadata.finish_reason).toBe("tool_calls"); }); test("Passing tool to chat model", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
"testString", }); const addTool = tool( async (input) => { return input.a + input.b; }, { name: "add", description: "Adds a and b.", schema: z.object({ a: z.number(), b: z.number(), }), } ); const multiplyTool = tool( async (input) => { return input.a * input.b; }, { name: "multiply", description: "Multiplies a and b.", schema: z.object({ a: z.number(), b: z.number(), }), } ); const tools = [addTool, multiplyTool]; const modelWithTools = service.bindTools(tools); const res = await modelWithTools.invoke( "What is 3 * 12? Also, what is 11 + 49?" ); expect(res).toBeInstanceOf(AIMessage); expect(res.tool_calls).toBeDefined(); if (!res.tool_calls) return; expect(res.tool_calls.length).toBe(2); expect(res.tool_calls[0].name).not.toBe(res.tool_calls[1].name); expect(res.tool_calls[0].args.a).not.toBe(res.tool_calls[1].args.a); expect(res.tool_calls[0].args.b).not.toBe(res.tool_calls[1].args.b); }); }); describe("Test withStructuredOutput usage", () => { test("Schema with zod", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); const joke = z.object({ setup: z.string().describe("The setup of the joke"), punchline: z.string().describe("The punchline to the joke"), rating: z .number() .optional() .describe("How funny the joke is, from 1 to 10"), }); const structuredLlm = service.withStructuredOutput(joke); const res = await structuredLlm.invoke("Tell me a joke about cats"); expect("setup" in res).toBe(true); expect("punchline" in res).toBe(true); }); test("Schema with zod and stream", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
"testString", temperature: 0.2, }); const joke = z.object({ setup: z.string().describe("The setup of the joke"), punchline: z.string().describe("The punchline to the joke"), rating: z .number() .optional() .describe("How funny the joke is, from 1 to 10"), }); const structuredLlm = service.withStructuredOutput(joke); const res = await structuredLlm.stream("Tell me a joke about cats"); let object = {}; for await (const chunk of res) { expect(typeof chunk).toBe("object"); object = chunk; } expect("setup" in object).toBe(true); expect("punchline" in object).toBe(true); }); test("Schema with object", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", temperature: 0.2, }); const structuredLlm = service.withStructuredOutput({ name: "joke", description: "Joke to tell user.", parameters: { title: "Joke", type: "object", properties: { setup: { type: "string", description: "The setup for the joke" }, punchline: { type: "string", description: "The joke's punchline" }, }, required: ["setup", "punchline"], }, }); const res = await structuredLlm.invoke("Tell me a joke about cats"); expect(res).toBeDefined(); expect(typeof res.setup).toBe("string"); expect(typeof res.punchline).toBe("string"); }); test("Schema with rawOutput", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ??
"testString", temperature: 0.2, }); const structuredLlm = service.withStructuredOutput( { name: "joke", description: "Joke to tell user.", parameters: { title: "Joke", type: "object", properties: { setup: { type: "string", description: "The setup for the joke" }, punchline: { type: "string", description: "The joke's punchline", }, }, required: ["setup", "punchline"], }, }, { includeRaw: true } ); const res = await structuredLlm.invoke("Tell me a joke about cats"); expect(res.raw).toBeInstanceOf(AIMessage); expect(typeof res.parsed.setup).toBe("string"); expect(typeof res.parsed.setup).toBe("string"); }); test("Schema with zod and JSON mode", async () => { const service = new ChatWatsonx({ model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", temperature: 0, }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = service.withStructuredOutput( calculatorSchema, { name: "calculator", method: "jsonMode", } ); const prompt = ChatPromptTemplate.fromMessages([ { role: "system", content: `Reply structure should be type of JSON as followed: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, }, { role: "human", content: "What is 21 * 12?" }, ]); const modelWithStructuredOutoputJson = prompt.pipe( modelWithStructuredOutput ); const result = await modelWithStructuredOutoputJson.invoke(""); expect(typeof result.operation).toBe("string"); expect(typeof result.number1).toBe("number"); expect(typeof result.number2).toBe("number"); }); }); });
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tencent_hunyuan/web.ts
import { type BaseChatModelParams } from "@langchain/core/language_models/chat_models"; import { sign } from "../../utils/tencent_hunyuan/web.js"; import { ChatTencentHunyuan as BaseChatTencentHunyuan, TencentHunyuanChatInput, } from "./base.js"; /** * Wrapper around Tencent Hunyuan large language models that use the Chat endpoint. * * To use you should have the `TENCENT_SECRET_ID` and `TENCENT_SECRET_KEY` * environment variable set. * * @augments BaseLLM * @augments TencentHunyuanInput * @example * ```typescript * const messages = [new HumanMessage("Hello")]; * * const hunyuanLite = new ChatTencentHunyuan({ * model: "hunyuan-lite", * tencentSecretId: "YOUR-SECRET-ID", * tencentSecretKey: "YOUR-SECRET-KEY", * }); * * let res = await hunyuanLite.call(messages); * * const hunyuanPro = new ChatTencentHunyuan({ * model: "hunyuan-pro", * temperature: 1, * tencentSecretId: "YOUR-SECRET-ID", * tencentSecretKey: "YOUR-SECRET-KEY", * }); * * res = await hunyuanPro.call(messages); * ``` */ export class ChatTencentHunyuan extends BaseChatTencentHunyuan { constructor(fields?: Partial<TencentHunyuanChatInput> & BaseChatModelParams) { super({ ...fields, sign } ?? { sign }); } } export { TencentHunyuanChatInput } from "./base.js";
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tencent_hunyuan/index.ts
import { type BaseChatModelParams } from "@langchain/core/language_models/chat_models"; import { sign } from "../../utils/tencent_hunyuan/index.js"; import { ChatTencentHunyuan as BaseChatTencentHunyuan, TencentHunyuanChatInput, } from "./base.js"; /** * Wrapper around Tencent Hunyuan large language models that use the Chat endpoint. * * To use you should have the `TENCENT_SECRET_ID` and `TENCENT_SECRET_KEY` * environment variable set. * * @augments BaseLLM * @augments TencentHunyuanInput * @example * ```typescript * const messages = [new HumanMessage("Hello")]; * * const hunyuanLite = new ChatTencentHunyuan({ * model: "hunyuan-lite", * tencentSecretId: "YOUR-SECRET-ID", * tencentSecretKey: "YOUR-SECRET-KEY", * }); * * let res = await hunyuanLite.call(messages); * * const hunyuanPro = new ChatTencentHunyuan({ * model: "hunyuan-pro", * temperature: 1, * tencentSecretId: "YOUR-SECRET-ID", * tencentSecretKey: "YOUR-SECRET-KEY", * }); * * res = await hunyuanPro.call(messages); * ``` */ export class ChatTencentHunyuan extends BaseChatTencentHunyuan { constructor(fields?: Partial<TencentHunyuanChatInput> & BaseChatModelParams) { super({ ...fields, sign } ?? { sign }); } } export { TencentHunyuanChatInput } from "./base.js";
0
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models
lc_public_repos/langchainjs/libs/langchain-community/src/chat_models/tencent_hunyuan/base.ts
import { BaseChatModel, type BaseChatModelParams, } from "@langchain/core/language_models/chat_models"; import { AIMessage, BaseMessage, ChatMessage, AIMessageChunk, } from "@langchain/core/messages"; import { ChatGeneration, ChatResult, ChatGenerationChunk, } from "@langchain/core/outputs"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { IterableReadableStream } from "@langchain/core/utils/stream"; import { sign } from "../../utils/tencent_hunyuan/common.js"; /** * Type representing the role of a message in the Hunyuan chat model. */ export type HunyuanMessageRole = "system" | "assistant" | "user"; /** * Interface representing a message in the Hunyuan chat model. */ interface HunyuanMessage { Role: HunyuanMessageRole; Content: string; } /** * Models available, see https://cloud.tencent.com/document/product/1729/104753. */ type ModelName = | (string & NonNullable<unknown>) // hunyuan-lite | "hunyuan-lite" // context size: 4k, input size: 3k, output size: 1k // hunyuan-standard | "hunyuan-standard" // alias for hunyuan-standard-32K | "hunyuan-standard-32K" // context size: 32k, input size: 30k, output size: 2k | "hunyuan-standard-256K" // context size: 256k, input size: 250k, output size: 6k // hunyuan-pro | "hunyuan-pro"; // context size: 32k, input size: 28k, output size: 4k /** * Interface representing the usage of tokens in a chat completion. * See https://cloud.tencent.com/document/api/1729/101838#Usage. */ interface Usage { TotalTokens?: number; PromptTokens?: number; CompletionTokens?: number; } /** * Interface representing a request for a chat completion. * See https://cloud.tencent.com/document/api/1729/105701. 
*/ interface ChatCompletionRequest { Model: ModelName; Messages: HunyuanMessage[]; Stream?: boolean; StreamModeration?: boolean; EnableEnhancement?: boolean; Temperature?: number; TopP?: number; } /** * Interface representing a chat completion choice message. * See https://cloud.tencent.com/document/api/1729/101838#Message. */ interface ChoiceMessage { Role: string; Content: string; } /** * Interface representing a chat completion choice. * See https://cloud.tencent.com/document/api/1729/101838#Choice. */ interface Choice { FinishReason: "stop" | "sensitive" | ""; Delta: ChoiceMessage; Message: ChoiceMessage; } /** * Interface representing a error response from a chat completion. */ interface Error { Code: string; Message: string; } /** * Interface representing a response from a chat completion. * See https://cloud.tencent.com/document/product/1729/105701. */ interface ChatCompletionResponse { Created: number; Usage: Usage; Note: string; Choices: Choice[]; Id?: string; RequestId?: string; Error?: Error; ErrorMsg?: Error; } /** * Interface defining the input to the ChatTencentHunyuan class. */ export interface TencentHunyuanChatInput { /** * Tencent Cloud API Host. * @default "hunyuan.tencentcloudapi.com" */ host?: string; /** * Model name to use. * @default "hunyuan-pro" */ model: ModelName; /** * Whether to stream the results or not. Defaults to false. * @default false */ streaming?: boolean; /** * SecretID to use when making requests, can be obtained from https://console.cloud.tencent.com/cam/capi. * Defaults to the value of `TENCENT_SECRET_ID` environment variable. */ tencentSecretId?: string; /** * Secret key to use when making requests, can be obtained from https://console.cloud.tencent.com/cam/capi. * Defaults to the value of `TENCENT_SECRET_KEY` environment variable. */ tencentSecretKey?: string; /** * Amount of randomness injected into the response. Ranges * from 0.0 to 2.0. 
Use temp closer to 0 for analytical / * multiple choice, and temp closer to 1 for creative * and generative tasks. Defaults to 1.0.95. */ temperature?: number; /** * Total probability mass of tokens to consider at each step. Range * from 0 to 1.0. Defaults to 1.0. */ topP?: number; } /** * Interface defining the input to the ChatTencentHunyuan class. */ interface TencentHunyuanChatInputWithSign extends TencentHunyuanChatInput { /** * Tencent Cloud API v3 sign method. */ sign: sign; } /** * Function that converts a base message to a Hunyuan message role. * @param message Base message to convert. * @returns The Hunyuan message role. */ function messageToRole(message: BaseMessage): HunyuanMessageRole { const type = message._getType(); switch (type) { case "ai": return "assistant"; case "human": return "user"; case "system": return "system"; case "function": throw new Error("Function messages not supported"); case "generic": { if (!ChatMessage.isInstance(message)) { throw new Error("Invalid generic chat message"); } if (["system", "assistant", "user"].includes(message.role)) { return message.role as HunyuanMessageRole; } throw new Error(`Unknown message role: ${message.role}`); } default: throw new Error(`Unknown message type: ${type}`); } } /** * Wrapper around Tencent Hunyuan large language models that use the Chat endpoint. * * To use you should have the `TENCENT_SECRET_ID` and `TENCENT_SECRET_KEY` * environment variable set. 
* * @augments BaseLLM * @augments TencentHunyuanInput * @example * ```typescript * const messages = [new HumanMessage("Hello")]; * * const hunyuanLite = new ChatTencentHunyuan({ * model: "hunyuan-lite", * tencentSecretId: "YOUR-SECRET-ID", * tencentSecretKey: "YOUR-SECRET-KEY", * }); * * let res = await hunyuanLite.call(messages); * * const hunyuanPro = new ChatTencentHunyuan({ * model: "hunyuan-pro", * temperature: 1, * tencentSecretId: "YOUR-SECRET-ID", * tencentSecretKey: "YOUR-SECRET-KEY", * }); * * res = await hunyuanPro.call(messages); * ``` */ export class ChatTencentHunyuan extends BaseChatModel implements TencentHunyuanChatInputWithSign { static lc_name() { return "ChatTencentHunyuan"; } get callKeys(): string[] { return ["stop", "signal", "options"]; } get lc_secrets(): { [key: string]: string } | undefined { return { tencentSecretId: "TENCENT_SECRET_ID", tencentSecretKey: "TENCENT_SECRET_KEY", }; } get lc_aliases(): { [key: string]: string } | undefined { return undefined; } lc_serializable = true; tencentSecretId?: string; tencentSecretKey?: string; streaming = false; host = "hunyuan.tencentcloudapi.com"; model = "hunyuan-pro"; temperature?: number | undefined; topP?: number | undefined; sign: sign; constructor( fields?: Partial<TencentHunyuanChatInputWithSign> & BaseChatModelParams ) { super(fields ?? {}); this.tencentSecretId = fields?.tencentSecretId ?? getEnvironmentVariable("TENCENT_SECRET_ID"); if (!this.tencentSecretId) { throw new Error("Tencent SecretID not found"); } this.tencentSecretKey = fields?.tencentSecretKey ?? getEnvironmentVariable("TENCENT_SECRET_KEY"); if (!this.tencentSecretKey) { throw new Error("Tencent SecretKey not found"); } this.host = fields?.host ?? this.host; this.topP = fields?.topP ?? this.topP; this.model = fields?.model ?? this.model; this.streaming = fields?.streaming ?? this.streaming; this.temperature = fields?.temperature ?? 
this.temperature; if (!fields?.sign) { throw new Error("Sign method undefined"); } this.sign = fields?.sign; } /** * Get the parameters used to invoke the model */ invocationParams(): Omit<ChatCompletionRequest, "Messages"> { return { TopP: this.topP, Model: this.model, Stream: this.streaming, Temperature: this.temperature, }; } /** * Get the HTTP headers used to invoke the model */ invocationHeaders(request: object, timestamp: number): HeadersInit { const headers = { "Content-Type": "application/json", "X-TC-Action": "ChatCompletions", "X-TC-Version": "2023-09-01", "X-TC-Timestamp": timestamp.toString(), Authorization: "", }; headers.Authorization = this.sign( this.host, request, timestamp, this.tencentSecretId ?? "", this.tencentSecretKey ?? "", headers ); return headers; } async *_streamResponseChunks( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): AsyncGenerator<ChatGenerationChunk> { const stream = await this.caller.call(async () => this.createStream( { ...this.invocationParams(), Messages: messages.map((message) => ({ Role: messageToRole(message), Content: message.content as string, })), }, options?.signal ) ); for await (const chunk of stream) { // handle streaming error if (chunk.ErrorMsg?.Message) { throw new Error(`[${chunk.Id}] ${chunk.ErrorMsg?.Message}`); } const { Choices: [ { Delta: { Content }, FinishReason, }, ], } = chunk; yield new ChatGenerationChunk({ text: Content, message: new AIMessageChunk({ content: Content }), generationInfo: FinishReason ? 
{ usage: chunk.Usage, request_id: chunk.Id, finish_reason: FinishReason, } : undefined, }); await runManager?.handleLLMNewToken(Content); } } private async *createStream( request: ChatCompletionRequest, signal?: AbortSignal ): AsyncGenerator<ChatCompletionResponse> { const timestamp = Math.trunc(Date.now() / 1000); const headers = this.invocationHeaders(request, timestamp); const response = await fetch(`https://${this.host}`, { headers, method: "POST", body: JSON.stringify(request), signal, }); if (!response.ok) { const text = await response.text(); throw new Error( `Hunyuan call failed with status code ${response.status}: ${text}` ); } if ( !response.headers.get("content-type")?.startsWith("text/event-stream") ) { const text = await response.text(); try { const data = JSON.parse(text); if (data?.Response?.Error?.Message) { throw new Error( `[${data?.Response?.RequestId}] ${data?.Response?.Error?.Message}` ); } } catch (e) { throw new Error( `Could not begin Hunyuan stream, received a non-JSON parseable response: ${text}.` ); } } if (!response.body) { throw new Error( `Could not begin Hunyuan stream, received empty body response.` ); } const decoder = new TextDecoder("utf-8"); const stream = IterableReadableStream.fromReadableStream(response.body); let extra = ""; for await (const chunk of stream) { const decoded = extra + decoder.decode(chunk); const lines = decoded.split("\n"); extra = lines.pop() || ""; for (const line of lines) { if (!line.startsWith("data:")) { continue; } try { yield JSON.parse(line.slice("data:".length).trim()); } catch (e) { console.warn(`Received a non-JSON parseable chunk: ${line}`); } } } } /** @ignore */ async _generate( messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { const params = this.invocationParams(); if (params.Stream) { let usage: Usage = {}; const stream = this._streamResponseChunks( messages, options ?? 
{}, runManager ); const generations: ChatGeneration[] = []; for await (const chunk of stream) { const text = chunk.text ?? ""; generations.push({ text, message: new AIMessage(text), }); usage = chunk.generationInfo?.usage; } return { generations, llmOutput: { tokenUsage: { totalTokens: usage.TotalTokens, promptTokens: usage.PromptTokens, completionTokens: usage.CompletionTokens, }, }, }; } const data = await this.completionWithRetry( { ...params, Messages: messages.map((message) => ({ Role: messageToRole(message), Content: message.content as string, })), }, options?.signal ).then<ChatCompletionResponse>((data) => { const response: ChatCompletionResponse = data?.Response; if (response?.Error?.Message) { throw new Error(`[${response.RequestId}] ${response.Error.Message}`); } return response; }); const text = data.Choices[0]?.Message?.Content ?? ""; const { TotalTokens = 0, PromptTokens = 0, CompletionTokens = 0, } = data.Usage; return { generations: [ { text, message: new AIMessage(text), }, ], llmOutput: { tokenUsage: { totalTokens: TotalTokens, promptTokens: PromptTokens, completionTokens: CompletionTokens, }, }, }; } /** @ignore */ async completionWithRetry( request: ChatCompletionRequest, signal?: AbortSignal ) { return this.caller.call(async () => { const timestamp = Math.trunc(Date.now() / 1000); const headers = this.invocationHeaders(request, timestamp); const response = await fetch(`https://${this.host}`, { headers, method: "POST", body: JSON.stringify(request), signal, }); return response.json(); }); } _llmType() { return "tencenthunyuan"; } /** @ignore */ _combineLLMOutput() { return []; } }
0
lc_public_repos/langchainjs/libs/langchain-community
lc_public_repos/langchainjs/libs/langchain-community/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/LICENSE
The MIT License Copyright (c) 2024 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { "^.+\\.tsx?$": ["@swc/jest"], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], testTimeout: 20_000, passWithNoTests: true, };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node"); class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { constructor(config, context) { // Make `instanceof Float32Array` return true in tests // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 super(config, context); this.global.Float32Array = Float32Array; } } module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/README.md
# @langchain/xai This package contains the LangChain.js integrations for xAI. ## Installation ```bash npm2yarn npm install @langchain/xai @langchain/core ``` ## Chat models This package adds support for xAI chat model inference. Set the necessary environment variable (or pass it in via the constructor): ```bash export XAI_API_KEY= ``` ```typescript import { ChatXAI } from "@langchain/xai"; import { HumanMessage } from "@langchain/core/messages"; const model = new ChatXAI({ apiKey: process.env.XAI_API_KEY, // Default value. }); const message = new HumanMessage("What color is the sky?"); const res = await model.invoke([message]); ``` ## Development To develop the `@langchain/xai` package, you'll need to follow these instructions: ### Install dependencies ```bash yarn install ``` ### Build the package ```bash yarn build ``` Or from the repo root: ```bash yarn build --filter=@langchain/xai ``` ### Run tests Test files should live within a `tests/` file in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`: ```bash $ yarn test $ yarn test:int ``` ### Lint & Format Run the linter & formatter to ensure your code is up to standard: ```bash yarn lint && yarn format ``` ### Adding new entrypoints If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//], entrypoints: { index: "index", }, tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/package.json
{ "name": "@langchain/xai", "version": "0.0.1", "description": "xAI integration for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-xai/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/xai", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard": "yarn test:standard:unit && yarn test:standard:int", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/openai": "~0.3.0" }, "peerDependencies": { "@langchain/core": 
">=0.2.21 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:^", "@langchain/scripts": ">=0.1.0 <0.2.0", "@langchain/standard-tests": "0.0.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@types/uuid": "^9", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", "dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0", "zod": "^3.22.4", "zod-to-json-schema": "^3.23.1" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-xai/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-xai
lc_public_repos/langchainjs/libs/langchain-xai/src/index.ts
export * from "./chat_models.js";
0
lc_public_repos/langchainjs/libs/langchain-xai
lc_public_repos/langchainjs/libs/langchain-xai/src/chat_models.ts
import {
  BaseChatModelCallOptions,
  BindToolsInput,
  LangSmithParams,
  type BaseChatModelParams,
} from "@langchain/core/language_models/chat_models";
import { Serialized } from "@langchain/core/load/serializable";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import {
  type OpenAICoreRequestOptions,
  type OpenAIClient,
  ChatOpenAI,
  OpenAIToolChoice,
} from "@langchain/openai";

// Tools may be passed either in LangChain's generic form or already in
// OpenAI's ChatCompletionTool wire format (xAI is OpenAI-compatible).
type ChatXAIToolType = BindToolsInput | OpenAIClient.ChatCompletionTool;

export interface ChatXAICallOptions extends BaseChatModelCallOptions {
  /** Extra HTTP headers to send with each request. */
  headers?: Record<string, string>;
  /** Tools the model may call for this invocation. */
  tools?: ChatXAIToolType[];
  /** Tool-choice directive forwarded to the API. */
  tool_choice?: OpenAIToolChoice | string | "auto" | "any";
}

export interface ChatXAIInput extends BaseChatModelParams {
  /**
   * The xAI API key to use for requests.
   * @default process.env.XAI_API_KEY
   */
  apiKey?: string;
  /**
   * The name of the model to use.
   * @default "grok-beta"
   */
  model?: string;
  /**
   * Up to 4 sequences where the API will stop generating further tokens. The
   * returned text will not contain the stop sequence.
   * Alias for `stopSequences`
   */
  stop?: Array<string>;
  /**
   * Up to 4 sequences where the API will stop generating further tokens. The
   * returned text will not contain the stop sequence.
   */
  stopSequences?: Array<string>;
  /**
   * Whether or not to stream responses.
   */
  streaming?: boolean;
  /**
   * The temperature to use for sampling.
   * @default 0.7
   */
  temperature?: number;
  /**
   * The maximum number of tokens that the model can process in a single
   * response. This limit ensures computational efficiency and resource
   * management.
   */
  maxTokens?: number;
}

/**
 * xAI chat model integration.
 *
 * The xAI API is compatible with the OpenAI API, with some limitations
 * (see `completionWithRetry` for the request parameters that are stripped
 * before being sent).
 *
 * Setup:
 * Install `@langchain/xai` and set an environment variable named `XAI_API_KEY`.
 *
 * ```bash
 * npm install @langchain/xai
 * export XAI_API_KEY="your-api-key"
 * ```
 *
 * ## [Constructor args](https://api.js.langchain.com/classes/langchain_xai.ChatXAI.html#constructor)
 *
 * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_xai.ChatXAICallOptions.html)
 *
 * Runtime args can be passed as the second argument to any of the base
 * runnable methods (`.invoke`, `.stream`, `.batch`, etc.), via `.bind`, or as
 * the second argument of `.bindTools`.
 *
 * @example Instantiate and invoke
 * ```typescript
 * import { ChatXAI } from "@langchain/xai";
 *
 * const llm = new ChatXAI({
 *   model: "grok-beta",
 *   temperature: 0,
 *   // other params...
 * });
 * const result = await llm.invoke(
 *   `Translate "I love programming" into French.`
 * );
 * ```
 *
 * @example Bind tools
 * ```typescript
 * import { z } from "zod";
 *
 * const GetWeather = {
 *   name: "GetWeather",
 *   description: "Get the current weather in a given location",
 *   schema: z.object({
 *     location: z.string().describe("The city and state, e.g. San Francisco, CA"),
 *   }),
 * };
 *
 * const llmWithTools = llm.bindTools([GetWeather]);
 * const aiMsg = await llmWithTools.invoke("What is the weather in LA?");
 * console.log(aiMsg.tool_calls);
 * ```
 *
 * @example Structured output
 * ```typescript
 * import { z } from "zod";
 *
 * const Joke = z.object({
 *   setup: z.string().describe("The setup of the joke"),
 *   punchline: z.string().describe("The punchline to the joke"),
 *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10"),
 * }).describe("Joke to tell user.");
 *
 * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
 * const joke = await structuredLlm.invoke("Tell me a joke about cats");
 * ```
 */
export class ChatXAI extends ChatOpenAI<ChatXAICallOptions> {
  static lc_name() {
    return "ChatXAI";
  }

  _llmType() {
    return "xAI";
  }

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      apiKey: "XAI_API_KEY",
    };
  }

  lc_serializable = true;

  lc_namespace = ["langchain", "chat_models", "xai"];

  constructor(fields?: Partial<ChatXAIInput>) {
    const apiKey = fields?.apiKey || getEnvironmentVariable("XAI_API_KEY");
    if (!apiKey) {
      throw new Error(
        `xAI API key not found. Please set the XAI_API_KEY environment variable or provide the key into "apiKey" field.`
      );
    }

    super({
      ...fields,
      model: fields?.model || "grok-beta",
      apiKey,
      // All traffic goes to xAI's OpenAI-compatible endpoint.
      configuration: {
        baseURL: "https://api.x.ai/v1",
      },
    });
  }

  /**
   * Serializes the model, dropping the inherited OpenAI API key field and the
   * `configuration` object (which contains the hard-coded baseURL) so neither
   * is persisted in the serialized form.
   */
  toJSON(): Serialized {
    const result = super.toJSON();
    if (
      "kwargs" in result &&
      typeof result.kwargs === "object" &&
      result.kwargs != null
    ) {
      delete result.kwargs.openai_api_key;
      delete result.kwargs.configuration;
    }
    return result;
  }

  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
    const params = super.getLsParams(options);
    // Report "xai" (not "openai") as the provider for LangSmith tracing.
    params.ls_provider = "xai";
    return params;
  }

  async completionWithRetry(
    request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;

  async completionWithRetry(
    request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;

  /**
   * Calls the xAI API with retry logic in case of failures.
   *
   * Request parameters that xAI does not accept (`frequency_penalty`,
   * `presence_penalty`, `logit_bias`, `functions`) are stripped, and nullish
   * message contents are normalized to empty strings, before delegating to
   * the OpenAI client.
   *
   * @param request The request to send to the xAI API.
   * @param options Optional configuration for the API call.
   * @returns The response from the xAI API.
   */
  async completionWithRetry(
    request:
      | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming
      | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<
    | AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>
    | OpenAIClient.Chat.Completions.ChatCompletion
  > {
    // Work on a shallow copy so the caller's request object is never mutated
    // (the previous implementation deleted keys from `request` directly).
    const sanitized = { ...request };
    // Parameters not supported by the xAI API.
    delete sanitized.frequency_penalty;
    delete sanitized.presence_penalty;
    delete sanitized.logit_bias;
    delete sanitized.functions;

    const newRequest = {
      ...sanitized,
      // xAI rejects messages whose content is null/undefined; coerce to "".
      messages: sanitized.messages.map((msg) =>
        msg.content ? msg : { ...msg, content: "" }
      ),
    };

    // The branch is required for TypeScript overload resolution: narrowing on
    // `stream` selects the matching parent overload in each arm.
    if (newRequest.stream === true) {
      return super.completionWithRetry(newRequest, options);
    }
    return super.completionWithRetry(newRequest, options);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-xai/src
lc_public_repos/langchainjs/libs/langchain-xai/src/tests/chat_models.standard.int.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatXAI, ChatXAICallOptions } from "../chat_models.js";

// Runs the shared LangChain chat-model integration test suite against ChatXAI.
// Requires a real XAI_API_KEY since these tests hit the live API.
class ChatXAIStandardIntegrationTests extends ChatModelIntegrationTests<
  ChatXAICallOptions,
  AIMessageChunk
> {
  constructor() {
    // Fail fast with a clear message rather than letting every sub-test
    // error out individually on a missing credential.
    if (!process.env.XAI_API_KEY) {
      throw new Error(
        "Can not run xAI integration tests because XAI_API_KEY is not set"
      );
    }
    super({
      Cls: ChatXAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {
        // Keep retries low and output deterministic for CI runs.
        maxRetries: 1,
        temperature: 0,
      },
    });
  }
}

const testClass = new ChatXAIStandardIntegrationTests();

test("ChatXAIStandardIntegrationTests", async () => {
  // Silence expected warnings emitted by the standard test harness.
  console.warn = (..._args: unknown[]) => {
    // no-op
  };
  const testResults = await testClass.runTests();
  expect(testResults).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-xai/src
lc_public_repos/langchainjs/libs/langchain-xai/src/tests/chat_models.standard.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatModelUnitTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatXAI, ChatXAICallOptions } from "../chat_models.js";

// Runs the shared LangChain chat-model unit test suite against ChatXAI.
// No network access is needed; a dummy API key satisfies the constructor.
class ChatXAIStandardUnitTests extends ChatModelUnitTests<
  ChatXAICallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: ChatXAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      constructorArgs: {},
    });
    // This must be set so method like `.bindTools` or `.withStructuredOutput`
    // which we call after instantiating the model will work.
    // (constructor will throw if API key is not set)
    process.env.XAI_API_KEY = "test";
  }

  // Override: this particular standard test needs the env var absent so it can
  // verify that the API key is picked up from the constructor argument.
  testChatModelInitApiKey() {
    // Unset the API key env var here so this test can properly check
    // the API key class arg.
    process.env.XAI_API_KEY = "";
    super.testChatModelInitApiKey();
    // Re-set the API key env var here so other tests can run properly.
    process.env.XAI_API_KEY = "test";
  }
}

const testClass = new ChatXAIStandardUnitTests();

test("ChatXAIStandardUnitTests", () => {
  const testResults = testClass.runTests();
  expect(testResults).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-xai/src
lc_public_repos/langchainjs/libs/langchain-xai/src/tests/chat_models.int.test.ts
import { test } from "@jest/globals"; import { AIMessage, AIMessageChunk, HumanMessage, ToolMessage, } from "@langchain/core/messages"; import { tool } from "@langchain/core/tools"; import { z } from "zod"; import { concat } from "@langchain/core/utils/stream"; import { ChatXAI } from "../chat_models.js"; test("invoke", async () => { const chat = new ChatXAI({ maxRetries: 0, }); const message = new HumanMessage("What color is the sky?"); const res = await chat.invoke([message]); // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); test("invoke with stop sequence", async () => { const chat = new ChatXAI({ maxRetries: 0, }); const message = new HumanMessage("Count to ten."); const res = await chat.bind({ stop: ["5", "five"] }).invoke([message]); // console.log({ res }); expect((res.content as string).toLowerCase()).not.toContain("6"); expect((res.content as string).toLowerCase()).not.toContain("six"); }); test("stream should respect passed headers", async () => { const chat = new ChatXAI({ maxRetries: 0, }); const message = new HumanMessage("Count to ten."); await expect(async () => { await chat.stream([message], { headers: { Authorization: "badbadbad" }, }); }).rejects.toThrowError(); }); test("generate", async () => { const chat = new ChatXAI(); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message]]); // console.log(JSON.stringify(res, null, 2)); expect(res.generations[0][0].text.length).toBeGreaterThan(10); }); test("streaming", async () => { const chat = new ChatXAI(); const message = new HumanMessage("What color is the sky?"); const stream = await chat.stream([message]); let iters = 0; let finalRes = ""; for await (const chunk of stream) { iters += 1; finalRes += chunk.content; } // console.log({ finalRes, iters }); expect(iters).toBeGreaterThan(1); }); test("invoke with bound tools", async () => { const chat = new ChatXAI({ maxRetries: 0, model: "grok-beta", }); const message = new HumanMessage("What is 
the current weather in Hawaii?"); const res = await chat .bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }) .invoke([message]); // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toEqual(1); expect( JSON.parse( res.additional_kwargs?.tool_calls?.[0].function.arguments ?? "{}" ) ).toEqual(res.tool_calls?.[0].args); }); test("stream with bound tools, yielding a single chunk", async () => { const chat = new ChatXAI({ maxRetries: 0, }); const message = new HumanMessage("What is the current weather in Hawaii?"); const stream = await chat .bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }) .stream([message]); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(JSON.stringify(chunk)); } }); test("Few shotting with tool calls", async () => { const chat = new ChatXAI({ model: "grok-beta", temperature: 0, }).bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. 
San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }); const res = await chat.invoke([ new HumanMessage("What is the weather in SF?"), new AIMessage({ content: "", tool_calls: [ { id: "12345", name: "get_current_weather", args: { location: "SF", }, }, ], }), new ToolMessage({ tool_call_id: "12345", content: "It is currently 24 degrees with hail in SF.", }), new AIMessage("It is currently 24 degrees in SF with hail in SF."), new HumanMessage("What did you say the weather was?"), ]); // console.log(res); expect(res.content).toContain("24"); }); test("Groq can stream tool calls", async () => { const model = new ChatXAI({ model: "grok-beta", temperature: 0, }); const weatherTool = tool((_) => "The temperature is 24 degrees with hail.", { name: "get_current_weather", schema: z.object({ location: z .string() .describe("The location to get the current weather for."), }), description: "Get the current weather in a given location.", }); const modelWithTools = model.bindTools([weatherTool]); const stream = await modelWithTools.stream( "What is the weather in San Francisco?" ); let finalMessage: AIMessageChunk | undefined; for await (const chunk of stream) { finalMessage = !finalMessage ? chunk : concat(finalMessage, chunk); } expect(finalMessage).toBeDefined(); if (!finalMessage) return; expect(finalMessage.tool_calls?.[0]).toBeDefined(); if (!finalMessage.tool_calls?.[0]) return; expect(finalMessage.tool_calls?.[0].name).toBe("get_current_weather"); expect(finalMessage.tool_calls?.[0].args).toHaveProperty("location"); expect(finalMessage.tool_calls?.[0].id).toBeDefined(); });
0
lc_public_repos/langchainjs/libs/langchain-xai/src
lc_public_repos/langchainjs/libs/langchain-xai/src/tests/chat_models.test.ts
/* eslint-disable no-process-env */
// Serialization unit tests for ChatXAI: the secret must serialize as a
// reference to XAI_API_KEY whether provided directly or via the environment.
import { test, expect } from "@jest/globals";
import { ChatXAI } from "../chat_models.js";

test("Serialization", () => {
  const model = new ChatXAI({
    apiKey: "foo",
  });
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","xai","ChatXAI"],"kwargs":{"api_key":{"lc":1,"type":"secret","id":["XAI_API_KEY"]}}}`
  );
});

test("Serialization with no params", () => {
  // BUG FIX: this previously set GROQ_API_KEY (copy-paste from the groq
  // package), but the ChatXAI constructor reads XAI_API_KEY and would throw.
  process.env.XAI_API_KEY = "foo";
  const model = new ChatXAI();
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","xai","ChatXAI"],"kwargs":{"api_key":{"lc":1,"type":"secret","id":["XAI_API_KEY"]}}}`
  );
});
0
lc_public_repos/langchainjs/libs/langchain-xai/src
lc_public_repos/langchainjs/libs/langchain-xai/src/tests/chat_models_structured_output.int.test.ts
import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { AIMessage } from "@langchain/core/messages"; import { ChatXAI } from "../chat_models.js"; test("withStructuredOutput zod schema function calling", async () => { const model = new ChatXAI({ temperature: 0, model: "grok-beta", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are VERY bad at math and must always use a calculator."], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput zod schema JSON mode", async () => { const model = new ChatXAI({ temperature: 0, model: "grok-beta", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", method: "jsonMode", } ); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! 
What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema function calling", async () => { const model = new ChatXAI({ temperature: 0, model: "grok-beta", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( zodToJsonSchema(calculatorSchema), { name: "calculator", } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", `You are VERY bad at math and must always use a calculator.`], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput OpenAI function definition function calling", async () => { const model = new ChatXAI({ temperature: 0, model: "grok-beta", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput({ name: "calculator", parameters: zodToJsonSchema(calculatorSchema), }); const prompt = ChatPromptTemplate.fromMessages([ "system", `You are VERY bad at math and must always use a calculator.`, "human", "Please help me!! 
What is 2 + 2?", ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema JSON mode", async () => { const model = new ChatXAI({ temperature: 0, model: "grok-beta", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( zodToJsonSchema(calculatorSchema), { name: "calculator", method: "jsonMode", } ); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema", async () => { const model = new ChatXAI({ temperature: 0, model: "grok-beta", }); const jsonSchema = { title: "calculator", description: "A simple calculator", type: "object", properties: { operation: { type: "string", enum: ["add", "subtract", "multiply", "divide"], }, number1: { type: "number" }, number2: { type: "number" }, }, }; const modelWithStructuredOutput = model.withStructuredOutput(jsonSchema); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. 
Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput includeRaw true", async () => { const model = new ChatXAI({ temperature: 0, model: "grok-beta", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", includeRaw: true, } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are VERY bad at math and must always use a calculator."], ["human", "Please help me!! 
What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("parsed" in result).toBe(true); // Need to make TS happy :) if (!("parsed" in result)) { throw new Error("parsed not in result"); } const { parsed } = result; expect("operation" in parsed).toBe(true); expect("number1" in parsed).toBe(true); expect("number2" in parsed).toBe(true); expect("raw" in result).toBe(true); // Need to make TS happy :) if (!("raw" in result)) { throw new Error("raw not in result"); } const { raw } = result as { raw: AIMessage }; expect(raw.tool_calls?.[0].args).toBeDefined(); if (!raw.tool_calls?.[0].args) { throw new Error("args not in tool call"); } expect(raw.tool_calls?.length).toBeGreaterThan(0); expect(raw.tool_calls?.[0].name).toBe("calculator"); expect("operation" in raw.tool_calls[0].args).toBe(true); expect("number1" in raw.tool_calls[0].args).toBe(true); expect("number2" in raw.tool_calls[0].args).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-xai
lc_public_repos/langchainjs/libs/langchain-xai/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
  preset: "ts-jest/presets/default-esm",
  // Custom environment that patches Float32Array (see jest.env.cjs).
  testEnvironment: "./jest.env.cjs",
  modulePathIgnorePatterns: ["dist/", "docs/"],
  moduleNameMapper: {
    // Map ESM-style ".js" specifiers in TS sources back to extensionless ids.
    "^(\\.{1,2}/.*)\\.js$": "$1",
  },
  transform: {
    // Transpile TS/TSX with swc for speed.
    "^.+\\.tsx?$": ["@swc/jest"],
  },
  transformIgnorePatterns: [
    "/node_modules/",
    "\\.pnp\\.[^\\/]+$",
    "./scripts/jest-setup-after-env.js",
  ],
  // Load environment variables from .env before tests run.
  setupFiles: ["dotenv/config"],
  testTimeout: 20_000,
  passWithNoTests: true,
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node");

/**
 * Node test environment that shares the host realm's `Float32Array`
 * constructor with the test sandbox, so `instanceof Float32Array` checks on
 * values created outside the sandbox still return true.
 */
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(config, context) {
    // Make `instanceof Float32Array` return true in tests
    // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
    super(config, context);
    this.global.Float32Array = Float32Array;
  }
}

module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/README.md
# @langchain/weaviate

This package contains the LangChain.js integrations for Weaviate with the `weaviate-ts-client` SDK.

## Installation

```bash npm2yarn
npm install @langchain/weaviate @langchain/core
```

## Vectorstore

This package adds support for Weaviate vectorstore. To follow along with this example install the `@langchain/openai` package for their Embeddings model.

```bash
npm install @langchain/openai
```

Now set the necessary environment variables (or pass them in via the client object):

```bash
export WEAVIATE_SCHEME=
export WEAVIATE_HOST=
export WEAVIATE_API_KEY=
```

```typescript
import weaviate, { ApiKey } from 'weaviate-ts-client';
import { OpenAIEmbeddings } from "@langchain/openai";
import { WeaviateStore } from "@langchain/weaviate";

// Weaviate SDK has a TypeScript issue so we must do this.
const client = (weaviate as any).client({
  scheme: process.env.WEAVIATE_SCHEME || "https",
  host: process.env.WEAVIATE_HOST || "localhost",
  apiKey: new ApiKey(
    process.env.WEAVIATE_API_KEY || "default"
  ),
});

// Create a store and fill it with some texts + metadata
await WeaviateStore.fromTexts(
  ["hello world", "hi there", "how are you", "bye now"],
  [{ foo: "bar" }, { foo: "baz" }, { foo: "qux" }, { foo: "bar" }],
  new OpenAIEmbeddings(),
  {
    client,
    indexName: "Test",
    textKey: "text",
    metadataKeys: ["foo"],
  }
);
```

## Development

To develop the `@langchain/weaviate` package, you'll need to follow these instructions:

### Install dependencies

```bash
yarn install
```

### Build the package

```bash
yarn build
```

Or from the repo root:

```bash
yarn build --filter=@langchain/weaviate
```

### Run tests

Test files should live within a `tests/` folder in the `src/` folder.
Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`: ```bash $ yarn test $ yarn test:int ``` ### Lint & Format Run the linter & formatter to ensure your code is up to standard: ```bash yarn lint && yarn format ``` ### Adding new entrypoints If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/.eslintrc.cjs
// ESLint configuration for the @langchain/weaviate package.
module.exports = {
  extends: [
    "airbnb-base",
    "eslint:recommended",
    "prettier",
    "plugin:@typescript-eslint/recommended",
  ],
  parserOptions: {
    ecmaVersion: 12,
    parser: "@typescript-eslint/parser",
    // Type-aware rules (no-floating-promises etc.) need the project tsconfig.
    project: "./tsconfig.json",
    sourceType: "module",
  },
  plugins: ["@typescript-eslint", "no-instanceof"],
  // Lint only source files; skip generated output and this config itself.
  ignorePatterns: [
    ".eslintrc.cjs",
    "scripts",
    "node_modules",
    "dist",
    "dist-cjs",
    "*.js",
    "*.cjs",
    "*.d.ts",
  ],
  rules: {
    // Env vars must flow through explicit configuration, not process.env.
    "no-process-env": 2,
    // `instanceof` is unreliable across realms; use the plugin's guard.
    "no-instanceof/no-instanceof": 2,
    "@typescript-eslint/explicit-module-boundary-types": 0,
    "@typescript-eslint/no-empty-function": 0,
    "@typescript-eslint/no-shadow": 0,
    "@typescript-eslint/no-empty-interface": 0,
    "@typescript-eslint/no-use-before-define": ["error", "nofunc"],
    "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
    // Promises must always be awaited or explicitly voided.
    "@typescript-eslint/no-floating-promises": "error",
    "@typescript-eslint/no-misused-promises": "error",
    camelcase: 0,
    "class-methods-use-this": 0,
    "import/extensions": [2, "ignorePackages"],
    "import/no-extraneous-dependencies": [
      "error",
      { devDependencies: ["**/*.test.ts"] },
    ],
    "import/no-unresolved": 0,
    "import/prefer-default-export": 0,
    "keyword-spacing": "error",
    "max-classes-per-file": 0,
    "max-len": 0,
    "no-await-in-loop": 0,
    "no-bitwise": 0,
    "no-console": 0,
    "no-restricted-syntax": 0,
    "no-shadow": 0,
    "no-continue": 0,
    "no-void": 0,
    "no-underscore-dangle": 0,
    "no-use-before-define": 0,
    "no-useless-constructor": 0,
    "no-return-await": 0,
    "consistent-return": 0,
    "no-else-return": 0,
    "func-names": 0,
    "no-lonely-if": 0,
    "prefer-rest-params": 0,
    "new-cap": ["error", { properties: false, capIsNew: false }],
  },
  overrides: [
    // Unused vars are common scaffolding in tests; do not warn there.
    { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } }
  ]
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//], entrypoints: { index: "index", }, tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/package.json
{ "name": "@langchain/weaviate", "version": "0.1.0", "description": "Weaviate integration for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-weaviate/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/weaviate", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "uuid": "^10.0.0", "weaviate-ts-client": "^2.0.0" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:^", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@types/uuid": "^9", "@typescript-eslint/eslint-plugin": "^6.12.0", 
"@typescript-eslint/parser": "^6.12.0", "dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "langchain": "workspace:*", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-weaviate/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-weaviate
lc_public_repos/langchainjs/libs/langchain-weaviate/src/vectorstores.ts
import * as uuid from "uuid"; import type { WeaviateClient, WeaviateObject, WhereFilter, } from "weaviate-ts-client"; import { MaxMarginalRelevanceSearchOptions, VectorStore, } from "@langchain/core/vectorstores"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { maximalMarginalRelevance } from "@langchain/core/utils/math"; // Note this function is not generic, it is designed specifically for Weaviate // https://weaviate.io/developers/weaviate/config-refs/datatypes#introduction export const flattenObjectForWeaviate = ( // eslint-disable-next-line @typescript-eslint/no-explicit-any obj: Record<string, any> ) => { // eslint-disable-next-line @typescript-eslint/no-explicit-any const flattenedObject: Record<string, any> = {}; for (const key in obj) { if (!Object.hasOwn(obj, key)) { continue; } const value = obj[key]; if (typeof obj[key] === "object" && !Array.isArray(value)) { const recursiveResult = flattenObjectForWeaviate(value); for (const deepKey in recursiveResult) { if (Object.hasOwn(obj, key)) { flattenedObject[`${key}_${deepKey}`] = recursiveResult[deepKey]; } } } else if (Array.isArray(value)) { if (value.length === 0) { flattenedObject[key] = value; } else if ( typeof value[0] !== "object" && // eslint-disable-next-line @typescript-eslint/no-explicit-any value.every((el: any) => typeof el === typeof value[0]) ) { // Weaviate only supports arrays of primitive types, // where all elements are of the same type flattenedObject[key] = value; } } else { flattenedObject[key] = value; } } return flattenedObject; }; /** * Interface that defines the arguments required to create a new instance * of the `WeaviateStore` class. It includes the Weaviate client, the name * of the class in Weaviate, and optional keys for text and metadata. */ export interface WeaviateLibArgs { client: WeaviateClient; /** * The name of the class in Weaviate. Must start with a capital letter. 
*/ indexName: string; textKey?: string; metadataKeys?: string[]; tenant?: string; } interface ResultRow { // eslint-disable-next-line @typescript-eslint/no-explicit-any [key: string]: any; } /** * Interface that defines a filter for querying data from Weaviate. It * includes a distance and a `WhereFilter`. */ export interface WeaviateFilter { distance?: number; where: WhereFilter; } /** * Class that extends the `VectorStore` base class. It provides methods to * interact with a Weaviate index, including adding vectors and documents, * deleting data, and performing similarity searches. */ export class WeaviateStore extends VectorStore { declare FilterType: WeaviateFilter; private client: WeaviateClient; private indexName: string; private textKey: string; private queryAttrs: string[]; private tenant?: string; _vectorstoreType(): string { return "weaviate"; } constructor(public embeddings: EmbeddingsInterface, args: WeaviateLibArgs) { super(embeddings, args); this.client = args.client; this.indexName = args.indexName; this.textKey = args.textKey || "text"; this.queryAttrs = [this.textKey]; this.tenant = args.tenant; if (args.metadataKeys) { this.queryAttrs = [ ...new Set([ ...this.queryAttrs, ...args.metadataKeys.filter((k) => { // https://spec.graphql.org/June2018/#sec-Names // queryAttrs need to be valid GraphQL Names const keyIsValid = /^[_A-Za-z][_0-9A-Za-z]*$/.test(k); if (!keyIsValid) { console.warn( `Skipping metadata key ${k} as it is not a valid GraphQL Name` ); } return keyIsValid; }), ]), ]; } } /** * Method to add vectors and corresponding documents to the Weaviate * index. * @param vectors Array of vectors to be added. * @param documents Array of documents corresponding to the vectors. * @param options Optional parameter that can include specific IDs for the documents. * @returns An array of document IDs. */ async addVectors( vectors: number[][], documents: Document[], options?: { ids?: string[] } ) { const documentIds = options?.ids ?? 
documents.map((_) => uuid.v4()); const batch: WeaviateObject[] = documents.map((document, index) => { if (Object.hasOwn(document.metadata, "id")) throw new Error( "Document inserted to Weaviate vectorstore should not have `id` in their metadata." ); const flattenedMetadata = flattenObjectForWeaviate(document.metadata); return { ...(this.tenant ? { tenant: this.tenant } : {}), class: this.indexName, id: documentIds[index], vector: vectors[index], properties: { [this.textKey]: document.pageContent, ...flattenedMetadata, }, }; }); try { const responses = await this.client.batch .objectsBatcher() .withObjects(...batch) .do(); // if storing vectors fails, we need to know why const errorMessages: string[] = []; responses.forEach((response) => { if (response?.result?.errors?.error) { errorMessages.push( ...response.result.errors.error.map( (err) => err.message ?? "!! Unfortunately no error message was presented in the API response !!" ) ); } }); if (errorMessages.length > 0) { throw new Error(errorMessages.join("\n")); } } catch (e) { throw Error(`Error adding vectors: ${e}`); } return documentIds; } /** * Method to add documents to the Weaviate index. It first generates * vectors for the documents using the embeddings, then adds the vectors * and documents to the index. * @param documents Array of documents to be added. * @param options Optional parameter that can include specific IDs for the documents. * @returns An array of document IDs. */ async addDocuments(documents: Document[], options?: { ids?: string[] }) { return this.addVectors( await this.embeddings.embedDocuments(documents.map((d) => d.pageContent)), documents, options ); } /** * Method to delete data from the Weaviate index. It can delete data based * on specific IDs or a filter. * @param params Object that includes either an array of IDs or a filter for the data to be deleted. * @returns Promise that resolves when the deletion is complete. 
*/ async delete(params: { ids?: string[]; filter?: WeaviateFilter; }): Promise<void> { const { ids, filter } = params; if (ids && ids.length > 0) { for (const id of ids) { let deleter = this.client.data .deleter() .withClassName(this.indexName) .withId(id); if (this.tenant) { deleter = deleter.withTenant(this.tenant); } await deleter.do(); } } else if (filter) { let batchDeleter = this.client.batch .objectsBatchDeleter() .withClassName(this.indexName) .withWhere(filter.where); if (this.tenant) { batchDeleter = batchDeleter.withTenant(this.tenant); } await batchDeleter.do(); } else { throw new Error( `This method requires either "ids" or "filter" to be set in the input object` ); } } /** * Method to perform a similarity search on the stored vectors in the * Weaviate index. It returns the top k most similar documents and their * similarity scores. * @param query The query vector. * @param k The number of most similar documents to return. * @param filter Optional filter to apply to the search. * @returns An array of tuples, where each tuple contains a document and its similarity score. */ async similaritySearchVectorWithScore( query: number[], k: number, filter?: WeaviateFilter ): Promise<[Document, number][]> { const resultsWithEmbedding = await this.similaritySearchVectorWithScoreAndEmbedding(query, k, filter); return resultsWithEmbedding.map(([document, score, _embedding]) => [ document, score, ]); } /** * Method to perform a similarity search on the stored vectors in the * Weaviate index. It returns the top k most similar documents, their * similarity scores and embedding vectors. * @param query The query vector. * @param k The number of most similar documents to return. * @param filter Optional filter to apply to the search. * @returns An array of tuples, where each tuple contains a document, its similarity score and its embedding vector. 
*/ async similaritySearchVectorWithScoreAndEmbedding( query: number[], k: number, filter?: WeaviateFilter ): Promise<[Document, number, number[]][]> { try { let builder = this.client.graphql .get() .withClassName(this.indexName) .withFields( `${this.queryAttrs.join(" ")} _additional { distance vector id }` ) .withNearVector({ vector: query, distance: filter?.distance, }) .withLimit(k); if (this.tenant) { builder = builder.withTenant(this.tenant); } if (filter?.where) { builder = builder.withWhere(filter.where); } const result = await builder.do(); const documents: [Document, number, number[]][] = []; for (const data of result.data.Get[this.indexName]) { const { [this.textKey]: text, _additional, ...rest }: ResultRow = data; documents.push([ new Document({ pageContent: text, metadata: rest, id: _additional.id, }), _additional.distance, _additional.vector, ]); } return documents; } catch (e) { throw Error(`Error in similaritySearch ${e}`); } } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. * * @param {string} query - Text to look up documents similar to. * @param {number} options.k - Number of documents to return. * @param {number} options.fetchK - Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. * @param {this["FilterType"]} options.filter - Optional filter * @param _callbacks * * @returns {Promise<Document[]>} - List of documents selected by maximal marginal relevance. 
*/ override async maxMarginalRelevanceSearch( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>, _callbacks?: undefined ): Promise<Document[]> { const { k, fetchK = 20, lambda = 0.5, filter } = options; const queryEmbedding: number[] = await this.embeddings.embedQuery(query); const allResults: [Document, number, number[]][] = await this.similaritySearchVectorWithScoreAndEmbedding( queryEmbedding, fetchK, filter ); const embeddingList = allResults.map( ([_doc, _score, embedding]) => embedding ); const mmrIndexes = maximalMarginalRelevance( queryEmbedding, embeddingList, lambda, k ); return mmrIndexes .filter((idx) => idx !== -1) .map((idx) => allResults[idx][0]); } /** * Static method to create a new `WeaviateStore` instance from a list of * texts. It first creates documents from the texts and metadata, then * adds the documents to the Weaviate index. * @param texts Array of texts. * @param metadatas Metadata for the texts. Can be a single object or an array of objects. * @param embeddings Embeddings to be used for the texts. * @param args Arguments required to create a new `WeaviateStore` instance. * @returns A new `WeaviateStore` instance. */ static fromTexts( texts: string[], metadatas: object | object[], embeddings: EmbeddingsInterface, args: WeaviateLibArgs ): Promise<WeaviateStore> { const docs: Document[] = []; for (let i = 0; i < texts.length; i += 1) { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; const newDoc = new Document({ pageContent: texts[i], metadata, }); docs.push(newDoc); } return WeaviateStore.fromDocuments(docs, embeddings, args); } /** * Static method to create a new `WeaviateStore` instance from a list of * documents. It adds the documents to the Weaviate index. * @param docs Array of documents. * @param embeddings Embeddings to be used for the documents. * @param args Arguments required to create a new `WeaviateStore` instance. * @returns A new `WeaviateStore` instance. 
*/ static async fromDocuments( docs: Document[], embeddings: EmbeddingsInterface, args: WeaviateLibArgs ): Promise<WeaviateStore> { const instance = new this(embeddings, args); await instance.addDocuments(docs); return instance; } /** * Static method to create a new `WeaviateStore` instance from an existing * Weaviate index. * @param embeddings Embeddings to be used for the Weaviate index. * @param args Arguments required to create a new `WeaviateStore` instance. * @returns A new `WeaviateStore` instance. */ static async fromExistingIndex( embeddings: EmbeddingsInterface, args: WeaviateLibArgs ): Promise<WeaviateStore> { return new this(embeddings, args); } }
0
lc_public_repos/langchainjs/libs/langchain-weaviate
lc_public_repos/langchainjs/libs/langchain-weaviate/src/index.ts
export * from "./vectorstores.js"; export * from "./translator.js";
0
lc_public_repos/langchainjs/libs/langchain-weaviate
lc_public_repos/langchainjs/libs/langchain-weaviate/src/translator.ts
import {
  isFilterEmpty,
  isString,
  isInt,
  isFloat,
  BaseTranslator,
  Comparator,
  Comparators,
  Comparison,
  NOT,
  Operation,
  Operator,
  Operators,
  StructuredQuery,
  Visitor,
} from "@langchain/core/structured_query";
import { WeaviateFilter, WeaviateStore } from "./vectorstores.js";

// Operators this translator can emit (NOT is excluded from the mapping).
type AllowedOperator = Exclude<Operator, NOT>;

// The typed value slots a Weaviate comparison may carry.
type WeaviateOperatorValues = {
  valueText: string;
  valueInt: number;
  valueNumber: number;
  valueBoolean: boolean;
};

type WeaviateOperatorKeys = keyof WeaviateOperatorValues;

// Exactly one of the value keys may be present on a comparison result;
// all the others are forced to `never`.
type ExclusiveOperatorValue = {
  [L in WeaviateOperatorKeys]: {
    [key in L]: WeaviateOperatorValues[key];
  } & Omit<{ [key in WeaviateOperatorKeys]?: never }, L>;
}[WeaviateOperatorKeys];

export type WeaviateVisitorResult =
  | WeaviateOperationResult
  | WeaviateComparisonResult
  | WeaviateStructuredQueryResult;

export type WeaviateOperationResult = {
  operator: string;
  operands: WeaviateVisitorResult[];
};

export type WeaviateComparisonResult = {
  path: [string];
  operator: string;
} & ExclusiveOperatorValue;

export type WeaviateStructuredQueryResult = {
  filter?: {
    where?: WeaviateComparisonResult | WeaviateOperationResult;
  };
};

/**
 * A class that translates or converts data into a format that can be used
 * with Weaviate, a vector search engine. It extends the `BaseTranslator`
 * class and provides specific implementation for Weaviate.
 * @example
 * ```typescript
 * const selfQueryRetriever = new SelfQueryRetriever({
 *   llm: new ChatOpenAI(),
 *   vectorStore: new WeaviateStore(),
 *   documentContents: "Brief summary of a movie",
 *   attributeInfo: [],
 *   structuredQueryTranslator: new WeaviateTranslator(),
 * });
 *
 * const relevantDocuments = await selfQueryRetriever.getRelevantDocuments(
 *   "Which movies are rated higher than 8.5?",
 * );
 * ```
 */
export class WeaviateTranslator<
  T extends WeaviateStore
> extends BaseTranslator<T> {
  declare VisitOperationOutput: WeaviateOperationResult;

  declare VisitComparisonOutput: WeaviateComparisonResult;

  allowedOperators: Operator[] = [Operators.and, Operators.or];

  allowedComparators: Comparator[] = [
    Comparators.eq,
    Comparators.ne,
    Comparators.lt,
    Comparators.lte,
    Comparators.gt,
    Comparators.gte,
  ];

  /**
   * Formats the given function into a string representation. Throws an
   * error if the function is not a known comparator or operator, or if it
   * is not allowed.
   * @param func The function to format, which can be an Operator or Comparator.
   * @returns A string representation of the function.
   */
  formatFunction(func: Operator | Comparator): string {
    if (func in Comparators) {
      if (
        this.allowedComparators.length > 0 &&
        this.allowedComparators.indexOf(func as Comparator) === -1
      ) {
        throw new Error(
          `Comparator ${func} not allowed. Allowed operators: ${this.allowedComparators.join(
            ", "
          )}`
        );
      }
    } else if (func in Operators) {
      if (
        this.allowedOperators.length > 0 &&
        this.allowedOperators.indexOf(func as Operator) === -1
      ) {
        throw new Error(
          `Operator ${func} not allowed. Allowed operators: ${this.allowedOperators.join(
            ", "
          )}`
        );
      }
    } else {
      throw new Error("Unknown comparator or operator");
    }

    // Maps LangChain operator/comparator names to Weaviate GraphQL names.
    const dict = {
      and: "And",
      or: "Or",
      eq: "Equal",
      ne: "NotEqual",
      lt: "LessThan",
      lte: "LessThanEqual",
      gt: "GreaterThan",
      gte: "GreaterThanEqual",
    };
    return dict[func as Comparator | AllowedOperator];
  }

  /**
   * Visits an operation and returns a WeaviateOperationResult. The
   * operation's arguments are visited and the operator is formatted.
   * @param operation The operation to visit.
   * @returns A WeaviateOperationResult.
   */
  visitOperation(operation: Operation): this["VisitOperationOutput"] {
    // Recursively translate each operand before formatting the operator.
    const args = operation.args?.map((arg) =>
      arg.accept(this as Visitor)
    ) as WeaviateVisitorResult[];
    return {
      operator: this.formatFunction(operation.operator),
      operands: args,
    };
  }

  /**
   * Visits a comparison and returns a WeaviateComparisonResult. The
   * comparison's value is checked for type and the comparator is formatted.
   * Throws an error if the value type is not supported.
   * @param comparison The comparison to visit.
   * @returns A WeaviateComparisonResult.
   */
  visitComparison(comparison: Comparison): this["VisitComparisonOutput"] {
    // Order matters: string first, then int, then float — each branch picks
    // the matching Weaviate value slot.
    if (isString(comparison.value)) {
      return {
        path: [comparison.attribute],
        operator: this.formatFunction(comparison.comparator),
        valueText: comparison.value as string,
      };
    }
    if (isInt(comparison.value)) {
      return {
        path: [comparison.attribute],
        operator: this.formatFunction(comparison.comparator),
        valueInt: parseInt(comparison.value as string, 10),
      };
    }
    if (isFloat(comparison.value)) {
      return {
        path: [comparison.attribute],
        operator: this.formatFunction(comparison.comparator),
        valueNumber: parseFloat(comparison.value as string),
      };
    }
    throw new Error("Value type is not supported");
  }

  /**
   * Visits a structured query and returns a WeaviateStructuredQueryResult.
   * If the query has a filter, it is visited.
   * @param query The structured query to visit.
   * @returns A WeaviateStructuredQueryResult.
   */
  visitStructuredQuery(
    query: StructuredQuery
  ): this["VisitStructuredQueryOutput"] {
    let nextArg = {};
    if (query.filter) {
      nextArg = {
        filter: { where: query.filter.accept(this as Visitor) },
      };
    }
    return nextArg;
  }

  /**
   * Merges two filters into one. If both filters are empty, returns
   * undefined. If one filter is empty or the merge type is 'replace',
   * returns the other filter. If the merge type is 'and' or 'or', returns a
   * new filter with the merged results. Throws an error for unknown merge
   * types.
   * @param defaultFilter The default filter to merge.
   * @param generatedFilter The generated filter to merge.
   * @param mergeType The type of merge to perform. Can be 'and', 'or', or 'replace'. Defaults to 'and'.
   * @returns A merged WeaviateFilter, or undefined if both filters are empty.
   */
  mergeFilters(
    defaultFilter: WeaviateFilter | undefined,
    generatedFilter: WeaviateFilter | undefined,
    mergeType = "and"
  ): WeaviateFilter | undefined {
    // Branch order below is significant: emptiness checks resolve the
    // degenerate cases before any actual merging happens.
    if (
      isFilterEmpty(defaultFilter?.where) &&
      isFilterEmpty(generatedFilter?.where)
    ) {
      return undefined;
    }
    if (isFilterEmpty(defaultFilter?.where) || mergeType === "replace") {
      if (isFilterEmpty(generatedFilter?.where)) {
        return undefined;
      }
      return generatedFilter;
    }
    if (isFilterEmpty(generatedFilter?.where)) {
      // 'and' with an empty generated filter yields no filter at all.
      if (mergeType === "and") {
        return undefined;
      }
      return defaultFilter;
    }

    const merged: WeaviateOperationResult = {
      operator: "And",
      operands: [
        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
        defaultFilter!.where as WeaviateVisitorResult,
        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
        generatedFilter!.where as WeaviateVisitorResult,
      ],
    };
    if (mergeType === "and") {
      return {
        where: merged,
      } as WeaviateFilter;
    } else if (mergeType === "or") {
      merged.operator = "Or";
      return {
        where: merged,
      } as WeaviateFilter;
    } else {
      throw new Error("Unknown merge type");
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-weaviate/src
lc_public_repos/langchainjs/libs/langchain-weaviate/src/tests/translator.int.test.ts
/* eslint-disable no-process-env */
// Integration tests for SelfQueryRetriever backed by WeaviateStore.
// All tests are skipped by default: they require a reachable Weaviate
// instance (WEAVIATE_HOST / WEAVIATE_SCHEME / WEAVIATE_API_KEY) and
// OpenAI credentials for embeddings and the query-constructing LLM.
import { test } from "@jest/globals";
import weaviate from "weaviate-ts-client";
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings, OpenAI } from "@langchain/openai";
import { AttributeInfo } from "langchain/chains/query_constructor";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { WeaviateStore } from "../vectorstores.js";
import { WeaviateTranslator } from "../translator.js";

// Baseline: no default filter configured. A nonsense query that the LLM
// cannot turn into a structured filter should match nothing.
test.skip("Weaviate Self Query Retriever Test", async () => {
  // Small corpus of movie summaries with queryable metadata.
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
  ];
  // Schema hints given to the LLM so it can build structured filters.
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  const embeddings = new OpenAIEmbeddings();
  const llm = new OpenAI({
    modelName: "gpt-3.5-turbo",
  });
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? // eslint-disable-next-line @typescript-eslint/no-explicit-any
        new (weaviate as any).ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const documentContents = "Brief summary of a movie";
  const vectorStore = await WeaviateStore.fromDocuments(docs, embeddings, {
    client,
    indexName: "Test",
    textKey: "text",
    metadataKeys: ["year", "director", "rating", "genre"],
  });
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new WeaviateTranslator(),
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const query2 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?"
  );
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const query3 = await selfQueryRetriever.getRelevantDocuments(
    "Which movies are directed by Greta Gerwig?"
  );
  const query4 = await selfQueryRetriever.getRelevantDocuments(
    "Wau wau wau wau hello gello hello?"
  );
  // console.log(query2, query3, query4);
  // query4 has to return an empty array: the gibberish query yields no
  // usable structured filter and no default filter is configured.
  expect(query4.length).toBe(0);
});

// With a default filter merged via "or": even a nonsense query should fall
// back to the default filter and return every matching document.
test.skip("Weaviate Vector Store Self Query Retriever Test With Default Filter Or Merge Operator", async () => {
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: {
        year: 1993,
        rating: 7.7,
        genre: "science fiction",
        type: "movie",
      },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: {
        year: 2010,
        director: "Christopher Nolan",
        rating: 8.2,
        type: "movie",
      },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: {
        year: 2006,
        director: "Satoshi Kon",
        rating: 8.6,
        type: "movie",
      },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: {
        year: 2019,
        director: "Greta Gerwig",
        rating: 8.3,
        type: "movie",
      },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated", type: "movie" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
        type: "movie",
      },
    }),
    // Deliberately not a movie — still matched, since the default filter
    // is merged with "or" and k covers the whole corpus.
    new Document({
      pageContent: "10x the previous gecs",
      metadata: {
        year: 2023,
        title: "10000 gecs",
        artist: "100 gecs",
        rating: 9.9,
        type: "album",
      },
    }),
  ];
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  const embeddings = new OpenAIEmbeddings();
  const llm = new OpenAI({
    modelName: "gpt-3.5-turbo",
  });
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? // eslint-disable-next-line @typescript-eslint/no-explicit-any
        new (weaviate as any).ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const documentContents = "Brief summary of a movie";
  const vectorStore = await WeaviateStore.fromDocuments(docs, embeddings, {
    client,
    indexName: "Test",
    textKey: "text",
    metadataKeys: ["year", "director", "rating", "genre", "type"],
  });
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new WeaviateTranslator(),
    searchParams: {
      // Default filter applied to every search.
      filter: {
        where: {
          operator: "Equal",
          path: ["type"],
          valueText: "movie",
        },
      },
      mergeFiltersOperator: "or",
      k: docs.length,
    },
  });
  const query4 = await selfQueryRetriever.getRelevantDocuments(
    "Wau wau wau wau hello gello hello?"
  );
  // console.log(query4);
  // query4 has to return documents, since the default filter takes over
  // via the "or" merge operator.
  expect(query4.length).toEqual(7);
});

// Same corpus, but the default filter is merged via "and": a nonsense
// query produces no structured filter to satisfy, so nothing matches.
test.skip("Weaviate Vector Store Self Query Retriever Test With Default Filter And Merge Operator", async () => {
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: {
        year: 1993,
        rating: 7.7,
        genre: "science fiction",
        type: "movie",
      },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: {
        year: 2010,
        director: "Christopher Nolan",
        rating: 8.2,
        type: "movie",
      },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: {
        year: 2006,
        director: "Satoshi Kon",
        rating: 8.6,
        type: "movie",
      },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: {
        year: 2019,
        director: "Greta Gerwig",
        rating: 8.3,
        type: "movie",
      },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated", type: "movie" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
        type: "movie",
      },
    }),
    new Document({
      pageContent: "10x the previous gecs",
      metadata: {
        year: 2023,
        title: "10000 gecs",
        artist: "100 gecs",
        rating: 9.9,
        type: "album",
      },
    }),
  ];
  const attributeInfo: AttributeInfo[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  const embeddings = new OpenAIEmbeddings();
  const llm = new OpenAI({
    modelName: "gpt-3.5-turbo",
  });
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? // eslint-disable-next-line @typescript-eslint/no-explicit-any
        new (weaviate as any).ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const documentContents = "Brief summary of a movie";
  const vectorStore = await WeaviateStore.fromDocuments(docs, embeddings, {
    client,
    indexName: "Test",
    textKey: "text",
    metadataKeys: ["year", "director", "rating", "genre", "type"],
  });
  const selfQueryRetriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new WeaviateTranslator(),
    searchParams: {
      filter: {
        where: {
          operator: "Equal",
          path: ["type"],
          valueText: "movie",
        },
      },
      mergeFiltersOperator: "and",
      k: docs.length,
    },
  });
  const query4 = await selfQueryRetriever.getRelevantDocuments(
    "Wau wau wau wau hello gello hello?"
  );
  // console.log(query4);
  // query4 has to return an empty array, since the default filter takes
  // over via the "and" merge operator.
  expect(query4.length).toEqual(0);
});
0
lc_public_repos/langchainjs/libs/langchain-weaviate/src
lc_public_repos/langchainjs/libs/langchain-weaviate/src/tests/vectorstores.test.ts
import { test, expect } from "@jest/globals"; import { flattenObjectForWeaviate } from "../vectorstores.js"; test("flattenObjectForWeaviate", () => { expect( flattenObjectForWeaviate({ array2: [{}, "a"], deep: { string: "deep string", array: ["1", 2], array3: [1, 3], deepdeep: { string: "even a deeper string", }, }, emptyArray: [], }) ).toMatchInlineSnapshot(` { "deep_array3": [ 1, 3, ], "deep_deepdeep_string": "even a deeper string", "deep_string": "deep string", "emptyArray": [], } `); });
0
lc_public_repos/langchainjs/libs/langchain-weaviate/src
lc_public_repos/langchainjs/libs/langchain-weaviate/src/tests/vectorstores.int.test.ts
/* eslint-disable no-process-env */
// Integration tests for WeaviateStore. They require a reachable Weaviate
// instance (WEAVIATE_HOST / WEAVIATE_SCHEME / WEAVIATE_API_KEY) and OpenAI
// credentials for embeddings.
import { test, expect } from "@jest/globals";
import weaviate, { ApiKey } from "weaviate-ts-client";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { WeaviateStore } from "../vectorstores.js";

// End-to-end: ingest texts, similarity search, metadata filtering, and
// flattened nested-object metadata round-tripping.
test("WeaviateStore", async () => {
  // Something wrong with the weaviate-ts-client types, so we need to disable
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? new ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const store = await WeaviateStore.fromTexts(
    ["hello world", "hi there", "how are you", "bye now"],
    [{ foo: "bar" }, { foo: "baz" }, { foo: "qux" }, { foo: "bar" }],
    new OpenAIEmbeddings(),
    {
      client,
      indexName: "Test",
      textKey: "text",
      metadataKeys: ["foo"],
    }
  );
  // Unfiltered search returns the closest text.
  const results = await store.similaritySearch("hello world", 1);
  expect(results).toEqual([
    new Document({ pageContent: "hello world", metadata: { foo: "bar" } }),
  ]);
  // A `where` filter restricts results to matching metadata.
  const results2 = await store.similaritySearch("hello world", 1, {
    where: {
      operator: "Equal",
      path: ["foo"],
      valueText: "baz",
    },
  });
  expect(results2).toEqual([
    new Document({ pageContent: "hi there", metadata: { foo: "baz" } }),
  ]);
  // Nested metadata objects are stored under flattened underscore keys.
  const testDocumentWithObjectMetadata = new Document({
    pageContent: "this is the deep document world!",
    metadata: {
      deep: {
        string: "deep string",
        deepdeep: {
          string: "even a deeper string",
        },
      },
    },
  });
  const documentStore = await WeaviateStore.fromDocuments(
    [testDocumentWithObjectMetadata],
    new OpenAIEmbeddings(),
    {
      client,
      indexName: "DocumentTest",
      textKey: "text",
      metadataKeys: ["deep_string", "deep_deepdeep_string"],
    }
  );
  const result3 = await documentStore.similaritySearch(
    "this is the deep document world!",
    1,
    {
      where: {
        operator: "Equal",
        path: ["deep_string"],
        valueText: "deep string",
      },
    }
  );
  expect(result3).toEqual([
    new Document({
      pageContent: "this is the deep document world!",
      metadata: {
        deep_string: "deep string",
        deep_deepdeep_string: "even a deeper string",
      },
    }),
  ]);
});

// Adding documents with explicit ids upserts in place; delete-by-id removes
// only the targeted objects.
test("WeaviateStore upsert + delete", async () => {
  // Something wrong with the weaviate-ts-client types, so we need to disable
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? new ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  // Timestamp-based marker keeps this run's objects distinguishable in a
  // shared index.
  const createdAt = new Date().getTime();
  const store = await WeaviateStore.fromDocuments(
    [
      new Document({
        pageContent: "testing",
        metadata: { deletionTest: createdAt.toString() },
      }),
    ],
    new OpenAIEmbeddings(),
    {
      client,
      indexName: "DocumentTest",
      textKey: "pageContent",
      metadataKeys: ["deletionTest"],
    }
  );
  const ids = await store.addDocuments([
    {
      pageContent: "hello world",
      metadata: { deletionTest: (createdAt + 1).toString() },
    },
    {
      pageContent: "hello world",
      metadata: { deletionTest: (createdAt + 1).toString() },
    },
  ]);
  const results = await store.similaritySearch("hello world", 4, {
    where: {
      operator: "Equal",
      path: ["deletionTest"],
      valueText: (createdAt + 1).toString(),
    },
  });
  expect(results).toEqual([
    new Document({
      pageContent: "hello world",
      metadata: { deletionTest: (createdAt + 1).toString() },
    }),
    new Document({
      pageContent: "hello world",
      metadata: { deletionTest: (createdAt + 1).toString() },
    }),
  ]);
  // Re-adding with the same ids upserts (replaces content, keeps ids).
  const ids2 = await store.addDocuments(
    [
      {
        pageContent: "hello world upserted",
        metadata: { deletionTest: (createdAt + 1).toString() },
      },
      {
        pageContent: "hello world upserted",
        metadata: { deletionTest: (createdAt + 1).toString() },
      },
    ],
    { ids }
  );
  expect(ids2).toEqual(ids);
  const results2 = await store.similaritySearch("hello world", 4, {
    where: {
      operator: "Equal",
      path: ["deletionTest"],
      valueText: (createdAt + 1).toString(),
    },
  });
  expect(results2).toEqual([
    new Document({
      pageContent: "hello world upserted",
      metadata: { deletionTest: (createdAt + 1).toString() },
    }),
    new Document({
      pageContent: "hello world upserted",
      metadata: { deletionTest: (createdAt + 1).toString() },
    }),
  ]);
  // Deleting one of the two ids leaves exactly one document behind.
  await store.delete({ ids: ids.slice(0, 1) });
  const results3 = await store.similaritySearch("hello world", 1, {
    where: {
      operator: "Equal",
      path: ["deletionTest"],
      valueText: (createdAt + 1).toString(),
    },
  });
  expect(results3).toEqual([
    new Document({
      pageContent: "hello world upserted",
      metadata: { deletionTest: (createdAt + 1).toString() },
    }),
  ]);
});

// delete() also accepts a Weaviate `where` filter instead of explicit ids.
test("WeaviateStore delete with filter", async () => {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? new ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const store = await WeaviateStore.fromTexts(
    ["hello world", "hi there", "how are you", "bye now"],
    [{ foo: "bar" }, { foo: "baz" }, { foo: "qux" }, { foo: "bar" }],
    new OpenAIEmbeddings(),
    {
      client,
      indexName: "FilterDeletionTest",
      textKey: "text",
      metadataKeys: ["foo"],
    }
  );
  const results = await store.similaritySearch("hello world", 1);
  expect(results).toEqual([
    new Document({ pageContent: "hello world", metadata: { foo: "bar" } }),
  ]);
  await store.delete({
    filter: {
      where: {
        operator: "Equal",
        path: ["foo"],
        valueText: "bar",
      },
    },
  });
  // All foo=bar objects are gone, so the filtered search returns nothing.
  const results2 = await store.similaritySearch("hello world", 1, {
    where: {
      operator: "Equal",
      path: ["foo"],
      valueText: "bar",
    },
  });
  expect(results2).toEqual([]);
});

// Constructor smoke test — no network calls are made here.
test("Initializing via constructor", () => {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? new ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const store = new WeaviateStore(new OpenAIEmbeddings(), {
    client,
    indexName: "Test",
    textKey: "text",
    metadataKeys: ["foo"],
  });
  expect(store).toBeDefined();
  expect(store._vectorstoreType()).toBe("weaviate");
});

// addVectors accepts pre-computed embeddings and returns one id per document.
test("addDocuments & addVectors method works", async () => {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? new ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const store = new WeaviateStore(new OpenAIEmbeddings(), {
    client,
    indexName: "Test",
    textKey: "text",
    metadataKeys: ["foo"],
  });
  const documents = [
    new Document({ pageContent: "hello world", metadata: { foo: "bar" } }),
    new Document({ pageContent: "hi there", metadata: { foo: "baz" } }),
    new Document({ pageContent: "how are you", metadata: { foo: "qux" } }),
    new Document({ pageContent: "bye now", metadata: { foo: "bar" } }),
  ];
  const embeddings = await store.embeddings.embedDocuments(
    documents.map((d) => d.pageContent)
  );
  const vectors = await store.addVectors(embeddings, documents);
  expect(vectors).toHaveLength(4);
});

// MMR search should still surface the semantically closest document first.
test("maxMarginalRelevanceSearch", async () => {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? new ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const createdAt = new Date().getTime();
  const fatherDoc = new Document({
    pageContent: "hello father",
    metadata: { deletionTest: (createdAt + 3).toString() },
  });
  const store = await WeaviateStore.fromDocuments(
    [
      new Document({
        pageContent: "testing",
        metadata: { deletionTest: createdAt.toString() },
      }),
      new Document({
        pageContent: "hello world",
        metadata: { deletionTest: (createdAt + 1).toString() },
      }),
      new Document({
        pageContent: "hello mother",
        metadata: { deletionTest: (createdAt + 2).toString() },
      }),
      fatherDoc,
    ],
    new OpenAIEmbeddings(),
    {
      client,
      indexName: "DocumentTest",
      textKey: "pageContent",
      metadataKeys: ["deletionTest"],
    }
  );
  const result = await store.maxMarginalRelevanceSearch("father", { k: 1 });
  expect(result[0].pageContent).toEqual(fatherDoc.pageContent);
});

// Attach to an already-populated index without re-ingesting documents.
test("fromExistingIndex", async () => {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const client = (weaviate as any).client({
    scheme:
      process.env.WEAVIATE_SCHEME ||
      (process.env.WEAVIATE_HOST ? "https" : "http"),
    host: process.env.WEAVIATE_HOST || "localhost:8080",
    apiKey: process.env.WEAVIATE_API_KEY
      ? new ApiKey(process.env.WEAVIATE_API_KEY)
      : undefined,
  });
  const store = await WeaviateStore.fromExistingIndex(new OpenAIEmbeddings(), {
    client,
    indexName: "DocumentTest",
    textKey: "pageContent",
    metadataKeys: ["deletionTest"],
  });
  expect(store).toBeDefined();
  expect(store._vectorstoreType()).toBe("weaviate");
});
0
lc_public_repos/langchainjs/libs/langchain-weaviate
lc_public_repos/langchainjs/libs/langchain-weaviate/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { "^.+\\.tsx?$": ["@swc/jest"], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], testTimeout: 20_000, passWithNoTests: true, collectCoverageFrom: ["src/**/*.ts"], };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node"); class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { constructor(config, context) { // Make `instanceof Float32Array` return true in tests // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 super(config, context); this.global.Float32Array = Float32Array; } } module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/README.md
# 🦜✂️ @langchain/textsplitters

This package contains various implementations of LangChain.js text splitters, most commonly used as part of retrieval-augmented generation (RAG) pipelines.

## Installation

```bash npm2yarn
npm install @langchain/textsplitters @langchain/core
```

## Development

To develop the `@langchain/textsplitters` package, you'll need to follow these instructions:

### Install dependencies

```bash
yarn install
```

### Build the package

```bash
yarn build
```

Or from the repo root:

```bash
yarn build --filter=@langchain/textsplitters
```

### Run tests

Test files should live within a `tests/` folder in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`:

```bash
$ yarn test
$ yarn test:int
```

### Lint & Format

Run the linter & formatter to ensure your code is up to standard:

```bash
yarn lint && yarn format
```

### Adding new entrypoints

If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": ["--workspaces-update=false"] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//], entrypoints: { index: "index", }, requiresOptionalDependency: [], tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/package.json
{ "name": "@langchain/textsplitters", "version": "0.1.0", "description": "Various implementations of LangChain.js text splitters", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-textsplitters/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/textsplitters", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "js-tiktoken": "^1.0.12" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", "dotenv": 
"^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-textsplitters/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-textsplitters
lc_public_repos/langchainjs/libs/langchain-textsplitters/src/text_splitter.ts
import type * as tiktoken from "js-tiktoken"; import { Document, BaseDocumentTransformer } from "@langchain/core/documents"; import { getEncoding } from "@langchain/core/utils/tiktoken"; export interface TextSplitterParams { chunkSize: number; chunkOverlap: number; keepSeparator: boolean; lengthFunction?: | ((text: string) => number) | ((text: string) => Promise<number>); } export type TextSplitterChunkHeaderOptions = { chunkHeader?: string; chunkOverlapHeader?: string; appendChunkOverlapHeader?: boolean; }; export abstract class TextSplitter extends BaseDocumentTransformer implements TextSplitterParams { lc_namespace = ["langchain", "document_transformers", "text_splitters"]; chunkSize = 1000; chunkOverlap = 200; keepSeparator = false; lengthFunction: | ((text: string) => number) | ((text: string) => Promise<number>); constructor(fields?: Partial<TextSplitterParams>) { super(fields); this.chunkSize = fields?.chunkSize ?? this.chunkSize; this.chunkOverlap = fields?.chunkOverlap ?? this.chunkOverlap; this.keepSeparator = fields?.keepSeparator ?? this.keepSeparator; this.lengthFunction = fields?.lengthFunction ?? 
((text: string) => text.length); if (this.chunkOverlap >= this.chunkSize) { throw new Error("Cannot have chunkOverlap >= chunkSize"); } } async transformDocuments( documents: Document[], chunkHeaderOptions: TextSplitterChunkHeaderOptions = {} ): Promise<Document[]> { return this.splitDocuments(documents, chunkHeaderOptions); } abstract splitText(text: string): Promise<string[]>; protected splitOnSeparator(text: string, separator: string): string[] { let splits; if (separator) { if (this.keepSeparator) { const regexEscapedSeparator = separator.replace( /[/\-\\^$*+?.()|[\]{}]/g, "\\$&" ); splits = text.split(new RegExp(`(?=${regexEscapedSeparator})`)); } else { splits = text.split(separator); } } else { splits = text.split(""); } return splits.filter((s) => s !== ""); } async createDocuments( texts: string[], // eslint-disable-next-line @typescript-eslint/no-explicit-any metadatas: Record<string, any>[] = [], chunkHeaderOptions: TextSplitterChunkHeaderOptions = {} ): Promise<Document[]> { // if no metadata is provided, we create an empty one for each text // eslint-disable-next-line @typescript-eslint/no-explicit-any const _metadatas: Record<string, any>[] = metadatas.length > 0 ? 
metadatas : [...Array(texts.length)].map(() => ({})); const { chunkHeader = "", chunkOverlapHeader = "(cont'd) ", appendChunkOverlapHeader = false, } = chunkHeaderOptions; const documents = new Array<Document>(); for (let i = 0; i < texts.length; i += 1) { const text = texts[i]; let lineCounterIndex = 1; let prevChunk = null; let indexPrevChunk = -1; for (const chunk of await this.splitText(text)) { let pageContent = chunkHeader; // we need to count the \n that are in the text before getting removed by the splitting const indexChunk = text.indexOf(chunk, indexPrevChunk + 1); if (prevChunk === null) { const newLinesBeforeFirstChunk = this.numberOfNewLines( text, 0, indexChunk ); lineCounterIndex += newLinesBeforeFirstChunk; } else { const indexEndPrevChunk = indexPrevChunk + (await this.lengthFunction(prevChunk)); if (indexEndPrevChunk < indexChunk) { const numberOfIntermediateNewLines = this.numberOfNewLines( text, indexEndPrevChunk, indexChunk ); lineCounterIndex += numberOfIntermediateNewLines; } else if (indexEndPrevChunk > indexChunk) { const numberOfIntermediateNewLines = this.numberOfNewLines( text, indexChunk, indexEndPrevChunk ); lineCounterIndex -= numberOfIntermediateNewLines; } if (appendChunkOverlapHeader) { pageContent += chunkOverlapHeader; } } const newLinesCount = this.numberOfNewLines(chunk); const loc = _metadatas[i].loc && typeof _metadatas[i].loc === "object" ? 
{ ..._metadatas[i].loc } : {}; loc.lines = { from: lineCounterIndex, to: lineCounterIndex + newLinesCount, }; const metadataWithLinesNumber = { ..._metadatas[i], loc, }; pageContent += chunk; documents.push( new Document({ pageContent, metadata: metadataWithLinesNumber, }) ); lineCounterIndex += newLinesCount; prevChunk = chunk; indexPrevChunk = indexChunk; } } return documents; } private numberOfNewLines(text: string, start?: number, end?: number) { const textSection = text.slice(start, end); return (textSection.match(/\n/g) || []).length; } async splitDocuments( documents: Document[], chunkHeaderOptions: TextSplitterChunkHeaderOptions = {} ): Promise<Document[]> { const selectedDocuments = documents.filter( (doc) => doc.pageContent !== undefined ); const texts = selectedDocuments.map((doc) => doc.pageContent); const metadatas = selectedDocuments.map((doc) => doc.metadata); return this.createDocuments(texts, metadatas, chunkHeaderOptions); } private joinDocs(docs: string[], separator: string): string | null { const text = docs.join(separator).trim(); return text === "" ? 
null : text; } async mergeSplits(splits: string[], separator: string): Promise<string[]> { const docs: string[] = []; const currentDoc: string[] = []; let total = 0; for (const d of splits) { const _len = await this.lengthFunction(d); if ( total + _len + currentDoc.length * separator.length > this.chunkSize ) { if (total > this.chunkSize) { console.warn( `Created a chunk of size ${total}, + which is longer than the specified ${this.chunkSize}` ); } if (currentDoc.length > 0) { const doc = this.joinDocs(currentDoc, separator); if (doc !== null) { docs.push(doc); } // Keep on popping if: // - we have a larger chunk than in the chunk overlap // - or if we still have any chunks and the length is long while ( total > this.chunkOverlap || (total + _len + currentDoc.length * separator.length > this.chunkSize && total > 0) ) { total -= await this.lengthFunction(currentDoc[0]); currentDoc.shift(); } } } currentDoc.push(d); total += _len; } const doc = this.joinDocs(currentDoc, separator); if (doc !== null) { docs.push(doc); } return docs; } } export interface CharacterTextSplitterParams extends TextSplitterParams { separator: string; } export class CharacterTextSplitter extends TextSplitter implements CharacterTextSplitterParams { static lc_name() { return "CharacterTextSplitter"; } separator = "\n\n"; constructor(fields?: Partial<CharacterTextSplitterParams>) { super(fields); this.separator = fields?.separator ?? this.separator; } async splitText(text: string): Promise<string[]> { // First we naively split the large input into a bunch of smaller ones. const splits = this.splitOnSeparator(text, this.separator); return this.mergeSplits(splits, this.keepSeparator ? 
"" : this.separator); } } export interface RecursiveCharacterTextSplitterParams extends TextSplitterParams { separators: string[]; } export const SupportedTextSplitterLanguages = [ "cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol", ] as const; export type SupportedTextSplitterLanguage = (typeof SupportedTextSplitterLanguages)[number]; export class RecursiveCharacterTextSplitter extends TextSplitter implements RecursiveCharacterTextSplitterParams { static lc_name() { return "RecursiveCharacterTextSplitter"; } separators: string[] = ["\n\n", "\n", " ", ""]; constructor(fields?: Partial<RecursiveCharacterTextSplitterParams>) { super(fields); this.separators = fields?.separators ?? this.separators; this.keepSeparator = fields?.keepSeparator ?? true; } private async _splitText(text: string, separators: string[]) { const finalChunks: string[] = []; // Get appropriate separator to use let separator: string = separators[separators.length - 1]; let newSeparators; for (let i = 0; i < separators.length; i += 1) { const s = separators[i]; if (s === "") { separator = s; break; } if (text.includes(s)) { separator = s; newSeparators = separators.slice(i + 1); break; } } // Now that we have the separator, split the text const splits = this.splitOnSeparator(text, separator); // Now go merging things, recursively splitting longer texts. let goodSplits: string[] = []; const _separator = this.keepSeparator ? 
"" : separator; for (const s of splits) { if ((await this.lengthFunction(s)) < this.chunkSize) { goodSplits.push(s); } else { if (goodSplits.length) { const mergedText = await this.mergeSplits(goodSplits, _separator); finalChunks.push(...mergedText); goodSplits = []; } if (!newSeparators) { finalChunks.push(s); } else { const otherInfo = await this._splitText(s, newSeparators); finalChunks.push(...otherInfo); } } } if (goodSplits.length) { const mergedText = await this.mergeSplits(goodSplits, _separator); finalChunks.push(...mergedText); } return finalChunks; } async splitText(text: string): Promise<string[]> { return this._splitText(text, this.separators); } static fromLanguage( language: SupportedTextSplitterLanguage, options?: Partial<RecursiveCharacterTextSplitterParams> ) { return new RecursiveCharacterTextSplitter({ ...options, separators: RecursiveCharacterTextSplitter.getSeparatorsForLanguage(language), }); } static getSeparatorsForLanguage(language: SupportedTextSplitterLanguage) { if (language === "cpp") { return [ // Split along class definitions "\nclass ", // Split along function definitions "\nvoid ", "\nint ", "\nfloat ", "\ndouble ", // Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nswitch ", "\ncase ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "go") { return [ // Split along function definitions "\nfunc ", "\nvar ", "\nconst ", "\ntype ", // Split along control flow statements "\nif ", "\nfor ", "\nswitch ", "\ncase ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "java") { return [ // Split along class definitions "\nclass ", // Split along method definitions "\npublic ", "\nprotected ", "\nprivate ", "\nstatic ", // Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nswitch ", "\ncase ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "js") { return [ // Split along function 
definitions "\nfunction ", "\nconst ", "\nlet ", "\nvar ", "\nclass ", // Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nswitch ", "\ncase ", "\ndefault ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "php") { return [ // Split along function definitions "\nfunction ", // Split along class definitions "\nclass ", // Split along control flow statements "\nif ", "\nforeach ", "\nwhile ", "\ndo ", "\nswitch ", "\ncase ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "proto") { return [ // Split along message definitions "\nmessage ", // Split along service definitions "\nservice ", // Split along enum definitions "\nenum ", // Split along option definitions "\noption ", // Split along import statements "\nimport ", // Split along syntax declarations "\nsyntax ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "python") { return [ // First, try to split along class definitions "\nclass ", "\ndef ", "\n\tdef ", // Now split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "rst") { return [ // Split along section titles "\n===\n", "\n---\n", "\n***\n", // Split along directive markers "\n.. 
", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "ruby") { return [ // Split along method definitions "\ndef ", "\nclass ", // Split along control flow statements "\nif ", "\nunless ", "\nwhile ", "\nfor ", "\ndo ", "\nbegin ", "\nrescue ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "rust") { return [ // Split along function definitions "\nfn ", "\nconst ", "\nlet ", // Split along control flow statements "\nif ", "\nwhile ", "\nfor ", "\nloop ", "\nmatch ", "\nconst ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "scala") { return [ // Split along class definitions "\nclass ", "\nobject ", // Split along method definitions "\ndef ", "\nval ", "\nvar ", // Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nmatch ", "\ncase ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "swift") { return [ // Split along function definitions "\nfunc ", // Split along class definitions "\nclass ", "\nstruct ", "\nenum ", // Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\ndo ", "\nswitch ", "\ncase ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "markdown") { return [ // First, try to split along Markdown headings (starting with level 2) "\n## ", "\n### ", "\n#### ", "\n##### ", "\n###### ", // Note the alternative syntax for headings (below) is not handled here // Heading level 2 // --------------- // End of code block "```\n\n", // Horizontal lines "\n\n***\n\n", "\n\n---\n\n", "\n\n___\n\n", // Note that this splitter doesn't handle horizontal lines defined // by *three or more* of ***, ---, or ___, but this is not handled "\n\n", "\n", " ", "", ]; } else if (language === "latex") { return [ // First, try to split along Latex sections "\n\\chapter{", "\n\\section{", "\n\\subsection{", "\n\\subsubsection{", // Now split by 
environments "\n\\begin{enumerate}", "\n\\begin{itemize}", "\n\\begin{description}", "\n\\begin{list}", "\n\\begin{quote}", "\n\\begin{quotation}", "\n\\begin{verse}", "\n\\begin{verbatim}", // Now split by math environments "\n\\begin{align}", "$$", "$", // Now split by the normal type of lines "\n\n", "\n", " ", "", ]; } else if (language === "html") { return [ // First, try to split along HTML tags "<body>", "<div>", "<p>", "<br>", "<li>", "<h1>", "<h2>", "<h3>", "<h4>", "<h5>", "<h6>", "<span>", "<table>", "<tr>", "<td>", "<th>", "<ul>", "<ol>", "<header>", "<footer>", "<nav>", // Head "<head>", "<style>", "<script>", "<meta>", "<title>", // Normal type of lines " ", "", ]; } else if (language === "sol") { return [ // Split along compiler informations definitions "\npragma ", "\nusing ", // Split along contract definitions "\ncontract ", "\ninterface ", "\nlibrary ", // Split along method definitions "\nconstructor ", "\ntype ", "\nfunction ", "\nevent ", "\nmodifier ", "\nerror ", "\nstruct ", "\nenum ", // Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\ndo while ", "\nassembly ", // Split by the normal type of lines "\n\n", "\n", " ", "", ]; } else { throw new Error(`Language ${language} is not supported.`); } } } export interface TokenTextSplitterParams extends TextSplitterParams { encodingName: tiktoken.TiktokenEncoding; allowedSpecial: "all" | Array<string>; disallowedSpecial: "all" | Array<string>; } /** * Implementation of splitter which looks at tokens. */ export class TokenTextSplitter extends TextSplitter implements TokenTextSplitterParams { static lc_name() { return "TokenTextSplitter"; } encodingName: tiktoken.TiktokenEncoding; allowedSpecial: "all" | Array<string>; disallowedSpecial: "all" | Array<string>; private tokenizer: tiktoken.Tiktoken; constructor(fields?: Partial<TokenTextSplitterParams>) { super(fields); this.encodingName = fields?.encodingName ?? "gpt2"; this.allowedSpecial = fields?.allowedSpecial ?? 
[]; this.disallowedSpecial = fields?.disallowedSpecial ?? "all"; } async splitText(text: string): Promise<string[]> { if (!this.tokenizer) { this.tokenizer = await getEncoding(this.encodingName); } const splits: string[] = []; const input_ids = this.tokenizer.encode( text, this.allowedSpecial, this.disallowedSpecial ); let start_idx = 0; while (start_idx < input_ids.length) { if (start_idx > 0) { start_idx -= this.chunkOverlap; } const end_idx = Math.min(start_idx + this.chunkSize, input_ids.length); const chunk_ids = input_ids.slice(start_idx, end_idx); splits.push(this.tokenizer.decode(chunk_ids)); start_idx = end_idx; } return splits; } } export type MarkdownTextSplitterParams = TextSplitterParams; export class MarkdownTextSplitter extends RecursiveCharacterTextSplitter implements MarkdownTextSplitterParams { constructor(fields?: Partial<MarkdownTextSplitterParams>) { super({ ...fields, separators: RecursiveCharacterTextSplitter.getSeparatorsForLanguage("markdown"), }); } } export type LatexTextSplitterParams = TextSplitterParams; export class LatexTextSplitter extends RecursiveCharacterTextSplitter implements LatexTextSplitterParams { constructor(fields?: Partial<LatexTextSplitterParams>) { super({ ...fields, separators: RecursiveCharacterTextSplitter.getSeparatorsForLanguage("latex"), }); } }
0
lc_public_repos/langchainjs/libs/langchain-textsplitters
lc_public_repos/langchainjs/libs/langchain-textsplitters/src/index.ts
// Public entrypoint for @langchain/textsplitters: re-exports every splitter
// class and parameter type from the implementation module.
export * from "./text_splitter.js";
0
lc_public_repos/langchainjs/libs/langchain-textsplitters/src
lc_public_repos/langchainjs/libs/langchain-textsplitters/src/tests/text_splitter.test.ts
// Unit tests for the character, recursive, token, markdown, latex and HTML
// text splitters.
// NOTE(review): the multi-line template-literal fixtures below were
// reconstructed from a whitespace-collapsed source; verify their exact
// internal newlines/indentation against the original file.
import { describe, expect, test } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import {
  CharacterTextSplitter,
  LatexTextSplitter,
  MarkdownTextSplitter,
  RecursiveCharacterTextSplitter,
  TokenTextSplitter,
} from "../text_splitter.js";

// Produces (length - 1) repetitions of `char` followed by a newline.
function textLineGenerator(char: string, length: number) {
  const line = new Array(length).join(char);
  return `${line}\n`;
}

describe("Character text splitter", () => {
  test("Test splitting by character count.", async () => {
    const text = "foo bar baz 123";
    const splitter = new CharacterTextSplitter({
      separator: " ",
      chunkSize: 7,
      chunkOverlap: 3,
    });
    const output = await splitter.splitText(text);
    const expectedOutput = ["foo bar", "bar baz", "baz 123"];
    expect(output).toEqual(expectedOutput);
  });

  test("Test splitting by character count doesn't create empty documents.", async () => {
    const text = "foo bar";
    const splitter = new CharacterTextSplitter({
      separator: " ",
      chunkSize: 2,
      chunkOverlap: 0,
    });
    const output = await splitter.splitText(text);
    const expectedOutput = ["foo", "bar"];
    expect(output).toEqual(expectedOutput);
  });

  test("Test splitting by character count on long words.", async () => {
    const text = "foo bar baz a a";
    const splitter = new CharacterTextSplitter({
      separator: " ",
      chunkSize: 3,
      chunkOverlap: 1,
    });
    const output = await splitter.splitText(text);
    const expectedOutput = ["foo", "bar", "baz", "a a"];
    expect(output).toEqual(expectedOutput);
  });

  test("Test splitting by character count when shorter words are first.", async () => {
    const text = "a a foo bar baz";
    const splitter = new CharacterTextSplitter({
      separator: " ",
      chunkSize: 3,
      chunkOverlap: 1,
    });
    const output = await splitter.splitText(text);
    const expectedOutput = ["a a", "foo", "bar", "baz"];
    expect(output).toEqual(expectedOutput);
  });

  test("Test splitting by characters when splits not found easily.", async () => {
    const text = "foo bar baz 123";
    const splitter = new CharacterTextSplitter({
      separator: " ",
      chunkSize: 1,
      chunkOverlap: 0,
    });
    const output = await splitter.splitText(text);
    const expectedOutput = ["foo", "bar", "baz", "123"];
    expect(output).toEqual(expectedOutput);
  });

  test("Test invalid arguments.", () => {
    expect(() => {
      // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
      // @ts-expect-error unused var
      const res = new CharacterTextSplitter({ chunkSize: 2, chunkOverlap: 4 });
      // console.log(res);
    }).toThrow();
  });

  test("Test create documents method.", async () => {
    const texts = ["foo bar", "baz"];
    const splitter = new CharacterTextSplitter({
      separator: " ",
      chunkSize: 3,
      chunkOverlap: 0,
    });
    const docs = await splitter.createDocuments(texts);
    const metadata = { loc: { lines: { from: 1, to: 1 } } };
    const expectedDocs = [
      new Document({ pageContent: "foo", metadata }),
      new Document({ pageContent: "bar", metadata }),
      new Document({ pageContent: "baz", metadata }),
    ];
    expect(docs).toEqual(expectedDocs);
  });

  test("Test create documents with metadata method.", async () => {
    const texts = ["foo bar", "baz"];
    const splitter = new CharacterTextSplitter({
      separator: " ",
      chunkSize: 3,
      chunkOverlap: 0,
    });
    const docs = await splitter.createDocuments(texts, [
      { source: "1" },
      { source: "2" },
    ]);
    const loc = { lines: { from: 1, to: 1 } };
    const expectedDocs = [
      new Document({ pageContent: "foo", metadata: { source: "1", loc } }),
      new Document({
        pageContent: "bar",
        metadata: { source: "1", loc },
      }),
      new Document({ pageContent: "baz", metadata: { source: "2", loc } }),
    ];
    expect(docs).toEqual(expectedDocs);
  });

  test("Test create documents method with metadata and an added chunk header.", async () => {
    const texts = ["foo bar", "baz"];
    const splitter = new CharacterTextSplitter({
      separator: " ",
      chunkSize: 3,
      chunkOverlap: 0,
    });
    const docs = await splitter.createDocuments(
      texts,
      [{ source: "1" }, { source: "2" }],
      {
        chunkHeader: `SOURCE NAME: testing\n-----\n`,
        appendChunkOverlapHeader: true,
      }
    );
    const loc = { lines: { from: 1, to: 1 } };
    const expectedDocs = [
      new Document({
        pageContent: "SOURCE NAME: testing\n-----\nfoo",
        metadata: { source: "1", loc },
      }),
      new Document({
        pageContent: "SOURCE NAME: testing\n-----\n(cont'd) bar",
        metadata: { source: "1", loc },
      }),
      new Document({
        pageContent: "SOURCE NAME: testing\n-----\nbaz",
        metadata: { source: "2", loc },
      }),
    ];
    expect(docs).toEqual(expectedDocs);
  });
});

describe("RecursiveCharacter text splitter", () => {
  test("One unique chunk", async () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 100,
      chunkOverlap: 0,
    });
    const content = textLineGenerator("A", 70);
    const docs = await splitter.createDocuments([content]);
    const expectedDocs = [
      new Document({
        pageContent: content.trim(),
        metadata: { loc: { lines: { from: 1, to: 1 } } },
      }),
    ];
    expect(docs).toEqual(expectedDocs);
  });

  test("Test iterative text splitter.", async () => {
    const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. This is a weird text to write, but gotta test the splittingggg some how.\n\n Bye!\n\n-H.`;
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 10,
      chunkOverlap: 1,
    });
    const output = await splitter.splitText(text);
    const expectedOutput = [
      "Hi.",
      "I'm",
      "Harrison.",
      "How? Are?",
      "You?",
      "Okay then",
      "f f f f.",
      "This is a",
      "weird",
      "text to",
      "write,",
      "but gotta",
      "test the",
      "splitting",
      "gggg",
      "some how.",
      "Bye!",
      "-H.",
    ];
    expect(output).toEqual(expectedOutput);
  });

  test("A basic chunked document", async () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 100,
      chunkOverlap: 0,
    });
    const line1 = textLineGenerator("A", 70);
    const line2 = textLineGenerator("B", 70);
    const content = line1 + line2;
    const docs = await splitter.createDocuments([content]);
    const expectedDocs = [
      new Document({
        pageContent: line1.trim(),
        metadata: { loc: { lines: { from: 1, to: 1 } } },
      }),
      new Document({
        pageContent: line2.trim(),
        metadata: { loc: { lines: { from: 2, to: 2 } } },
      }),
    ];
    expect(docs).toEqual(expectedDocs);
  });

  test("A chunked document with similar text", async () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 100,
      chunkOverlap: 0,
    });
    const line = textLineGenerator("A", 70);
    const content = line + line;
    const docs = await splitter.createDocuments([content]);
    const expectedDocs = [
      new Document({
        pageContent: line.trim(),
        metadata: { loc: { lines: { from: 1, to: 1 } } },
      }),
      new Document({
        pageContent: line.trim(),
        metadata: { loc: { lines: { from: 2, to: 2 } } },
      }),
    ];
    expect(docs).toEqual(expectedDocs);
  });

  test("A chunked document starting with new lines", async () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 100,
      chunkOverlap: 0,
    });
    const line1 = textLineGenerator("\n", 2);
    const line2 = textLineGenerator("A", 70);
    const line3 = textLineGenerator("\n", 4);
    const line4 = textLineGenerator("B", 70);
    const line5 = textLineGenerator("\n", 4);
    const content = line1 + line2 + line3 + line4 + line5;
    const docs = await splitter.createDocuments([content]);
    const expectedDocs = [
      new Document({
        pageContent: line2.trim(),
        metadata: { loc: { lines: { from: 3, to: 3 } } },
      }),
      new Document({
        pageContent: line4.trim(),
        metadata: { loc: { lines: { from: 8, to: 8 } } },
      }),
    ];
    expect(docs).toEqual(expectedDocs);
  });

  test("A chunked with overlap", async () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 100,
      chunkOverlap: 30,
    });
    const line1 = textLineGenerator("A", 70);
    const line2 = textLineGenerator("B", 20);
    const line3 = textLineGenerator("C", 70);
    const content = line1 + line2 + line3;
    const docs = await splitter.createDocuments([content]);
    const expectedDocs = [
      new Document({
        pageContent: line1 + line2.trim(),
        metadata: { loc: { lines: { from: 1, to: 2 } } },
      }),
      new Document({
        pageContent: line2 + line3.trim(),
        metadata: { loc: { lines: { from: 2, to: 3 } } },
      }),
    ];
    expect(docs).toEqual(expectedDocs);
  });

  test("Chunks with overlap that contains new lines", async () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 100,
      chunkOverlap: 30,
    });
    const line1 = textLineGenerator("A", 70);
    const line2 = textLineGenerator("B", 10);
    const line3 = textLineGenerator("C", 10);
    const line4 = textLineGenerator("D", 70);
    const content = line1 + line2 + line3 + line4;
    const docs = await splitter.createDocuments([content]);
    const expectedDocs = [
      new Document({
        pageContent: line1 + line2 + line3.trim(),
        metadata: { loc: { lines: { from: 1, to: 3 } } },
      }),
      new Document({
        pageContent: line2 + line3 + line4.trim(),
        metadata: { loc: { lines: { from: 2, to: 4 } } },
      }),
    ];
    expect(docs).toEqual(expectedDocs);
  });
});

test("Separator length is considered correctly for chunk size", async () => {
  const text = "aa ab ac ba bb";
  const splitter = new RecursiveCharacterTextSplitter({
    keepSeparator: false,
    chunkSize: 7,
    chunkOverlap: 3,
  });
  const output = await splitter.splitText(text);
  const expectedOutput = ["aa ab", "ab ac", "ac ba", "ba bb"];
  expect(output).toEqual(expectedOutput);
});

test("Token text splitter", async () => {
  const text = "foo bar baz a a";
  const splitter = new TokenTextSplitter({
    encodingName: "r50k_base",
    chunkSize: 3,
    chunkOverlap: 0,
  });
  const output = await splitter.splitText(text);
  const expectedOutput = ["foo bar b", "az a a"];
  expect(output).toEqual(expectedOutput);
});

test("Token text splitter overlap when last chunk is large", async () => {
  const text = "foo bar baz a a";
  const splitter = new TokenTextSplitter({
    encodingName: "r50k_base",
    chunkSize: 5,
    chunkOverlap: 3,
  });
  const output = await splitter.splitText(text);
  const expectedOutput = ["foo bar baz a", " baz a a"];
  expect(output).toEqual(expectedOutput);
});

test("Test markdown text splitter", async () => {
  const text =
    "# 🦜️🔗 LangChain\n" +
    "\n" +
    "⚡ Building applications with LLMs through composability ⚡\n" +
    "\n" +
    "## Quick Install\n" +
    "\n" +
    "```bash\n" +
    "# Hopefully this code block isn't split\n" +
    "pip install langchain\n" +
    "```\n" +
    "\n" +
    "As an open source project in a rapidly developing field, we are extremely open to contributions.";
  const splitter = new MarkdownTextSplitter({
    chunkSize: 100,
    chunkOverlap: 0,
  });
  const output = await splitter.splitText(text);
  const expectedOutput = [
    "# 🦜️🔗 LangChain\n\n⚡ Building applications with LLMs through composability ⚡",
    "## Quick Install\n\n```bash\n# Hopefully this code block isn't split\npip install langchain",
    "```",
    "As an open source project in a rapidly developing field, we are extremely open to contributions.",
  ];
  expect(output).toEqual(expectedOutput);
});

test("Test latex text splitter.", async () => {
  const text = `\\begin{document}
\\title{🦜️🔗 LangChain}
⚡ Building applications with LLMs through composability ⚡

\\section{Quick Install}

\\begin{verbatim}
Hopefully this code block isn't split
yarn add langchain
\\end{verbatim}

As an open source project in a rapidly developing field, we are extremely open to contributions.

\\end{document}`;
  const splitter = new LatexTextSplitter({
    chunkSize: 100,
    chunkOverlap: 0,
  });
  const output = await splitter.splitText(text);
  const expectedOutput = [
    "\\begin{document}\n\\title{🦜️🔗 LangChain}\n⚡ Building applications with LLMs through composability ⚡",
    "\\section{Quick Install}",
    "\\begin{verbatim}\nHopefully this code block isn't split\nyarn add langchain\n\\end{verbatim}",
    "As an open source project in a rapidly developing field, we are extremely open to contributions.",
    "\\end{document}",
  ];
  expect(output).toEqual(expectedOutput);
});

test("Test HTML text splitter", async () => {
  const text = `<!DOCTYPE html>
<html>
  <head>
    <title>🦜️🔗 LangChain</title>
    <style>
      body {
        font-family: Arial, sans-serif;
      }
      h1 {
        color: darkblue;
      }
    </style>
  </head>
  <body>
    <div>
      <h1>🦜️🔗 LangChain</h1>
      <p>⚡ Building applications with LLMs through composability ⚡</p>
    </div>
    <div>
      As an open source project in a rapidly developing field, we are extremely
      open to contributions.
    </div>
  </body>
</html>`;
  const splitter = RecursiveCharacterTextSplitter.fromLanguage("html", {
    chunkSize: 175,
    chunkOverlap: 20,
  });
  const output = await splitter.splitText(text);
  const expectedOutput = [
    "<!DOCTYPE html>\n<html>",
    "<head>\n    <title>🦜️🔗 LangChain</title>",
    `<style>
      body {
        font-family: Arial, sans-serif;
      }
      h1 {
        color: darkblue;
      }
    </style>
  </head>`,
    `<body>
    <div>
      <h1>🦜️🔗 LangChain</h1>
      <p>⚡ Building applications with LLMs through composability ⚡</p>
    </div>`,
    `<div>
      As an open source project in a rapidly developing field, we are extremely
      open to contributions.
    </div>
  </body>
</html>`,
  ];
  expect(output).toEqual(expectedOutput);
});

test("Test lines loc on iterative text splitter.", async () => {
  const text = `Hi.\nI'm Harrison.\n\nHow?\na\nb`;
  const splitter = new RecursiveCharacterTextSplitter({
    chunkSize: 20,
    chunkOverlap: 1,
  });
  const docs = await splitter.createDocuments([text]);
  const expectedDocs = [
    new Document({
      pageContent: "Hi.\nI'm Harrison.",
      metadata: { loc: { lines: { from: 1, to: 2 } } },
    }),
    new Document({
      pageContent: "How?\na\nb",
      metadata: { loc: { lines: { from: 4, to: 6 } } },
    }),
  ];
  expect(docs).toEqual(expectedDocs);
});
0
lc_public_repos/langchainjs/libs/langchain-textsplitters/src
lc_public_repos/langchainjs/libs/langchain-textsplitters/src/tests/code_text_splitter.test.ts
import { test, expect } from "@jest/globals"; import { RecursiveCharacterTextSplitter } from "../text_splitter.js"; /* Unit tests for RecursiveCharacterTextSplitter.fromLanguage: each test splits a tiny code sample with a small chunkSize (16, overlap 0) and pins the exact chunk sequence expected for that language's separator set. When a token is longer than chunkSize (e.g. 'fmt.Println("He' / 'llo,'), the splitter falls back to a hard character split — those expectations are intentional. */ test("Python code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("python", { chunkSize: 16, chunkOverlap: 0, }); const code = `def hello_world(): print("Hello, World!") # Call the function hello_world()`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "def", "hello_world():", 'print("Hello,', 'World!")', "# Call the", "function", "hello_world()", ]); }); test("Golang code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("go", { chunkSize: 16, chunkOverlap: 0, }); const code = `package main import "fmt" func helloWorld() { fmt.Println("Hello, World!") } func main() { helloWorld() }`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "package main", 'import "fmt"', "func", "helloWorld() {", 'fmt.Println("He', "llo,", 'World!")', "}", "func main() {", "helloWorld()", "}", ]); }); test("RST code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("rst", { chunkSize: 16, chunkOverlap: 0, }); const code = `Sample Document =============== Section ------- This is the content of the section. 
Lists ----- - Item 1 - Item 2 - Item 3`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "Sample Document", "===============", "Section\n-------", "This is the", "content of the", "section.", "Lists\n-----", "- Item 1", "- Item 2", "- Item 3", ]); }); test("Proto code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("proto", { chunkSize: 16, chunkOverlap: 0, }); const code = `syntax = "proto3"; package example; message Person { string name = 1; int32 age = 2; repeated string hobbies = 3; }`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "syntax =", '"proto3";', "package", "example;", "message Person", "{", "string name", "= 1;", "int32 age =", "2;", "repeated", "string hobbies", "= 3;", "}", ]); }); test("JS code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("js", { chunkSize: 16, chunkOverlap: 0, }); const code = `function helloWorld() { console.log("Hello, World!"); } // Call the function helloWorld();`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "function", "helloWorld() {", 'console.log("He', "llo,", 'World!");', "}", "// Call the", "function", "helloWorld();", ]); }); test("Java code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("java", { chunkSize: 16, chunkOverlap: 0, }); const code = `public class HelloWorld { public static void main(String[] args) { System.out.println("Hello, World!"); } }`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "public class", "HelloWorld {", "public static", "void", "main(String[]", "args) {", "System.out.prin", 'tln("Hello,', 'World!");', "}\n}", ]); }); test("CPP code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("cpp", { chunkSize: 16, chunkOverlap: 0, }); const code = `#include <iostream> int main() { std::cout << "Hello, World!" 
<< std::endl; return 0; }`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "#include", "<iostream>", "int main() {", "std::cout", '<< "Hello,', 'World!" <<', "std::endl;", "return 0;\n}", ]); }); test("Scala code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("scala", { chunkSize: 16, chunkOverlap: 0, }); const code = `object HelloWorld { def main(args: Array[String]): Unit = { println("Hello, World!") } }`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "object", "HelloWorld {", "def", "main(args:", "Array[String]):", "Unit = {", 'println("Hello,', 'World!")', "}\n}", ]); }); test("Ruby code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("ruby", { chunkSize: 16, chunkOverlap: 0, }); const code = `def hello_world puts "Hello, World!" end hello_world`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "def hello_world", 'puts "Hello,', 'World!"', "end\nhello_world", ]); }); test("PHP code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("php", { chunkSize: 16, chunkOverlap: 0, }); const code = `<?php function hello_world() { echo "Hello, World!"; } hello_world(); ?>`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "<?php", "function", "hello_world() {", "echo", '"Hello,', 'World!";', "}", "hello_world();", "?>", ]); }); test("Swift code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("swift", { chunkSize: 16, chunkOverlap: 0, }); const code = `func helloWorld() { print("Hello, World!") } helloWorld()`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "func", "helloWorld() {", 'print("Hello,', 'World!")', "}\nhelloWorld()", ]); }); test("Rust code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("rust", { chunkSize: 16, chunkOverlap: 
0, }); const code = `fn main() { println!("Hello, World!"); }`; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "fn main() {", 'println!("Hello', ",", 'World!");', "}", ]); }); test("Solidity code splitter", async () => { const splitter = RecursiveCharacterTextSplitter.fromLanguage("sol", { chunkSize: 16, chunkOverlap: 0, }); const code = `pragma solidity ^0.8.20; contract HelloWorld { function add(uint a, uint b) pure public returns(uint) { return a + b; } } `; const chunks = await splitter.splitText(code); expect(chunks).toStrictEqual([ "pragma solidity", "^0.8.20;", "contract", "HelloWorld {", "function", "add(uint a,", "uint b) pure", "public", "returns(uint) {", "return a", "+ b;", "}\n  }", ]); });
0
lc_public_repos/langchainjs/libs/langchain-textsplitters
lc_public_repos/langchainjs/libs/langchain-textsplitters/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-mistralai/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": [ "ES2021", "ES2022.Object", "DOM" ], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": [ "src/**/*" ], "exclude": [ "node_modules", "dist", "docs" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-mistralai/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-mistralai/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { '^.+\\.tsx?$': ['@swc/jest'], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], testTimeout: 20_000, passWithNoTests: true, collectCoverageFrom: ["src/**/*.ts"] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-mistralai/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node"); class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { constructor(config, context) { // Make `instanceof Float32Array` return true in tests // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 super(config, context); this.global.Float32Array = Float32Array; } } module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-mistralai/README.md
# @langchain/mistralai This package contains the LangChain.js integrations for Mistral through their SDK. ## Installation ```bash npm2yarn npm install @langchain/mistralai @langchain/core ``` This package, along with the main LangChain package, depends on [`@langchain/core`](https://npmjs.com/package/@langchain/core/). If you are using this package with other LangChain packages, you should make sure that all of the packages depend on the same instance of @langchain/core. You can do so by adding appropriate field to your project's `package.json` like this: ```json { "name": "your-project", "version": "0.0.0", "dependencies": { "@langchain/core": "^0.3.0", "@langchain/mistralai": "^0.0.0" }, "resolutions": { "@langchain/core": "^0.3.0" }, "overrides": { "@langchain/core": "^0.3.0" }, "pnpm": { "overrides": { "@langchain/core": "^0.3.0" } } } ``` The field you need depends on the package manager you're using, but we recommend adding a field for the common `yarn`, `npm`, and `pnpm` to maximize compatibility. ## Chat Models This package contains the `ChatMistralAI` class, which is the recommended way to interface with the Mistral series of models. To use, install the requirements, and configure your environment. ```bash export MISTRAL_API_KEY=your-api-key ``` Then initialize ```typescript import { ChatMistralAI } from "@langchain/mistralai"; const model = new ChatMistralAI({ apiKey: process.env.MISTRAL_API_KEY, modelName: "mistral-small", }); const response = await model.invoke(new HumanMessage("Hello world!")); ``` ### Streaming ```typescript import { ChatMistralAI } from "@langchain/mistralai"; const model = new ChatMistralAI({ apiKey: process.env.MISTRAL_API_KEY, modelName: "mistral-small", }); const response = await model.stream(new HumanMessage("Hello world!")); ``` ## Embeddings This package also adds support for Mistral's embeddings model. 
```typescript import { MistralAIEmbeddings } from "@langchain/mistralai"; const embeddings = new MistralAIEmbeddings({ apiKey: process.env.MISTRAL_API_KEY, }); const res = await embeddings.embedQuery("Hello world"); ``` ## Development To develop the Mistral package, you'll need to follow these instructions: ### Install dependencies ```bash yarn install ``` ### Build the package ```bash yarn build ``` Or from the repo root: ```bash yarn build --filter=@langchain/mistralai ``` ### Run tests Test files should live within a `tests/` folder in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`: ```bash $ yarn test $ yarn test:int ``` ### Lint & Format Run the linter & formatter to ensure your code is up to standard: ```bash yarn lint && yarn format ``` ### Adding new entrypoints If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-mistralai/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-mistralai/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };