index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-openai/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-openai/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof", "eslint-plugin-jest"], ignorePatterns: [ "src/utils/@cfworker", "src/utils/fast-json-patch", "src/utils/js-sha1", ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], 'jest/no-focused-tests': 'error', }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-openai/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//, "openai/helpers/zod"], entrypoints: { index: "index", }, tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-openai/package.json
{ "name": "@langchain/openai", "version": "0.3.14", "description": "OpenAI integrations for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-openai/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/openai", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard": "yarn test:standard:unit && yarn test:standard:int", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "js-tiktoken": "^1.0.12", "openai": "^4.71.0", "zod": "^3.22.4", 
"zod-to-json-schema": "^3.22.3" }, "peerDependencies": { "@langchain/core": ">=0.2.26 <0.4.0" }, "devDependencies": { "@azure/identity": "^4.2.1", "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@langchain/standard-tests": "0.0.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-jest": "^27.6.0", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rimraf": "^5.0.1", "ts-jest": "^29.1.0", "typescript": "~5.1.6" }, "publishConfig": { "access": "public" }, "keywords": [ "llm", "ai", "gpt3", "chain", "prompt", "prompt engineering", "chatgpt", "machine learning", "ml", "openai", "embeddings", "vectorstores" ], "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-openai/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": [ "node_modules", "dist", "docs", "**/tests" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-openai/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-openai/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-openai
lc_public_repos/langchainjs/libs/langchain-openai/src/types.ts
import type { OpenAI as OpenAIClient } from "openai"; import type { ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema, } from "openai/resources/shared"; import { TiktokenModel } from "js-tiktoken/lite"; import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; import type { z } from "zod"; // reexport this type from the included package so we can easily override and extend it if needed in the future // also makes it easier for folks to import this type without digging around into the dependent packages export type { TiktokenModel }; export declare interface OpenAIBaseInput { /** Sampling temperature to use */ temperature: number; /** * Maximum number of tokens to generate in the completion. -1 returns as many * tokens as possible given the prompt and the model's maximum context size. */ maxTokens?: number; /** Total probability mass of tokens to consider at each step */ topP: number; /** Penalizes repeated tokens according to frequency */ frequencyPenalty: number; /** Penalizes repeated tokens */ presencePenalty: number; /** Number of completions to generate for each prompt */ n: number; /** Dictionary used to adjust the probability of specific tokens being generated */ logitBias?: Record<string, number>; /** Unique string identifier representing your end-user, which can help OpenAI to monitor and detect abuse. */ user?: string; /** Whether to stream the results or not. Enabling disables tokenUsage reporting */ streaming: boolean; /** * Whether or not to include token usage data in streamed chunks. * @default true */ streamUsage?: boolean; /** * Model name to use * Alias for `model` */ modelName: string; /** Model name to use */ model: string; /** Holds any additional parameters that are valid to pass to {@link * https://platform.openai.com/docs/api-reference/completions/create | * `openai.createCompletion`} that are not explicitly specified on this class. 
*/ // eslint-disable-next-line @typescript-eslint/no-explicit-any modelKwargs?: Record<string, any>; /** * List of stop words to use when generating * Alias for `stopSequences` */ stop?: string[]; /** List of stop words to use when generating */ stopSequences?: string[]; /** * Timeout to use when making requests to OpenAI. */ timeout?: number; /** * API key to use when making requests to OpenAI. Defaults to the value of * `OPENAI_API_KEY` environment variable. * Alias for `apiKey` */ openAIApiKey?: string; /** * API key to use when making requests to OpenAI. Defaults to the value of * `OPENAI_API_KEY` environment variable. */ apiKey?: string; } // TODO use OpenAI.Core.RequestOptions when SDK is updated to make it available export type OpenAICoreRequestOptions< Req extends object = Record<string, unknown> > = { path?: string; query?: Req | undefined; body?: Req | undefined; headers?: Record<string, string | null | undefined> | undefined; maxRetries?: number; stream?: boolean | undefined; timeout?: number; // eslint-disable-next-line @typescript-eslint/no-explicit-any httpAgent?: any; signal?: AbortSignal | undefined | null; idempotencyKey?: string; }; export interface OpenAICallOptions extends BaseLanguageModelCallOptions { /** * Additional options to pass to the underlying axios request. */ options?: OpenAICoreRequestOptions; } /** * Input to OpenAI class. */ export declare interface OpenAIInput extends OpenAIBaseInput { /** Generates `bestOf` completions server side and returns the "best" */ bestOf?: number; /** Batch size to use when passing multiple documents to generate */ batchSize: number; } /** * @deprecated Use "baseURL", "defaultHeaders", and "defaultParams" instead. 
*/ export interface LegacyOpenAIInput { /** @deprecated Use baseURL instead */ basePath?: string; /** @deprecated Use defaultHeaders and defaultQuery instead */ baseOptions?: { headers?: Record<string, string>; params?: Record<string, string>; }; } export interface OpenAIChatInput extends OpenAIBaseInput { /** * Whether to return log probabilities of the output tokens or not. * If true, returns the log probabilities of each output token returned in the content of message. */ logprobs?: boolean; /** * An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, * each with an associated log probability. logprobs must be set to true if this parameter is used. */ topLogprobs?: number; /** ChatGPT messages to pass as a prefix to the prompt */ prefixMessages?: OpenAIClient.Chat.ChatCompletionMessageParam[]; /** * Whether to include the raw OpenAI response in the output message's "additional_kwargs" field. * Currently in experimental beta. */ __includeRawResponse?: boolean; /** * Whether the model supports the `strict` argument when passing in tools. * If `undefined` the `strict` argument will not be passed to OpenAI. */ supportsStrictToolCalling?: boolean; /** * Output types that you would like the model to generate for this request. Most * models are capable of generating text, which is the default: * * `["text"]` * * The `gpt-4o-audio-preview` model can also be used to * [generate audio](https://platform.openai.com/docs/guides/audio). To request that * this model generate both text and audio responses, you can use: * * `["text", "audio"]` */ modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>; /** * Parameters for audio output. Required when audio output is requested with * `modalities: ["audio"]`. * [Learn more](https://platform.openai.com/docs/guides/audio). 
*/ audio?: OpenAIClient.Chat.ChatCompletionAudioParam; } export declare interface AzureOpenAIInput { /** * API version to use when making requests to Azure OpenAI. */ azureOpenAIApiVersion?: string; /** * API key to use when making requests to Azure OpenAI. */ azureOpenAIApiKey?: string; /** * Azure OpenAI API instance name to use when making requests to Azure OpenAI. * this is the name of the instance you created in the Azure portal. * e.g. "my-openai-instance" * this will be used in the endpoint URL: https://my-openai-instance.openai.azure.com/openai/deployments/{DeploymentName}/ */ azureOpenAIApiInstanceName?: string; /** * Azure OpenAI API deployment name to use for completions when making requests to Azure OpenAI. * This is the name of the deployment you created in the Azure portal. * e.g. "my-openai-deployment" * this will be used in the endpoint URL: https://{InstanceName}.openai.azure.com/openai/deployments/my-openai-deployment/ */ azureOpenAIApiDeploymentName?: string; /** * Azure OpenAI API deployment name to use for embedding when making requests to Azure OpenAI. * This is the name of the deployment you created in the Azure portal. * This will fallback to azureOpenAIApiDeploymentName if not provided. * e.g. "my-openai-deployment" * this will be used in the endpoint URL: https://{InstanceName}.openai.azure.com/openai/deployments/my-openai-deployment/ */ azureOpenAIApiEmbeddingsDeploymentName?: string; /** * Azure OpenAI API deployment name to use for completions when making requests to Azure OpenAI. * Completions are only available for gpt-3.5-turbo and text-davinci-003 deployments. * This is the name of the deployment you created in the Azure portal. * This will fallback to azureOpenAIApiDeploymentName if not provided. * e.g. 
"my-openai-deployment" * this will be used in the endpoint URL: https://{InstanceName}.openai.azure.com/openai/deployments/my-openai-deployment/ */ azureOpenAIApiCompletionsDeploymentName?: string; /** * Custom base url for Azure OpenAI API. This is useful in case you have a deployment in another region. * e.g. setting this value to "https://westeurope.api.cognitive.microsoft.com/openai/deployments" * will be result in the endpoint URL: https://westeurope.api.cognitive.microsoft.com/openai/deployments/{DeploymentName}/ */ azureOpenAIBasePath?: string; /** * Custom endpoint for Azure OpenAI API. This is useful in case you have a deployment in another region. * e.g. setting this value to "https://westeurope.api.cognitive.microsoft.com/" * will be result in the endpoint URL: https://westeurope.api.cognitive.microsoft.com/openai/deployments/{DeploymentName}/ */ azureOpenAIEndpoint?: string; /** * A function that returns an access token for Microsoft Entra (formerly known as Azure Active Directory), * which will be invoked on every request. */ azureADTokenProvider?: () => Promise<string>; } type ChatOpenAIResponseFormatJSONSchema = Omit< ResponseFormatJSONSchema, "json_schema" > & { json_schema: Omit<ResponseFormatJSONSchema["json_schema"], "schema"> & { /** * The schema for the response format, described as a JSON Schema object * or a Zod object. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any schema: Record<string, any> | z.ZodObject<any, any, any, any>; }; }; export type ChatOpenAIResponseFormat = | ResponseFormatText | ResponseFormatJSONObject | ChatOpenAIResponseFormatJSONSchema;
0
lc_public_repos/langchainjs/libs/langchain-openai
lc_public_repos/langchainjs/libs/langchain-openai/src/llms.ts
import type { TiktokenModel } from "js-tiktoken/lite"; import { type ClientOptions, OpenAI as OpenAIClient } from "openai"; import { calculateMaxTokens } from "@langchain/core/language_models/base"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { GenerationChunk, type LLMResult } from "@langchain/core/outputs"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { BaseLLM, type BaseLLMParams, } from "@langchain/core/language_models/llms"; import { chunkArray } from "@langchain/core/utils/chunk_array"; import type { AzureOpenAIInput, OpenAICallOptions, OpenAICoreRequestOptions, OpenAIInput, LegacyOpenAIInput, } from "./types.js"; import { OpenAIEndpointConfig, getEndpoint } from "./utils/azure.js"; import { OpenAIChat, OpenAIChatCallOptions } from "./legacy.js"; import { wrapOpenAIClientError } from "./utils/openai.js"; export type { AzureOpenAIInput, OpenAICallOptions, OpenAIInput, OpenAIChatCallOptions, }; export { OpenAIChat }; /** * Interface for tracking token usage in OpenAI calls. */ interface TokenUsage { completionTokens?: number; promptTokens?: number; totalTokens?: number; } /** * Wrapper around OpenAI large language models. * * To use you should have the `openai` package installed, with the * `OPENAI_API_KEY` environment variable set. * * To use with Azure you should have the `openai` package installed, with the * `AZURE_OPENAI_API_KEY`, * `AZURE_OPENAI_API_INSTANCE_NAME`, * `AZURE_OPENAI_API_DEPLOYMENT_NAME` * and `AZURE_OPENAI_API_VERSION` environment variable set. * * @remarks * Any parameters that are valid to be passed to {@link * https://platform.openai.com/docs/api-reference/completions/create | * `openai.createCompletion`} can be passed through {@link modelKwargs}, even * if not explicitly available on this class. 
* @example * ```typescript * const model = new OpenAI({ * modelName: "gpt-4", * temperature: 0.7, * maxTokens: 1000, * maxRetries: 5, * }); * * const res = await model.invoke( * "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:" * ); * console.log({ res }); * ``` */ export class OpenAI<CallOptions extends OpenAICallOptions = OpenAICallOptions> extends BaseLLM<CallOptions> implements OpenAIInput, AzureOpenAIInput { static lc_name() { return "OpenAI"; } get callKeys() { return [...super.callKeys, "options"]; } lc_serializable = true; get lc_secrets(): { [key: string]: string } | undefined { return { openAIApiKey: "OPENAI_API_KEY", apiKey: "OPENAI_API_KEY", azureOpenAIApiKey: "AZURE_OPENAI_API_KEY", organization: "OPENAI_ORGANIZATION", }; } get lc_aliases(): Record<string, string> { return { modelName: "model", openAIApiKey: "openai_api_key", apiKey: "openai_api_key", azureOpenAIApiVersion: "azure_openai_api_version", azureOpenAIApiKey: "azure_openai_api_key", azureOpenAIApiInstanceName: "azure_openai_api_instance_name", azureOpenAIApiDeploymentName: "azure_openai_api_deployment_name", }; } temperature = 0.7; maxTokens = 256; topP = 1; frequencyPenalty = 0; presencePenalty = 0; n = 1; bestOf?: number; logitBias?: Record<string, number>; modelName = "gpt-3.5-turbo-instruct"; model = "gpt-3.5-turbo-instruct"; modelKwargs?: OpenAIInput["modelKwargs"]; batchSize = 20; timeout?: number; stop?: string[]; stopSequences?: string[]; user?: string; streaming = false; openAIApiKey?: string; apiKey?: string; azureOpenAIApiVersion?: string; azureOpenAIApiKey?: string; azureADTokenProvider?: () => Promise<string>; azureOpenAIApiInstanceName?: string; azureOpenAIApiDeploymentName?: string; azureOpenAIBasePath?: string; organization?: string; protected client: OpenAIClient; protected clientConfig: ClientOptions; constructor( fields?: Partial<OpenAIInput> & Partial<AzureOpenAIInput> & BaseLLMParams & { configuration?: ClientOptions & 
LegacyOpenAIInput; }, /** @deprecated */ configuration?: ClientOptions & LegacyOpenAIInput ) { let model = fields?.model ?? fields?.modelName; if ( (model?.startsWith("gpt-3.5-turbo") || model?.startsWith("gpt-4")) && !model?.includes("-instruct") ) { console.warn( [ `Your chosen OpenAI model, "${model}", is a chat model and not a text-in/text-out LLM.`, `Passing it into the "OpenAI" class is deprecated and only permitted for backwards-compatibility. You may experience odd behavior.`, `Please use the "ChatOpenAI" class instead.`, "", `See this page for more information:`, "|", `└> https://js.langchain.com/docs/integrations/chat/openai`, ].join("\n") ); // eslint-disable-next-line no-constructor-return return new OpenAIChat( fields, configuration ) as unknown as OpenAI<CallOptions>; } super(fields ?? {}); model = model ?? this.model; this.openAIApiKey = fields?.apiKey ?? fields?.openAIApiKey ?? getEnvironmentVariable("OPENAI_API_KEY"); this.apiKey = this.openAIApiKey; this.azureOpenAIApiKey = fields?.azureOpenAIApiKey ?? getEnvironmentVariable("AZURE_OPENAI_API_KEY"); this.azureADTokenProvider = fields?.azureADTokenProvider ?? undefined; if (!this.azureOpenAIApiKey && !this.apiKey && !this.azureADTokenProvider) { throw new Error( "OpenAI or Azure OpenAI API key or Token Provider not found" ); } this.azureOpenAIApiInstanceName = fields?.azureOpenAIApiInstanceName ?? getEnvironmentVariable("AZURE_OPENAI_API_INSTANCE_NAME"); this.azureOpenAIApiDeploymentName = (fields?.azureOpenAIApiCompletionsDeploymentName || fields?.azureOpenAIApiDeploymentName) ?? (getEnvironmentVariable("AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME") || getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME")); this.azureOpenAIApiVersion = fields?.azureOpenAIApiVersion ?? getEnvironmentVariable("AZURE_OPENAI_API_VERSION"); this.azureOpenAIBasePath = fields?.azureOpenAIBasePath ?? getEnvironmentVariable("AZURE_OPENAI_BASE_PATH"); this.organization = fields?.configuration?.organization ?? 
getEnvironmentVariable("OPENAI_ORGANIZATION"); this.modelName = model; this.model = model; this.modelKwargs = fields?.modelKwargs ?? {}; this.batchSize = fields?.batchSize ?? this.batchSize; this.timeout = fields?.timeout; this.temperature = fields?.temperature ?? this.temperature; this.maxTokens = fields?.maxTokens ?? this.maxTokens; this.topP = fields?.topP ?? this.topP; this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty; this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty; this.n = fields?.n ?? this.n; this.bestOf = fields?.bestOf ?? this.bestOf; this.logitBias = fields?.logitBias; this.stop = fields?.stopSequences ?? fields?.stop; this.stopSequences = fields?.stopSequences; this.user = fields?.user; this.streaming = fields?.streaming ?? false; if (this.streaming && this.bestOf && this.bestOf > 1) { throw new Error("Cannot stream results when bestOf > 1"); } if (this.azureOpenAIApiKey || this.azureADTokenProvider) { if (!this.azureOpenAIApiInstanceName && !this.azureOpenAIBasePath) { throw new Error("Azure OpenAI API instance name not found"); } if (!this.azureOpenAIApiDeploymentName) { throw new Error("Azure OpenAI API deployment name not found"); } if (!this.azureOpenAIApiVersion) { throw new Error("Azure OpenAI API version not found"); } this.apiKey = this.apiKey ?? ""; } this.clientConfig = { apiKey: this.apiKey, organization: this.organization, baseURL: configuration?.basePath ?? fields?.configuration?.basePath, dangerouslyAllowBrowser: true, defaultHeaders: configuration?.baseOptions?.headers ?? fields?.configuration?.baseOptions?.headers, defaultQuery: configuration?.baseOptions?.params ?? 
fields?.configuration?.baseOptions?.params, ...configuration, ...fields?.configuration, }; } /** * Get the parameters used to invoke the model */ invocationParams( options?: this["ParsedCallOptions"] ): Omit<OpenAIClient.CompletionCreateParams, "prompt"> { return { model: this.model, temperature: this.temperature, max_tokens: this.maxTokens, top_p: this.topP, frequency_penalty: this.frequencyPenalty, presence_penalty: this.presencePenalty, n: this.n, best_of: this.bestOf, logit_bias: this.logitBias, stop: options?.stop ?? this.stopSequences, user: this.user, stream: this.streaming, ...this.modelKwargs, }; } /** @ignore */ _identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, "prompt"> & { model_name: string; } & ClientOptions { return { model_name: this.model, ...this.invocationParams(), ...this.clientConfig, }; } /** * Get the identifying parameters for the model */ identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, "prompt"> & { model_name: string; } & ClientOptions { return this._identifyingParams(); } /** * Call out to OpenAI's endpoint with k unique prompts * * @param [prompts] - The prompts to pass into the model. * @param [options] - Optional list of stop words to use when generating. * @param [runManager] - Optional callback manager to use when generating. * * @returns The full LLM output. 
* * @example * ```ts * import { OpenAI } from "langchain/llms/openai"; * const openai = new OpenAI(); * const response = await openai.generate(["Tell me a joke."]); * ``` */ async _generate( prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<LLMResult> { const subPrompts = chunkArray(prompts, this.batchSize); const choices: OpenAIClient.CompletionChoice[] = []; const tokenUsage: TokenUsage = {}; const params = this.invocationParams(options); if (params.max_tokens === -1) { if (prompts.length !== 1) { throw new Error( "max_tokens set to -1 not supported for multiple inputs" ); } params.max_tokens = await calculateMaxTokens({ prompt: prompts[0], // Cast here to allow for other models that may not fit the union modelName: this.model as TiktokenModel, }); } for (let i = 0; i < subPrompts.length; i += 1) { const data = params.stream ? await (async () => { const choices: OpenAIClient.CompletionChoice[] = []; let response: Omit<OpenAIClient.Completion, "choices"> | undefined; const stream = await this.completionWithRetry( { ...params, stream: true, prompt: subPrompts[i], }, options ); for await (const message of stream) { // on the first message set the response properties if (!response) { response = { id: message.id, object: message.object, created: message.created, model: message.model, }; } // on all messages, update choice for (const part of message.choices) { if (!choices[part.index]) { choices[part.index] = part; } else { const choice = choices[part.index]; choice.text += part.text; choice.finish_reason = part.finish_reason; choice.logprobs = part.logprobs; } void runManager?.handleLLMNewToken(part.text, { prompt: Math.floor(part.index / this.n), completion: part.index % this.n, }); } } if (options.signal?.aborted) { throw new Error("AbortError"); } return { ...response, choices }; })() : await this.completionWithRetry( { ...params, stream: false, prompt: subPrompts[i], }, { signal: options.signal, 
...options.options, } ); choices.push(...data.choices); const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage ? data.usage : { completion_tokens: undefined, prompt_tokens: undefined, total_tokens: undefined, }; if (completionTokens) { tokenUsage.completionTokens = (tokenUsage.completionTokens ?? 0) + completionTokens; } if (promptTokens) { tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens; } if (totalTokens) { tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens; } } const generations = chunkArray(choices, this.n).map((promptChoices) => promptChoices.map((choice) => ({ text: choice.text ?? "", generationInfo: { finishReason: choice.finish_reason, logprobs: choice.logprobs, }, })) ); return { generations, llmOutput: { tokenUsage }, }; } // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation? async *_streamResponseChunks( input: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): AsyncGenerator<GenerationChunk> { const params = { ...this.invocationParams(options), prompt: input, stream: true as const, }; const stream = await this.completionWithRetry(params, options); for await (const data of stream) { const choice = data?.choices[0]; if (!choice) { continue; } const chunk = new GenerationChunk({ text: choice.text, generationInfo: { finishReason: choice.finish_reason, }, }); yield chunk; // eslint-disable-next-line no-void void runManager?.handleLLMNewToken(chunk.text ?? ""); } if (options.signal?.aborted) { throw new Error("AbortError"); } } /** * Calls the OpenAI API with retry logic in case of failures. * @param request The request to send to the OpenAI API. * @param options Optional configuration for the API call. * @returns The response from the OpenAI API. 
*/ async completionWithRetry( request: OpenAIClient.CompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions ): Promise<AsyncIterable<OpenAIClient.Completion>>; async completionWithRetry( request: OpenAIClient.CompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions ): Promise<OpenAIClient.Completions.Completion>; async completionWithRetry( request: | OpenAIClient.CompletionCreateParamsStreaming | OpenAIClient.CompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions ): Promise< AsyncIterable<OpenAIClient.Completion> | OpenAIClient.Completions.Completion > { const requestOptions = this._getClientOptions(options); return this.caller.call(async () => { try { const res = await this.client.completions.create( request, requestOptions ); return res; } catch (e) { const error = wrapOpenAIClientError(e); throw error; } }); } /** * Calls the OpenAI API with retry logic in case of failures. * @param request The request to send to the OpenAI API. * @param options Optional configuration for the API call. * @returns The response from the OpenAI API. 
*/ protected _getClientOptions(options: OpenAICoreRequestOptions | undefined) { if (!this.client) { const openAIEndpointConfig: OpenAIEndpointConfig = { azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName, azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName, azureOpenAIApiKey: this.azureOpenAIApiKey, azureOpenAIBasePath: this.azureOpenAIBasePath, baseURL: this.clientConfig.baseURL, }; const endpoint = getEndpoint(openAIEndpointConfig); const params = { ...this.clientConfig, baseURL: endpoint, timeout: this.timeout, maxRetries: 0, }; if (!params.baseURL) { delete params.baseURL; } this.client = new OpenAIClient(params); } const requestOptions = { ...this.clientConfig, ...options, } as OpenAICoreRequestOptions; if (this.azureOpenAIApiKey) { requestOptions.headers = { "api-key": this.azureOpenAIApiKey, ...requestOptions.headers, }; requestOptions.query = { "api-version": this.azureOpenAIApiVersion, ...requestOptions.query, }; } return requestOptions; } _llmType() { return "openai"; } }
0
lc_public_repos/langchainjs/libs/langchain-openai
lc_public_repos/langchainjs/libs/langchain-openai/src/legacy.ts
import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { GenerationChunk } from "@langchain/core/outputs";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { type BaseLLMParams, LLM } from "@langchain/core/language_models/llms";
import {
  AzureOpenAIInput,
  OpenAICallOptions,
  OpenAIChatInput,
  OpenAICoreRequestOptions,
  LegacyOpenAIInput,
} from "./types.js";
import { OpenAIEndpointConfig, getEndpoint } from "./utils/azure.js";
import { wrapOpenAIClientError } from "./utils/openai.js";

export { type AzureOpenAIInput, type OpenAIChatInput };

/**
 * Interface that extends the OpenAICallOptions interface and includes an
 * optional promptIndex property. It represents the options that can be
 * passed when making a call to the OpenAI Chat API.
 */
export interface OpenAIChatCallOptions extends OpenAICallOptions {
  promptIndex?: number;
}

/**
 * @deprecated For legacy compatibility. Use ChatOpenAI instead.
 *
 * Wrapper around OpenAI large language models that use the Chat endpoint.
 *
 * To use you should have the `openai` package installed, with the
 * `OPENAI_API_KEY` environment variable set.
 *
 * To use with Azure you should have the `openai` package installed, with the
 * `AZURE_OPENAI_API_KEY`,
 * `AZURE_OPENAI_API_INSTANCE_NAME`,
 * `AZURE_OPENAI_API_DEPLOYMENT_NAME`
 * and `AZURE_OPENAI_API_VERSION` environment variable set.
 *
 * @remarks
 * Any parameters that are valid to be passed to {@link
 * https://platform.openai.com/docs/api-reference/chat/create |
 * `openai.createCompletion`} can be passed through {@link modelKwargs}, even
 * if not explicitly available on this class.
 *
 * @augments BaseLLM
 * @augments OpenAIInput
 * @augments AzureOpenAIChatInput
 * @example
 * ```typescript
 * const model = new OpenAIChat({
 *   prefixMessages: [
 *     {
 *       role: "system",
 *       content: "You are a helpful assistant that answers in pirate language",
 *     },
 *   ],
 *   maxTokens: 50,
 * });
 *
 * const res = await model.invoke(
 *   "What would be a good company name for a company that makes colorful socks?"
 * );
 * console.log({ res });
 * ```
 */
export class OpenAIChat
  extends LLM<OpenAIChatCallOptions>
  implements OpenAIChatInput, AzureOpenAIInput
{
  static lc_name() {
    return "OpenAIChat";
  }

  get callKeys() {
    return [...super.callKeys, "options", "promptIndex"];
  }

  lc_serializable = true;

  // Maps constructor fields to the environment variables they may be
  // loaded from, so serialization can redact secrets.
  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      openAIApiKey: "OPENAI_API_KEY",
      azureOpenAIApiKey: "AZURE_OPENAI_API_KEY",
      organization: "OPENAI_ORGANIZATION",
    };
  }

  // Serialization aliases (camelCase field -> snake_case wire name).
  get lc_aliases(): Record<string, string> {
    return {
      modelName: "model",
      openAIApiKey: "openai_api_key",
      azureOpenAIApiVersion: "azure_openai_api_version",
      azureOpenAIApiKey: "azure_openai_api_key",
      azureOpenAIApiInstanceName: "azure_openai_api_instance_name",
      azureOpenAIApiDeploymentName: "azure_openai_api_deployment_name",
    };
  }

  // Sampling parameters; defaults mirror the OpenAI API defaults.
  temperature = 1;

  topP = 1;

  frequencyPenalty = 0;

  presencePenalty = 0;

  // Number of completions per prompt. Must stay 1 for this class (enforced
  // in the constructor).
  n = 1;

  logitBias?: Record<string, number>;

  maxTokens?: number;

  modelName = "gpt-3.5-turbo";

  // NOTE(review): the constructor only updates `modelName`; this `model`
  // field keeps its default value — confirm whether it should stay in sync.
  model = "gpt-3.5-turbo";

  // Messages prepended before the user prompt on every call.
  prefixMessages?: OpenAIClient.Chat.ChatCompletionMessageParam[];

  // Extra request parameters forwarded verbatim to the API.
  modelKwargs?: OpenAIChatInput["modelKwargs"];

  timeout?: number;

  stop?: string[];

  user?: string;

  streaming = false;

  openAIApiKey?: string;

  azureOpenAIApiVersion?: string;

  azureOpenAIApiKey?: string;

  azureOpenAIApiInstanceName?: string;

  azureOpenAIApiDeploymentName?: string;

  azureOpenAIBasePath?: string;

  organization?: string;

  // SDK client, created lazily in `_getClientOptions`.
  private client: OpenAIClient;

  private clientConfig: ClientOptions;

  constructor(
    fields?: Partial<OpenAIChatInput> &
      Partial<AzureOpenAIInput> &
      BaseLLMParams & {
        configuration?: ClientOptions & LegacyOpenAIInput;
      },
    /** @deprecated */
    configuration?: ClientOptions & LegacyOpenAIInput
  ) {
    super(fields ?? {});

    // Credentials: explicit fields win, then environment variables.
    this.openAIApiKey =
      fields?.apiKey ??
      fields?.openAIApiKey ??
      getEnvironmentVariable("OPENAI_API_KEY");

    this.azureOpenAIApiKey =
      fields?.azureOpenAIApiKey ??
      getEnvironmentVariable("AZURE_OPENAI_API_KEY");

    if (!this.azureOpenAIApiKey && !this.openAIApiKey) {
      throw new Error("OpenAI or Azure OpenAI API key not found");
    }

    this.azureOpenAIApiInstanceName =
      fields?.azureOpenAIApiInstanceName ??
      getEnvironmentVariable("AZURE_OPENAI_API_INSTANCE_NAME");

    // The completions-specific deployment name takes precedence over the
    // generic one, both as a field and as an environment variable.
    this.azureOpenAIApiDeploymentName =
      (fields?.azureOpenAIApiCompletionsDeploymentName ||
        fields?.azureOpenAIApiDeploymentName) ??
      (getEnvironmentVariable("AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME") ||
        getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME"));

    this.azureOpenAIApiVersion =
      fields?.azureOpenAIApiVersion ??
      getEnvironmentVariable("AZURE_OPENAI_API_VERSION");

    this.azureOpenAIBasePath =
      fields?.azureOpenAIBasePath ??
      getEnvironmentVariable("AZURE_OPENAI_BASE_PATH");

    this.organization =
      fields?.configuration?.organization ??
      getEnvironmentVariable("OPENAI_ORGANIZATION");

    this.modelName = fields?.model ?? fields?.modelName ?? this.modelName;
    this.prefixMessages = fields?.prefixMessages ?? this.prefixMessages;
    this.modelKwargs = fields?.modelKwargs ?? {};
    this.timeout = fields?.timeout;

    this.temperature = fields?.temperature ?? this.temperature;
    this.topP = fields?.topP ?? this.topP;
    this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
    this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty;
    this.n = fields?.n ?? this.n;
    this.logitBias = fields?.logitBias;
    this.maxTokens = fields?.maxTokens;
    this.stop = fields?.stop;
    this.user = fields?.user;

    this.streaming = fields?.streaming ?? false;

    // This LLM wrapper flattens the chat response into a single string, so
    // multiple choices per prompt are not representable.
    if (this.n > 1) {
      throw new Error(
        "Cannot use n > 1 in OpenAIChat LLM. Use ChatOpenAI Chat Model instead."
      );
    }

    if (this.azureOpenAIApiKey) {
      if (!this.azureOpenAIApiInstanceName && !this.azureOpenAIBasePath) {
        throw new Error("Azure OpenAI API instance name not found");
      }
      if (!this.azureOpenAIApiDeploymentName) {
        throw new Error("Azure OpenAI API deployment name not found");
      }
      if (!this.azureOpenAIApiVersion) {
        throw new Error("Azure OpenAI API version not found");
      }
      // The SDK requires a (possibly empty) apiKey; Azure auth is supplied
      // via headers in `_getClientOptions` instead.
      this.openAIApiKey = this.openAIApiKey ?? "";
    }

    this.clientConfig = {
      apiKey: this.openAIApiKey,
      organization: this.organization,
      baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
      dangerouslyAllowBrowser: true,
      defaultHeaders:
        configuration?.baseOptions?.headers ??
        fields?.configuration?.baseOptions?.headers,
      defaultQuery:
        configuration?.baseOptions?.params ??
        fields?.configuration?.baseOptions?.params,
      ...configuration,
      ...fields?.configuration,
    };
  }

  /**
   * Get the parameters used to invoke the model
   */
  invocationParams(
    options?: this["ParsedCallOptions"]
  ): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages"> {
    return {
      model: this.modelName,
      temperature: this.temperature,
      top_p: this.topP,
      frequency_penalty: this.frequencyPenalty,
      presence_penalty: this.presencePenalty,
      n: this.n,
      logit_bias: this.logitBias,
      // -1 is treated as "no explicit limit" and omitted from the request.
      max_tokens: this.maxTokens === -1 ? undefined : this.maxTokens,
      stop: options?.stop ?? this.stop,
      user: this.user,
      stream: this.streaming,
      ...this.modelKwargs,
    };
  }

  /** @ignore */
  _identifyingParams(): Omit<
    OpenAIClient.Chat.ChatCompletionCreateParams,
    "messages"
  > & {
    model_name: string;
  } & ClientOptions {
    return {
      model_name: this.modelName,
      ...this.invocationParams(),
      ...this.clientConfig,
    };
  }

  /**
   * Get the identifying parameters for the model
   *
   * NOTE(review): body is identical to `_identifyingParams()` — consider
   * delegating to it in a future change.
   */
  identifyingParams(): Omit<
    OpenAIClient.Chat.ChatCompletionCreateParams,
    "messages"
  > & {
    model_name: string;
  } & ClientOptions {
    return {
      model_name: this.modelName,
      ...this.invocationParams(),
      ...this.clientConfig,
    };
  }

  /**
   * Formats the messages for the OpenAI API: the prompt becomes a single
   * user message, appended after any configured `prefixMessages`.
   * @param prompt The prompt to be formatted.
   * @returns Array of formatted messages.
   */
  private formatMessages(
    prompt: string
  ): OpenAIClient.Chat.ChatCompletionMessageParam[] {
    const message: OpenAIClient.Chat.ChatCompletionMessageParam = {
      role: "user",
      content: prompt,
    };
    return this.prefixMessages ? [...this.prefixMessages, message] : [message];
  }

  /**
   * Streams the completion for a prompt, yielding one GenerationChunk per
   * delta received from the API and notifying the run manager of each token.
   */
  async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const params = {
      ...this.invocationParams(options),
      messages: this.formatMessages(prompt),
      stream: true as const,
    };
    const stream = await this.completionWithRetry(params, options);
    for await (const data of stream) {
      const choice = data?.choices[0];
      if (!choice) {
        continue;
      }
      const { delta } = choice;
      const generationChunk = new GenerationChunk({
        text: delta.content ?? "",
      });
      yield generationChunk;
      const newTokenIndices = {
        prompt: options.promptIndex ?? 0,
        completion: choice.index ?? 0,
      };
      // Callback is intentionally fire-and-forget.
      // eslint-disable-next-line no-void
      void runManager?.handleLLMNewToken(
        generationChunk.text ?? "",
        newTokenIndices
      );
    }
    if (options.signal?.aborted) {
      throw new Error("AbortError");
    }
  }

  /** @ignore */
  async _call(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): Promise<string> {
    const params = this.invocationParams(options);

    if (params.stream) {
      // Streaming: concatenate all chunks into a single result string.
      const stream = await this._streamResponseChunks(
        prompt,
        options,
        runManager
      );
      let finalChunk: GenerationChunk | undefined;
      for await (const chunk of stream) {
        if (finalChunk === undefined) {
          finalChunk = chunk;
        } else {
          finalChunk = finalChunk.concat(chunk);
        }
      }
      return finalChunk?.text ?? "";
    } else {
      const response = await this.completionWithRetry(
        {
          ...params,
          stream: false,
          messages: this.formatMessages(prompt),
        },
        {
          signal: options.signal,
          ...options.options,
        }
      );
      return response?.choices[0]?.message?.content ?? "";
    }
  }

  /**
   * Calls the OpenAI API with retry logic in case of failures.
   * @param request The request to send to the OpenAI API.
   * @param options Optional configuration for the API call.
   * @returns The response from the OpenAI API.
   */
  async completionWithRetry(
    request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;

  async completionWithRetry(
    request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;

  async completionWithRetry(
    request:
      | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming
      | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<
    | AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>
    | OpenAIClient.Chat.Completions.ChatCompletion
  > {
    const requestOptions = this._getClientOptions(options);
    return this.caller.call(async () => {
      try {
        const res = await this.client.chat.completions.create(
          request,
          requestOptions
        );
        return res;
      } catch (e) {
        // Normalize SDK errors before rethrowing for consistent handling.
        const error = wrapOpenAIClientError(e);
        throw error;
      }
    });
  }

  /** @ignore */
  private _getClientOptions(options: OpenAICoreRequestOptions | undefined) {
    if (!this.client) {
      const openAIEndpointConfig: OpenAIEndpointConfig = {
        azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
        azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
        azureOpenAIApiKey: this.azureOpenAIApiKey,
        azureOpenAIBasePath: this.azureOpenAIBasePath,
        baseURL: this.clientConfig.baseURL,
      };

      const endpoint = getEndpoint(openAIEndpointConfig);

      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        // Retries are handled by `this.caller`; disable the SDK's built-in
        // retries so attempts are not multiplied.
        maxRetries: 0,
      };

      if (!params.baseURL) {
        delete params.baseURL;
      }

      this.client = new OpenAIClient(params);
    }
    const requestOptions = {
      ...this.clientConfig,
      ...options,
    } as OpenAICoreRequestOptions;
    if (this.azureOpenAIApiKey) {
      // Azure authenticates with an `api-key` header plus an `api-version`
      // query parameter rather than a bearer token.
      requestOptions.headers = {
        "api-key": this.azureOpenAIApiKey,
        ...requestOptions.headers,
      };
      requestOptions.query = {
        "api-version": this.azureOpenAIApiVersion,
        ...requestOptions.query,
      };
    }
    return requestOptions;
  }

  _llmType() {
    return "openai";
  }
}
0
lc_public_repos/langchainjs/libs/langchain-openai
lc_public_repos/langchainjs/libs/langchain-openai/src/index.ts
// Public entry point for @langchain/openai.
// Re-export the OpenAI SDK client and helpers so consumers do not need a
// direct dependency on the `openai` package.
export { OpenAI as OpenAIClient, type ClientOptions, toFile } from "openai";
// OpenAI and Azure OpenAI model integrations.
export * from "./chat_models.js";
export * from "./azure/chat_models.js";
export * from "./llms.js";
export * from "./azure/llms.js";
export * from "./azure/embeddings.js";
export * from "./embeddings.js";
// Shared types and utilities.
export * from "./types.js";
export * from "./utils/openai.js";
export * from "./utils/azure.js";
export * from "./tools/index.js";
export { convertPromptToOpenAI } from "./utils/prompts.js";
0
lc_public_repos/langchainjs/libs/langchain-openai
lc_public_repos/langchainjs/libs/langchain-openai/src/chat_models.ts
import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
  AIMessage,
  AIMessageChunk,
  type BaseMessage,
  ChatMessage,
  ChatMessageChunk,
  FunctionMessageChunk,
  HumanMessageChunk,
  SystemMessageChunk,
  ToolMessage,
  ToolMessageChunk,
  OpenAIToolCall,
  isAIMessage,
  UsageMetadata,
} from "@langchain/core/messages";
import {
  type ChatGeneration,
  ChatGenerationChunk,
  type ChatResult,
} from "@langchain/core/outputs";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import {
  BaseChatModel,
  BindToolsInput,
  LangSmithParams,
  type BaseChatModelParams,
} from "@langchain/core/language_models/chat_models";
import {
  isOpenAITool,
  type BaseFunctionCallOptions,
  type BaseLanguageModelInput,
  type FunctionDefinition,
  type StructuredOutputMethodOptions,
  type StructuredOutputMethodParams,
} from "@langchain/core/language_models/base";
import { NewTokenIndices } from "@langchain/core/callbacks/base";
import { z } from "zod";
import {
  Runnable,
  RunnablePassthrough,
  RunnableSequence,
} from "@langchain/core/runnables";
import {
  JsonOutputParser,
  StructuredOutputParser,
  type BaseLLMOutputParser,
} from "@langchain/core/output_parsers";
import {
  JsonOutputKeyToolsParser,
  convertLangChainToolCallToOpenAI,
  makeInvalidToolCall,
  parseToolCall,
} from "@langchain/core/output_parsers/openai_tools";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ToolCallChunk } from "@langchain/core/messages/tool";
import { zodResponseFormat } from "openai/helpers/zod";
import type {
  ResponseFormatText,
  ResponseFormatJSONObject,
  ResponseFormatJSONSchema,
} from "openai/resources/shared";
import type {
  AzureOpenAIInput,
  OpenAICallOptions,
  OpenAIChatInput,
  OpenAICoreRequestOptions,
  LegacyOpenAIInput,
  ChatOpenAIResponseFormat,
} from "./types.js";
import { type OpenAIEndpointConfig, getEndpoint } from "./utils/azure.js";
import {
  OpenAIToolChoice,
  formatToOpenAIToolChoice,
  wrapOpenAIClientError,
} from "./utils/openai.js";
import {
  FunctionDef,
  formatFunctionDefinitions,
} from "./utils/openai-format-fndef.js";
import { _convertToOpenAITool } from "./utils/tools.js";

export type { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput };

// Token usage counters reported by the API (all optional).
interface TokenUsage {
  completionTokens?: number;
  promptTokens?: number;
  totalTokens?: number;
}

interface OpenAILLMOutput {
  tokenUsage: TokenUsage;
}

// TODO import from SDK when available
type OpenAIRoleEnum = "system" | "assistant" | "user" | "function" | "tool";

type OpenAICompletionParam =
  OpenAIClient.Chat.Completions.ChatCompletionMessageParam;
type OpenAIFnDef = OpenAIClient.Chat.ChatCompletionCreateParams.Function;
type OpenAIFnCallOption = OpenAIClient.Chat.ChatCompletionFunctionCallOption;

/**
 * Passes through a generic ChatMessage's role, warning (but not throwing)
 * when it is not one of the known OpenAI roles.
 */
function extractGenericMessageCustomRole(message: ChatMessage) {
  if (
    message.role !== "system" &&
    message.role !== "assistant" &&
    message.role !== "user" &&
    message.role !== "function" &&
    message.role !== "tool"
  ) {
    console.warn(`Unknown message role: ${message.role}`);
  }
  return message.role as OpenAIRoleEnum;
}

/**
 * Maps a LangChain message type to the corresponding OpenAI role string.
 * Generic messages carry their own role; unknown types throw.
 */
export function messageToOpenAIRole(message: BaseMessage): OpenAIRoleEnum {
  const type = message._getType();
  switch (type) {
    case "system":
      return "system";
    case "ai":
      return "assistant";
    case "human":
      return "user";
    case "function":
      return "function";
    case "tool":
      return "tool";
    case "generic": {
      if (!ChatMessage.isInstance(message))
        throw new Error("Invalid generic chat message");
      return extractGenericMessageCustomRole(message);
    }
    default:
      throw new Error(`Unknown message type: ${type}`);
  }
}

/**
 * Converts a raw OpenAI chat completion message into a LangChain message.
 * Assistant messages become AIMessage (with parsed/invalid tool calls,
 * optional raw response, and response metadata); any other role falls back
 * to a generic ChatMessage.
 */
function openAIResponseToChatMessage(
  message: OpenAIClient.Chat.Completions.ChatCompletionMessage,
  rawResponse: OpenAIClient.Chat.Completions.ChatCompletion,
  includeRawResponse?: boolean
): BaseMessage {
  const rawToolCalls: OpenAIToolCall[] | undefined = message.tool_calls as
    | OpenAIToolCall[]
    | undefined;
  switch (message.role) {
    case "assistant": {
      const toolCalls = [];
      const invalidToolCalls = [];
      // Partition tool calls into parseable and invalid (bad JSON args etc.).
      for (const rawToolCall of rawToolCalls ?? []) {
        try {
          toolCalls.push(parseToolCall(rawToolCall, { returnId: true }));
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
        } catch (e: any) {
          invalidToolCalls.push(makeInvalidToolCall(rawToolCall, e.message));
        }
      }
      const additional_kwargs: Record<string, unknown> = {
        function_call: message.function_call,
        tool_calls: rawToolCalls,
      };
      if (includeRawResponse !== undefined) {
        additional_kwargs.__raw_response = rawResponse;
      }
      let response_metadata: Record<string, unknown> | undefined;
      // Metadata is only attached when the API reports a system fingerprint.
      if (rawResponse.system_fingerprint) {
        response_metadata = {
          usage: { ...rawResponse.usage },
          system_fingerprint: rawResponse.system_fingerprint,
        };
      }

      if (message.audio) {
        additional_kwargs.audio = message.audio;
      }

      return new AIMessage({
        content: message.content || "",
        tool_calls: toolCalls,
        invalid_tool_calls: invalidToolCalls,
        additional_kwargs,
        response_metadata,
        id: rawResponse.id,
      });
    }
    default:
      return new ChatMessage(message.content || "", message.role ?? "unknown");
  }
}

/**
 * Converts a streamed completion delta into the appropriate message chunk
 * class based on its role (falling back to `defaultRole` when the delta
 * omits one, as non-first chunks do).
 */
function _convertDeltaToMessageChunk(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  delta: Record<string, any>,
  rawResponse: OpenAIClient.Chat.Completions.ChatCompletionChunk,
  defaultRole?: OpenAIRoleEnum,
  includeRawResponse?: boolean
) {
  const role = delta.role ?? defaultRole;
  const content = delta.content ?? "";
  let additional_kwargs: Record<string, unknown>;
  if (delta.function_call) {
    additional_kwargs = {
      function_call: delta.function_call,
    };
  } else if (delta.tool_calls) {
    additional_kwargs = {
      tool_calls: delta.tool_calls,
    };
  } else {
    additional_kwargs = {};
  }
  if (includeRawResponse) {
    additional_kwargs.__raw_response = rawResponse;
  }

  if (delta.audio) {
    additional_kwargs.audio = {
      ...delta.audio,
      index: rawResponse.choices[0].index,
    };
  }

  const response_metadata = { usage: { ...rawResponse.usage } };
  if (role === "user") {
    return new HumanMessageChunk({ content, response_metadata });
  } else if (role === "assistant") {
    const toolCallChunks: ToolCallChunk[] = [];
    if (Array.isArray(delta.tool_calls)) {
      for (const rawToolCall of delta.tool_calls) {
        toolCallChunks.push({
          name: rawToolCall.function?.name,
          args: rawToolCall.function?.arguments,
          id: rawToolCall.id,
          index: rawToolCall.index,
          type: "tool_call_chunk",
        });
      }
    }
    return new AIMessageChunk({
      content,
      tool_call_chunks: toolCallChunks,
      additional_kwargs,
      id: rawResponse.id,
      response_metadata,
    });
  } else if (role === "system") {
    return new SystemMessageChunk({ content, response_metadata });
  } else if (role === "function") {
    return new FunctionMessageChunk({
      content,
      additional_kwargs,
      name: delta.name,
      response_metadata,
    });
  } else if (role === "tool") {
    return new ToolMessageChunk({
      content,
      additional_kwargs,
      tool_call_id: delta.tool_call_id,
      response_metadata,
    });
  } else {
    return new ChatMessageChunk({ content, role, response_metadata });
  }
}

// Used in LangSmith, export is important here
export function _convertMessagesToOpenAIParams(
  messages: BaseMessage[]
): OpenAICompletionParam[] {
  // TODO: Function messages do not support array content, fix cast
  return messages.flatMap((message) => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const completionParam: Record<string, any> = {
      role: messageToOpenAIRole(message),
      content: message.content,
    };
    if (message.name != null) {
      completionParam.name = message.name;
    }
    // Function/tool calls require content to be null per the OpenAI API.
    if (message.additional_kwargs.function_call != null) {
      completionParam.function_call = message.additional_kwargs.function_call;
      completionParam.content = null;
    }
    if (isAIMessage(message) && !!message.tool_calls?.length) {
      completionParam.tool_calls = message.tool_calls.map(
        convertLangChainToolCallToOpenAI
      );
      completionParam.content = null;
    } else {
      if (message.additional_kwargs.tool_calls != null) {
        completionParam.tool_calls = message.additional_kwargs.tool_calls;
      }
      if ((message as ToolMessage).tool_call_id != null) {
        completionParam.tool_call_id = (message as ToolMessage).tool_call_id;
      }
    }
    // A message carrying an audio id is split into the completion param plus
    // a separate assistant audio-reference message.
    if (
      message.additional_kwargs.audio &&
      typeof message.additional_kwargs.audio === "object" &&
      "id" in message.additional_kwargs.audio
    ) {
      const audioMessage = {
        role: "assistant",
        audio: {
          id: message.additional_kwargs.audio.id,
        },
      };
      return [completionParam, audioMessage] as OpenAICompletionParam[];
    }
    return completionParam as OpenAICompletionParam;
  });
}

type ChatOpenAIToolType = BindToolsInput | OpenAIClient.ChatCompletionTool;

/**
 * Normalizes a tool (LangChain tool or OpenAI tool schema) into the OpenAI
 * tool format, applying the `strict` flag when provided.
 */
function _convertChatOpenAIToolTypeToOpenAITool(
  tool: ChatOpenAIToolType,
  fields?: {
    strict?: boolean;
  }
): OpenAIClient.ChatCompletionTool {
  if (isOpenAITool(tool)) {
    if (fields?.strict !== undefined) {
      return {
        ...tool,
        function: {
          ...tool.function,
          strict: fields.strict,
        },
      };
    }

    return tool;
  }
  return _convertToOpenAITool(tool, fields);
}

// TODO: Use the base structured output options param in next breaking release.
export interface ChatOpenAIStructuredOutputMethodOptions<
  IncludeRaw extends boolean
> extends StructuredOutputMethodOptions<IncludeRaw> {
  /**
   * strict: If `true` and `method` = "function_calling", model output is
   * guaranteed to exactly match the schema. If `true`, the input schema
   * will also be validated according to
   * https://platform.openai.com/docs/guides/structured-outputs/supported-schemas.
   * If `false`, input schema will not be validated and model output will not
   * be validated.
   * If `undefined`, `strict` argument will not be passed to the model.
   *
   * @version 0.2.6
   * @note Planned breaking change in version `0.3.0`:
   * `strict` will default to `true` when `method` is
   * "function_calling" as of version `0.3.0`.
   */
  strict?: boolean;
}

export interface ChatOpenAICallOptions
  extends OpenAICallOptions,
    BaseFunctionCallOptions {
  tools?: ChatOpenAIToolType[];
  tool_choice?: OpenAIToolChoice;
  promptIndex?: number;
  response_format?: ChatOpenAIResponseFormat;
  seed?: number;
  /**
   * Additional options to pass to streamed completions.
   * If provided takes precedence over "streamUsage" set at initialization time.
   */
  stream_options?: {
    /**
     * Whether or not to include token usage in the stream.
     * If set to `true`, this will include an additional
     * chunk at the end of the stream with the token usage.
     */
    include_usage: boolean;
  };
  /**
   * Whether or not to restrict the ability to
   * call multiple tools in one response.
   */
  parallel_tool_calls?: boolean;
  /**
   * If `true`, model output is guaranteed to exactly match the JSON Schema
   * provided in the tool definition. If `true`, the input schema will also be
   * validated according to
   * https://platform.openai.com/docs/guides/structured-outputs/supported-schemas.
   *
   * If `false`, input schema will not be validated and model output will not
   * be validated.
   *
   * If `undefined`, `strict` argument will not be passed to the model.
   *
   * @version 0.2.6
   */
  strict?: boolean;
  /**
   * Output types that you would like the model to generate for this request. Most
   * models are capable of generating text, which is the default:
   *
   * `["text"]`
   *
   * The `gpt-4o-audio-preview` model can also be used to
   * [generate audio](https://platform.openai.com/docs/guides/audio). To request that
   * this model generate both text and audio responses, you can use:
   *
   * `["text", "audio"]`
   */
  modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;
  /**
   * Parameters for audio output. Required when audio output is requested with
   * `modalities: ["audio"]`.
   * [Learn more](https://platform.openai.com/docs/guides/audio).
   */
  audio?: OpenAIClient.Chat.ChatCompletionAudioParam;
  /**
   * Static predicted output content, such as the content of a text file that is being regenerated.
   * [Learn more](https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs).
   */
  prediction?: OpenAIClient.ChatCompletionPredictionContent;
}

export interface ChatOpenAIFields
  extends Partial<OpenAIChatInput>,
    Partial<AzureOpenAIInput>,
    BaseChatModelParams {
  configuration?: ClientOptions & LegacyOpenAIInput;
}

/**
 * OpenAI chat model integration.
 *
 * Setup:
 * Install `@langchain/openai` and set an environment variable named `OPENAI_API_KEY`.
 *
 * ```bash
 * npm install @langchain/openai
 * export OPENAI_API_KEY="your-api-key"
 * ```
 *
 * ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html#constructor)
 *
 * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html)
 *
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.
 * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
 *
 * ```typescript
 * // When calling `.bind`, call options should be passed via the first argument
 * const llmWithArgsBound = llm.bind({
 *   stop: ["\n"],
 *   tools: [...],
 * });
 *
 * // When calling `.bindTools`, call options should be passed via the second argument
 * const llmWithTools = llm.bindTools(
 *   [...],
 *   {
 *     tool_choice: "auto",
 *   }
 * );
 * ```
 *
 * ## Examples
 *
 * <details open>
 * <summary><strong>Instantiate</strong></summary>
 *
 * ```typescript
 * import { ChatOpenAI } from '@langchain/openai';
 *
 * const llm = new ChatOpenAI({
 *   model: "gpt-4o",
 *   temperature: 0,
 *   maxTokens: undefined,
 *   timeout: undefined,
 *   maxRetries: 2,
 *   // apiKey: "...",
 *   // baseUrl: "...",
 *   // organization: "...",
 *   // other params...
* }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Invoking</strong></summary> * * ```typescript * const input = `Translate "I love programming" into French.`; * * // Models also accept a list of chat messages or a formatted prompt * const result = await llm.invoke(input); * console.log(result); * ``` * * ```txt * AIMessage { * "id": "chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz", * "content": "J'adore la programmation.", * "response_metadata": { * "tokenUsage": { * "completionTokens": 5, * "promptTokens": 28, * "totalTokens": 33 * }, * "finish_reason": "stop", * "system_fingerprint": "fp_3aa7262c27" * }, * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Streaming Chunks</strong></summary> * * ```typescript * for await (const chunk of await llm.stream(input)) { * console.log(chunk); * } * ``` * * ```txt * AIMessageChunk { * "id": "chatcmpl-9u4NWB7yUeHCKdLr6jP3HpaOYHTqs", * "content": "" * } * AIMessageChunk { * "content": "J" * } * AIMessageChunk { * "content": "'adore" * } * AIMessageChunk { * "content": " la" * } * AIMessageChunk { * "content": " programmation",, * } * AIMessageChunk { * "content": ".",, * } * AIMessageChunk { * "content": "", * "response_metadata": { * "finish_reason": "stop", * "system_fingerprint": "fp_c9aa9c0491" * }, * } * AIMessageChunk { * "content": "", * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Aggregate Streamed Chunks</strong></summary> * * ```typescript * import { AIMessageChunk } from '@langchain/core/messages'; * import { concat } from '@langchain/core/utils/stream'; * * const stream = await llm.stream(input); * let full: AIMessageChunk | undefined; * for await (const chunk of stream) { * full = !full ? 
chunk : concat(full, chunk); * } * console.log(full); * ``` * * ```txt * AIMessageChunk { * "id": "chatcmpl-9u4PnX6Fy7OmK46DASy0bH6cxn5Xu", * "content": "J'adore la programmation.", * "response_metadata": { * "prompt": 0, * "completion": 0, * "finish_reason": "stop", * }, * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Bind tools</strong></summary> * * ```typescript * import { z } from 'zod'; * * const GetWeather = { * name: "GetWeather", * description: "Get the current weather in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const GetPopulation = { * name: "GetPopulation", * description: "Get the current population in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const llmWithTools = llm.bindTools( * [GetWeather, GetPopulation], * { * // strict: true // enforce tool args schema is respected * } * ); * const aiMsg = await llmWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?" 
* ); * console.log(aiMsg.tool_calls); * ``` * * ```txt * [ * { * name: 'GetWeather', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call', * id: 'call_uPU4FiFzoKAtMxfmPnfQL6UK' * }, * { * name: 'GetWeather', * args: { location: 'New York, NY' }, * type: 'tool_call', * id: 'call_UNkEwuQsHrGYqgDQuH9nPAtX' * }, * { * name: 'GetPopulation', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call', * id: 'call_kL3OXxaq9OjIKqRTpvjaCH14' * }, * { * name: 'GetPopulation', * args: { location: 'New York, NY' }, * type: 'tool_call', * id: 'call_s9KQB1UWj45LLGaEnjz0179q' * } * ] * ``` * </details> * * <br /> * * <details> * <summary><strong>Structured Output</strong></summary> * * ```typescript * import { z } from 'zod'; * * const Joke = z.object({ * setup: z.string().describe("The setup of the joke"), * punchline: z.string().describe("The punchline to the joke"), * rating: z.number().optional().describe("How funny the joke is, from 1 to 10") * }).describe('Joke to tell user.'); * * const structuredLlm = llm.withStructuredOutput(Joke, { * name: "Joke", * strict: true, // Optionally enable OpenAI structured outputs * }); * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); * console.log(jokeResult); * ``` * * ```txt * { * setup: 'Why was the cat sitting on the computer?', * punchline: 'Because it wanted to keep an eye on the mouse!', * rating: 7 * } * ``` * </details> * * <br /> * * <details> * <summary><strong>JSON Object Response Format</strong></summary> * * ```typescript * const jsonLlm = llm.bind({ response_format: { type: "json_object" } }); * const jsonLlmAiMsg = await jsonLlm.invoke( * "Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]" * ); * console.log(jsonLlmAiMsg.content); * ``` * * ```txt * { * "randomInts": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67] * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Multimodal</strong></summary> * * ```typescript * import { HumanMessage } from 
'@langchain/core/messages'; * * const imageUrl = "https://example.com/image.jpg"; * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer()); * const base64Image = Buffer.from(imageData).toString('base64'); * * const message = new HumanMessage({ * content: [ * { type: "text", text: "describe the weather in this image" }, * { * type: "image_url", * image_url: { url: `data:image/jpeg;base64,${base64Image}` }, * }, * ] * }); * * const imageDescriptionAiMsg = await llm.invoke([message]); * console.log(imageDescriptionAiMsg.content); * ``` * * ```txt * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions. * ``` * </details> * * <br /> * * <details> * <summary><strong>Usage Metadata</strong></summary> * * ```typescript * const aiMsgForMetadata = await llm.invoke(input); * console.log(aiMsgForMetadata.usage_metadata); * ``` * * ```txt * { input_tokens: 28, output_tokens: 5, total_tokens: 33 } * ``` * </details> * * <br /> * * <details> * <summary><strong>Logprobs</strong></summary> * * ```typescript * const logprobsLlm = new ChatOpenAI({ logprobs: true }); * const aiMsgForLogprobs = await logprobsLlm.invoke(input); * console.log(aiMsgForLogprobs.response_metadata.logprobs); * ``` * * ```txt * { * content: [ * { * token: 'J', * logprob: -0.000050616763, * bytes: [Array], * top_logprobs: [] * }, * { * token: "'", * logprob: -0.01868736, * bytes: [Array], * top_logprobs: [] * }, * { * token: 'ad', * logprob: -0.0000030545007, * bytes: [Array], * top_logprobs: [] * }, * { token: 'ore', logprob: 0, bytes: [Array], top_logprobs: [] }, * { * token: ' la', * logprob: -0.515404, * bytes: [Array], * top_logprobs: [] * }, * { * token: ' programm', * logprob: -0.0000118755715, * bytes: [Array], * top_logprobs: [] * }, * 
{ token: 'ation', logprob: 0, bytes: [Array], top_logprobs: [] }, * { * token: '.', * logprob: -0.0000037697225, * bytes: [Array], * top_logprobs: [] * } * ], * refusal: null * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Response Metadata</strong></summary> * * ```typescript * const aiMsgForResponseMetadata = await llm.invoke(input); * console.log(aiMsgForResponseMetadata.response_metadata); * ``` * * ```txt * { * tokenUsage: { completionTokens: 5, promptTokens: 28, totalTokens: 33 }, * finish_reason: 'stop', * system_fingerprint: 'fp_3aa7262c27' * } * ``` * </details> * * <br /> * * <details> * <summary><strong>JSON Schema Structured Output</strong></summary> * * ```typescript * const llmForJsonSchema = new ChatOpenAI({ * model: "gpt-4o-2024-08-06", * }).withStructuredOutput( * z.object({ * command: z.string().describe("The command to execute"), * expectedOutput: z.string().describe("The expected output of the command"), * options: z * .array(z.string()) * .describe("The options you can pass to the command"), * }), * { * method: "jsonSchema", * strict: true, // Optional when using the `jsonSchema` method * } * ); * * const jsonSchemaRes = await llmForJsonSchema.invoke( * "What is the command to list files in a directory?" * ); * console.log(jsonSchemaRes); * ``` * * ```txt * { * command: 'ls', * expectedOutput: 'A list of files and subdirectories within the specified directory.', * options: [ * '-a: include directory entries whose names begin with a dot (.).', * '-l: use a long listing format.', * '-h: with -l, print sizes in human readable format (e.g., 1K, 234M, 2G).', * '-t: sort by time, newest first.', * '-r: reverse order while sorting.', * '-S: sort by file size, largest first.', * '-R: list subdirectories recursively.' 
 * ]
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Audio Outputs (message content)</strong></summary>
 *
 * ```typescript
 * import { ChatOpenAI } from "@langchain/openai";
 *
 * const modelWithAudioOutput = new ChatOpenAI({
 *   model: "gpt-4o-audio-preview",
 *   // You may also pass these fields to `.bind` as a call argument.
 *   modalities: ["text", "audio"], // Specifies that the model should output audio.
 *   audio: {
 *     voice: "alloy",
 *     format: "wav",
 *   },
 * });
 *
 * const audioOutputResult = await modelWithAudioOutput.invoke("Tell me a joke about cats.");
 * const castMessageContent = audioOutputResult.content[0] as Record<string, any>;
 *
 * console.log({
 *   ...castMessageContent,
 *   data: castMessageContent.data.slice(0, 100) // Sliced for brevity
 * })
 * ```
 *
 * ```txt
 * {
 *   id: 'audio_67117718c6008190a3afad3e3054b9b6',
 *   data: 'UklGRqYwBgBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGFg',
 *   expires_at: 1729201448,
 *   transcript: 'Sure! Why did the cat sit on the computer? Because it wanted to keep an eye on the mouse!'
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Audio Outputs (additional_kwargs)</strong></summary>
 *
 * ```typescript
 * import { ChatOpenAI } from "@langchain/openai";
 *
 * const modelWithAudioOutput = new ChatOpenAI({
 *   model: "gpt-4o-audio-preview",
 *   // You may also pass these fields to `.bind` as a call argument.
 *   modalities: ["text", "audio"], // Specifies that the model should output audio.
* audio: { * voice: "alloy", * format: "wav", * }, * }); * * const audioOutputResult = await modelWithAudioOutput.invoke("Tell me a joke about cats."); * const castAudioContent = audioOutputResult.additional_kwargs.audio as Record<string, any>; * * console.log({ * ...castAudioContent, * data: castAudioContent.data.slice(0, 100) // Sliced for brevity * }) * ``` * * ```txt * { * id: 'audio_67117718c6008190a3afad3e3054b9b6', * data: 'UklGRqYwBgBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGFg', * expires_at: 1729201448, * transcript: 'Sure! Why did the cat sit on the computer? Because it wanted to keep an eye on the mouse!' * } * ``` * </details> * * <br /> */ export class ChatOpenAI< CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions > extends BaseChatModel<CallOptions, AIMessageChunk> implements OpenAIChatInput, AzureOpenAIInput { static lc_name() { return "ChatOpenAI"; } get callKeys() { return [ ...super.callKeys, "options", "function_call", "functions", "tools", "tool_choice", "promptIndex", "response_format", "seed", ]; } lc_serializable = true; get lc_secrets(): { [key: string]: string } | undefined { return { openAIApiKey: "OPENAI_API_KEY", apiKey: "OPENAI_API_KEY", azureOpenAIApiKey: "AZURE_OPENAI_API_KEY", organization: "OPENAI_ORGANIZATION", }; } get lc_aliases(): Record<string, string> { return { modelName: "model", openAIApiKey: "openai_api_key", apiKey: "openai_api_key", azureOpenAIApiVersion: "azure_openai_api_version", azureOpenAIApiKey: "azure_openai_api_key", azureOpenAIApiInstanceName: "azure_openai_api_instance_name", azureOpenAIApiDeploymentName: "azure_openai_api_deployment_name", }; } temperature = 1; topP = 1; frequencyPenalty = 0; presencePenalty = 0; n = 1; logitBias?: Record<string, number>; modelName = "gpt-3.5-turbo"; model = "gpt-3.5-turbo"; modelKwargs?: OpenAIChatInput["modelKwargs"]; stop?: string[]; stopSequences?: string[]; user?: string; timeout?: number; streaming = false; 
streamUsage = true; maxTokens?: number; logprobs?: boolean; topLogprobs?: number; openAIApiKey?: string; apiKey?: string; azureOpenAIApiVersion?: string; azureOpenAIApiKey?: string; azureADTokenProvider?: () => Promise<string>; azureOpenAIApiInstanceName?: string; azureOpenAIApiDeploymentName?: string; azureOpenAIBasePath?: string; azureOpenAIEndpoint?: string; organization?: string; __includeRawResponse?: boolean; protected client: OpenAIClient; protected clientConfig: ClientOptions; /** * Whether the model supports the `strict` argument when passing in tools. * If `undefined` the `strict` argument will not be passed to OpenAI. */ supportsStrictToolCalling?: boolean; audio?: OpenAIClient.Chat.ChatCompletionAudioParam; modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>; constructor( fields?: ChatOpenAIFields, /** @deprecated */ configuration?: ClientOptions & LegacyOpenAIInput ) { super(fields ?? {}); this.openAIApiKey = fields?.apiKey ?? fields?.openAIApiKey ?? fields?.configuration?.apiKey ?? getEnvironmentVariable("OPENAI_API_KEY"); this.apiKey = this.openAIApiKey; this.azureOpenAIApiKey = fields?.azureOpenAIApiKey ?? getEnvironmentVariable("AZURE_OPENAI_API_KEY"); this.azureADTokenProvider = fields?.azureADTokenProvider ?? undefined; if (!this.azureOpenAIApiKey && !this.apiKey && !this.azureADTokenProvider) { throw new Error( "OpenAI or Azure OpenAI API key or Token Provider not found" ); } this.azureOpenAIApiInstanceName = fields?.azureOpenAIApiInstanceName ?? getEnvironmentVariable("AZURE_OPENAI_API_INSTANCE_NAME"); this.azureOpenAIApiDeploymentName = fields?.azureOpenAIApiDeploymentName ?? getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME"); this.azureOpenAIApiVersion = fields?.azureOpenAIApiVersion ?? getEnvironmentVariable("AZURE_OPENAI_API_VERSION"); this.azureOpenAIBasePath = fields?.azureOpenAIBasePath ?? getEnvironmentVariable("AZURE_OPENAI_BASE_PATH"); this.organization = fields?.configuration?.organization ?? 
getEnvironmentVariable("OPENAI_ORGANIZATION"); this.azureOpenAIEndpoint = fields?.azureOpenAIEndpoint ?? getEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); this.modelName = fields?.model ?? fields?.modelName ?? this.model; this.model = this.modelName; this.modelKwargs = fields?.modelKwargs ?? {}; this.timeout = fields?.timeout; this.temperature = fields?.temperature ?? this.temperature; this.topP = fields?.topP ?? this.topP; this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty; this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty; this.maxTokens = fields?.maxTokens; this.logprobs = fields?.logprobs; this.topLogprobs = fields?.topLogprobs; this.n = fields?.n ?? this.n; this.logitBias = fields?.logitBias; this.stop = fields?.stopSequences ?? fields?.stop; this.stopSequences = this?.stop; this.user = fields?.user; this.__includeRawResponse = fields?.__includeRawResponse; this.audio = fields?.audio; this.modalities = fields?.modalities; if (this.azureOpenAIApiKey || this.azureADTokenProvider) { if ( !this.azureOpenAIApiInstanceName && !this.azureOpenAIBasePath && !this.azureOpenAIEndpoint ) { throw new Error("Azure OpenAI API instance name not found"); } if (!this.azureOpenAIApiDeploymentName && this.azureOpenAIBasePath) { const parts = this.azureOpenAIBasePath.split("/openai/deployments/"); if (parts.length === 2) { const [, deployment] = parts; this.azureOpenAIApiDeploymentName = deployment; } } if (!this.azureOpenAIApiDeploymentName) { throw new Error("Azure OpenAI API deployment name not found"); } if (!this.azureOpenAIApiVersion) { throw new Error("Azure OpenAI API version not found"); } this.apiKey = this.apiKey ?? ""; // Streaming usage is not supported by Azure deployments, so default to false this.streamUsage = false; } this.streaming = fields?.streaming ?? false; this.streamUsage = fields?.streamUsage ?? 
this.streamUsage; this.clientConfig = { apiKey: this.apiKey, organization: this.organization, baseURL: configuration?.basePath ?? fields?.configuration?.basePath, dangerouslyAllowBrowser: true, defaultHeaders: configuration?.baseOptions?.headers ?? fields?.configuration?.baseOptions?.headers, defaultQuery: configuration?.baseOptions?.params ?? fields?.configuration?.baseOptions?.params, ...configuration, ...fields?.configuration, }; // If `supportsStrictToolCalling` is explicitly set, use that value. // Else leave undefined so it's not passed to OpenAI. if (fields?.supportsStrictToolCalling !== undefined) { this.supportsStrictToolCalling = fields.supportsStrictToolCalling; } } getLsParams(options: this["ParsedCallOptions"]): LangSmithParams { const params = this.invocationParams(options); return { ls_provider: "openai", ls_model_name: this.model, ls_model_type: "chat", ls_temperature: params.temperature ?? undefined, ls_max_tokens: params.max_tokens ?? undefined, ls_stop: options.stop, }; } override bindTools( tools: ChatOpenAIToolType[], kwargs?: Partial<CallOptions> ): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions> { let strict: boolean | undefined; if (kwargs?.strict !== undefined) { strict = kwargs.strict; } else if (this.supportsStrictToolCalling !== undefined) { strict = this.supportsStrictToolCalling; } return this.bind({ tools: tools.map((tool) => _convertChatOpenAIToolTypeToOpenAITool(tool, { strict }) ), ...kwargs, } as Partial<CallOptions>); } private createResponseFormat( resFormat?: CallOptions["response_format"] ): | ResponseFormatText | ResponseFormatJSONObject | ResponseFormatJSONSchema | undefined { if ( resFormat && resFormat.type === "json_schema" && resFormat.json_schema.schema && isZodSchema(resFormat.json_schema.schema) ) { return zodResponseFormat( resFormat.json_schema.schema, resFormat.json_schema.name, { description: resFormat.json_schema.description, } ); } return resFormat as | ResponseFormatText | ResponseFormatJSONObject 
| ResponseFormatJSONSchema | undefined; } /** * Get the parameters used to invoke the model */ invocationParams( options?: this["ParsedCallOptions"], extra?: { streaming?: boolean; } ): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages"> { let strict: boolean | undefined; if (options?.strict !== undefined) { strict = options.strict; } else if (this.supportsStrictToolCalling !== undefined) { strict = this.supportsStrictToolCalling; } let streamOptionsConfig = {}; if (options?.stream_options !== undefined) { streamOptionsConfig = { stream_options: options.stream_options }; } else if (this.streamUsage && (this.streaming || extra?.streaming)) { streamOptionsConfig = { stream_options: { include_usage: true } }; } const params: Omit< OpenAIClient.Chat.ChatCompletionCreateParams, "messages" > = { model: this.model, temperature: this.temperature, top_p: this.topP, frequency_penalty: this.frequencyPenalty, presence_penalty: this.presencePenalty, max_tokens: this.maxTokens === -1 ? undefined : this.maxTokens, logprobs: this.logprobs, top_logprobs: this.topLogprobs, n: this.n, logit_bias: this.logitBias, stop: options?.stop ?? this.stopSequences, user: this.user, // if include_usage is set or streamUsage then stream must be set to true. stream: this.streaming, functions: options?.functions, function_call: options?.function_call, tools: options?.tools?.length ? options.tools.map((tool) => _convertChatOpenAIToolTypeToOpenAITool(tool, { strict }) ) : undefined, tool_choice: formatToOpenAIToolChoice(options?.tool_choice), response_format: this.createResponseFormat(options?.response_format), seed: options?.seed, ...streamOptionsConfig, parallel_tool_calls: options?.parallel_tool_calls, ...(this.audio || options?.audio ? { audio: this.audio || options?.audio } : {}), ...(this.modalities || options?.modalities ? 
{ modalities: this.modalities || options?.modalities } : {}), ...this.modelKwargs, }; if (options?.prediction !== undefined) { params.prediction = options.prediction; } return params; } /** @ignore */ _identifyingParams(): Omit< OpenAIClient.Chat.ChatCompletionCreateParams, "messages" > & { model_name: string; } & ClientOptions { return { model_name: this.model, ...this.invocationParams(), ...this.clientConfig, }; } async *_streamResponseChunks( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): AsyncGenerator<ChatGenerationChunk> { const messagesMapped: OpenAICompletionParam[] = _convertMessagesToOpenAIParams(messages); const params = { ...this.invocationParams(options, { streaming: true, }), messages: messagesMapped, stream: true as const, }; let defaultRole: OpenAIRoleEnum | undefined; const streamIterable = await this.completionWithRetry(params, options); let usage: OpenAIClient.Completions.CompletionUsage | undefined; for await (const data of streamIterable) { const choice = data?.choices?.[0]; if (data.usage) { usage = data.usage; } if (!choice) { continue; } const { delta } = choice; if (!delta) { continue; } const chunk = _convertDeltaToMessageChunk( delta, data, defaultRole, this.__includeRawResponse ); defaultRole = delta.role ?? defaultRole; const newTokenIndices = { prompt: options.promptIndex ?? 0, completion: choice.index ?? 0, }; if (typeof chunk.content !== "string") { console.log( "[WARNING]: Received non-string content from OpenAI. This is currently not supported." 
); continue; } // eslint-disable-next-line @typescript-eslint/no-explicit-any const generationInfo: Record<string, any> = { ...newTokenIndices }; if (choice.finish_reason != null) { generationInfo.finish_reason = choice.finish_reason; // Only include system fingerprint in the last chunk for now // to avoid concatenation issues generationInfo.system_fingerprint = data.system_fingerprint; } if (this.logprobs) { generationInfo.logprobs = choice.logprobs; } const generationChunk = new ChatGenerationChunk({ message: chunk, text: chunk.content, generationInfo, }); yield generationChunk; await runManager?.handleLLMNewToken( generationChunk.text ?? "", newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk } ); } if (usage) { const inputTokenDetails = { ...(usage.prompt_tokens_details?.audio_tokens !== null && { audio: usage.prompt_tokens_details?.audio_tokens, }), ...(usage.prompt_tokens_details?.cached_tokens !== null && { cache_read: usage.prompt_tokens_details?.cached_tokens, }), }; const outputTokenDetails = { ...(usage.completion_tokens_details?.audio_tokens !== null && { audio: usage.completion_tokens_details?.audio_tokens, }), ...(usage.completion_tokens_details?.reasoning_tokens !== null && { reasoning: usage.completion_tokens_details?.reasoning_tokens, }), }; const generationChunk = new ChatGenerationChunk({ message: new AIMessageChunk({ content: "", response_metadata: { usage: { ...usage }, }, usage_metadata: { input_tokens: usage.prompt_tokens, output_tokens: usage.completion_tokens, total_tokens: usage.total_tokens, ...(Object.keys(inputTokenDetails).length > 0 && { input_token_details: inputTokenDetails, }), ...(Object.keys(outputTokenDetails).length > 0 && { output_token_details: outputTokenDetails, }), }, }), text: "", }); yield generationChunk; } if (options.signal?.aborted) { throw new Error("AbortError"); } } /** * Get the identifying parameters for the model * */ identifyingParams() { return this._identifyingParams(); } /** @ignore 
*/ async _generate( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { const usageMetadata = {} as UsageMetadata; const params = this.invocationParams(options); const messagesMapped: OpenAICompletionParam[] = _convertMessagesToOpenAIParams(messages); if (params.stream) { const stream = this._streamResponseChunks(messages, options, runManager); const finalChunks: Record<number, ChatGenerationChunk> = {}; for await (const chunk of stream) { chunk.message.response_metadata = { ...chunk.generationInfo, ...chunk.message.response_metadata, }; const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0; if (finalChunks[index] === undefined) { finalChunks[index] = chunk; } else { finalChunks[index] = finalChunks[index].concat(chunk); } } const generations = Object.entries(finalChunks) .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10)) .map(([_, value]) => value); const { functions, function_call } = this.invocationParams(options); // OpenAI does not support token usage report under stream mode, // fallback to estimation. 
const promptTokenUsage = await this.getEstimatedTokenCountFromPrompt( messages, functions, function_call ); const completionTokenUsage = await this.getNumTokensFromGenerations( generations ); usageMetadata.input_tokens = promptTokenUsage; usageMetadata.output_tokens = completionTokenUsage; usageMetadata.total_tokens = promptTokenUsage + completionTokenUsage; return { generations, llmOutput: { estimatedTokenUsage: { promptTokens: usageMetadata.input_tokens, completionTokens: usageMetadata.output_tokens, totalTokens: usageMetadata.total_tokens, }, }, }; } else { let data; if ( options.response_format && options.response_format.type === "json_schema" ) { data = await this.betaParsedCompletionWithRetry( { ...params, stream: false, messages: messagesMapped, }, { signal: options?.signal, ...options?.options, } ); } else { data = await this.completionWithRetry( { ...params, stream: false, messages: messagesMapped, }, { signal: options?.signal, ...options?.options, } ); } const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, prompt_tokens_details: promptTokensDetails, completion_tokens_details: completionTokensDetails, } = data?.usage ?? {}; if (completionTokens) { usageMetadata.output_tokens = (usageMetadata.output_tokens ?? 0) + completionTokens; } if (promptTokens) { usageMetadata.input_tokens = (usageMetadata.input_tokens ?? 0) + promptTokens; } if (totalTokens) { usageMetadata.total_tokens = (usageMetadata.total_tokens ?? 
0) + totalTokens; } if ( promptTokensDetails?.audio_tokens !== null || promptTokensDetails?.cached_tokens !== null ) { usageMetadata.input_token_details = { ...(promptTokensDetails?.audio_tokens !== null && { audio: promptTokensDetails?.audio_tokens, }), ...(promptTokensDetails?.cached_tokens !== null && { cache_read: promptTokensDetails?.cached_tokens, }), }; } if ( completionTokensDetails?.audio_tokens !== null || completionTokensDetails?.reasoning_tokens !== null ) { usageMetadata.output_token_details = { ...(completionTokensDetails?.audio_tokens !== null && { audio: completionTokensDetails?.audio_tokens, }), ...(completionTokensDetails?.reasoning_tokens !== null && { reasoning: completionTokensDetails?.reasoning_tokens, }), }; } const generations: ChatGeneration[] = []; for (const part of data?.choices ?? []) { const text = part.message?.content ?? ""; const generation: ChatGeneration = { text, message: openAIResponseToChatMessage( part.message ?? { role: "assistant" }, data, this.__includeRawResponse ), }; generation.generationInfo = { ...(part.finish_reason ? { finish_reason: part.finish_reason } : {}), ...(part.logprobs ? { logprobs: part.logprobs } : {}), }; if (isAIMessage(generation.message)) { generation.message.usage_metadata = usageMetadata; } // Fields are not serialized unless passed to the constructor // Doing this ensures all fields on the message are serialized generation.message = new AIMessage({ ...generation.message, }); generations.push(generation); } return { generations, llmOutput: { tokenUsage: { promptTokens: usageMetadata.input_tokens, completionTokens: usageMetadata.output_tokens, totalTokens: usageMetadata.total_tokens, }, }, }; } } /** * Estimate the number of tokens a prompt will use. 
* Modified from: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts */ private async getEstimatedTokenCountFromPrompt( messages: BaseMessage[], functions?: OpenAIFnDef[], function_call?: "none" | "auto" | OpenAIFnCallOption ): Promise<number> { // It appears that if functions are present, the first system message is padded with a trailing newline. This // was inferred by trying lots of combinations of messages and functions and seeing what the token counts were. let tokens = (await this.getNumTokensFromMessages(messages)).totalCount; // If there are functions, add the function definitions as they count towards token usage if (functions && function_call !== "auto") { const promptDefinitions = formatFunctionDefinitions( functions as unknown as FunctionDef[] ); tokens += await this.getNumTokens(promptDefinitions); tokens += 9; // Add nine per completion } // If there's a system message _and_ functions are present, subtract four tokens. I assume this is because // functions typically add a system message, but reuse the first one if it's already there. This offsets // the extra 9 tokens added by the function definitions. if (functions && messages.find((m) => m._getType() === "system")) { tokens -= 4; } // If function_call is 'none', add one token. // If it's a FunctionCall object, add 4 + the number of tokens in the function name. // If it's undefined or 'auto', don't add anything. if (function_call === "none") { tokens += 1; } else if (typeof function_call === "object") { tokens += (await this.getNumTokens(function_call.name)) + 4; } return tokens; } /** * Estimate the number of tokens an array of generations have used. 
*/ private async getNumTokensFromGenerations(generations: ChatGeneration[]) { const generationUsages = await Promise.all( generations.map(async (generation) => { if (generation.message.additional_kwargs?.function_call) { return (await this.getNumTokensFromMessages([generation.message])) .countPerMessage[0]; } else { return await this.getNumTokens(generation.message.content); } }) ); return generationUsages.reduce((a, b) => a + b, 0); } async getNumTokensFromMessages(messages: BaseMessage[]) { let totalCount = 0; let tokensPerMessage = 0; let tokensPerName = 0; // From: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb if (this.model === "gpt-3.5-turbo-0301") { tokensPerMessage = 4; tokensPerName = -1; } else { tokensPerMessage = 3; tokensPerName = 1; } const countPerMessage = await Promise.all( messages.map(async (message) => { const textCount = await this.getNumTokens(message.content); const roleCount = await this.getNumTokens(messageToOpenAIRole(message)); const nameCount = message.name !== undefined ? 
tokensPerName + (await this.getNumTokens(message.name)) : 0; let count = textCount + tokensPerMessage + roleCount + nameCount; // From: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts messageTokenEstimate const openAIMessage = message; if (openAIMessage._getType() === "function") { count -= 2; } if (openAIMessage.additional_kwargs?.function_call) { count += 3; } if (openAIMessage?.additional_kwargs.function_call?.name) { count += await this.getNumTokens( openAIMessage.additional_kwargs.function_call?.name ); } if (openAIMessage.additional_kwargs.function_call?.arguments) { try { count += await this.getNumTokens( // Remove newlines and spaces JSON.stringify( JSON.parse( openAIMessage.additional_kwargs.function_call?.arguments ) ) ); } catch (error) { console.error( "Error parsing function arguments", error, JSON.stringify(openAIMessage.additional_kwargs.function_call) ); count += await this.getNumTokens( openAIMessage.additional_kwargs.function_call?.arguments ); } } totalCount += count; return count; }) ); totalCount += 3; // every reply is primed with <|start|>assistant<|message|> return { totalCount, countPerMessage }; } /** * Calls the OpenAI API with retry logic in case of failures. * @param request The request to send to the OpenAI API. * @param options Optional configuration for the API call. * @returns The response from the OpenAI API. 
   */
  async completionWithRetry(
    request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;

  async completionWithRetry(
    request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;

  async completionWithRetry(
    request:
      | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming
      | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAICoreRequestOptions
  ): Promise<
    | AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>
    | OpenAIClient.Chat.Completions.ChatCompletion
  > {
    // Lazily initializes `this.client` if needed and merges per-call request
    // options with the client configuration (including Azure headers/query).
    const requestOptions = this._getClientOptions(options);
    // `this.caller` applies the configured retry policy; each attempt re-issues
    // the request against the chat completions endpoint.
    return this.caller.call(async () => {
      try {
        const res = await this.client.chat.completions.create(
          request,
          requestOptions
        );
        return res;
      } catch (e) {
        // Re-wrap SDK errors via wrapOpenAIClientError so callers receive a
        // consistent error type.
        const error = wrapOpenAIClientError(e);
        throw error;
      }
    });
  }

  /**
   * Call the beta chat completions parse endpoint. This should only be called if
   * response_format is set to "json_schema".
* @param {OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming} request * @param {OpenAICoreRequestOptions | undefined} options */ async betaParsedCompletionWithRetry( request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions // Avoid relying importing a beta type with no official entrypoint ): Promise<ReturnType<OpenAIClient["beta"]["chat"]["completions"]["parse"]>> { const requestOptions = this._getClientOptions(options); return this.caller.call(async () => { try { const res = await this.client.beta.chat.completions.parse( request, requestOptions ); return res; } catch (e) { const error = wrapOpenAIClientError(e); throw error; } }); } protected _getClientOptions(options: OpenAICoreRequestOptions | undefined) { if (!this.client) { const openAIEndpointConfig: OpenAIEndpointConfig = { azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName, azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName, azureOpenAIApiKey: this.azureOpenAIApiKey, azureOpenAIBasePath: this.azureOpenAIBasePath, baseURL: this.clientConfig.baseURL, azureOpenAIEndpoint: this.azureOpenAIEndpoint, }; const endpoint = getEndpoint(openAIEndpointConfig); const params = { ...this.clientConfig, baseURL: endpoint, timeout: this.timeout, maxRetries: 0, }; if (!params.baseURL) { delete params.baseURL; } this.client = new OpenAIClient(params); } const requestOptions = { ...this.clientConfig, ...options, } as OpenAICoreRequestOptions; if (this.azureOpenAIApiKey) { requestOptions.headers = { "api-key": this.azureOpenAIApiKey, ...requestOptions.headers, }; requestOptions.query = { "api-version": this.azureOpenAIApiVersion, ...requestOptions.query, }; } return requestOptions; } _llmType() { return "openai"; } /** @ignore */ _combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput { return llmOutputs.reduce<{ [key in keyof OpenAILLMOutput]: Required<OpenAILLMOutput[key]>; }>( (acc, llmOutput) => { if (llmOutput && llmOutput.tokenUsage) { 
acc.tokenUsage.completionTokens += llmOutput.tokenUsage.completionTokens ?? 0; acc.tokenUsage.promptTokens += llmOutput.tokenUsage.promptTokens ?? 0; acc.tokenUsage.totalTokens += llmOutput.tokenUsage.totalTokens ?? 0; } return acc; }, { tokenUsage: { completionTokens: 0, promptTokens: 0, totalTokens: 0, }, } ); } withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<false> ): Runnable<BaseLanguageModelInput, RunOutput>; withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<true> ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>; withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<boolean> ): | Runnable<BaseLanguageModelInput, RunOutput> | Runnable< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput } > { // eslint-disable-next-line @typescript-eslint/no-explicit-any let schema: z.ZodType<RunOutput> | Record<string, any>; let name; let method; let includeRaw; if (isStructuredOutputMethodParams(outputSchema)) { schema = outputSchema.schema; name = outputSchema.name; method = outputSchema.method; includeRaw = outputSchema.includeRaw; } else { schema = outputSchema; name = config?.name; method = config?.method; includeRaw = config?.includeRaw; } 
let llm: Runnable<BaseLanguageModelInput>; let outputParser: BaseLLMOutputParser<RunOutput>; if (config?.strict !== undefined && method === "jsonMode") { throw new Error( "Argument `strict` is only supported for `method` = 'function_calling'" ); } if (method === "jsonMode") { llm = this.bind({ response_format: { type: "json_object" }, } as Partial<CallOptions>); if (isZodSchema(schema)) { outputParser = StructuredOutputParser.fromZodSchema(schema); } else { outputParser = new JsonOutputParser<RunOutput>(); } } else if (method === "jsonSchema") { llm = this.bind({ response_format: { type: "json_schema", json_schema: { name: name ?? "extract", description: schema.description, schema, strict: config?.strict, }, }, } as Partial<CallOptions>); if (isZodSchema(schema)) { outputParser = StructuredOutputParser.fromZodSchema(schema); } else { outputParser = new JsonOutputParser<RunOutput>(); } } else { let functionName = name ?? "extract"; // Is function calling if (isZodSchema(schema)) { const asJsonSchema = zodToJsonSchema(schema); llm = this.bind({ tools: [ { type: "function" as const, function: { name: functionName, description: asJsonSchema.description, parameters: asJsonSchema, }, }, ], tool_choice: { type: "function" as const, function: { name: functionName, }, }, // Do not pass `strict` argument to OpenAI if `config.strict` is undefined ...(config?.strict !== undefined ? { strict: config.strict } : {}), } as Partial<CallOptions>); outputParser = new JsonOutputKeyToolsParser({ returnSingle: true, keyName: functionName, zodSchema: schema, }); } else { let openAIFunctionDefinition: FunctionDefinition; if ( typeof schema.name === "string" && typeof schema.parameters === "object" && schema.parameters != null ) { openAIFunctionDefinition = schema as FunctionDefinition; functionName = schema.name; } else { functionName = schema.title ?? functionName; openAIFunctionDefinition = { name: functionName, description: schema.description ?? 
"", parameters: schema, }; } llm = this.bind({ tools: [ { type: "function" as const, function: openAIFunctionDefinition, }, ], tool_choice: { type: "function" as const, function: { name: functionName, }, }, // Do not pass `strict` argument to OpenAI if `config.strict` is undefined ...(config?.strict !== undefined ? { strict: config.strict } : {}), } as Partial<CallOptions>); outputParser = new JsonOutputKeyToolsParser<RunOutput>({ returnSingle: true, keyName: functionName, }); } } if (!includeRaw) { return llm.pipe(outputParser) as Runnable< BaseLanguageModelInput, RunOutput >; } const parserAssign = RunnablePassthrough.assign({ // eslint-disable-next-line @typescript-eslint/no-explicit-any parsed: (input: any, config) => outputParser.invoke(input.raw, config), }); const parserNone = RunnablePassthrough.assign({ parsed: () => null, }); const parsedWithFallback = parserAssign.withFallbacks({ fallbacks: [parserNone], }); return RunnableSequence.from< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput } >([ { raw: llm, }, parsedWithFallback, ]); } } function isZodSchema< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( // eslint-disable-next-line @typescript-eslint/no-explicit-any input: z.ZodType<RunOutput> | Record<string, any> ): input is z.ZodType<RunOutput> { // Check for a characteristic method of Zod schemas return typeof (input as z.ZodType<RunOutput>)?.parse === "function"; } function isStructuredOutputMethodParams( x: unknown // eslint-disable-next-line @typescript-eslint/no-explicit-any ): x is StructuredOutputMethodParams<Record<string, any>> { return ( x !== undefined && // eslint-disable-next-line @typescript-eslint/no-explicit-any typeof (x as StructuredOutputMethodParams<Record<string, any>>).schema === "object" ); }
0
lc_public_repos/langchainjs/libs/langchain-openai
lc_public_repos/langchainjs/libs/langchain-openai/src/embeddings.ts
import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Embeddings, type EmbeddingsParams } from "@langchain/core/embeddings";
import { chunkArray } from "@langchain/core/utils/chunk_array";
import {
  AzureOpenAIInput,
  OpenAICoreRequestOptions,
  LegacyOpenAIInput,
} from "./types.js";
import { getEndpoint, OpenAIEndpointConfig } from "./utils/azure.js";
import { wrapOpenAIClientError } from "./utils/openai.js";

/**
 * Interface for OpenAIEmbeddings parameters. Extends EmbeddingsParams and
 * defines additional parameters specific to the OpenAIEmbeddings class.
 */
export interface OpenAIEmbeddingsParams extends EmbeddingsParams {
  /**
   * Model name to use
   * Alias for `model`
   */
  modelName: string;

  /** Model name to use */
  model: string;

  /**
   * The number of dimensions the resulting output embeddings should have.
   * Only supported in `text-embedding-3` and later models.
   */
  dimensions?: number;

  /**
   * Timeout to use when making requests to OpenAI.
   */
  timeout?: number;

  /**
   * The maximum number of documents to embed in a single request. This is
   * limited by the OpenAI API to a maximum of 2048.
   */
  batchSize?: number;

  /**
   * Whether to strip new lines from the input text. This is recommended by
   * OpenAI for older models, but may not be suitable for all use cases.
   * See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
   */
  stripNewLines?: boolean;
}

/**
 * Class for generating embeddings using the OpenAI API. Extends the
 * Embeddings class and implements OpenAIEmbeddingsParams and
 * AzureOpenAIInput.
 * @example
 * ```typescript
 * // Embed a query using OpenAIEmbeddings to generate embeddings for a given text
 * const model = new OpenAIEmbeddings();
 * const res = await model.embedQuery(
 *   "What would be a good company name for a company that makes colorful socks?",
 * );
 * console.log({ res });
 *
 * ```
 */
export class OpenAIEmbeddings
  extends Embeddings
  implements OpenAIEmbeddingsParams, AzureOpenAIInput
{
  // Legacy alias; kept in sync with `model` by the constructor.
  modelName = "text-embedding-ada-002";

  model = "text-embedding-ada-002";

  // Maximum documents per API request (see OpenAIEmbeddingsParams.batchSize).
  batchSize = 512;

  // TODO: Update to `false` on next minor release (see: https://github.com/langchain-ai/langchainjs/pull/3612)
  stripNewLines = true;

  /**
   * The number of dimensions the resulting output embeddings should have.
   * Only supported in `text-embedding-3` and later models.
   */
  dimensions?: number;

  timeout?: number;

  azureOpenAIApiVersion?: string;

  azureOpenAIApiKey?: string;

  azureADTokenProvider?: () => Promise<string>;

  azureOpenAIApiInstanceName?: string;

  azureOpenAIApiDeploymentName?: string;

  azureOpenAIBasePath?: string;

  organization?: string;

  // Created lazily in embeddingWithRetry() so the endpoint can be resolved first.
  protected client: OpenAIClient;

  protected clientConfig: ClientOptions;

  /**
   * Resolves credentials and Azure/OpenAI configuration from explicit fields
   * first, then environment variables. Throws if no API key or Azure AD token
   * provider can be found, or if required Azure settings are missing when an
   * Azure credential is supplied.
   */
  constructor(
    fields?: Partial<OpenAIEmbeddingsParams> &
      Partial<AzureOpenAIInput> & {
        verbose?: boolean;
        /**
         * The OpenAI API key to use.
         * Alias for `apiKey`.
         */
        openAIApiKey?: string;
        /** The OpenAI API key to use. */
        apiKey?: string;
        configuration?: ClientOptions;
      },
    configuration?: ClientOptions & LegacyOpenAIInput
  ) {
    const fieldsWithDefaults = { maxConcurrency: 2, ...fields };
    super(fieldsWithDefaults);

    // Explicit fields win over environment variables for every setting below.
    let apiKey =
      fieldsWithDefaults?.apiKey ??
      fieldsWithDefaults?.openAIApiKey ??
      getEnvironmentVariable("OPENAI_API_KEY");

    const azureApiKey =
      fieldsWithDefaults?.azureOpenAIApiKey ??
      getEnvironmentVariable("AZURE_OPENAI_API_KEY");

    this.azureADTokenProvider = fields?.azureADTokenProvider ?? undefined;

    if (!azureApiKey && !apiKey && !this.azureADTokenProvider) {
      throw new Error(
        "OpenAI or Azure OpenAI API key or Token Provider not found"
      );
    }

    const azureApiInstanceName =
      fieldsWithDefaults?.azureOpenAIApiInstanceName ??
      getEnvironmentVariable("AZURE_OPENAI_API_INSTANCE_NAME");

    // Embeddings-specific deployment name takes precedence over the generic one.
    const azureApiDeploymentName =
      (fieldsWithDefaults?.azureOpenAIApiEmbeddingsDeploymentName ||
        fieldsWithDefaults?.azureOpenAIApiDeploymentName) ??
      (getEnvironmentVariable("AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME") ||
        getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME"));

    const azureApiVersion =
      fieldsWithDefaults?.azureOpenAIApiVersion ??
      getEnvironmentVariable("AZURE_OPENAI_API_VERSION");

    this.azureOpenAIBasePath =
      fieldsWithDefaults?.azureOpenAIBasePath ??
      getEnvironmentVariable("AZURE_OPENAI_BASE_PATH");

    this.organization =
      fieldsWithDefaults?.configuration?.organization ??
      getEnvironmentVariable("OPENAI_ORGANIZATION");

    this.modelName =
      fieldsWithDefaults?.model ?? fieldsWithDefaults?.modelName ?? this.model;
    this.model = this.modelName;

    // NOTE(review): defaults to a batch size of 1 when an Azure key is used —
    // presumably to stay within Azure request limits; confirm before raising.
    this.batchSize =
      fieldsWithDefaults?.batchSize ?? (azureApiKey ? 1 : this.batchSize);

    this.stripNewLines =
      fieldsWithDefaults?.stripNewLines ?? this.stripNewLines;
    this.timeout = fieldsWithDefaults?.timeout;

    this.dimensions = fieldsWithDefaults?.dimensions;

    this.azureOpenAIApiVersion = azureApiVersion;
    this.azureOpenAIApiKey = azureApiKey;
    this.azureOpenAIApiInstanceName = azureApiInstanceName;
    this.azureOpenAIApiDeploymentName = azureApiDeploymentName;

    // Azure mode (key or AD token) requires enough information to build the
    // endpoint URL; fail fast here rather than on the first request.
    if (this.azureOpenAIApiKey || this.azureADTokenProvider) {
      if (!this.azureOpenAIApiInstanceName && !this.azureOpenAIBasePath) {
        throw new Error("Azure OpenAI API instance name not found");
      }
      if (!this.azureOpenAIApiDeploymentName) {
        throw new Error("Azure OpenAI API deployment name not found");
      }
      if (!this.azureOpenAIApiVersion) {
        throw new Error("Azure OpenAI API version not found");
      }
      // The OpenAI client requires a string apiKey even when Azure auth is
      // carried by the "api-key" header / token provider instead.
      apiKey = apiKey ?? "";
    }

    // Later spreads intentionally let caller-supplied configuration override
    // the defaults assembled above.
    this.clientConfig = {
      apiKey,
      organization: this.organization,
      baseURL: configuration?.basePath,
      dangerouslyAllowBrowser: true,
      defaultHeaders: configuration?.baseOptions?.headers,
      defaultQuery: configuration?.baseOptions?.params,
      ...configuration,
      ...fields?.configuration,
    };
  }

  /**
   * Method to generate embeddings for an array of documents. Splits the
   * documents into batches and makes requests to the OpenAI API to generate
   * embeddings.
   * @param texts Array of documents to generate embeddings for.
   * @returns Promise that resolves to a 2D array of embeddings for each document.
   */
  async embedDocuments(texts: string[]): Promise<number[][]> {
    const batches = chunkArray(
      this.stripNewLines ? texts.map((t) => t.replace(/\n/g, " ")) : texts,
      this.batchSize
    );
    // One API request per batch; requests run concurrently (bounded by the
    // caller's maxConcurrency set in the constructor).
    const batchRequests = batches.map((batch) => {
      const params: OpenAIClient.EmbeddingCreateParams = {
        model: this.model,
        input: batch,
      };
      if (this.dimensions) {
        params.dimensions = this.dimensions;
      }
      return this.embeddingWithRetry(params);
    });
    const batchResponses = await Promise.all(batchRequests);

    // Flatten batch responses back into one embedding per input text,
    // preserving the original order.
    const embeddings: number[][] = [];
    for (let i = 0; i < batchResponses.length; i += 1) {
      const batch = batches[i];
      const { data: batchResponse } = batchResponses[i];
      for (let j = 0; j < batch.length; j += 1) {
        embeddings.push(batchResponse[j].embedding);
      }
    }
    return embeddings;
  }

  /**
   * Method to generate an embedding for a single document. Calls the
   * embeddingWithRetry method with the document as the input.
   * @param text Document to generate an embedding for.
   * @returns Promise that resolves to an embedding for the document.
   */
  async embedQuery(text: string): Promise<number[]> {
    const params: OpenAIClient.EmbeddingCreateParams = {
      model: this.model,
      input: this.stripNewLines ? text.replace(/\n/g, " ") : text,
    };
    if (this.dimensions) {
      params.dimensions = this.dimensions;
    }
    const { data } = await this.embeddingWithRetry(params);
    return data[0].embedding;
  }

  /**
   * Private method to make a request to the OpenAI API to generate
   * embeddings. Handles the retry logic and returns the response from the
   * API.
   * @param request Request to send to the OpenAI API.
   * @returns Promise that resolves to the response from the API.
   */
  protected async embeddingWithRetry(
    request: OpenAIClient.EmbeddingCreateParams
  ) {
    // Lazily construct the client on first use so the endpoint can be
    // derived from the (possibly Azure-specific) configuration.
    if (!this.client) {
      const openAIEndpointConfig: OpenAIEndpointConfig = {
        azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
        azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
        azureOpenAIApiKey: this.azureOpenAIApiKey,
        azureOpenAIBasePath: this.azureOpenAIBasePath,
        baseURL: this.clientConfig.baseURL,
      };

      const endpoint = getEndpoint(openAIEndpointConfig);

      // maxRetries is 0 here because retries are handled by this.caller below.
      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        maxRetries: 0,
      };

      // Let the OpenAI SDK fall back to its own default URL when none resolved.
      if (!params.baseURL) {
        delete params.baseURL;
      }

      this.client = new OpenAIClient(params);
    }
    const requestOptions: OpenAICoreRequestOptions = {};
    if (this.azureOpenAIApiKey) {
      // Azure authenticates via the "api-key" header and requires the
      // "api-version" query parameter on every request.
      requestOptions.headers = {
        "api-key": this.azureOpenAIApiKey,
        ...requestOptions.headers,
      };
      requestOptions.query = {
        "api-version": this.azureOpenAIApiVersion,
        ...requestOptions.query,
      };
    }
    return this.caller.call(async () => {
      try {
        const res = await this.client.embeddings.create(
          request,
          requestOptions
        );
        return res;
      } catch (e) {
        // Normalize SDK errors into LangChain's error shape before rethrowing.
        const error = wrapOpenAIClientError(e);
        throw error;
      }
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tools/dalle.ts
/* eslint-disable no-param-reassign */ import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { OpenAI as OpenAIClient } from "openai"; import { Tool, ToolParams } from "@langchain/core/tools"; import { MessageContentComplex, MessageContentImageUrl, } from "@langchain/core/messages"; /** * An interface for the Dall-E API Wrapper. */ export interface DallEAPIWrapperParams extends ToolParams { /** * The OpenAI API key * Alias for `apiKey` */ openAIApiKey?: string; /** * The OpenAI API key */ apiKey?: string; /** * The model to use. * Alias for `model` * @params "dall-e-2" | "dall-e-3" * @default "dall-e-3" */ modelName?: string; /** * The model to use. * @params "dall-e-2" | "dall-e-3" * @default "dall-e-3" */ model?: string; /** * The style of the generated images. Must be one of vivid or natural. * Vivid causes the model to lean towards generating hyper-real and dramatic images. * Natural causes the model to produce more natural, less hyper-real looking images. * @default "vivid" */ style?: "natural" | "vivid"; /** * The quality of the image that will be generated. ‘hd’ creates images with finer * details and greater consistency across the image. * @default "standard" */ quality?: "standard" | "hd"; /** * The number of images to generate. * Must be between 1 and 10. * For dall-e-3, only `n: 1` is supported. * @default 1 */ n?: number; /** * The size of the generated images. * Must be one of 256x256, 512x512, or 1024x1024 for DALL·E-2 models. * Must be one of 1024x1024, 1792x1024, or 1024x1792 for DALL·E-3 models. * @default "1024x1024" */ size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792"; /** * The format in which the generated images are returned. * Must be one of "url" or "b64_json". * @default "url" */ dallEResponseFormat?: "url" | "b64_json"; /** * @deprecated Use dallEResponseFormat instead for the Dall-E response type. 
*/ // eslint-disable-next-line @typescript-eslint/no-explicit-any responseFormat?: any; /** * A unique identifier representing your end-user, which will help * OpenAI to monitor and detect abuse. */ user?: string; /** * The organization to use */ organization?: string; /** * The base URL of the OpenAI API. */ baseUrl?: string; } /** * A tool for generating images with Open AIs Dall-E 2 or 3 API. */ export class DallEAPIWrapper extends Tool { static lc_name() { return "DallEAPIWrapper"; } name = "dalle_api_wrapper"; description = "A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. Input should be an image description."; protected client: OpenAIClient; static readonly toolName = "dalle_api_wrapper"; private model = "dall-e-3"; private style: "natural" | "vivid" = "vivid"; private quality: "standard" | "hd" = "standard"; private n = 1; private size: | "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792" = "1024x1024"; private dallEResponseFormat: "url" | "b64_json" = "url"; private user?: string; constructor(fields?: DallEAPIWrapperParams) { // Shim for new base tool param name if ( fields?.responseFormat !== undefined && ["url", "b64_json"].includes(fields.responseFormat) ) { // eslint-disable-next-line @typescript-eslint/no-explicit-any fields.dallEResponseFormat = fields.responseFormat as any; fields.responseFormat = "content"; } super(fields); const openAIApiKey = fields?.apiKey ?? fields?.openAIApiKey ?? getEnvironmentVariable("OPENAI_API_KEY"); const organization = fields?.organization ?? getEnvironmentVariable("OPENAI_ORGANIZATION"); const clientConfig = { apiKey: openAIApiKey, organization, dangerouslyAllowBrowser: true, baseUrl: fields?.baseUrl, }; this.client = new OpenAIClient(clientConfig); this.model = fields?.model ?? fields?.modelName ?? this.model; this.style = fields?.style ?? this.style; this.quality = fields?.quality ?? this.quality; this.n = fields?.n ?? 
this.n; this.size = fields?.size ?? this.size; this.dallEResponseFormat = fields?.dallEResponseFormat ?? this.dallEResponseFormat; this.user = fields?.user; } /** * Processes the API response if multiple images are generated. * Returns a list of MessageContentImageUrl objects. If the response * format is `url`, then the `image_url` field will contain the URL. * If it is `b64_json`, then the `image_url` field will contain an object * with a `url` field with the base64 encoded image. * * @param {OpenAIClient.Images.ImagesResponse[]} response The API response * @returns {MessageContentImageUrl[]} */ private processMultipleGeneratedUrls( response: OpenAIClient.Images.ImagesResponse[] ): MessageContentImageUrl[] { if (this.dallEResponseFormat === "url") { return response.flatMap((res) => { const imageUrlContent = res.data .flatMap((item) => { if (!item.url) return []; return { type: "image_url" as const, image_url: item.url, }; }) .filter( (item) => item !== undefined && item.type === "image_url" && typeof item.image_url === "string" && item.image_url !== undefined ); return imageUrlContent; }); } else { return response.flatMap((res) => { const b64Content = res.data .flatMap((item) => { if (!item.b64_json) return []; return { type: "image_url" as const, image_url: { url: item.b64_json, }, }; }) .filter( (item) => item !== undefined && item.type === "image_url" && typeof item.image_url === "object" && "url" in item.image_url && typeof item.image_url.url === "string" && item.image_url.url !== undefined ); return b64Content; }); } } /** @ignore */ async _call(input: string): Promise<string | MessageContentComplex[]> { const generateImageFields = { model: this.model, prompt: input, n: 1, size: this.size, response_format: this.dallEResponseFormat, style: this.style, quality: this.quality, user: this.user, }; if (this.n > 1) { const results = await Promise.all( Array.from({ length: this.n }).map(() => this.client.images.generate(generateImageFields) ) ); return 
this.processMultipleGeneratedUrls(results); } const response = await this.client.images.generate(generateImageFields); let data = ""; if (this.dallEResponseFormat === "url") { [data] = response.data .map((item) => item.url) .filter((url): url is string => url !== "undefined"); } else { [data] = response.data .map((item) => item.b64_json) .filter((b64_json): b64_json is string => b64_json !== "undefined"); } return data; } }
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tools/index.ts
export * from "./dalle.js";
0
lc_public_repos/langchainjs/libs/langchain-openai/src/tools
lc_public_repos/langchainjs/libs/langchain-openai/src/tools/tests/dalle.int.test.ts
import { test, expect } from "@jest/globals";
import { DallEAPIWrapper } from "../dalle.js";

// NOTE: every test here is `test.skip` — they call the billable DALL·E API
// and need OPENAI_API_KEY; remove `.skip` to run them manually.

// Default configuration returns a single hosted image URL.
test.skip("Dalle can generate images", async () => {
  const dalle = new DallEAPIWrapper();
  const res = await dalle.invoke("A painting of a cat");
  expect(res).toBeDefined();
  expect(res).toContain("https://");
});

// The deprecated `responseFormat` field still selects base64 output
// (shimmed onto `dallEResponseFormat` in the constructor).
test.skip("Dalle can generate images with base 64 response format", async () => {
  const dalle = new DallEAPIWrapper({
    responseFormat: "b64_json",
  });
  const res = await dalle.invoke("A painting of a cat");
  expect(res).toBeDefined();
  expect(res).not.toContain("https://");
});

// With n > 1 and URL format, the tool returns an array of image_url
// content blocks whose `image_url` field is a plain URL string.
test.skip("Dalle returns multiple image URLs if n > 1", async () => {
  const dalle = new DallEAPIWrapper({
    n: 2,
  });
  const res = await dalle.invoke("A painting of a cat");
  expect(res).toBeDefined();
  expect(res).toBeInstanceOf(Array);
  if (!Array.isArray(res)) return;
  expect(res).toHaveLength(2);
  // The types for each should be `image_url` with an `image_url` field containing the URL
  expect(res[0].type).toBe("image_url");
  expect(res[1].type).toBe("image_url");
  expect(res[0]).toHaveProperty("image_url");
  expect(res[1]).toHaveProperty("image_url");
  expect(res[0].image_url.startsWith("https://")).toBe(true);
  expect(res[1].image_url.startsWith("https://")).toBe(true);
});

// With n > 1 and b64_json format, each block's `image_url` is an object
// carrying the base64 payload in its `url` field.
test.skip("Dalle returns multiple base64 image strings if n > 1", async () => {
  const dalle = new DallEAPIWrapper({
    n: 2,
    dallEResponseFormat: "b64_json",
  });
  const res = await dalle.invoke("A painting of a cat");
  expect(res).toBeDefined();
  expect(res).toBeInstanceOf(Array);
  if (!Array.isArray(res)) return;
  expect(res).toHaveLength(2);
  // The types for each should be `b64_json` with an `b64_json` field containing the URL
  expect(res[0].type).toBe("image_url");
  expect(res[1].type).toBe("image_url");
  expect(res[0]).toHaveProperty("image_url");
  expect(res[1]).toHaveProperty("image_url");
  expect(res[0].image_url).toHaveProperty("url");
  expect(res[1].image_url).toHaveProperty("url");
  expect(res[0].image_url.url).toBeDefined();
  expect(res[1].image_url.url).toBeDefined();
  expect(res[0].image_url.url).not.toBe("");
  expect(res[1].image_url.url).not.toBe("");
});
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/prompts.int.test.ts
import OpenAI from "openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { convertPromptToOpenAI } from "../utils/prompts.js";

// Round-trip check: format a LangChain chat prompt, convert it into an
// OpenAI chat.completions payload, and verify the raw OpenAI client
// accepts it and returns at least one choice.
test("Convert hub prompt to OpenAI payload and invoke", async () => {
  const template = ChatPromptTemplate.fromMessages([
    ["system", "You are a world class comedian"],
    ["human", "Tell me a joke about {topic}"],
  ]);
  const promptValue = await template.invoke({ topic: "cats" });

  // Translate the formatted prompt value into OpenAI's message format.
  const { messages } = convertPromptToOpenAI(promptValue);

  const client = new OpenAI();
  const completion = await client.chat.completions.create({
    model: "gpt-4o-mini",
    messages,
  });

  expect(completion.choices.length).toBeGreaterThan(0);
});
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/chat_models.standard.int.test.ts
/* eslint-disable no-process-env */
import { test } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessage, AIMessageChunk } from "@langchain/core/messages";
import { readFileSync } from "fs";
import { join } from "path";
import { concat } from "@langchain/core/utils/stream";
import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { ChatOpenAI, ChatOpenAICallOptions } from "../chat_models.js";

// Assumes jest is launched from the repo root so README.md resolves below.
const REPO_ROOT_DIR = process.cwd();

/**
 * Runs the shared LangChain standard integration test suite against
 * ChatOpenAI, overriding the hooks where OpenAI needs special handling.
 * Requires OPENAI_API_KEY (checked in the constructor).
 */
class ChatOpenAIStandardIntegrationTests extends ChatModelIntegrationTests<
  ChatOpenAICallOptions,
  AIMessageChunk
> {
  constructor() {
    if (!process.env.OPENAI_API_KEY) {
      throw new Error(
        "OPENAI_API_KEY must be set to run standard integration tests."
      );
    }
    super({
      Cls: ChatOpenAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      supportsParallelToolCalls: true,
      constructorArgs: {
        model: "gpt-3.5-turbo",
      },
    });
  }

  // Declares which usage-metadata details the suite should expect this
  // provider to report, per invocation mode.
  supportedUsageMetadataDetails: {
    invoke: Array<
      | "audio_input"
      | "audio_output"
      | "reasoning_output"
      | "cache_read_input"
      | "cache_creation_input"
    >;
    stream: Array<
      | "audio_input"
      | "audio_output"
      | "reasoning_output"
      | "cache_read_input"
      | "cache_creation_input"
    >;
  } = { invoke: ["cache_read_input", "reasoning_output"], stream: [] };

  // Uses a reasoning model (o1-mini) with a prompt long/hard enough to
  // produce reasoning-output token counts.
  async invokeWithReasoningOutput(stream: boolean) {
    const chatModel = new ChatOpenAI({
      model: "o1-mini",
      streamUsage: true,
      temperature: 1,
    });
    const input =
      "explain the relationship between the 2008/9 economic crisis and the startup ecosystem in the early 2010s";

    return invoke(chatModel, input, stream);
  }

  // Sends a large prompt (the repo README) twice; the second call should
  // report cache_read_input usage from OpenAI's prompt caching.
  async invokeWithCacheReadInput(stream: boolean = false): Promise<AIMessage> {
    const readme = readFileSync(join(REPO_ROOT_DIR, "README.md"), "utf-8");

    const input = `What's langchain?
Here's the langchain README: ${readme}
`;
    const llm = new ChatOpenAI({ modelName: "gpt-4o-mini", streamUsage: true });
    await invoke(llm, input, stream); // invoke twice so first invocation is cached
    return invoke(llm, input, stream);
  }

  async testUsageMetadataStreaming() {
    // ChatOpenAI does not support streaming tokens by
    // default, so we must pass in a call option to
    // enable streaming tokens.
    const callOptions: ChatOpenAI["ParsedCallOptions"] = {
      stream_options: {
        include_usage: true,
      },
    };
    await super.testUsageMetadataStreaming(callOptions);
  }

  // Intentionally skipped: OpenAI rejects such tool schemas.
  async testInvokeMoreComplexTools() {
    this.skipTestMessage(
      "testInvokeMoreComplexTools",
      "ChatOpenAI",
      "OpenAI does not support tool schemas which contain object with unknown/any parameters." +
        "\nOpenAI only supports objects in schemas when the parameters are defined."
    );
  }

  async testParallelToolCalling() {
    // Override constructor args to use a better model for this test.
    // I found that GPT 3.5 struggles with parallel tool calling.
    const constructorArgsCopy = { ...this.constructorArgs };
    this.constructorArgs = {
      ...this.constructorArgs,
      model: "gpt-4o",
    };
    await super.testParallelToolCalling();
    // Restore the original args so later tests use the default model.
    this.constructorArgs = constructorArgsCopy;
  }
}

const testClass = new ChatOpenAIStandardIntegrationTests();

test("ChatOpenAIStandardIntegrationTests", async () => {
  const testResults = await testClass.runTests();
  expect(testResults).toBe(true);
});

/**
 * Helper: invoke the model either in one shot or via streaming,
 * concatenating streamed chunks into a single message.
 * @param chatModel Model under test.
 * @param input Prompt to send.
 * @param stream Whether to stream and merge chunks instead of invoking.
 */
async function invoke(
  chatModel: ChatOpenAI,
  input: BaseLanguageModelInput,
  stream: boolean
): Promise<AIMessage> {
  if (stream) {
    let finalChunks: AIMessageChunk | undefined;

    // Stream the response for a simple "Hello" prompt
    for await (const chunk of await chatModel.stream(input)) {
      // Concatenate chunks to get the final result
      finalChunks = finalChunks ? concat(finalChunks, chunk) : chunk;
    }
    return finalChunks as AIMessage;
  } else {
    return chatModel.invoke(input);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts
/* eslint-disable no-promise-executor-return */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, expect, jest } from "@jest/globals"; import { AIMessage, HumanMessage, ToolMessage } from "@langchain/core/messages"; import { concat } from "@langchain/core/utils/stream"; import { InMemoryCache } from "@langchain/core/caches"; import { ChatOpenAI } from "../chat_models.js"; test("Test ChatOpenAI JSON mode", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", maxTokens: 128, }).bind({ response_format: { type: "json_object", }, }); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([["system", "Only return JSON"], message]); // console.log(JSON.stringify(res)); }); test("Test ChatOpenAI seed", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", maxTokens: 128, temperature: 1, }).bind({ seed: 123454930394983, }); const message = new HumanMessage("Say something random!"); const res = await chat.invoke([message]); const res2 = await chat.invoke([message]); expect(res.response_metadata.system_fingerprint).toBeDefined(); expect(res2.response_metadata.system_fingerprint).toBeDefined(); // These are unfortunately not consistently the same delete res.response_metadata.system_fingerprint; delete res2.response_metadata.system_fingerprint; const resAsObject = { ...res, id: undefined, lc_kwargs: { ...res.lc_kwargs, id: undefined }, }; const res2AsObject = { ...res2, id: undefined, lc_kwargs: { ...res2.lc_kwargs, id: undefined }, }; expect(resAsObject).toEqual(res2AsObject); }); test("Test ChatOpenAI tool calling", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", maxTokens: 128, }).bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: 
{ type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }); const res = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], ]); // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toEqual(3); expect(res.tool_calls?.[0].args).toEqual( JSON.parse(res.additional_kwargs.tool_calls?.[0].function.arguments ?? "{}") ); expect(res.tool_calls?.[1].args).toEqual( JSON.parse(res.additional_kwargs.tool_calls?.[1].function.arguments ?? "{}") ); expect(res.tool_calls?.[2].args).toEqual( JSON.parse(res.additional_kwargs.tool_calls?.[2].function.arguments ?? "{}") ); }); test("Test ChatOpenAI streaming logprobs", async () => { const model = new ChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo", streaming: true, logprobs: true, }); const res = await model.invoke("Print hello world."); // console.log(res.response_metadata.logprobs.content); expect(res.response_metadata.logprobs.content.length).toBeGreaterThan(0); }); test("Test ChatOpenAI tool calling with ToolMessages", async () => { function getCurrentWeather(location: string) { if (location.toLowerCase().includes("tokyo")) { return JSON.stringify({ location, temperature: "10", unit: "celsius" }); } else if (location.toLowerCase().includes("san francisco")) { return JSON.stringify({ location, temperature: "72", unit: "fahrenheit", }); } else { return JSON.stringify({ location, temperature: "22", unit: "celsius" }); } } const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", maxTokens: 128, }).bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. 
San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }); const res = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], ]); // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toBeGreaterThan(1); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const toolMessages = res.additional_kwargs.tool_calls!.map( (toolCall) => new ToolMessage({ tool_call_id: toolCall.id, name: toolCall.function.name, content: getCurrentWeather( JSON.parse(toolCall.function.arguments).location ), }) ); let toolError; try { await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], res, ]); } catch (e) { toolError = e; } expect(toolError).toBeDefined(); expect((toolError as any)?.lc_error_code).toEqual("INVALID_TOOL_RESULTS"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const finalResponse = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], res, ...toolMessages, ]); // console.log(finalResponse); }); test("Test ChatOpenAI tool calling with streaming", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", maxTokens: 256, }).bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. 
San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }); const stream = await chat.stream([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], ]); let finalChunk; const chunks = []; for await (const chunk of stream) { // console.log(chunk.additional_kwargs.tool_calls); chunks.push(chunk); if (!finalChunk) { finalChunk = chunk; } else { finalChunk = finalChunk.concat(chunk); } } expect(chunks.length).toBeGreaterThan(1); // console.log(finalChunk?.additional_kwargs.tool_calls); expect(finalChunk?.additional_kwargs.tool_calls?.length).toBeGreaterThan(1); }); test("ChatOpenAI in JSON mode can cache generations", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", temperature: 1, cache: memoryCache, }).bind({ response_format: { type: "json_object", }, }); const message = new HumanMessage( "Respond with a JSON object containing arbitrary fields." ); const res = await chat.invoke([message]); // console.log(res); const res2 = await chat.invoke([message]); // console.log(res2); expect(res).toEqual(res2); expect(lookupSpy).toHaveBeenCalledTimes(2); expect(updateSpy).toHaveBeenCalledTimes(1); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); test("Few shotting with tool calls", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", temperature: 1, }).bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. 
San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }); const res = await chat.invoke([ new HumanMessage("What is the weather in SF?"), new AIMessage({ content: "", tool_calls: [ { id: "12345", name: "get_current_weather", args: { location: "SF", }, }, ], }), new ToolMessage({ tool_call_id: "12345", content: "It is currently 24 degrees with hail in SF.", }), new AIMessage("It is currently 24 degrees in SF with hail in SF."), new HumanMessage("What did you say the weather was?"), ]); // console.log(res); expect(res.content).toContain("24"); }); test("Test ChatOpenAI with raw response", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", maxTokens: 128, __includeRawResponse: true, }); const message = new HumanMessage("Hello!"); const res = await chat.invoke([message]); expect(res.additional_kwargs.__raw_response).toBeDefined(); }); test("Test ChatOpenAI with raw response", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106", maxTokens: 128, __includeRawResponse: true, }); const message = new HumanMessage("Hello!"); const stream = await chat.stream([message]); for await (const chunk of stream) { expect( chunk.additional_kwargs.__raw_response || chunk.usage_metadata ).toBeDefined(); } }); const CACHED_TEXT = `## Components LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs. Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix. ### Chat models <span data-heading-keywords="chat model,chat models"></span> Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text). These are generally newer models (older models are generally \`LLMs\`, see below). 
Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages. Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This gives them the same interface as LLMs (and simpler to use). When a string is passed in as input, it will be converted to a \`HumanMessage\` under the hood before being passed to the underlying model. LangChain does not host any Chat Models, rather we rely on third party integrations. We have some standardized parameters when constructing ChatModels: - \`model\`: the name of the model Chat Models also accept other parameters that are specific to that integration. :::important Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information. ::: For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models). #### Multimodality Some chat models are multimodal, accepting images, audio and even video as inputs. These are still less common, meaning model providers haven't standardized on the "best" way to define the API. Multimodal outputs are even less common. As such, we've kept our multimodal abstractions fairly light weight and plan to further solidify the multimodal APIs and interaction patterns as the field matures. In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations. 
For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal). ### LLMs <span data-heading-keywords="llm,llms"></span> :::caution Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models), even for non-chat use cases. You are probably looking for [the section above instead](/docs/concepts/#chat-models). ::: Language models that takes a string as input and returns a string. These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above). Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input. This gives them the same interface as [Chat Models](/docs/concepts/#chat-models). When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model. LangChain does not host any LLMs, rather we rely on third party integrations. For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms). ### Message types Some language models take an array of messages as input and return a message. There are a few different types of messages. All messages have a \`role\`, \`content\`, and \`response_metadata\` property. The \`role\` describes WHO is saying the message. LangChain has different message classes for different roles. The \`content\` property describes the content of the message. This can be a few different things: - A string (most models deal this type of content) - A List of objects (this is used for multi-modal input, where the object contains information about that input type and that input location) #### HumanMessage This represents a message from the user. #### AIMessage This represents a message from the model. 
In addition to the \`content\` property, these messages also have: **\`response_metadata\`** The \`response_metadata\` property contains additional metadata about the response. The data here is often specific to each model provider. This is where information like log-probs and token usage may be stored. **\`tool_calls\`** These represent a decision from an language model to call a tool. They are included as part of an \`AIMessage\` output. They can be accessed from there with the \`.tool_calls\` property. This property returns a list of \`ToolCall\`s. A \`ToolCall\` is an object with the following arguments: - \`name\`: The name of the tool that should be called. - \`args\`: The arguments to that tool. - \`id\`: The id of that tool call. #### SystemMessage This represents a system message, which tells the model how to behave. Not every model provider supports this. #### ToolMessage This represents the result of a tool call. In addition to \`role\` and \`content\`, this message has: - a \`tool_call_id\` field which conveys the id of the call to the tool that was called to produce this result. - an \`artifact\` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model. #### (Legacy) FunctionMessage This is a legacy message type, corresponding to OpenAI's legacy function-calling API. \`ToolMessage\` should be used instead to correspond to the updated tool-calling API. This represents the result of a function call. In addition to \`role\` and \`content\`, this message has a \`name\` parameter which conveys the name of the function that was called to produce this result. ### Prompt templates <span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span> Prompt templates help to translate user input and parameters into instructions for a language model. 
This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output. Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in. Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or an array of messages. The reason this PromptValue exists is to make it easy to switch between strings and messages. There are a few different types of prompt templates: #### String PromptTemplates These prompt templates are used to format a single string, and generally are used for simpler inputs. For example, a common way to construct and use a PromptTemplate is as follows: \`\`\`typescript import { PromptTemplate } from "@langchain/core/prompts"; const promptTemplate = PromptTemplate.fromTemplate( "Tell me a joke about {topic}" ); await promptTemplate.invoke({ topic: "cats" }); \`\`\` #### ChatPromptTemplates These prompt templates are used to format an array of messages. These "templates" consist of an array of templates themselves. For example, a common way to construct and use a ChatPromptTemplate is as follows: \`\`\`typescript import { ChatPromptTemplate } from "@langchain/core/prompts"; const promptTemplate = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["user", "Tell me a joke about {topic}"], ]); await promptTemplate.invoke({ topic: "cats" }); \`\`\` In the above example, this ChatPromptTemplate will construct two messages when called. The first is a system message, that has no variables to format. The second is a HumanMessage, and will be formatted by the \`topic\` variable the user passes in. #### MessagesPlaceholder <span data-heading-keywords="messagesplaceholder"></span> This prompt template is responsible for adding an array of messages in a particular place. 
In the above ChatPromptTemplate, we saw how we could format two messages, each one a string. But what if we wanted the user to pass in an array of messages that we would slot into a particular spot? This is how you use MessagesPlaceholder. \`\`\`typescript import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { HumanMessage } from "@langchain/core/messages"; const promptTemplate = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], new MessagesPlaceholder("msgs"), ]); promptTemplate.invoke({ msgs: [new HumanMessage({ content: "hi!" })] }); \`\`\` This will produce an array of two messages, the first one being a system message, and the second one being the HumanMessage we passed in. If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in). This is useful for letting an array of messages be slotted into a particular spot. An alternative way to accomplish the same thing without using the \`MessagesPlaceholder\` class explicitly is: \`\`\`typescript const promptTemplate = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["placeholder", "{msgs}"], // <-- This is the changed part ]); \`\`\` For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). ### Example Selectors One common prompting technique for achieving better performance is to include examples as part of the prompt. This gives the language model concrete examples of how it should behave. Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them. Example Selectors are classes responsible for selecting and then formatting examples into prompts. For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors). 
### Output parsers <span data-heading-keywords="output parser"></span> :::note The information here refers to parsers that take a text output from a model try to parse it into a more structured representation. More and more models are supporting function (or tool) calling, which handles this automatically. It is recommended to use function/tool calling rather than output parsing. See documentation for that [here](/docs/concepts/#function-tool-calling). ::: Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks. Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs. There are two main methods an output parser must implement: - "Get format instructions": A method which returns a string containing instructions for how the output of a language model should be formatted. - "Parse": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure. And then one optional one: - "Parse with prompt": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Output parsers accept a string or \`BaseMessage\` as input and can return an arbitrary type. LangChain has many different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information: **Name**: The name of the output parser **Supports Streaming**: Whether the output parser supports streaming. **Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific arguments. 
**Output Type**: The output type of the object returned by the parser. **Description**: Our commentary on this output parser and when to use it. The current date is ${new Date().toISOString()}`; test.skip("system prompt caching", async () => { const model = new ChatOpenAI({ model: "gpt-4o-mini-2024-07-18", }); const date = new Date().toISOString(); const messages = [ { role: "system", content: `You are a pirate. Always respond in pirate dialect. The current date is ${date}.\nUse the following as context when answering questions: ${CACHED_TEXT}`, }, { role: "user", content: "What types of messages are supported in LangChain?", }, ]; const res = await model.invoke(messages); expect(res.response_metadata?.usage.prompt_tokens_details.cached_tokens).toBe( 0 ); await new Promise((resolve) => setTimeout(resolve, 5000)); const res2 = await model.invoke(messages); expect( res2.response_metadata?.usage.prompt_tokens_details.cached_tokens ).toBeGreaterThan(0); let aggregate; for await (const chunk of await model.stream(messages)) { aggregate = aggregate ? concat(aggregate, chunk) : chunk; } expect( aggregate?.response_metadata?.usage.prompt_tokens_details.cached_tokens ).toBeGreaterThan(0); }); test("predicted output", async () => { const model = new ChatOpenAI({ model: "gpt-4o-mini", }); const code = ` /// <summary> /// Represents a user with a first name, last name, and username. /// </summary> public class User { /// <summary> /// Gets or sets the user's first name. /// </summary> public string FirstName { get; set; } /// <summary> /// Gets or sets the user's last name. /// </summary> public string LastName { get; set; } /// <summary> /// Gets or sets the user's username. /// </summary> public string Username { get; set; } } `; const res = await model.invoke( [ { role: "user", content: "Replace the Username property with an Email property. 
Respond only with code, and with no markdown formatting.", }, { role: "user", content: code, }, ], { prediction: { type: "content", content: code, }, } ); expect( typeof res.response_metadata?.usage?.completion_tokens_details .accepted_prediction_tokens ).toBe("number"); expect( typeof res.response_metadata?.usage?.completion_tokens_details .rejected_prediction_tokens ).toBe("number"); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/llms.int.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { LLMResult } from "@langchain/core/outputs"; import { StringPromptValue } from "@langchain/core/prompt_values"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { OpenAIChat } from "../legacy.js"; import { OpenAI } from "../llms.js"; // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; test("Test OpenAI", async () => { const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Print hello world"); // console.log({ res }); }); test("Test OpenAI with stop", async () => { const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); // console.log({ res }); }); test("Test OpenAI with stop in object", async () => { const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Print hello world", { stop: ["world"] }); // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { const model = new OpenAI({ maxTokens: 5, maxRetries: 0, modelName: "gpt-3.5-turbo-instruct", }); await expect(() => model.invoke("Print hello world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Test OpenAI with timeout in call options and node adapter", async () => { const model = new OpenAI({ maxTokens: 5, maxRetries: 0, modelName: "gpt-3.5-turbo-instruct", }); await expect(() => model.invoke("Print hello 
world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Test OpenAI with signal in call options", async () => { const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const controller = new AbortController(); await expect(() => { const ret = model.invoke("Print hello world", { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test OpenAI with signal in call options and node adapter", async () => { const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const controller = new AbortController(); await expect(() => { const ret = model.invoke("Print hello world", { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test OpenAI with concurrency == 1", async () => { const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", maxConcurrency: 1, }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await Promise.all([ model.invoke("Print hello world"), model.invoke("Print hello world"), ]); // console.log({ res }); }); test("Test OpenAI with maxTokens -1", async () => { const model = new OpenAI({ maxTokens: -1, modelName: "gpt-3.5-turbo-instruct", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); // console.log({ res }); }); test("Test OpenAI with chat model returns OpenAIChat", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo" }); expect(model).toBeInstanceOf(OpenAIChat); const res = await model.invoke("Print hello world"); // console.log({ res }); expect(typeof res).toBe("string"); }); test("Test OpenAI with instruct model returns OpenAI", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); expect(model).toBeInstanceOf(OpenAI); const res = await model.invoke("Print hello world"); // 
console.log({ res }); expect(typeof res).toBe("string"); }); test("Test OpenAI with versioned instruct model returns OpenAI", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct-0914" }); expect(model).toBeInstanceOf(OpenAI); const res = await model.invoke("Print hello world"); // console.log({ res }); expect(typeof res).toBe("string"); }); test("Test ChatOpenAI tokenUsage", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let tokenUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, }; const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Hello"); // console.log({ res }); expect(tokenUsage.promptTokens).toBe(1); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test OpenAI in streaming mode", async () => { let nrNewTokens = 0; let streamedCompletion = ""; const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", streaming: true, callbacks: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }), }); const res = await model.invoke("Print hello world"); // console.log({ res }); expect(nrNewTokens > 0).toBe(true); expect(res).toBe(streamedCompletion); }); test("Test OpenAI in streaming mode with multiple prompts", async () => { let nrNewTokens = 0; const completions = [ ["", ""], ["", ""], ]; const model = new OpenAI({ maxTokens: 5, 
modelName: "gpt-3.5-turbo-instruct", streaming: true, n: 2, callbacks: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string, idx: NewTokenIndices) { nrNewTokens += 1; completions[idx.prompt][idx.completion] += token; }, }), }); const res = await model.generate(["Print hello world", "print hello sea"]); // console.log( // res.generations, // res.generations.map((g) => g[0].generationInfo) // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); expect(res.generations.map((g) => g.map((gg) => gg.text))).toEqual( completions ); }); test("Test OpenAIChat in streaming mode with multiple prompts", async () => { let nrNewTokens = 0; const completions = [[""], [""]]; const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo", streaming: true, n: 1, callbacks: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string, idx: NewTokenIndices) { nrNewTokens += 1; completions[idx.prompt][idx.completion] += token; }, }), }); const res = await model.generate(["Print hello world", "print hello sea"]); // console.log( // res.generations, // res.generations.map((g) => g[0].generationInfo) // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); expect(res.generations.map((g) => g.map((gg) => gg.text))).toEqual( completions ); }); test("Test OpenAI prompt value", async () => { const model = new OpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const res = await model.generatePrompt([ new StringPromptValue("Print hello world"), ]); expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(1); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for (const g of generation) { // console.log(g.text); } } // console.log({ res }); }); test("Test OpenAI stream method", async () => { const model = new OpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo-instruct", }); const stream = await 
model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); }); test("Test OpenAI stream method with abort", async () => { await expect(async () => { const model = new OpenAI({ maxTokens: 250, maxRetries: 0, modelName: "gpt-3.5-turbo-instruct", }); const stream = await model.stream( "How is your day going? Be extremely verbose.", { signal: AbortSignal.timeout(1000), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Test OpenAI stream method with early break", async () => { const model = new OpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo-instruct", }); const stream = await model.stream( "How is your day going? Be extremely verbose." ); let i = 0; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); i += 1; if (i > 5) { break; } } });
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/chat_models-vision.int.test.ts
import { test } from "@jest/globals"; import { HumanMessage } from "@langchain/core/messages"; import * as fs from "node:fs/promises"; import { fileURLToPath } from "node:url"; import * as path from "node:path"; import { ChatOpenAI } from "../chat_models.js"; test("Test ChatOpenAI with a file", async () => { const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); const imageData = await fs.readFile(path.join(__dirname, "/data/hotdog.jpg")); const chat = new ChatOpenAI({ modelName: "gpt-4-vision-preview", maxTokens: 1024, }); const message = new HumanMessage({ content: [ { type: "text", text: "What's in this image?", }, { type: "image_url", image_url: { url: `data:image/jpeg;base64,${imageData.toString("base64")}`, }, }, ], }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([message]); // console.log({ res }); }); test("Test ChatOpenAI with a URL", async () => { const chat = new ChatOpenAI({ modelName: "gpt-4-vision-preview", maxTokens: 1024, }); const message = new HumanMessage({ content: [ { type: "text", text: "What does this image say?", }, { type: "image_url", image_url: "https://www.freecodecamp.org/news/content/images/2023/05/Screenshot-2023-05-29-at-5.40.38-PM.png", }, ], }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([message]); // console.log({ res }); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/chat_models.standard.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { ChatModelUnitTests } from "@langchain/standard-tests"; import { AIMessageChunk } from "@langchain/core/messages"; import { ChatOpenAI, ChatOpenAICallOptions } from "../chat_models.js"; class ChatOpenAIStandardUnitTests extends ChatModelUnitTests< ChatOpenAICallOptions, AIMessageChunk > { constructor() { super({ Cls: ChatOpenAI, chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: {}, }); // This must be set so method like `.bindTools` or `.withStructuredOutput` // which we call after instantiating the model will work. // (constructor will throw if API key is not set) process.env.OPENAI_API_KEY = "test"; } testChatModelInitApiKey() { // Unset the API key env var here so this test can properly check // the API key class arg. process.env.OPENAI_API_KEY = ""; super.testChatModelInitApiKey(); // Re-set the API key env var here so other tests can run properly. process.env.OPENAI_API_KEY = "test"; } } const testClass = new ChatOpenAIStandardUnitTests(); test("ChatOpenAIStandardUnitTests", () => { const testResults = testClass.runTests(); expect(testResults).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/chat_models.int.test.ts
/* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { test, jest, expect } from "@jest/globals"; import { AIMessageChunk, BaseMessage, ChatMessage, HumanMessage, SystemMessage, } from "@langchain/core/messages"; import { ChatGeneration, LLMResult } from "@langchain/core/outputs"; import { ChatPromptValue } from "@langchain/core/prompt_values"; import { PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { InMemoryCache } from "@langchain/core/caches"; import { concat } from "@langchain/core/utils/stream"; import { ChatOpenAI } from "../chat_models.js"; // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; test("Test ChatOpenAI Generate", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message], [message]]); expect(res.generations.length).toBe(2); for (const generation of res.generations) { expect(generation.length).toBe(2); for (const message of generation) { // console.log(message.text); expect(typeof message.text).toBe("string"); } } // console.log({ res }); }); test("Test ChatOpenAI invoke fails with proper error", async () => { const chat = new ChatOpenAI({ model: "gpt-4o-mini", maxTokens: 10, n: 2, apiKey: "bad", }); const message = new HumanMessage("Hello!"); let authError; try { await chat.invoke([message]); } catch (e) { authError = e; } expect(authError).toBeDefined(); expect((authError as any)?.lc_error_code).toEqual("MODEL_AUTHENTICATION"); }); test("Test ChatOpenAI invoke to unknown model fails with proper error", async () => { const chat = new ChatOpenAI({ model: 
"badbadbad", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); let authError; try { await chat.invoke([message]); } catch (e) { authError = e; } expect(authError).toBeDefined(); expect((authError as any)?.lc_error_code).toEqual("MODEL_NOT_FOUND"); }); test("Test ChatOpenAI Generate throws when one of the calls fails", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); await expect(() => chat.generate([[message], [message]], { signal: AbortSignal.timeout(10), }) ).rejects.toThrow(); }); test("Test ChatOpenAI tokenUsage", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let tokenUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, }; const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); const message = new HumanMessage("Hello"); await model.invoke([message]); expect(tokenUsage.promptTokens).toBeGreaterThan(0); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test ChatOpenAI tokenUsage with a batch", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let tokenUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, }; const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo", callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); await model.generate([ [new HumanMessage("Hello")], [new HumanMessage("Hi")], ]); expect(tokenUsage.promptTokens).toBeGreaterThan(0); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test ChatOpenAI in streaming mode", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let nrNewTokens = 0; let streamedCompletion = ""; const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxTokens: 10, callbacks: [ { async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }, ], }); const message = new HumanMessage("Hello!"); const result = await model.invoke([message]); // console.log(result); expect(nrNewTokens > 0).toBe(true); expect(result.content).toBe(streamedCompletion); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }, 10000); test("Test ChatOpenAI in streaming mode with n > 1 and multiple prompts", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let nrNewTokens = 0; const streamedCompletions = [ ["", ""], ["", ""], ]; const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxTokens: 10, n: 2, callbacks: [ { async handleLLMNewToken(token: string, idx: NewTokenIndices) { nrNewTokens += 1; streamedCompletions[idx.prompt][idx.completion] += token; }, }, ], }); const message1 = new HumanMessage("Hello!"); const message2 = new HumanMessage("Bye!"); const result = await model.generate([[message1], [message2]]); // console.log(result.generations); expect(nrNewTokens > 0).toBe(true); expect(result.generations.map((g) => g.map((gg) => gg.text))).toEqual( streamedCompletions ); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }, 10000); test("Test ChatOpenAI prompt value", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); const res = await chat.generatePrompt([new ChatPromptValue([message])]); expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(2); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for (const g of generation) { // console.log(g.text); } } // console.log({ res }); }); test("OpenAI Chat, docs, prompt templates", async () => { const chat = new ChatOpenAI({ temperature: 0, maxTokens: 10 }); const systemPrompt = PromptTemplate.fromTemplate( "You are a helpful assistant that translates {input_language} to {output_language}." 
); const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), HumanMessagePromptTemplate.fromTemplate("{text}"), ]); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", output_language: "French", text: "I love programming.", }), ]); // console.log(responseA.generations); }, 5000); test("Test OpenAI with stop", async () => { const model = new ChatOpenAI({ maxTokens: 5 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke([new HumanMessage("Print hello world")], { stop: ["world"], }); // console.log({ res }); }); test("Test OpenAI with stop in object", async () => { const model = new ChatOpenAI({ maxTokens: 5 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke([new HumanMessage("Print hello world")], { stop: ["world"], }); // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { const model = new ChatOpenAI({ maxTokens: 5, maxRetries: 0 }); await expect(() => model.invoke([new HumanMessage("Print hello world")], { options: { timeout: 10 }, }) ).rejects.toThrow(); }, 5000); test("Test OpenAI with timeout in call options and node adapter", async () => { const model = new ChatOpenAI({ maxTokens: 5, maxRetries: 0 }); await expect(() => model.invoke([new HumanMessage("Print hello world")], { options: { timeout: 10 }, }) ).rejects.toThrow(); }, 5000); test("Test OpenAI with signal in call options", async () => { const model = new ChatOpenAI({ maxTokens: 5 }); const controller = new AbortController(); await expect(() => { const ret = model.invoke([new HumanMessage("Print hello world")], { options: { signal: controller.signal }, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); 
test("Test OpenAI with signal in call options and node adapter", async () => { const model = new ChatOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const controller = new AbortController(); await expect(() => { const ret = model.invoke([new HumanMessage("Print hello world")], { options: { signal: controller.signal }, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); function createSystemChatMessage(text: string, name?: string) { const msg = new SystemMessage(text); msg.name = name; return msg; } function createSampleMessages(): BaseMessage[] { // same example as in https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb return [ createSystemChatMessage( "You are a helpful, pattern-following assistant that translates corporate jargon into plain English." ), createSystemChatMessage( "New synergies will help drive top-line growth.", "example_user" ), createSystemChatMessage( "Things working well together will increase revenue.", "example_assistant" ), createSystemChatMessage( "Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.", "example_user" ), createSystemChatMessage( "Let's talk later when we're less busy about how to do better.", "example_assistant" ), new HumanMessage( "This late pivot means we don't have time to boil the ocean for the client deliverable." 
), ]; } test("getNumTokensFromMessages gpt-3.5-turbo-0301 model for sample input", async () => { const messages: BaseMessage[] = createSampleMessages(); const chat = new ChatOpenAI({ openAIApiKey: "dummy", modelName: "gpt-3.5-turbo-0301", }); const { totalCount } = await chat.getNumTokensFromMessages(messages); expect(totalCount).toBe(127); }); test("getNumTokensFromMessages gpt-4-0314 model for sample input", async () => { const messages: BaseMessage[] = createSampleMessages(); const chat = new ChatOpenAI({ openAIApiKey: "dummy", modelName: "gpt-4-0314", }); const { totalCount } = await chat.getNumTokensFromMessages(messages); expect(totalCount).toBe(129); }); test("Test OpenAI with specific roles in ChatMessage", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10 }); const system_message = new ChatMessage( "You are to chat with a user.", "system" ); const user_message = new ChatMessage("Hello!", "user"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([system_message, user_message]); // console.log({ res }); }); test("Test ChatOpenAI stream method", async () => { const model = new ChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo" }); const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); }); test("Test ChatOpenAI stream method with abort", async () => { await expect(async () => { const model = new ChatOpenAI({ maxTokens: 100, modelName: "gpt-3.5-turbo", }); const stream = await model.stream( "How is your day going? 
Be extremely verbose.", { signal: AbortSignal.timeout(500), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Test ChatOpenAI stream method with early break", async () => { const model = new ChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo" }); const stream = await model.stream( "How is your day going? Be extremely verbose." ); let i = 0; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); i += 1; if (i > 10) { break; } } }); test("Test ChatOpenAI stream method, timeout error thrown from SDK", async () => { await expect(async () => { const model = new ChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo", timeout: 1, maxRetries: 0, }); const stream = await model.stream( "How is your day going? Be extremely verbose." ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Function calling with streaming", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let finalResult: BaseMessage | undefined; const modelForFunctionCalling = new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0, callbacks: [ { handleLLMEnd(output: LLMResult) { finalResult = (output.generations[0][0] as ChatGeneration).message; }, }, ], }); const stream = await modelForFunctionCalling.stream( "What is the weather in New York?", { functions: [ { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, ], function_call: { name: "get_current_weather", }, } ); const chunks = []; let streamedOutput; for await (const chunk of stream) { chunks.push(chunk); if (!streamedOutput) { streamedOutput = chunk; } else if (chunk) { streamedOutput = streamedOutput.concat(chunk); } } expect(finalResult).toEqual(streamedOutput); expect(chunks.length).toBeGreaterThan(1); expect(finalResult?.additional_kwargs?.function_call?.name).toBe( "get_current_weather" ); // console.log( // JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? 
"") // .location // ); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("ChatOpenAI can cache generations", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, cache: memoryCache, }); const message = new HumanMessage("Hello"); const res = await chat.generate([[message], [message]]); expect(res.generations.length).toBe(2); expect(lookupSpy).toHaveBeenCalledTimes(2); expect(updateSpy).toHaveBeenCalledTimes(2); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); test("ChatOpenAI can write and read cached generations", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 100, n: 1, cache: memoryCache, }); const generateUncachedSpy = jest.spyOn(chat, "_generateUncached"); const messages = [ [ new HumanMessage("what color is the sky?"), new HumanMessage("what color is the ocean?"), ], [new HumanMessage("hello")], ]; const response1 = await chat.generate(messages); expect(generateUncachedSpy).toHaveBeenCalledTimes(1); generateUncachedSpy.mockRestore(); const response2 = await chat.generate(messages); expect(generateUncachedSpy).toHaveBeenCalledTimes(0); // Request should be cached, no need to generate. 
generateUncachedSpy.mockRestore(); expect(response1.generations.length).toBe(2); expect(response2.generations).toEqual(response1.generations); expect(lookupSpy).toHaveBeenCalledTimes(4); expect(updateSpy).toHaveBeenCalledTimes(2); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); test("ChatOpenAI should not reuse cache if function call args have changed", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 100, n: 1, cache: memoryCache, }); const generateUncachedSpy = jest.spyOn(chat, "_generateUncached"); const messages = [ [ new HumanMessage("what color is the sky?"), new HumanMessage("what color is the ocean?"), ], [new HumanMessage("hello")], ]; const response1 = await chat.generate(messages); expect(generateUncachedSpy).toHaveBeenCalledTimes(1); generateUncachedSpy.mockRestore(); const response2 = await chat.generate(messages, { functions: [ { name: "extractor", description: "Extract fields from the input", parameters: { type: "object", properties: { tone: { type: "string", description: "the tone of the input", }, }, required: ["tone"], }, }, ], function_call: { name: "extractor", }, }); expect(generateUncachedSpy).toHaveBeenCalledTimes(0); // Request should not be cached since it's being called with different function call args expect(response1.generations.length).toBe(2); expect( (response2.generations[0][0] as ChatGeneration).message.additional_kwargs .function_call?.name ?? 
"" ).toEqual("extractor"); const response3 = await chat.generate(messages, { functions: [ { name: "extractor", description: "Extract fields from the input", parameters: { type: "object", properties: { tone: { type: "string", description: "the tone of the input", }, }, required: ["tone"], }, }, ], function_call: { name: "extractor", }, }); expect(response2.generations).toEqual(response3.generations); expect(lookupSpy).toHaveBeenCalledTimes(6); expect(updateSpy).toHaveBeenCalledTimes(4); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); test("Test ChatOpenAI token usage reporting for streaming function calls", async () => { const humanMessage = "What a beautiful day!"; const extractionFunctionSchema = { name: "extractor", description: "Extracts fields from the input.", parameters: { type: "object", properties: { tone: { type: "string", enum: ["positive", "negative"], description: "The overall tone of the input", }, word_count: { type: "number", description: "The number of words in the input", }, chat_response: { type: "string", description: "A response to the human's input", }, }, required: ["tone", "word_count", "chat_response"], }, }; const callOptions = { seed: 42, functions: [extractionFunctionSchema], function_call: { name: "extractor" }, }; const constructorArgs = { model: "gpt-3.5-turbo", temperature: 0, }; const streamingModel = new ChatOpenAI({ ...constructorArgs, streaming: true, }).bind(callOptions); const nonStreamingModel = new ChatOpenAI({ ...constructorArgs, streaming: false, }).bind(callOptions); const [nonStreamingResult, streamingResult] = await Promise.all([ nonStreamingModel.invoke([new HumanMessage(humanMessage)]), streamingModel.invoke([new HumanMessage(humanMessage)]), ]); const tokenUsageStreaming = nonStreamingResult.usage_metadata; const tokenUsageNonStreaming = streamingResult.usage_metadata; if (!tokenUsageStreaming || !tokenUsageNonStreaming) { throw new Error(`Token usage not found in response. 
Streaming: ${JSON.stringify(streamingResult || {})} Non-streaming: ${JSON.stringify(nonStreamingResult || {})}`); } if ( nonStreamingResult.additional_kwargs.function_call?.arguments && streamingResult.additional_kwargs.function_call?.arguments ) { const nonStreamingArguments = JSON.stringify( JSON.parse(nonStreamingResult.additional_kwargs.function_call.arguments) ); const streamingArguments = JSON.stringify( JSON.parse(streamingResult.additional_kwargs.function_call.arguments) ); if (nonStreamingArguments === streamingArguments) { expect(tokenUsageStreaming).toEqual(tokenUsageNonStreaming); } } expect(tokenUsageStreaming.input_tokens).toBeGreaterThan(0); expect(tokenUsageStreaming.output_tokens).toBeGreaterThan(0); expect(tokenUsageStreaming.total_tokens).toBeGreaterThan(0); expect(tokenUsageNonStreaming.input_tokens).toBeGreaterThan(0); expect(tokenUsageNonStreaming.output_tokens).toBeGreaterThan(0); expect(tokenUsageNonStreaming.total_tokens).toBeGreaterThan(0); }); test("Test ChatOpenAI token usage reporting for streaming calls", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let streamingTokenUsed = -1; let nonStreamingTokenUsed = -1; const systemPrompt = "You are a helpful assistant"; const question = "What is the color of the night sky?"; const streamingModel = new ChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; // console.log( // "streaming usage", // output.llmOutput?.estimatedTokenUsage // ); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }); const nonStreamingModel = new ChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: false, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; // console.log("non-streaming usage", output.llmOutput?.estimated); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }); const [nonStreamingResult, streamingResult] = await Promise.all([ nonStreamingModel.generate([ [new SystemMessage(systemPrompt), new HumanMessage(question)], ]), streamingModel.generate([ [new SystemMessage(systemPrompt), new HumanMessage(question)], ]), ]); expect(streamingTokenUsed).toBeGreaterThan(-1); if ( nonStreamingResult.generations[0][0].text === streamingResult.generations[0][0].text ) { expect(streamingTokenUsed).toEqual(nonStreamingTokenUsed); } } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Finish reason is 'stop'", async () => { const model = new ChatOpenAI(); const response = await model.stream("Hello, how are you?"); let finalResult: AIMessageChunk | undefined; for await (const chunk of response) { if (finalResult) { finalResult = finalResult.concat(chunk); } else { finalResult = chunk; } } expect(finalResult).toBeTruthy(); 
expect(finalResult?.response_metadata?.finish_reason).toBe("stop"); }); test("Streaming tokens can be found in usage_metadata field", async () => { const model = new ChatOpenAI(); const response = await model.stream("Hello, how are you?"); let finalResult: AIMessageChunk | undefined; for await (const chunk of response) { if (finalResult) { finalResult = finalResult.concat(chunk); } else { finalResult = chunk; } } // console.log({ // usage_metadata: finalResult?.usage_metadata, // }); expect(finalResult).toBeTruthy(); expect(finalResult?.usage_metadata).toBeTruthy(); expect(finalResult?.usage_metadata?.input_tokens).toBeGreaterThan(0); expect(finalResult?.usage_metadata?.output_tokens).toBeGreaterThan(0); expect(finalResult?.usage_metadata?.total_tokens).toBeGreaterThan(0); }); test("streaming: true tokens can be found in usage_metadata field", async () => { const model = new ChatOpenAI({ streaming: true, }); const response = await model.invoke("Hello, how are you?", { stream_options: { include_usage: true, }, }); // console.log({ // usage_metadata: response?.usage_metadata, // }); expect(response).toBeTruthy(); expect(response?.usage_metadata).toBeTruthy(); expect(response?.usage_metadata?.input_tokens).toBeGreaterThan(0); expect(response?.usage_metadata?.output_tokens).toBeGreaterThan(0); expect(response?.usage_metadata?.total_tokens).toBeGreaterThan(0); }); test("streaming: streamUsage will not override stream_options", async () => { const model = new ChatOpenAI({ streaming: true, }); const response = await model.invoke("Hello, how are you?", { stream_options: { include_usage: false }, }); // console.log({ // usage_metadata: response?.usage_metadata, // }); expect(response).toBeTruthy(); expect(response?.usage_metadata).toBeFalsy(); }); test("streaming: streamUsage default is true", async () => { const model = new ChatOpenAI(); const response = await model.invoke("Hello, how are you?"); // console.log({ // usage_metadata: response?.usage_metadata, // }); 
expect(response).toBeTruthy(); expect(response?.usage_metadata).toBeTruthy(); expect(response?.usage_metadata?.input_tokens).toBeGreaterThan(0); expect(response?.usage_metadata?.output_tokens).toBeGreaterThan(0); expect(response?.usage_metadata?.total_tokens).toBeGreaterThan(0); }); test("populates ID field on AIMessage", async () => { const model = new ChatOpenAI(); const response = await model.invoke("Hell"); // console.log({ // invokeId: response.id, // }); expect(response.id?.length).toBeGreaterThan(1); expect(response?.id?.startsWith("chatcmpl-")).toBe(true); // Streaming let finalChunk: AIMessageChunk | undefined; for await (const chunk of await model.stream("Hell")) { if (!finalChunk) { finalChunk = chunk; } else { finalChunk = finalChunk.concat(chunk); } } // console.log({ // streamId: finalChunk?.id, // }); expect(finalChunk?.id?.length).toBeGreaterThan(1); expect(finalChunk?.id?.startsWith("chatcmpl-")).toBe(true); }); test("Test ChatOpenAI stream method", async () => { const model = new ChatOpenAI({ model: "o1-mini" }); const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { console.log(chunk); chunks.push(chunk); } expect(chunks.length).toEqual(1); }); describe("Audio output", () => { test("Audio output", async () => { const model = new ChatOpenAI({ maxRetries: 0, model: "gpt-4o-audio-preview", temperature: 0, modalities: ["text", "audio"], audio: { voice: "alloy", format: "wav", }, }); const response = await model.invoke("Make me an audio clip of you yelling"); expect(response.additional_kwargs.audio).toBeTruthy(); if (!response.additional_kwargs.audio) { throw new Error("Not in additional kwargs"); } // console.log( // "response.additional_kwargs.audio", // response.additional_kwargs.audio // ); expect(Object.keys(response.additional_kwargs.audio).sort()).toEqual([ "data", "expires_at", "id", "transcript", ]); }); test("Audio output can stream", async () => { const model = new ChatOpenAI({ 
maxRetries: 0, model: "gpt-4o-audio-preview", temperature: 0, modalities: ["text", "audio"], audio: { voice: "alloy", format: "pcm16", }, }); const stream = await model.stream("Make me an audio clip of you yelling"); let finalMsg: AIMessageChunk | undefined; for await (const chunk of stream) { finalMsg = finalMsg ? concat(finalMsg, chunk) : chunk; } if (!finalMsg) { throw new Error("No final message found"); } expect(finalMsg.additional_kwargs.audio).toBeTruthy(); if (!finalMsg.additional_kwargs.audio) { throw new Error("Not in additional kwargs"); } // console.log( // "response.additional_kwargs.audio", // finalMsg.additional_kwargs.audio // ); expect(Object.keys(finalMsg.additional_kwargs.audio).sort()).toEqual([ "data", "expires_at", "id", "index", "transcript", ]); }); test("Can bind audio output args", async () => { const model = new ChatOpenAI({ maxRetries: 0, model: "gpt-4o-audio-preview", temperature: 0, }).bind({ modalities: ["text", "audio"], audio: { voice: "alloy", format: "wav", }, }); const response = await model.invoke("Make me an audio clip of you yelling"); expect(response.additional_kwargs.audio).toBeTruthy(); if (!response.additional_kwargs.audio) { throw new Error("Not in additional kwargs"); } expect(Object.keys(response.additional_kwargs.audio).sort()).toEqual([ "data", "expires_at", "id", "transcript", ]); }); test("Audio output in chat history", async () => { const model = new ChatOpenAI({ model: "gpt-4o-audio-preview", temperature: 0, modalities: ["text", "audio"], audio: { voice: "alloy", format: "wav", }, maxRetries: 0, }); const input = [ { role: "user", content: "Make me an audio clip of you yelling", }, ]; const response = await model.invoke(input); expect(response.additional_kwargs.audio).toBeTruthy(); expect( (response.additional_kwargs.audio as Record<string, any>).transcript .length ).toBeGreaterThan(1); // console.log("response", (response.additional_kwargs.audio as any).transcript); const response2 = await model.invoke([ 
...input, response, { role: "user", content: "What did you just say?", }, ]); // console.log("response2", (response2.additional_kwargs.audio as any).transcript); expect(response2.additional_kwargs.audio).toBeTruthy(); expect( (response2.additional_kwargs.audio as Record<string, any>).transcript .length ).toBeGreaterThan(1); }); test("Users can pass audio as inputs", async () => { const model = new ChatOpenAI({ maxRetries: 0, model: "gpt-4o-audio-preview", temperature: 0, modalities: ["text", "audio"], audio: { voice: "alloy", format: "wav", }, }); const response = await model.invoke("Make me an audio clip of you yelling"); // console.log("response", (response.additional_kwargs.audio as any).transcript); expect(response.additional_kwargs.audio).toBeTruthy(); expect( (response.additional_kwargs.audio as Record<string, any>).transcript .length ).toBeGreaterThan(1); const userInput = { type: "input_audio", input_audio: { data: (response.additional_kwargs.audio as any).data, format: "wav", }, }; const userInputRes = await model.invoke([ new HumanMessage({ content: [userInput], }), ]); // console.log("userInputRes.content", userInputRes.content); // console.log("userInputRes.additional_kwargs.audio", userInputRes.additional_kwargs.audio); expect(userInputRes.additional_kwargs.audio).toBeTruthy(); expect( (userInputRes.additional_kwargs.audio as Record<string, any>).transcript .length ).toBeGreaterThan(1); }); }); test("Can stream o1 requests", async () => { const model = new ChatOpenAI({ model: "o1-mini", }); const stream = await model.stream( "Write me a very simple hello world program in Python. Ensure it is wrapped in a function called 'hello_world' and has descriptive comments." ); let finalMsg: AIMessageChunk | undefined; let numChunks = 0; for await (const chunk of stream) { finalMsg = finalMsg ? 
concat(finalMsg, chunk) : chunk; numChunks += 1; } expect(finalMsg).toBeTruthy(); if (!finalMsg) { throw new Error("No final message found"); } if (typeof finalMsg.content === "string") { expect(finalMsg.content.length).toBeGreaterThan(10); } else { expect(finalMsg.content.length).toBeGreaterThanOrEqual(1); } // A expect(numChunks).toBeGreaterThan(3); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/legacy.int.test.ts
/* eslint-disable no-process-env */ import { expect, test } from "@jest/globals"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { OpenAIChat } from "../legacy.js"; // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; test("Test OpenAI", async () => { const model = new OpenAIChat({ modelName: "gpt-3.5-turbo", maxTokens: 10 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Print hello world"); // console.log({ res }); }); test("Test OpenAI with prefix messages", async () => { const model = new OpenAIChat({ prefixMessages: [ { role: "user", content: "My name is John" }, { role: "assistant", content: "Hi there" }, ], maxTokens: 10, }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("What is my name"); // console.log({ res }); }); test("Test OpenAI in streaming mode", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let nrNewTokens = 0; let streamedCompletion = ""; const model = new OpenAIChat({ maxTokens: 10, modelName: "gpt-3.5-turbo", streaming: true, callbackManager: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }), }); const res = await model.invoke("Print hello world"); // console.log({ res }); expect(nrNewTokens > 0).toBe(true); expect(res).toBe(streamedCompletion); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }, 30000); test("Test OpenAI with stop", async () => { const model = new OpenAIChat({ maxTokens: 5 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); // console.log({ res }); }); test("Test OpenAI with stop in object", async () => { const model = new OpenAIChat({ maxTokens: 5 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Print hello world", { stop: ["world"] }); // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { const model = new OpenAIChat({ maxTokens: 5 }); await expect(() => model.invoke("Print hello world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Test OpenAI with timeout in call options and node adapter", async () => { const model = new OpenAIChat({ maxTokens: 5 }); await expect(() => model.invoke("Print hello world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Test OpenAI with signal in call options", async () => { const model = new OpenAIChat({ maxTokens: 5 }); const controller = new AbortController(); await expect(() => { const ret = model.invoke("Print hello world", { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test OpenAI with signal in call 
options and node adapter", async () => { const model = new OpenAIChat({ maxTokens: 5 }); const controller = new AbortController(); await expect(() => { const ret = model.invoke("Print hello world", { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test OpenAIChat stream method", async () => { const model = new OpenAIChat({ maxTokens: 50, modelName: "gpt-3.5-turbo" }); const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); // console.log(chunks); } expect(chunks.length).toBeGreaterThan(1); }); test("Test OpenAIChat stream method with abort", async () => { await expect(async () => { const model = new OpenAIChat({ maxTokens: 50, modelName: "gpt-3.5-turbo" }); const stream = await model.stream( "How is your day going? Be extremely verbose.", { signal: AbortSignal.timeout(1000), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Test OpenAIChat stream method with early break", async () => { const model = new OpenAIChat({ maxTokens: 50, modelName: "gpt-3.5-turbo" }); const stream = await model.stream( "How is your day going? Be extremely verbose." ); let i = 0; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); i += 1; if (i > 5) { break; } } });
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/embeddings.int.test.ts
import { test, expect } from "@jest/globals";
import { OpenAIEmbeddings } from "../embeddings.js";

// Integration tests for OpenAIEmbeddings. These call the live OpenAI API,
// so they presumably require a valid OPENAI_API_KEY in the environment —
// confirm against the CI configuration.

// Shared fixture: six short documents used by the batching/error tests.
const SAMPLE_DOCUMENTS = [
  "Hello world",
  "Bye bye",
  "Hello world",
  "Bye bye",
  "Hello world",
  "Bye bye",
];

test("Test OpenAIEmbeddings.embedQuery", async () => {
  const embedder = new OpenAIEmbeddings();
  const vector = await embedder.embedQuery("Hello world");
  // A query embedding is a flat numeric array.
  expect(typeof vector[0]).toBe("number");
});

test("Test OpenAIEmbeddings.embedDocuments", async () => {
  const embedder = new OpenAIEmbeddings();
  const vectors = await embedder.embedDocuments(["Hello world", "Bye bye"]);
  // One embedding per input document, each a numeric array.
  expect(vectors).toHaveLength(2);
  for (const vector of vectors) {
    expect(typeof vector[0]).toBe("number");
  }
});

test("Test OpenAIEmbeddings concurrency", async () => {
  // batchSize 1 forces one request per document; maxConcurrency 2 caps
  // how many of those requests run in parallel.
  const embedder = new OpenAIEmbeddings({
    batchSize: 1,
    maxConcurrency: 2,
  });
  const vectors = await embedder.embedDocuments(SAMPLE_DOCUMENTS);
  expect(vectors).toHaveLength(6);
  // No embedding may come back malformed (non-numeric first element).
  expect(vectors.find((vector) => typeof vector[0] !== "number")).toBe(
    undefined
  );
});

test("Test timeout error thrown from SDK", async () => {
  // A 1 ms timeout with no retries must surface the SDK's timeout error.
  await expect(async () => {
    const embedder = new OpenAIEmbeddings({
      timeout: 1,
      maxRetries: 0,
    });
    await embedder.embedDocuments(SAMPLE_DOCUMENTS);
  }).rejects.toThrow();
});

test("Test OpenAI embeddings with an invalid org throws", async () => {
  // A bogus organization id must be rejected by the API.
  await expect(async () => {
    const embedder = new OpenAIEmbeddings({
      configuration: {
        organization: "NOT_REAL",
      },
    });
    await embedder.embedDocuments(SAMPLE_DOCUMENTS);
  }).rejects.toThrow();
});

test("Test OpenAIEmbeddings.embedQuery with v3 and dimensions", async () => {
  // v3 embedding models accept a `dimensions` override for the vector size.
  const embedder = new OpenAIEmbeddings({
    modelName: "text-embedding-3-small",
    dimensions: 127,
  });
  const vector = await embedder.embedQuery("Hello world");
  expect(typeof vector[0]).toBe("number");
  expect(vector.length).toBe(127);
});

test("Test OpenAIEmbeddings.embedDocuments with v3 and dimensions", async () => {
  const embedder = new OpenAIEmbeddings({
    modelName: "text-embedding-3-small",
    dimensions: 127,
  });
  const vectors = await embedder.embedDocuments(["Hello world", "Bye bye"]);
  expect(vectors).toHaveLength(2);
  // Every embedding must honor the requested dimensionality.
  for (const vector of vectors) {
    expect(typeof vector[0]).toBe("number");
    expect(vector.length).toBe(127);
  }
});
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/chat_models.test.ts
/* eslint-disable @typescript-eslint/no-explicit-any, no-process-env */ import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { it, expect, describe, beforeAll, afterAll, jest } from "@jest/globals"; import { ChatOpenAI } from "../chat_models.js"; describe("strict tool calling", () => { const weatherTool = { type: "function" as const, function: { name: "get_current_weather", description: "Get the current weather in a location", parameters: zodToJsonSchema( z.object({ location: z.string().describe("The location to get the weather for"), }) ), }, }; // Store the original value of LANGCHAIN_TRACING_V2 let oldLangChainTracingValue: string | undefined; // Before all tests, save the current LANGCHAIN_TRACING_V2 value beforeAll(() => { oldLangChainTracingValue = process.env.LANGCHAIN_TRACING_V2; }); // After all tests, restore the original LANGCHAIN_TRACING_V2 value afterAll(() => { if (oldLangChainTracingValue !== undefined) { process.env.LANGCHAIN_TRACING_V2 = oldLangChainTracingValue; } else { // If it was undefined, remove the environment variable delete process.env.LANGCHAIN_TRACING_V2; } }); it("Can accept strict as a call arg via .bindTools", async () => { const mockFetch = jest.fn<(url: any, options?: any) => Promise<any>>(); mockFetch.mockImplementation((url, options) => { // Store the request details for later inspection mockFetch.mock.calls.push([url, options]); // Return a mock response return Promise.resolve({ ok: true, json: () => Promise.resolve({}), }); }); const model = new ChatOpenAI({ model: "gpt-4", apiKey: "test-key", configuration: { fetch: mockFetch, }, maxRetries: 0, }); const modelWithTools = model.bindTools([weatherTool], { strict: true }); // This will fail since we're not returning a valid response in our mocked fetch function. 
await expect( modelWithTools.invoke("What's the weather like?") ).rejects.toThrow(); expect(mockFetch).toHaveBeenCalled(); const [_url, options] = mockFetch.mock.calls[0]; if (options && options.body) { expect(JSON.parse(options.body).tools[0].function).toHaveProperty( "strict", true ); } else { throw new Error("Body not found in request."); } }); it("Can accept strict as a call arg via .bind", async () => { const mockFetch = jest.fn<(url: any, options?: any) => Promise<any>>(); mockFetch.mockImplementation((url, options) => { // Store the request details for later inspection mockFetch.mock.calls.push([url, options]); // Return a mock response return Promise.resolve({ ok: true, json: () => Promise.resolve({}), }); }); const model = new ChatOpenAI({ model: "gpt-4", apiKey: "test-key", configuration: { fetch: mockFetch, }, maxRetries: 0, }); const modelWithTools = model.bind({ tools: [weatherTool], strict: true, }); // This will fail since we're not returning a valid response in our mocked fetch function. 
await expect( modelWithTools.invoke("What's the weather like?") ).rejects.toThrow(); expect(mockFetch).toHaveBeenCalled(); const [_url, options] = mockFetch.mock.calls[0]; if (options && options.body) { expect(JSON.parse(options.body).tools[0].function).toHaveProperty( "strict", true ); } else { throw new Error("Body not found in request."); } }); it("Strict is false if supportsStrictToolCalling is false", async () => { const mockFetch = jest.fn<(url: any, options?: any) => Promise<any>>(); mockFetch.mockImplementation((url, options) => { // Store the request details for later inspection mockFetch.mock.calls.push([url, options]); // Return a mock response return Promise.resolve({ ok: true, json: () => Promise.resolve({}), }); }); const model = new ChatOpenAI({ model: "gpt-4", apiKey: "test-key", configuration: { fetch: mockFetch, }, maxRetries: 0, supportsStrictToolCalling: false, }); // Do NOT pass `strict` here since we're checking that it's set to true by default const modelWithTools = model.bindTools([weatherTool]); // This will fail since we're not returning a valid response in our mocked fetch function. 
await expect( modelWithTools.invoke("What's the weather like?") ).rejects.toThrow(); expect(mockFetch).toHaveBeenCalled(); const [_url, options] = mockFetch.mock.calls[0]; if (options && options.body) { expect(JSON.parse(options.body).tools[0].function).toHaveProperty( "strict", false ); } else { throw new Error("Body not found in request."); } }); it("Strict is set to true if passed in .withStructuredOutput", async () => { const mockFetch = jest.fn<(url: any, options?: any) => Promise<any>>(); mockFetch.mockImplementation((url, options) => { // Store the request details for later inspection mockFetch.mock.calls.push([url, options]); // Return a mock response return Promise.resolve({ ok: true, json: () => Promise.resolve({}), }); }); const model = new ChatOpenAI({ model: "doesnt-start-with-gpt-4", apiKey: "test-key", configuration: { fetch: mockFetch, }, maxRetries: 0, supportsStrictToolCalling: true, }); const modelWithTools = model.withStructuredOutput( z.object({ location: z.string().describe("The location to get the weather for"), }), { strict: true, } ); // This will fail since we're not returning a valid response in our mocked fetch function. 
await expect( modelWithTools.invoke("What's the weather like?") ).rejects.toThrow(); expect(mockFetch).toHaveBeenCalled(); const [_url, options] = mockFetch.mock.calls[0]; if (options && options.body) { const body = JSON.parse(options.body); expect(body.tools[0].function).toHaveProperty("strict", true); } else { throw new Error("Body not found in request."); } }); it("Strict is NOT passed to OpenAI if NOT passed in .withStructuredOutput", async () => { const mockFetch = jest.fn<(url: any, options?: any) => Promise<any>>(); mockFetch.mockImplementation((url, options) => { // Store the request details for later inspection mockFetch.mock.calls.push([url, options]); // Return a mock response return Promise.resolve({ ok: true, json: () => Promise.resolve({}), }); }); const model = new ChatOpenAI({ model: "doesnt-start-with-gpt-4", apiKey: "test-key", configuration: { fetch: mockFetch, }, maxRetries: 0, }); const modelWithTools = model.withStructuredOutput( z.object({ location: z.string().describe("The location to get the weather for"), }) ); // This will fail since we're not returning a valid response in our mocked fetch function. await expect( modelWithTools.invoke("What's the weather like?") ).rejects.toThrow(); expect(mockFetch).toHaveBeenCalled(); const [_url, options] = mockFetch.mock.calls[0]; if (options && options.body) { const body = JSON.parse(options.body); expect(body.tools[0].function).not.toHaveProperty("strict"); } else { throw new Error("Body not found in request."); } }); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts
import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { AIMessage, AIMessageChunk } from "@langchain/core/messages"; import { test, expect, describe, it } from "@jest/globals"; import { concat } from "@langchain/core/utils/stream"; import { ChatOpenAI } from "../chat_models.js"; test("withStructuredOutput zod schema function calling", async () => { const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-4-turbo-preview", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are VERY bad at math and must always use a calculator."], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput zod schema streaming", async () => { const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-4-turbo-preview", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are VERY bad at math and must always use a calculator."], ["human", "Please help me!! 
What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const stream = await chain.stream({}); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); const result = chunks.at(-1) ?? {}; expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput zod schema JSON mode", async () => { const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-4-turbo-preview", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", method: "jsonMode", } ); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! 
What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema function calling", async () => { const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-4-turbo-preview", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput({ schema: zodToJsonSchema(calculatorSchema), name: "calculator", }); const prompt = ChatPromptTemplate.fromMessages([ ["system", `You are VERY bad at math and must always use a calculator.`], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput OpenAI function definition function calling", async () => { const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-4-turbo-preview", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput({ name: "calculator", parameters: zodToJsonSchema(calculatorSchema), }); const prompt = ChatPromptTemplate.fromMessages([ ["system", `You are VERY bad at math and must always use a calculator.`], ["human", "Please help me!! 
What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema JSON mode", async () => { const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-4-turbo-preview", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( zodToJsonSchema(calculatorSchema), { name: "calculator", method: "jsonMode", } ); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema", async () => { const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-4-turbo-preview", }); const jsonSchema = { title: "calculator", description: "A simple calculator", type: "object", properties: { operation: { type: "string", enum: ["add", "subtract", "multiply", "divide"], }, number1: { type: "number" }, number2: { type: "number" }, }, }; const modelWithStructuredOutput = model.withStructuredOutput(jsonSchema); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. 
Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput includeRaw true", async () => { const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-4-turbo-preview", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", includeRaw: true, } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are VERY bad at math and must always use a calculator."], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("parsed" in result).toBe(true); // Need to make TS happy :) if (!("parsed" in result)) { throw new Error("parsed not in result"); } const { parsed } = result; expect("operation" in parsed).toBe(true); expect("number1" in parsed).toBe(true); expect("number2" in parsed).toBe(true); expect("raw" in result).toBe(true); // Need to make TS happy :) if (!("raw" in result)) { throw new Error("raw not in result"); } const { raw } = result as { raw: AIMessage }; expect(raw.additional_kwargs.tool_calls?.length).toBeGreaterThan(0); expect(raw.additional_kwargs.tool_calls?.[0].function.name).toBe( "calculator" ); expect( "operation" in JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? 
"") ).toBe(true); expect( "number1" in JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? "") ).toBe(true); expect( "number2" in JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? "") ).toBe(true); }); test("parallelToolCalls param", async () => { const calculatorSchema = z .object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }) .describe("A tool to perform basic arithmetic operations"); const weatherSchema = z .object({ city: z.enum(["add", "subtract", "multiply", "divide"]), }) .describe("A tool to get the weather in a city"); const model = new ChatOpenAI({ model: "gpt-4o", temperature: 0, }).bindTools([ { type: "function", function: { name: "calculator", description: calculatorSchema.description, parameters: zodToJsonSchema(calculatorSchema), }, }, { type: "function", function: { name: "weather", description: weatherSchema.description, parameters: zodToJsonSchema(weatherSchema), }, }, ]); const response = await model.invoke( ["What is the weather in san francisco and what is 23716 times 27342?"], { parallel_tool_calls: false, } ); // console.log(response.tool_calls); expect(response.tool_calls?.length).toBe(1); }); test("Passing strict true forces the model to conform to the schema", async () => { const model = new ChatOpenAI({ model: "gpt-4o", temperature: 0, maxRetries: 0, }); const weatherTool = { type: "function" as const, function: { name: "get_current_weather", description: "Get the current weather in a location", parameters: zodToJsonSchema( z.object({ location: z.string().describe("The location to get the weather for"), }) ), }, }; const modelWithTools = model.bindTools([weatherTool], { strict: true, tool_choice: "get_current_weather", }); const result = await modelWithTools.invoke( "Whats the result of 173827 times 287326 divided by 2?" 
); // Expect at least one tool call, allow multiple expect(result.tool_calls?.length).toBeGreaterThanOrEqual(1); expect(result.tool_calls?.[0].name).toBe("get_current_weather"); expect(result.tool_calls?.[0].args).toHaveProperty("location"); console.log(result.tool_calls?.[0].args); }); describe("response_format: json_schema", () => { const weatherSchema = z.object({ city: z.string().describe("The city to get the weather for"), state: z.string().describe("The state to get the weather for"), zipCode: z.string().describe("The zip code to get the weather for"), unit: z .enum(["fahrenheit", "celsius"]) .describe("The unit to get the weather in"), }); it("can invoke", async () => { const model = new ChatOpenAI({ model: "gpt-4o-2024-08-06", }).bind({ response_format: { type: "json_schema", json_schema: { name: "get_current_weather", description: "Get the current weather in a location", schema: zodToJsonSchema(weatherSchema), strict: true, }, }, }); const response = await model.invoke( "What is the weather in San Francisco, 91626 CA?" ); const parsed = JSON.parse(response.content as string); expect(parsed).toHaveProperty("city"); expect(parsed).toHaveProperty("state"); expect(parsed).toHaveProperty("zipCode"); expect(parsed).toHaveProperty("unit"); }); it("can stream", async () => { const model = new ChatOpenAI({ model: "gpt-4o-2024-08-06", }).bind({ response_format: { type: "json_schema", json_schema: { name: "get_current_weather", description: "Get the current weather in a location", schema: zodToJsonSchema(weatherSchema), strict: true, }, }, }); const stream = await model.stream( "What is the weather in San Francisco, 91626 CA?" ); let full: AIMessageChunk | undefined; for await (const chunk of stream) { full = !full ? 
chunk : concat(full, chunk); } expect(full).toBeDefined(); if (!full) return; const parsed = JSON.parse(full.content as string); expect(parsed).toHaveProperty("city"); expect(parsed).toHaveProperty("state"); expect(parsed).toHaveProperty("zipCode"); expect(parsed).toHaveProperty("unit"); }); it("can invoke with a zod schema passed in", async () => { const model = new ChatOpenAI({ model: "gpt-4o-2024-08-06", }).bind({ response_format: { type: "json_schema", json_schema: { name: "get_current_weather", description: "Get the current weather in a location", schema: weatherSchema, strict: true, }, }, }); const response = await model.invoke( "What is the weather in San Francisco, 91626 CA?" ); const parsed = JSON.parse(response.content as string); expect(parsed).toHaveProperty("city"); expect(parsed).toHaveProperty("state"); expect(parsed).toHaveProperty("zipCode"); expect(parsed).toHaveProperty("unit"); }); it("can stream with a zod schema passed in", async () => { const model = new ChatOpenAI({ model: "gpt-4o-2024-08-06", }).bind({ response_format: { type: "json_schema", json_schema: { name: "get_current_weather", description: "Get the current weather in a location", schema: weatherSchema, strict: true, }, }, }); const stream = await model.stream( "What is the weather in San Francisco, 91626 CA?" ); let full: AIMessageChunk | undefined; for await (const chunk of stream) { full = !full ? chunk : concat(full, chunk); } expect(full).toBeDefined(); if (!full) return; const parsed = JSON.parse(full.content as string); expect(parsed).toHaveProperty("city"); expect(parsed).toHaveProperty("state"); expect(parsed).toHaveProperty("zipCode"); expect(parsed).toHaveProperty("unit"); }); it("can be invoked with WSO", async () => { const model = new ChatOpenAI({ model: "gpt-4o-2024-08-06", }).withStructuredOutput(weatherSchema, { name: "get_current_weather", method: "jsonSchema", strict: true, }); const response = await model.invoke( "What is the weather in San Francisco, 91626 CA?" 
); expect(response).toHaveProperty("city"); expect(response).toHaveProperty("state"); expect(response).toHaveProperty("zipCode"); expect(response).toHaveProperty("unit"); }); // Flaky test it.skip("can be streamed with WSO", async () => { const model = new ChatOpenAI({ model: "gpt-4o-2024-08-06", }).withStructuredOutput(weatherSchema, { name: "get_current_weather", method: "jsonSchema", strict: true, }); const stream = await model.stream( "What is the weather in San Francisco, 91626 CA?" ); // It should yield a single chunk let full: z.infer<typeof weatherSchema> | undefined; for await (const chunk of stream) { full = chunk; } expect(full).toBeDefined(); if (!full) return; expect(full).toHaveProperty("city"); expect(full).toHaveProperty("state"); expect(full).toHaveProperty("zipCode"); expect(full).toHaveProperty("unit"); }); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src/tests
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/azure/chat_models.standard.int.test.ts
/* eslint-disable no-process-env */
import { test, expect, beforeAll, afterAll } from "@jest/globals";
import { ChatModelIntegrationTests } from "@langchain/standard-tests";
import { AIMessageChunk } from "@langchain/core/messages";
import { AzureChatOpenAI } from "../../azure/chat_models.js";
import { ChatOpenAICallOptions } from "../../chat_models.js";

// Runs the shared LangChain standard integration test suite against
// AzureChatOpenAI. Azure credentials are sourced from TEST_AZURE_* fallbacks
// when the canonical AZURE_OPENAI_* variables are unset.

// Holds any pre-existing OPENAI_API_KEY so it can be restored after the run.
let openAIAPIKey: string | undefined;

beforeAll(() => {
  // Blank out OPENAI_API_KEY for the duration of the suite — presumably so
  // the Azure credentials are used instead of the plain OpenAI ones; confirm
  // against the AzureChatOpenAI credential-resolution logic.
  if (process.env.OPENAI_API_KEY) {
    openAIAPIKey = process.env.OPENAI_API_KEY;
    process.env.OPENAI_API_KEY = "";
  }
  // Fill each required Azure variable from its TEST_-prefixed fallback only
  // when it is not already set, so locally-configured values win.
  if (!process.env.AZURE_OPENAI_API_KEY) {
    process.env.AZURE_OPENAI_API_KEY = process.env.TEST_AZURE_OPENAI_API_KEY;
  }
  if (!process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME) {
    process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME =
      process.env.TEST_AZURE_OPENAI_API_DEPLOYMENT_NAME ??
      process.env.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME;
  }
  if (!process.env.AZURE_OPENAI_BASE_PATH) {
    process.env.AZURE_OPENAI_BASE_PATH =
      process.env.TEST_AZURE_OPENAI_BASE_PATH;
  }
  if (!process.env.AZURE_OPENAI_API_VERSION) {
    process.env.AZURE_OPENAI_API_VERSION =
      process.env.TEST_AZURE_OPENAI_API_VERSION;
  }
});

afterAll(() => {
  // Restore the OPENAI_API_KEY that was blanked in beforeAll.
  if (openAIAPIKey) {
    process.env.OPENAI_API_KEY = openAIAPIKey;
  }
});

// Standard-test subclass: declares AzureChatOpenAI's capabilities and
// overrides the cases that cannot run against Azure.
class AzureChatOpenAIStandardIntegrationTests extends ChatModelIntegrationTests<
  ChatOpenAICallOptions,
  AIMessageChunk
> {
  constructor() {
    super({
      Cls: AzureChatOpenAI,
      chatModelHasToolCalling: true,
      chatModelHasStructuredOutput: true,
      supportsParallelToolCalls: true,
      constructorArgs: {
        model: "gpt-3.5-turbo",
        maxRetries: 0,
      },
    });
  }

  // Skipped: streaming token usage is not available for this provider.
  async testUsageMetadataStreaming() {
    this.skipTestMessage(
      "testUsageMetadataStreaming",
      "AzureChatOpenAI",
      "Streaming tokens is not currently supported."
    );
  }

  // Skipped: same streaming-token limitation as above.
  async testStreamTokensWithToolCalls() {
    this.skipTestMessage(
      "testStreamTokensWithToolCalls",
      "AzureChatOpenAI",
      "Streaming tokens is not currently supported."
    );
  }

  // Skipped: Azure rejects tool schemas containing untyped object parameters.
  async testInvokeMoreComplexTools() {
    this.skipTestMessage(
      "testInvokeMoreComplexTools",
      "AzureChatOpenAI",
      "AzureChatOpenAI does not support tool schemas which contain object with unknown/any parameters." +
        "AzureChatOpenAI only supports objects in schemas when the parameters are defined."
    );
  }

  async testParallelToolCalling() {
    // Pass `true` in the second argument to only verify it can support parallel tool calls in the message history.
    // This is because the model struggles to actually call parallel tools.
    await super.testParallelToolCalling(undefined, true);
  }
}

const testClass = new AzureChatOpenAIStandardIntegrationTests();

// Single entry point: run the whole standard suite and assert overall success.
test("AzureChatOpenAIStandardIntegrationTests", async () => {
  const testResults = await testClass.runTests();
  expect(testResults).toBe(true);
});
0
lc_public_repos/langchainjs/libs/langchain-openai/src/tests
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/azure/llms.int.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { LLMResult } from "@langchain/core/outputs"; import { StringPromptValue } from "@langchain/core/prompt_values"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { ClientSecretCredential, getBearerTokenProvider, } from "@azure/identity"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { AzureOpenAI } from "../../azure/llms.js"; // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; beforeAll(() => { if (!process.env.AZURE_OPENAI_API_KEY) { process.env.AZURE_OPENAI_API_KEY = process.env.TEST_AZURE_OPENAI_API_KEY; } if (!process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME) { process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = process.env.TEST_AZURE_OPENAI_API_DEPLOYMENT_NAME; } if (!process.env.AZURE_OPENAI_BASE_PATH) { process.env.AZURE_OPENAI_BASE_PATH = process.env.TEST_AZURE_OPENAI_BASE_PATH; } if (!process.env.AZURE_OPENAI_API_VERSION) { process.env.AZURE_OPENAI_API_VERSION = process.env.TEST_AZURE_OPENAI_API_VERSION; } }); test("Test Azure OpenAI invoke", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Print hello world"); // console.log({ res }); }); test("Test Azure OpenAI call", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); // console.log({ res }); }); test("Test Azure OpenAI with stop in object", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: 
"gpt-3.5-turbo-instruct", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Print hello world", { stop: ["world"] }); // console.log({ res }); }); test("Test Azure OpenAI with timeout in call options", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); await expect(() => model.invoke("Print hello world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Test Azure OpenAI with timeout in call options and node adapter", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); await expect(() => model.invoke("Print hello world", { timeout: 10, }) ).rejects.toThrow(); }, 5000); test("Test Azure OpenAI with signal in call options", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const controller = new AbortController(); await expect(() => { const ret = model.invoke("Print hello world", { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test Azure OpenAI with signal in call options and node adapter", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const controller = new AbortController(); await expect(() => { const ret = model.invoke("Print hello world", { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test Azure OpenAI with concurrency == 1", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", maxConcurrency: 1, }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await Promise.all([ model.invoke("Print hello world"), model.invoke("Print hello world"), ]); // console.log({ res }); }); test("Test Azure OpenAI with maxTokens -1", async () => { const model = new AzureOpenAI({ maxTokens: 
-1, modelName: "gpt-3.5-turbo-instruct", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); // console.log({ res }); }); test("Test Azure OpenAI with model name", async () => { const model = new AzureOpenAI({ modelName: "gpt-3.5-turbo-instruct" }); expect(model).toBeInstanceOf(AzureOpenAI); const res = await model.invoke("Print hello world"); // console.log({ res }); expect(typeof res).toBe("string"); }); test("Test Azure OpenAI with versioned instruct model returns Azure OpenAI", async () => { const model = new AzureOpenAI({ modelName: "gpt-3.5-turbo-instruct-0914", }); expect(model).toBeInstanceOf(AzureOpenAI); const res = await model.invoke("Print hello world"); // console.log({ res }); expect(typeof res).toBe("string"); }); test("Test Azure OpenAI tokenUsage", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let tokenUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, }; const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Hello"); // console.log({ res }); expect(tokenUsage.promptTokens).toBe(1); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test Azure OpenAI in streaming mode", async () => { let nrNewTokens = 0; let streamedCompletion = ""; const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", streaming: true, callbacks: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }), }); const res = await model.invoke("Print hello world"); // console.log({ res }); expect(nrNewTokens > 0).toBe(true); expect(res).toBe(streamedCompletion); }); test("Test Azure OpenAI in streaming mode with multiple prompts", async () => { let nrNewTokens = 0; const completions = [ ["", ""], ["", ""], ]; const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", streaming: true, n: 2, callbacks: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string, idx: NewTokenIndices) { nrNewTokens += 1; completions[idx.prompt][idx.completion] += token; }, }), }); const res = await model.generate(["Print hello world", "print hello sea"]); // console.log( // res.generations, // res.generations.map((g) => g[0].generationInfo) // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); expect(res.generations.map((g) => g.map((gg) => gg.text))).toEqual( completions ); }); test("Test Azure OpenAI in streaming mode with 
multiple prompts", async () => { let nrNewTokens = 0; const completions = [[""], [""]]; const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo", streaming: true, n: 1, callbacks: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string, idx: NewTokenIndices) { nrNewTokens += 1; completions[idx.prompt][idx.completion] += token; }, }), }); const res = await model.generate(["Print hello world", "print hello sea"]); // console.log( // res.generations, // res.generations.map((g) => g[0].generationInfo) // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); expect(res.generations.map((g) => g.map((gg) => gg.text))).toEqual( completions ); }); test("Test Azure OpenAI prompt value", async () => { const model = new AzureOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const res = await model.generatePrompt([ new StringPromptValue("Print hello world"), ]); expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(1); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for (const g of generation) { // console.log(g.text); } } // console.log({ res }); }); test("Test Azure OpenAI stream method", async () => { const model = new AzureOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo-instruct", }); const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); }); test("Test Azure OpenAI stream method with abort", async () => { await expect(async () => { const model = new AzureOpenAI({ maxTokens: 250, modelName: "gpt-3.5-turbo-instruct", }); const stream = await model.stream( "How is your day going? 
Be extremely verbose.", { signal: AbortSignal.timeout(1000), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Test Azure OpenAI stream method with early break", async () => { const model = new AzureOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo-instruct", }); const stream = await model.stream( "How is your day going? Be extremely verbose." ); let i = 0; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); i += 1; if (i > 5) { break; } } }); test("Test Azure OpenAI with bearer token credentials", async () => { const tenantId: string = getEnvironmentVariable("AZURE_TENANT_ID") ?? ""; const clientId: string = getEnvironmentVariable("AZURE_CLIENT_ID") ?? ""; const clientSecret: string = getEnvironmentVariable("AZURE_CLIENT_SECRET") ?? ""; const credentials = new ClientSecretCredential( tenantId, clientId, clientSecret ); const azureADTokenProvider = getBearerTokenProvider( credentials, "https://cognitiveservices.azure.com/.default" ); const model = new AzureOpenAI({ maxTokens: 5, modelName: "davinci-002", azureADTokenProvider, }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Print hello world"); // console.log({ res }); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src/tests
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/azure/chat_models.standard.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { ChatModelUnitTests } from "@langchain/standard-tests"; import { AIMessageChunk } from "@langchain/core/messages"; import { AzureChatOpenAI } from "../../azure/chat_models.js"; import { ChatOpenAICallOptions } from "../../chat_models.js"; class AzureChatOpenAIStandardUnitTests extends ChatModelUnitTests< ChatOpenAICallOptions, AIMessageChunk > { constructor() { super({ Cls: AzureChatOpenAI, chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: {}, }); process.env.AZURE_OPENAI_API_KEY = "test"; process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = "test"; process.env.AZURE_OPENAI_API_VERSION = "test"; process.env.AZURE_OPENAI_BASE_PATH = "test"; } testChatModelInitApiKey() { console.warn( "AzureChatOpenAI does not require a single API key. Skipping..." ); } } const testClass = new AzureChatOpenAIStandardUnitTests(); test("AzureChatOpenAIStandardUnitTests", () => { const testResults = testClass.runTests(); expect(testResults).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src/tests
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/azure/chat_models.int.test.ts
/* eslint-disable no-process-env */ import { test, jest, expect } from "@jest/globals"; import { BaseMessage, ChatMessage, HumanMessage, SystemMessage, } from "@langchain/core/messages"; import { ChatGeneration, LLMResult } from "@langchain/core/outputs"; import { ChatPromptValue } from "@langchain/core/prompt_values"; import { PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { InMemoryCache } from "@langchain/core/caches"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { ClientSecretCredential, getBearerTokenProvider, } from "@azure/identity"; import { AzureChatOpenAI } from "../../azure/chat_models.js"; // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; beforeAll(() => { if (!process.env.AZURE_OPENAI_API_KEY) { process.env.AZURE_OPENAI_API_KEY = process.env.TEST_AZURE_OPENAI_API_KEY; } if (!process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME) { process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = process.env.TEST_AZURE_OPENAI_API_DEPLOYMENT_NAME; } if (!process.env.AZURE_OPENAI_BASE_PATH) { process.env.AZURE_OPENAI_BASE_PATH = process.env.TEST_AZURE_OPENAI_BASE_PATH; } if (!process.env.AZURE_OPENAI_API_VERSION) { process.env.AZURE_OPENAI_API_VERSION = process.env.TEST_AZURE_OPENAI_API_VERSION; } }); test("Test Azure ChatOpenAI call method", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, }); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.call([message]); // console.log({ res }); }); test("Test Azure ChatOpenAI with SystemChatMessage", async () => { const chat = new 
AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, }); const system_message = new SystemMessage("You are to chat with a user."); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.call([system_message, message]); // console.log({ res }); }); test("Test Azure ChatOpenAI Generate", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message], [message]]); expect(res.generations.length).toBe(2); for (const generation of res.generations) { expect(generation.length).toBe(2); for (const message of generation) { // console.log(message.text); expect(typeof message.text).toBe("string"); } } // console.log({ res }); }); test("Test Azure ChatOpenAI Generate throws when one of the calls fails", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); await expect(() => chat.generate([[message], [message]], { signal: AbortSignal.timeout(10), }) ).rejects.toThrow(); }); test("Test Azure ChatOpenAI tokenUsage", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let tokenUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, }; const model = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { // console.log(output); tokenUsage = output.llmOutput?.tokenUsage; }, }), }); const message = new HumanMessage("Hello"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke([message]); // console.log({ res }); expect(tokenUsage.promptTokens).toBeGreaterThan(0); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test Azure ChatOpenAI tokenUsage with a batch", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let tokenUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, }; const model = new AzureChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo", callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.generate([ [new HumanMessage("Hello")], [new HumanMessage("Hi")], ]); // console.log(res); expect(tokenUsage.promptTokens).toBeGreaterThan(0); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test Azure ChatOpenAI in streaming mode", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let nrNewTokens = 0; let streamedCompletion = ""; const model = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxTokens: 10, callbacks: [ { async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }, ], }); const message = new HumanMessage("Hello!"); const result = await model.invoke([message]); expect(nrNewTokens > 0).toBe(true); expect(result.content).toBe(streamedCompletion); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }, 10000); test("Test Azure ChatOpenAI in streaming mode with n > 1 and multiple prompts", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let nrNewTokens = 0; const streamedCompletions = [ ["", ""], ["", ""], ]; const model = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxTokens: 10, n: 2, callbacks: [ { async handleLLMNewToken(token: string, idx: NewTokenIndices) { nrNewTokens += 1; streamedCompletions[idx.prompt][idx.completion] += token; }, }, ], }); const message1 = new HumanMessage("Hello!"); const message2 = new HumanMessage("Bye!"); const result = await model.generate([[message1], [message2]]); expect(nrNewTokens > 0).toBe(true); expect(result.generations.map((g) => g.map((gg) => gg.text))).toEqual( streamedCompletions ); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }, 10000); test("Test Azure ChatOpenAI prompt value", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); const res = await chat.generatePrompt([new ChatPromptValue([message])]); expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(2); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for (const g of generation) { // console.log(g.text); } } // console.log({ res }); }); test("Test Azure OpenAI Chat, docs, prompt templates", async () => { const chat = new AzureChatOpenAI({ temperature: 0, maxTokens: 10 }); const systemPrompt = PromptTemplate.fromTemplate( "You are a helpful assistant that translates {input_language} to {output_language}." 
); const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), HumanMessagePromptTemplate.fromTemplate("{text}"), ]); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", output_language: "French", text: "I love programming.", }), ]); // console.log(responseA.generations); }, 5000); test("Test Azure ChatOpenAI with stop", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.call( [new HumanMessage("Print hello world")], ["world"] ); // console.log({ res }); }); test("Test Azure ChatOpenAI with stop in object", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke([new HumanMessage("Print hello world")], { stop: ["world"], }); // console.log({ res }); }); test("Test Azure ChatOpenAI with timeout in call options", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); await expect(() => model.invoke([new HumanMessage("Print hello world")], { timeout: 10 }) ).rejects.toThrow(); }, 5000); test("Test Azure ChatOpenAI with timeout in call options and node adapter", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); await expect(() => model.invoke([new HumanMessage("Print hello world")], { timeout: 10 }) ).rejects.toThrow(); }, 5000); test("Test Azure ChatOpenAI with signal in call options", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); const controller = new AbortController(); await expect(() => { const ret = model.invoke([new HumanMessage("Print hello world")], { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test 
Azure ChatOpenAI with signal in call options and node adapter", async () => { const model = new AzureChatOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const controller = new AbortController(); await expect(() => { const ret = model.invoke([new HumanMessage("Print hello world")], { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test Azure ChatOpenAI with specific roles in ChatMessage", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, }); const system_message = new ChatMessage( "You are to chat with a user.", "system" ); const user_message = new ChatMessage("Hello!", "user"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.call([system_message, user_message]); // console.log({ res }); }); test("Test Azure ChatOpenAI stream method", async () => { const model = new AzureChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo", }); const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); }); test("Test Azure ChatOpenAI stream method with abort", async () => { await expect(async () => { const model = new AzureChatOpenAI({ maxTokens: 100, modelName: "gpt-3.5-turbo", }); const stream = await model.stream( "How is your day going? Be extremely verbose.", { signal: AbortSignal.timeout(500), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Test Azure ChatOpenAI stream method with early break", async () => { const model = new AzureChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo", }); const stream = await model.stream( "How is your day going? Be extremely verbose." 
); let i = 0; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); i += 1; if (i > 10) { break; } } }); test("Test Azure ChatOpenAI stream method, timeout error thrown from SDK", async () => { await expect(async () => { const model = new AzureChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo", timeout: 1, maxRetries: 0, }); const stream = await model.stream( "How is your day going? Be extremely verbose." ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Test Azure ChatOpenAI Function calling with streaming", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let finalResult: BaseMessage | undefined; const modelForFunctionCalling = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0, callbacks: [ { handleLLMEnd(output: LLMResult) { finalResult = (output.generations[0][0] as ChatGeneration).message; }, }, ], }); const stream = await modelForFunctionCalling.stream( "What is the weather in New York?", { functions: [ { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. 
San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, ], function_call: { name: "get_current_weather", }, } ); const chunks = []; let streamedOutput; for await (const chunk of stream) { chunks.push(chunk); if (!streamedOutput) { streamedOutput = chunk; } else if (chunk) { streamedOutput = streamedOutput.concat(chunk); } } expect(finalResult).toEqual(streamedOutput); expect(chunks.length).toBeGreaterThan(1); expect(finalResult?.additional_kwargs?.function_call?.name).toBe( "get_current_weather" ); // console.log( // JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? "") // .location // ); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test Azure ChatOpenAI can cache generations", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, cache: memoryCache, }); const message = new HumanMessage("Hello"); const res = await chat.generate([[message], [message]]); expect(res.generations.length).toBe(2); expect(lookupSpy).toHaveBeenCalledTimes(2); expect(updateSpy).toHaveBeenCalledTimes(2); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); test("Test Azure ChatOpenAI can write and read cached generations", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 100, n: 1, cache: memoryCache, }); const generateUncachedSpy = jest.spyOn(chat, "_generateUncached"); const messages = [ [ new HumanMessage("what color is the sky?"), new HumanMessage("what color is the ocean?"), ], [new HumanMessage("hello")], ]; const response1 = await chat.generate(messages); 
expect(generateUncachedSpy).toHaveBeenCalledTimes(1); generateUncachedSpy.mockRestore(); const response2 = await chat.generate(messages); expect(generateUncachedSpy).toHaveBeenCalledTimes(0); // Request should be cached, no need to generate. generateUncachedSpy.mockRestore(); expect(response1.generations.length).toBe(2); expect(response2.generations).toEqual(response1.generations); expect(lookupSpy).toHaveBeenCalledTimes(4); expect(updateSpy).toHaveBeenCalledTimes(2); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); test("Test Azure ChatOpenAI should not reuse cache if function call args have changed", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 100, n: 1, cache: memoryCache, }); const generateUncachedSpy = jest.spyOn(chat, "_generateUncached"); const messages = [ [ new HumanMessage("what color is the sky?"), new HumanMessage("what color is the ocean?"), ], [new HumanMessage("hello")], ]; const response1 = await chat.generate(messages); expect(generateUncachedSpy).toHaveBeenCalledTimes(1); generateUncachedSpy.mockRestore(); const response2 = await chat.generate(messages, { functions: [ { name: "extractor", description: "Extract fields from the input", parameters: { type: "object", properties: { tone: { type: "string", description: "the tone of the input", }, }, required: ["tone"], }, }, ], function_call: { name: "extractor", }, }); expect(generateUncachedSpy).toHaveBeenCalledTimes(0); // Request should not be cached since it's being called with different function call args expect(response1.generations.length).toBe(2); expect( (response2.generations[0][0] as ChatGeneration).message.additional_kwargs .function_call?.name ?? 
"" ).toEqual("extractor"); const response3 = await chat.generate(messages, { functions: [ { name: "extractor", description: "Extract fields from the input", parameters: { type: "object", properties: { tone: { type: "string", description: "the tone of the input", }, }, required: ["tone"], }, }, ], function_call: { name: "extractor", }, }); expect(response2.generations).toEqual(response3.generations); expect(lookupSpy).toHaveBeenCalledTimes(6); expect(updateSpy).toHaveBeenCalledTimes(4); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); function createSampleMessages(): BaseMessage[] { // same example as in https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb return [ createSystemChatMessage( "You are a helpful, pattern-following assistant that translates corporate jargon into plain English." ), createSystemChatMessage( "New synergies will help drive top-line growth.", "example_user" ), createSystemChatMessage( "Things working well together will increase revenue.", "example_assistant" ), createSystemChatMessage( "Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.", "example_user" ), createSystemChatMessage( "Let's talk later when we're less busy about how to do better.", "example_assistant" ), new HumanMessage( "This late pivot means we don't have time to boil the ocean for the client deliverable." 
), ]; } function createSystemChatMessage(text: string, name?: string) { const msg = new SystemMessage(text); msg.name = name; return msg; } test("Test Azure ChatOpenAI getNumTokensFromMessages gpt-3.5-turbo-0301 model for sample input", async () => { const messages: BaseMessage[] = createSampleMessages(); const chat = new AzureChatOpenAI({ azureOpenAIApiKey: "dummy", modelName: "gpt-3.5-turbo-0301", }); const { totalCount } = await chat.getNumTokensFromMessages(messages); expect(totalCount).toBe(127); }); test("Test Azure ChatOpenAI getNumTokensFromMessages gpt-4-0314 model for sample input", async () => { const messages: BaseMessage[] = createSampleMessages(); const chat = new AzureChatOpenAI({ azureOpenAIApiKey: "dummy", modelName: "gpt-4-0314", }); const { totalCount } = await chat.getNumTokensFromMessages(messages); expect(totalCount).toBe(129); }); test("Test Azure ChatOpenAI token usage reporting for streaming function calls", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. 
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let streamingTokenUsed = -1; let nonStreamingTokenUsed = -1; const humanMessage = "What a beautiful day!"; const extractionFunctionSchema = { name: "extractor", description: "Extracts fields from the input.", parameters: { type: "object", properties: { tone: { type: "string", enum: ["positive", "negative"], description: "The overall tone of the input", }, word_count: { type: "number", description: "The number of words in the input", }, chat_response: { type: "string", description: "A response to the human's input", }, }, required: ["tone", "word_count", "chat_response"], }, }; const streamingModel = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; // console.log( // "streaming usage", // output.llmOutput?.estimatedTokenUsage // ); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }).bind({ seed: 42, functions: [extractionFunctionSchema], function_call: { name: "extractor" }, }); const nonStreamingModel = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: false, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; // console.log("non-streaming usage", output.llmOutput?.tokenUsage); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }).bind({ functions: [extractionFunctionSchema], function_call: { name: "extractor" }, }); const [nonStreamingResult, streamingResult] = await Promise.all([ nonStreamingModel.invoke([new HumanMessage(humanMessage)]), streamingModel.invoke([new HumanMessage(humanMessage)]), ]); if ( nonStreamingResult.additional_kwargs.function_call?.arguments && streamingResult.additional_kwargs.function_call?.arguments ) { // 
console.log( // `Function Call: ${JSON.stringify( // nonStreamingResult.additional_kwargs.function_call // )}` // ); const nonStreamingArguments = JSON.stringify( JSON.parse(nonStreamingResult.additional_kwargs.function_call.arguments) ); const streamingArguments = JSON.stringify( JSON.parse(streamingResult.additional_kwargs.function_call.arguments) ); if (nonStreamingArguments === streamingArguments) { expect(streamingTokenUsed).toEqual(nonStreamingTokenUsed); } } expect(streamingTokenUsed).toBeGreaterThan(-1); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); test("Test Azure ChatOpenAI token usage reporting for streaming calls", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that from happening. process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; try { let streamingTokenUsed = -1; let nonStreamingTokenUsed = -1; const systemPrompt = "You are a helpful assistant"; const question = "What is the color of the night sky?"; const streamingModel = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; // console.log( // "streaming usage", // output.llmOutput?.estimatedTokenUsage // ); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }); const nonStreamingModel = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: false, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; // console.log("non-streaming usage", output.llmOutput?.estimated); }, handleLLMError: async (_err) => { // 
console.error(err); }, }, ], }); const [nonStreamingResult, streamingResult] = await Promise.all([ nonStreamingModel.generate([ [new SystemMessage(systemPrompt), new HumanMessage(question)], ]), streamingModel.generate([ [new SystemMessage(systemPrompt), new HumanMessage(question)], ]), ]); expect(streamingTokenUsed).toBeGreaterThan(-1); if ( nonStreamingResult.generations[0][0].text === streamingResult.generations[0][0].text ) { expect(streamingTokenUsed).toEqual(nonStreamingTokenUsed); } } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; } }); // This test should be skipped if the required environment variables are not set // instead of failing the test. const tenantId: string = getEnvironmentVariable("AZURE_TENANT_ID") ?? ""; const clientId: string = getEnvironmentVariable("AZURE_CLIENT_ID") ?? ""; const clientSecret: string = getEnvironmentVariable("AZURE_CLIENT_SECRET") ?? ""; // eslint-disable-next-line @typescript-eslint/no-explicit-any let testFn: any = test; if (!tenantId || !clientId || !clientSecret) { // console.warn(`One or more required environment variables are not set. // Skipping "Test Azure ChatOpenAI with bearer token provider".`); testFn = test.skip; } testFn("Test Azure ChatOpenAI with bearer token provider", async () => { const credentials = new ClientSecretCredential( tenantId, clientId, clientSecret ); const azureADTokenProvider = getBearerTokenProvider( credentials, "https://cognitiveservices.azure.com/.default" ); const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 5, azureADTokenProvider, }); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([["system", "Say hi"], message]); // console.log(res); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src/tests
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/azure/embeddings.int.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { AzureOpenAIEmbeddings as OpenAIEmbeddings } from "../../azure/embeddings.js"; beforeAll(() => { if (!process.env.AZURE_OPENAI_API_KEY) { process.env.AZURE_OPENAI_API_KEY = process.env.TEST_AZURE_OPENAI_API_KEY; } if (!process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME) { process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = process.env.TEST_AZURE_OPENAI_API_DEPLOYMENT_NAME; } if (!process.env.AZURE_OPENAI_BASE_PATH) { process.env.AZURE_OPENAI_BASE_PATH = process.env.TEST_AZURE_OPENAI_BASE_PATH; } if (!process.env.AZURE_OPENAI_API_VERSION) { process.env.AZURE_OPENAI_API_VERSION = process.env.TEST_AZURE_OPENAI_API_VERSION; } }); test("Test AzureOpenAIEmbeddings.embedQuery", async () => { const embeddings = new OpenAIEmbeddings(); const res = await embeddings.embedQuery("Hello world"); expect(typeof res[0]).toBe("number"); }); test("Test AzureOpenAIEmbeddings.embedDocuments", async () => { const embeddings = new OpenAIEmbeddings(); const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); expect(res).toHaveLength(2); expect(typeof res[0][0]).toBe("number"); expect(typeof res[1][0]).toBe("number"); }); test("Test AzureOpenAIEmbeddings concurrency", async () => { const embeddings = new OpenAIEmbeddings({ batchSize: 1, maxConcurrency: 2, }); const res = await embeddings.embedDocuments([ "Hello world", "Bye bye", "Hello world", "Bye bye", "Hello world", "Bye bye", ]); expect(res).toHaveLength(6); expect(res.find((embedding) => typeof embedding[0] !== "number")).toBe( undefined ); }); test("Test timeout error thrown from SDK", async () => { await expect(async () => { const model = new OpenAIEmbeddings({ timeout: 1, maxRetries: 0, }); await model.embedDocuments([ "Hello world", "Bye bye", "Hello world", "Bye bye", "Hello world", "Bye bye", ]); }).rejects.toThrow(); }); test("Test AzureOpenAIEmbeddings.embedQuery with v3 and dimensions", async () => { const embeddings = new 
OpenAIEmbeddings({ modelName: "text-embedding-3-small", dimensions: 127, }); const res = await embeddings.embedQuery("Hello world"); expect(typeof res[0]).toBe("number"); expect(res.length).toBe(127); }); test("Test AzureOpenAIEmbeddings.embedDocuments with v3 and dimensions", async () => { const embeddings = new OpenAIEmbeddings({ modelName: "text-embedding-3-small", dimensions: 127, }); const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); expect(res).toHaveLength(2); expect(typeof res[0][0]).toBe("number"); expect(typeof res[1][0]).toBe("number"); expect(res[0].length).toBe(127); expect(res[1].length).toBe(127); });
0
lc_public_repos/langchainjs/libs/langchain-openai/src/tests
lc_public_repos/langchainjs/libs/langchain-openai/src/tests/azure/chat_models.test.ts
import { AzureChatOpenAI } from "../../azure/chat_models.js";

// Serialization tests: AzureChatOpenAI.toJSON() should normalize every way
// of specifying the Azure endpoint (explicit endpoint, base path, or
// instance name) into `azure_endpoint` + `deployment_name` kwargs, with the
// API key replaced by a secret reference rather than the literal value.

// Explicit `azureOpenAIEndpoint` is carried through as `azure_endpoint`.
test("Test Azure OpenAI serialization from azure endpoint", async () => {
  const chat = new AzureChatOpenAI({
    azureOpenAIEndpoint: "https://foobar.openai.azure.com/",
    azureOpenAIApiDeploymentName: "gpt-4o",
    azureOpenAIApiVersion: "2024-08-01-preview",
    azureOpenAIApiKey: "foo",
  });
  expect(JSON.stringify(chat)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","azure_openai","AzureChatOpenAI"],"kwargs":{"azure_endpoint":"https://foobar.openai.azure.com/","openai_api_key":{"lc":1,"type":"secret","id":["OPENAI_API_KEY"]},"deployment_name":"gpt-4o"}}`
  );
});

// A base path of the form `<endpoint>/openai/deployments/<deployment>` is
// split back into its endpoint and deployment parts.
test("Test Azure OpenAI serialization from base path", async () => {
  const chat = new AzureChatOpenAI({
    azureOpenAIBasePath:
      "https://foobar.openai.azure.com/openai/deployments/gpt-4o",
    azureOpenAIApiVersion: "2024-08-01-preview",
    azureOpenAIApiKey: "foo",
  });
  expect(JSON.stringify(chat)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","azure_openai","AzureChatOpenAI"],"kwargs":{"openai_api_key":{"lc":1,"type":"secret","id":["OPENAI_API_KEY"]},"azure_endpoint":"https://foobar.openai.azure.com","deployment_name":"gpt-4o"}}`
  );
});

// An instance name alone expands to `https://<instance>.openai.azure.com/`.
test("Test Azure OpenAI serialization from instance name", async () => {
  const chat = new AzureChatOpenAI({
    azureOpenAIApiInstanceName: "foobar",
    azureOpenAIApiDeploymentName: "gpt-4o",
    azureOpenAIApiVersion: "2024-08-01-preview",
    azureOpenAIApiKey: "foo",
  });
  expect(JSON.stringify(chat)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","azure_openai","AzureChatOpenAI"],"kwargs":{"openai_api_key":{"lc":1,"type":"secret","id":["OPENAI_API_KEY"]},"azure_endpoint":"https://foobar.openai.azure.com/","deployment_name":"gpt-4o"}}`
  );
});
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/utils/tools.ts
import { OpenAI as OpenAIClient } from "openai"; import { ToolDefinition } from "@langchain/core/language_models/base"; import { BindToolsInput } from "@langchain/core/language_models/chat_models"; import { convertToOpenAIFunction, isLangChainTool, } from "@langchain/core/utils/function_calling"; import { zodFunction } from "openai/helpers/zod"; /** * Formats a tool in either OpenAI format, or LangChain structured tool format * into an OpenAI tool format. If the tool is already in OpenAI format, return without * any changes. If it is in LangChain structured tool format, convert it to OpenAI tool format * using OpenAI's `zodFunction` util, falling back to `convertToOpenAIFunction` if the parameters * returned from the `zodFunction` util are not defined. * * @param {BindToolsInput} tool The tool to convert to an OpenAI tool. * @param {Object} [fields] Additional fields to add to the OpenAI tool. * @returns {ToolDefinition} The inputted tool in OpenAI tool format. */ export function _convertToOpenAITool( // eslint-disable-next-line @typescript-eslint/no-explicit-any tool: BindToolsInput, fields?: { /** * If `true`, model output is guaranteed to exactly match the JSON Schema * provided in the function definition. */ strict?: boolean; } ): OpenAIClient.ChatCompletionTool { let toolDef: OpenAIClient.ChatCompletionTool | undefined; if (isLangChainTool(tool)) { const oaiToolDef = zodFunction({ name: tool.name, parameters: tool.schema, description: tool.description, }); if (!oaiToolDef.function.parameters) { // Fallback to the `convertToOpenAIFunction` util if the parameters are not defined. toolDef = { type: "function", function: convertToOpenAIFunction(tool, fields), }; } else { toolDef = { type: oaiToolDef.type, function: { name: oaiToolDef.function.name, description: oaiToolDef.function.description, parameters: oaiToolDef.function.parameters, ...(fields?.strict !== undefined ? 
{ strict: fields.strict } : {}), }, }; } } else { toolDef = tool as ToolDefinition; } if (fields?.strict !== undefined) { toolDef.function.strict = fields.strict; } return toolDef; }
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/utils/errors.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable no-param-reassign */

// Duplicate of core
// TODO: Remove once we stop supporting 0.2.x core versions
export type LangChainErrorCodes =
  | "INVALID_PROMPT_INPUT"
  | "INVALID_TOOL_RESULTS"
  | "MESSAGE_COERCION_FAILURE"
  | "MODEL_AUTHENTICATION"
  | "MODEL_NOT_FOUND"
  | "MODEL_RATE_LIMIT"
  | "OUTPUT_PARSING_FAILURE";

/**
 * Tags an error with a LangChain error code and appends a link to the
 * matching troubleshooting page to its message.
 *
 * The error object is mutated in place and returned for convenience.
 *
 * @param error Any thrown value; expected to carry a string `message`.
 * @param lc_error_code The LangChain error code to attach.
 * @returns The same error instance, with `lc_error_code` set and the
 *   troubleshooting URL appended to `message`.
 */
export function addLangChainErrorFields(
  error: any,
  lc_error_code: LangChainErrorCodes
) {
  const troubleshootingUrl = `https://js.langchain.com/docs/troubleshooting/errors/${lc_error_code}/`;
  error.lc_error_code = lc_error_code;
  error.message = `${error.message}\n\nTroubleshooting URL: ${troubleshootingUrl}\n`;
  return error;
}
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/utils/azure.ts
export interface OpenAIEndpointConfig {
  azureOpenAIApiDeploymentName?: string;
  azureOpenAIApiInstanceName?: string;
  azureOpenAIApiKey?: string;
  azureADTokenProvider?: () => Promise<string>;
  azureOpenAIBasePath?: string;
  baseURL?: string | null;
  azureOpenAIEndpoint?: string;
}

/**
 * Resolves the endpoint URL for an (Azure) OpenAI client from the supplied
 * configuration.
 *
 * Resolution order, first match wins (all Azure forms require either an API
 * key or an Azure AD token provider):
 * 1. `azureOpenAIBasePath` + deployment name → `${basePath}/${deployment}`
 * 2. `azureOpenAIEndpoint` + deployment name →
 *    `${endpoint}/openai/deployments/${deployment}`
 * 3. Instance name + deployment name →
 *    `https://${instance}.openai.azure.com/openai/deployments/${deployment}`
 *    (throws if either piece is missing)
 * 4. Otherwise the custom `baseURL`, which may be undefined/null.
 *
 * @param config Endpoint configuration for the (Azure) OpenAI client.
 * @throws When Azure credentials are present but neither a base path,
 *   endpoint, nor instance+deployment pair can produce a URL.
 * @returns The resolved endpoint URL, or the raw `baseURL` fallback.
 */
export function getEndpoint(config: OpenAIEndpointConfig) {
  const {
    azureOpenAIApiDeploymentName: deployment,
    azureOpenAIApiInstanceName: instance,
    azureOpenAIApiKey: apiKey,
    azureOpenAIBasePath: basePath,
    baseURL,
    azureADTokenProvider: adTokenProvider,
    azureOpenAIEndpoint: endpoint,
  } = config;

  const hasAzureCredential = Boolean(apiKey || adTokenProvider);

  if (hasAzureCredential && basePath && deployment) {
    return `${basePath}/${deployment}`;
  }

  if (hasAzureCredential && endpoint && deployment) {
    return `${endpoint}/openai/deployments/${deployment}`;
  }

  if (hasAzureCredential) {
    if (!instance) {
      throw new Error(
        "azureOpenAIApiInstanceName is required when using azureOpenAIApiKey"
      );
    }
    if (!deployment) {
      throw new Error(
        "azureOpenAIApiDeploymentName is a required parameter when using azureOpenAIApiKey"
      );
    }
    return `https://${instance}.openai.azure.com/openai/deployments/${deployment}`;
  }

  // No Azure credentials: fall back to any custom base URL.
  return baseURL;
}
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/utils/openai.ts
import { APIConnectionTimeoutError, APIUserAbortError, OpenAI as OpenAIClient, } from "openai"; import { zodToJsonSchema } from "zod-to-json-schema"; import type { StructuredToolInterface } from "@langchain/core/tools"; import { convertToOpenAIFunction, convertToOpenAITool, } from "@langchain/core/utils/function_calling"; import { addLangChainErrorFields } from "./errors.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any export function wrapOpenAIClientError(e: any) { let error; if (e.constructor.name === APIConnectionTimeoutError.name) { error = new Error(e.message); error.name = "TimeoutError"; } else if (e.constructor.name === APIUserAbortError.name) { error = new Error(e.message); error.name = "AbortError"; } else if (e.status === 400 && e.message.includes("tool_calls")) { error = addLangChainErrorFields(e, "INVALID_TOOL_RESULTS"); } else if (e.status === 401) { error = addLangChainErrorFields(e, "MODEL_AUTHENTICATION"); } else if (e.status === 429) { error = addLangChainErrorFields(e, "MODEL_RATE_LIMIT"); } else if (e.status === 404) { error = addLangChainErrorFields(e, "MODEL_NOT_FOUND"); } else { error = e; } return error; } export { convertToOpenAIFunction as formatToOpenAIFunction, convertToOpenAITool as formatToOpenAITool, }; export function formatToOpenAIAssistantTool(tool: StructuredToolInterface) { return { type: "function", function: { name: tool.name, description: tool.description, parameters: zodToJsonSchema(tool.schema), }, }; } export type OpenAIToolChoice = | OpenAIClient.ChatCompletionToolChoiceOption | "any" | string; export function formatToOpenAIToolChoice( toolChoice?: OpenAIToolChoice ): OpenAIClient.ChatCompletionToolChoiceOption | undefined { if (!toolChoice) { return undefined; } else if (toolChoice === "any" || toolChoice === "required") { return "required"; } else if (toolChoice === "auto") { return "auto"; } else if (toolChoice === "none") { return "none"; } else if (typeof toolChoice === "string") { return { type: 
"function", function: { name: toolChoice, }, }; } else { return toolChoice; } }
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/utils/prompts.ts
/* eslint-disable import/no-extraneous-dependencies */
import type { BasePromptValue } from "@langchain/core/prompt_values";
import type { OpenAI } from "openai";

import { _convertMessagesToOpenAIParams } from "../chat_models.js";

/**
 * Convert a formatted LangChain prompt (e.g. pulled from the hub) into
 * a format expected by OpenAI's JS SDK.
 *
 * Requires the "@langchain/openai" package to be installed in addition
 * to the OpenAI SDK.
 *
 * @example
 * ```ts
 * import { convertPromptToOpenAI } from "langsmith/utils/hub/openai";
 * import { pull } from "langchain/hub";
 *
 * import OpenAI from 'openai';
 *
 * const prompt = await pull("jacob/joke-generator");
 * const formattedPrompt = await prompt.invoke({
 *   topic: "cats",
 * });
 *
 * const { messages } = convertPromptToOpenAI(formattedPrompt);
 *
 * const openAIClient = new OpenAI();
 *
 * const openaiResponse = await openAIClient.chat.completions.create({
 *   model: "gpt-4o",
 *   messages,
 * });
 * ```
 * @param formattedPrompt A prompt value produced by invoking a prompt template.
 * @returns A partial OpenAI chat-completions payload containing `messages`.
 */
export function convertPromptToOpenAI(formattedPrompt: BasePromptValue): {
  messages: OpenAI.Chat.ChatCompletionMessageParam[];
} {
  const chatMessages = formattedPrompt.toChatMessages();
  const messages = _convertMessagesToOpenAIParams(
    chatMessages
  ) as OpenAI.Chat.ChatCompletionMessageParam[];
  return { messages };
}
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/utils/openai-format-fndef.ts
/** * Formatting function definitions for calculating openai function defination token usage. * * https://github.com/hmarr/openai-chat-tokens/blob/main/src/functions.ts * (c) 2023 Harry Marr * MIT license */ import OpenAI from "openai"; type OpenAIFunction = OpenAI.Chat.ChatCompletionCreateParams.Function; // Types representing the OpenAI function definitions. While the OpenAI client library // does have types for function definitions, the properties are just Record<string, unknown>, // which isn't very useful for type checking this formatting code. export interface FunctionDef extends Omit<OpenAIFunction, "parameters"> { name: string; description?: string; parameters: ObjectProp; } interface ObjectProp { type: "object"; properties?: { [key: string]: Prop; }; required?: string[]; } interface AnyOfProp { anyOf: Prop[]; } type Prop = { description?: string; } & ( | AnyOfProp | ObjectProp | { type: "string"; enum?: string[]; } | { type: "number" | "integer"; minimum?: number; maximum?: number; enum?: number[]; } | { type: "boolean" } | { type: "null" } | { type: "array"; items?: Prop; } ); function isAnyOfProp(prop: Prop): prop is AnyOfProp { return ( (prop as AnyOfProp).anyOf !== undefined && Array.isArray((prop as AnyOfProp).anyOf) ); } // When OpenAI use functions in the prompt, they format them as TypeScript definitions rather than OpenAPI JSON schemas. // This function converts the JSON schemas into TypeScript definitions. export function formatFunctionDefinitions(functions: FunctionDef[]) { const lines = ["namespace functions {", ""]; for (const f of functions) { if (f.description) { lines.push(`// ${f.description}`); } if (Object.keys(f.parameters.properties ?? 
{}).length > 0) { lines.push(`type ${f.name} = (_: {`); lines.push(formatObjectProperties(f.parameters, 0)); lines.push("}) => any;"); } else { lines.push(`type ${f.name} = () => any;`); } lines.push(""); } lines.push("} // namespace functions"); return lines.join("\n"); } // Format just the properties of an object (not including the surrounding braces) function formatObjectProperties(obj: ObjectProp, indent: number): string { const lines: string[] = []; for (const [name, param] of Object.entries(obj.properties ?? {})) { if (param.description && indent < 2) { lines.push(`// ${param.description}`); } if (obj.required?.includes(name)) { lines.push(`${name}: ${formatType(param, indent)},`); } else { lines.push(`${name}?: ${formatType(param, indent)},`); } } return lines.map((line) => " ".repeat(indent) + line).join("\n"); } // Format a single property type function formatType(param: Prop, indent: number): string { if (isAnyOfProp(param)) { return param.anyOf.map((v) => formatType(v, indent)).join(" | "); } switch (param.type) { case "string": if (param.enum) { return param.enum.map((v) => `"${v}"`).join(" | "); } return "string"; case "number": if (param.enum) { return param.enum.map((v) => `${v}`).join(" | "); } return "number"; case "integer": if (param.enum) { return param.enum.map((v) => `${v}`).join(" | "); } return "number"; case "boolean": return "boolean"; case "null": return "null"; case "object": return ["{", formatObjectProperties(param, indent + 2), "}"].join("\n"); case "array": if (param.items) { return `${formatType(param.items, indent)}[]`; } return "any[]"; default: return ""; } }
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/azure/llms.ts
import { type ClientOptions, AzureOpenAI as AzureOpenAIClient } from "openai";
import { type BaseLLMParams } from "@langchain/core/language_models/llms";
import { OpenAI } from "../llms.js";
import { OpenAIEndpointConfig, getEndpoint } from "../utils/azure.js";
import type {
  OpenAIInput,
  AzureOpenAIInput,
  OpenAICoreRequestOptions,
  LegacyOpenAIInput,
} from "../types.js";

/**
 * Azure-hosted variant of the OpenAI completions LLM. Extends the base
 * `OpenAI` LLM but lazily builds an `AzureOpenAIClient` pointed at an
 * Azure endpoint, authenticating with either an API key or an Azure AD
 * token provider.
 */
export class AzureOpenAI extends OpenAI {
  // Maps constructor argument names to their serialized kwarg names.
  get lc_aliases(): Record<string, string> {
    return {
      openAIApiKey: "openai_api_key",
      openAIApiVersion: "openai_api_version",
      openAIBasePath: "openai_api_base",
    };
  }

  constructor(
    fields?: Partial<OpenAIInput> & {
      openAIApiKey?: string;
      openAIApiVersion?: string;
      openAIBasePath?: string;
      deploymentName?: string;
    } & Partial<AzureOpenAIInput> &
      BaseLLMParams & {
        configuration?: ClientOptions & LegacyOpenAIInput;
      }
  ) {
    // Copy the fields so the caller's object is not mutated, then map the
    // legacy/shorthand names onto the canonical azureOpenAI* fields.
    const newFields = fields ? { ...fields } : fields;
    if (newFields) {
      // don't rewrite the fields if they are already set
      newFields.azureOpenAIApiDeploymentName =
        newFields.azureOpenAIApiDeploymentName ?? newFields.deploymentName;
      newFields.azureOpenAIApiKey =
        newFields.azureOpenAIApiKey ?? newFields.openAIApiKey;
      newFields.azureOpenAIApiVersion =
        newFields.azureOpenAIApiVersion ?? newFields.openAIApiVersion;
    }
    super(newFields);
  }

  /**
   * Lazily constructs the Azure OpenAI client on first use, then returns
   * the per-request options (api-key header plus api-version query param).
   */
  protected _getClientOptions(options: OpenAICoreRequestOptions | undefined) {
    if (!this.client) {
      // NOTE(review): unlike AzureChatOpenAI._getClientOptions, this config
      // omits `azureOpenAIEndpoint` and no `deployment` option is passed to
      // the client below — confirm whether that asymmetry is intentional.
      const openAIEndpointConfig: OpenAIEndpointConfig = {
        azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
        azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
        azureOpenAIApiKey: this.azureOpenAIApiKey,
        azureOpenAIBasePath: this.azureOpenAIBasePath,
        azureADTokenProvider: this.azureADTokenProvider,
        baseURL: this.clientConfig.baseURL,
      };
      const endpoint = getEndpoint(openAIEndpointConfig);
      // maxRetries is 0 on the SDK client; retrying is handled elsewhere.
      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        maxRetries: 0,
      };
      // Only set an API key when not using Azure AD token authentication.
      if (!this.azureADTokenProvider) {
        params.apiKey = openAIEndpointConfig.azureOpenAIApiKey;
      }
      if (!params.baseURL) {
        delete params.baseURL;
      }
      // Tag outgoing requests so Azure-originated traffic is identifiable.
      params.defaultHeaders = {
        ...params.defaultHeaders,
        "User-Agent": params.defaultHeaders?.["User-Agent"]
          ? `${params.defaultHeaders["User-Agent"]}: langchainjs-azure-openai-v2`
          : `langchainjs-azure-openai-v2`,
      };
      this.client = new AzureOpenAIClient({
        apiVersion: this.azureOpenAIApiVersion,
        azureADTokenProvider: this.azureADTokenProvider,
        ...params,
      });
    }
    const requestOptions = {
      ...this.clientConfig,
      ...options,
    } as OpenAICoreRequestOptions;
    // Key-based auth goes in the `api-key` header; every request must also
    // carry the `api-version` query parameter.
    if (this.azureOpenAIApiKey) {
      requestOptions.headers = {
        "api-key": this.azureOpenAIApiKey,
        ...requestOptions.headers,
      };
      requestOptions.query = {
        "api-version": this.azureOpenAIApiVersion,
        ...requestOptions.query,
      };
    }
    return requestOptions;
  }

  // Strips Azure-specific secrets/config kwargs from the serialized form.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  toJSON(): any {
    const json = super.toJSON() as unknown;
    function isRecord(obj: unknown): obj is Record<string, unknown> {
      return typeof obj === "object" && obj != null;
    }
    if (isRecord(json) && isRecord(json.kwargs)) {
      delete json.kwargs.azure_openai_base_path;
      delete json.kwargs.azure_openai_api_deployment_name;
      delete json.kwargs.azure_openai_api_key;
      delete json.kwargs.azure_openai_api_version;
      delete json.kwargs.azure_open_ai_base_path;
    }
    return json;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/azure/chat_models.ts
import { type ClientOptions, AzureOpenAI as AzureOpenAIClient } from "openai"; import { LangSmithParams, type BaseChatModelParams, } from "@langchain/core/language_models/chat_models"; import { ChatOpenAI } from "../chat_models.js"; import { OpenAIEndpointConfig, getEndpoint } from "../utils/azure.js"; import { AzureOpenAIInput, LegacyOpenAIInput, OpenAIChatInput, OpenAICoreRequestOptions, } from "../types.js"; /** * Azure OpenAI chat model integration. * * Setup: * Install `@langchain/openai` and set the following environment variables: * * ```bash * npm install @langchain/openai * export AZURE_OPENAI_API_KEY="your-api-key" * export AZURE_OPENAI_API_DEPLOYMENT_NAME="your-deployment-name" * export AZURE_OPENAI_API_VERSION="your-version" * export AZURE_OPENAI_BASE_PATH="your-base-path" * ``` * * ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html#constructor) * * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html) * * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc. 
* They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below: * * ```typescript * // When calling `.bind`, call options should be passed via the first argument * const llmWithArgsBound = llm.bind({ * stop: ["\n"], * tools: [...], * }); * * // When calling `.bindTools`, call options should be passed via the second argument * const llmWithTools = llm.bindTools( * [...], * { * tool_choice: "auto", * } * ); * ``` * * ## Examples * * <details open> * <summary><strong>Instantiate</strong></summary> * * ```typescript * import { AzureChatOpenAI } from '@langchain/openai'; * * const llm = new AzureChatOpenAI({ * azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY * azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME * azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME * azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION * temperature: 0, * maxTokens: undefined, * timeout: undefined, * maxRetries: 2, * // apiKey: "...", * // baseUrl: "...", * // other params... 
* }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Invoking</strong></summary> * * ```typescript * const input = `Translate "I love programming" into French.`; * * // Models also accept a list of chat messages or a formatted prompt * const result = await llm.invoke(input); * console.log(result); * ``` * * ```txt * AIMessage { * "id": "chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz", * "content": "J'adore la programmation.", * "response_metadata": { * "tokenUsage": { * "completionTokens": 5, * "promptTokens": 28, * "totalTokens": 33 * }, * "finish_reason": "stop", * "system_fingerprint": "fp_3aa7262c27" * }, * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Streaming Chunks</strong></summary> * * ```typescript * for await (const chunk of await llm.stream(input)) { * console.log(chunk); * } * ``` * * ```txt * AIMessageChunk { * "id": "chatcmpl-9u4NWB7yUeHCKdLr6jP3HpaOYHTqs", * "content": "" * } * AIMessageChunk { * "content": "J" * } * AIMessageChunk { * "content": "'adore" * } * AIMessageChunk { * "content": " la" * } * AIMessageChunk { * "content": " programmation",, * } * AIMessageChunk { * "content": ".",, * } * AIMessageChunk { * "content": "", * "response_metadata": { * "finish_reason": "stop", * "system_fingerprint": "fp_c9aa9c0491" * }, * } * AIMessageChunk { * "content": "", * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Aggregate Streamed Chunks</strong></summary> * * ```typescript * import { AIMessageChunk } from '@langchain/core/messages'; * import { concat } from '@langchain/core/utils/stream'; * * const stream = await llm.stream(input); * let full: AIMessageChunk | undefined; * for await (const chunk of stream) { * full = !full ? 
chunk : concat(full, chunk); * } * console.log(full); * ``` * * ```txt * AIMessageChunk { * "id": "chatcmpl-9u4PnX6Fy7OmK46DASy0bH6cxn5Xu", * "content": "J'adore la programmation.", * "response_metadata": { * "prompt": 0, * "completion": 0, * "finish_reason": "stop", * }, * "usage_metadata": { * "input_tokens": 28, * "output_tokens": 5, * "total_tokens": 33 * } * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Bind tools</strong></summary> * * ```typescript * import { z } from 'zod'; * * const GetWeather = { * name: "GetWeather", * description: "Get the current weather in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const GetPopulation = { * name: "GetPopulation", * description: "Get the current population in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]); * const aiMsg = await llmWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?" 
* ); * console.log(aiMsg.tool_calls); * ``` * * ```txt * [ * { * name: 'GetWeather', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call', * id: 'call_uPU4FiFzoKAtMxfmPnfQL6UK' * }, * { * name: 'GetWeather', * args: { location: 'New York, NY' }, * type: 'tool_call', * id: 'call_UNkEwuQsHrGYqgDQuH9nPAtX' * }, * { * name: 'GetPopulation', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call', * id: 'call_kL3OXxaq9OjIKqRTpvjaCH14' * }, * { * name: 'GetPopulation', * args: { location: 'New York, NY' }, * type: 'tool_call', * id: 'call_s9KQB1UWj45LLGaEnjz0179q' * } * ] * ``` * </details> * * <br /> * * <details> * <summary><strong>Structured Output</strong></summary> * * ```typescript * import { z } from 'zod'; * * const Joke = z.object({ * setup: z.string().describe("The setup of the joke"), * punchline: z.string().describe("The punchline to the joke"), * rating: z.number().optional().describe("How funny the joke is, from 1 to 10") * }).describe('Joke to tell user.'); * * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" }); * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); * console.log(jokeResult); * ``` * * ```txt * { * setup: 'Why was the cat sitting on the computer?', * punchline: 'Because it wanted to keep an eye on the mouse!', * rating: 7 * } * ``` * </details> * * <br /> * * <details> * <summary><strong>JSON Object Response Format</strong></summary> * * ```typescript * const jsonLlm = llm.bind({ response_format: { type: "json_object" } }); * const jsonLlmAiMsg = await jsonLlm.invoke( * "Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]" * ); * console.log(jsonLlmAiMsg.content); * ``` * * ```txt * { * "randomInts": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67] * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Multimodal</strong></summary> * * ```typescript * import { HumanMessage } from '@langchain/core/messages'; * * const imageUrl = 
"https://example.com/image.jpg"; * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer()); * const base64Image = Buffer.from(imageData).toString('base64'); * * const message = new HumanMessage({ * content: [ * { type: "text", text: "describe the weather in this image" }, * { * type: "image_url", * image_url: { url: `data:image/jpeg;base64,${base64Image}` }, * }, * ] * }); * * const imageDescriptionAiMsg = await llm.invoke([message]); * console.log(imageDescriptionAiMsg.content); * ``` * * ```txt * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions. * ``` * </details> * * <br /> * * <details> * <summary><strong>Usage Metadata</strong></summary> * * ```typescript * const aiMsgForMetadata = await llm.invoke(input); * console.log(aiMsgForMetadata.usage_metadata); * ``` * * ```txt * { input_tokens: 28, output_tokens: 5, total_tokens: 33 } * ``` * </details> * * <br /> * * <details> * <summary><strong>Logprobs</strong></summary> * * ```typescript * const logprobsLlm = new ChatOpenAI({ logprobs: true }); * const aiMsgForLogprobs = await logprobsLlm.invoke(input); * console.log(aiMsgForLogprobs.response_metadata.logprobs); * ``` * * ```txt * { * content: [ * { * token: 'J', * logprob: -0.000050616763, * bytes: [Array], * top_logprobs: [] * }, * { * token: "'", * logprob: -0.01868736, * bytes: [Array], * top_logprobs: [] * }, * { * token: 'ad', * logprob: -0.0000030545007, * bytes: [Array], * top_logprobs: [] * }, * { token: 'ore', logprob: 0, bytes: [Array], top_logprobs: [] }, * { * token: ' la', * logprob: -0.515404, * bytes: [Array], * top_logprobs: [] * }, * { * token: ' programm', * logprob: -0.0000118755715, * bytes: [Array], * top_logprobs: [] * }, * { token: 'ation', logprob: 0, bytes: [Array], 
top_logprobs: [] },
 *     {
 *       token: '.',
 *       logprob: -0.0000037697225,
 *       bytes: [Array],
 *       top_logprobs: []
 *     }
 *   ],
 *   refusal: null
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Response Metadata</strong></summary>
 *
 * ```typescript
 * const aiMsgForResponseMetadata = await llm.invoke(input);
 * console.log(aiMsgForResponseMetadata.response_metadata);
 * ```
 *
 * ```txt
 * {
 *   tokenUsage: { completionTokens: 5, promptTokens: 28, totalTokens: 33 },
 *   finish_reason: 'stop',
 *   system_fingerprint: 'fp_3aa7262c27'
 * }
 * ```
 * </details>
 */
export class AzureChatOpenAI extends ChatOpenAI {
  _llmType(): string {
    return "azure_openai";
  }

  // Maps constructor argument names to serialized kwarg names; both legacy
  // (openAI*) and canonical (azureOpenAI*) spellings collapse onto the same
  // serialized keys.
  get lc_aliases(): Record<string, string> {
    return {
      openAIApiKey: "openai_api_key",
      openAIApiVersion: "openai_api_version",
      openAIBasePath: "openai_api_base",
      deploymentName: "deployment_name",
      azureOpenAIEndpoint: "azure_endpoint",
      azureOpenAIApiVersion: "openai_api_version",
      azureOpenAIBasePath: "openai_api_base",
      azureOpenAIApiDeploymentName: "deployment_name",
    };
  }

  constructor(
    fields?: Partial<OpenAIChatInput> &
      Partial<AzureOpenAIInput> & {
        openAIApiKey?: string;
        openAIApiVersion?: string;
        openAIBasePath?: string;
        deploymentName?: string;
      } & BaseChatModelParams & {
        configuration?: ClientOptions & LegacyOpenAIInput;
      }
  ) {
    // Copy the fields so the caller's object is not mutated, then map the
    // legacy/shorthand names onto the canonical azureOpenAI* fields.
    const newFields = fields ? { ...fields } : fields;
    if (newFields) {
      // don't rewrite the fields if they are already set
      newFields.azureOpenAIApiDeploymentName =
        newFields.azureOpenAIApiDeploymentName ?? newFields.deploymentName;
      newFields.azureOpenAIApiKey =
        newFields.azureOpenAIApiKey ?? newFields.openAIApiKey;
      newFields.azureOpenAIApiVersion =
        newFields.azureOpenAIApiVersion ?? newFields.openAIApiVersion;
    }
    super(newFields);
  }

  // Report "azure" (rather than "openai") as the provider for LangSmith.
  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
    const params = super.getLsParams(options);
    params.ls_provider = "azure";
    return params;
  }

  /**
   * Lazily constructs the Azure OpenAI client on first use, then returns
   * the per-request options (api-key header plus api-version query param).
   */
  protected _getClientOptions(options: OpenAICoreRequestOptions | undefined) {
    if (!this.client) {
      const openAIEndpointConfig: OpenAIEndpointConfig = {
        azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
        azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
        azureOpenAIApiKey: this.azureOpenAIApiKey,
        azureOpenAIBasePath: this.azureOpenAIBasePath,
        azureADTokenProvider: this.azureADTokenProvider,
        baseURL: this.clientConfig.baseURL,
        azureOpenAIEndpoint: this.azureOpenAIEndpoint,
      };
      const endpoint = getEndpoint(openAIEndpointConfig);
      // maxRetries is 0 on the SDK client; retrying is handled elsewhere.
      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        maxRetries: 0,
      };
      // Only set an API key when not using Azure AD token authentication.
      if (!this.azureADTokenProvider) {
        params.apiKey = openAIEndpointConfig.azureOpenAIApiKey;
      }
      if (!params.baseURL) {
        delete params.baseURL;
      }
      // Tag outgoing requests so Azure-originated traffic is identifiable.
      params.defaultHeaders = {
        ...params.defaultHeaders,
        "User-Agent": params.defaultHeaders?.["User-Agent"]
          ? `${params.defaultHeaders["User-Agent"]}: langchainjs-azure-openai-v2`
          : `langchainjs-azure-openai-v2`,
      };
      this.client = new AzureOpenAIClient({
        apiVersion: this.azureOpenAIApiVersion,
        azureADTokenProvider: this.azureADTokenProvider,
        deployment: this.azureOpenAIApiDeploymentName,
        ...params,
      });
    }
    const requestOptions = {
      ...this.clientConfig,
      ...options,
    } as OpenAICoreRequestOptions;
    // Key-based auth goes in the `api-key` header; every request must also
    // carry the `api-version` query parameter.
    if (this.azureOpenAIApiKey) {
      requestOptions.headers = {
        "api-key": this.azureOpenAIApiKey,
        ...requestOptions.headers,
      };
      requestOptions.query = {
        "api-version": this.azureOpenAIApiVersion,
        ...requestOptions.query,
      };
    }
    return requestOptions;
  }

  /**
   * Serialized form: secrets and Azure-specific kwargs are removed, and the
   * endpoint/deployment are normalized to `azure_endpoint` and
   * `deployment_name`, derived (in priority order) from the explicit
   * endpoint, the base path, or the instance name.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  toJSON(): any {
    const json = super.toJSON() as unknown;
    function isRecord(obj: unknown): obj is Record<string, unknown> {
      return typeof obj === "object" && obj != null;
    }
    if (isRecord(json) && isRecord(json.kwargs)) {
      delete json.kwargs.azure_openai_base_path;
      delete json.kwargs.azure_openai_api_deployment_name;
      delete json.kwargs.azure_openai_api_key;
      delete json.kwargs.azure_openai_api_version;
      delete json.kwargs.azure_open_ai_base_path;

      // Derive azure_endpoint: explicit endpoint wins, then a base path of
      // the form `<endpoint>/openai/deployments/<deployment>`, then the
      // instance name expanded to the default Azure hostname.
      if (!json.kwargs.azure_endpoint && this.azureOpenAIEndpoint) {
        json.kwargs.azure_endpoint = this.azureOpenAIEndpoint;
      }
      if (!json.kwargs.azure_endpoint && this.azureOpenAIBasePath) {
        const parts = this.azureOpenAIBasePath.split("/openai/deployments/");
        if (parts.length === 2 && parts[0].startsWith("http")) {
          const [endpoint] = parts;
          json.kwargs.azure_endpoint = endpoint;
        }
      }
      if (!json.kwargs.azure_endpoint && this.azureOpenAIApiInstanceName) {
        json.kwargs.azure_endpoint = `https://${this.azureOpenAIApiInstanceName}.openai.azure.com/`;
      }

      // Derive deployment_name from the explicit deployment, falling back
      // to the deployment segment of the base path.
      if (!json.kwargs.deployment_name && this.azureOpenAIApiDeploymentName) {
        json.kwargs.deployment_name = this.azureOpenAIApiDeploymentName;
      }
      if (!json.kwargs.deployment_name && this.azureOpenAIBasePath) {
        const parts = this.azureOpenAIBasePath.split("/openai/deployments/");
        if (parts.length === 2) {
          const [, deployment] = parts;
          json.kwargs.deployment_name = deployment;
        }
      }

      // Once both pieces are present, the redundant raw kwargs are dropped.
      if (
        json.kwargs.azure_endpoint &&
        json.kwargs.deployment_name &&
        json.kwargs.openai_api_base
      ) {
        delete json.kwargs.openai_api_base;
      }
      if (
        json.kwargs.azure_openai_api_instance_name &&
        json.kwargs.azure_endpoint
      ) {
        delete json.kwargs.azure_openai_api_instance_name;
      }
    }
    return json;
  }
}
0
lc_public_repos/langchainjs/libs/langchain-openai/src
lc_public_repos/langchainjs/libs/langchain-openai/src/azure/embeddings.ts
import {
  type ClientOptions,
  AzureOpenAI as AzureOpenAIClient,
  OpenAI as OpenAIClient,
} from "openai";
import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from "../embeddings.js";
import {
  AzureOpenAIInput,
  OpenAICoreRequestOptions,
  LegacyOpenAIInput,
} from "../types.js";
import { getEndpoint, OpenAIEndpointConfig } from "../utils/azure.js";
import { wrapOpenAIClientError } from "../utils/openai.js";

/**
 * Azure-hosted variant of {@link OpenAIEmbeddings}. It accepts both the
 * Azure-prefixed field names (`azureOpenAIApiKey`, …) and the generic
 * aliases (`apiKey`, `deploymentName`, `openAIApiVersion`), and lazily
 * constructs an `AzureOpenAI` client on first use.
 */
export class AzureOpenAIEmbeddings extends OpenAIEmbeddings {
  constructor(
    fields?: Partial<OpenAIEmbeddingsParams> &
      Partial<AzureOpenAIInput> & {
        verbose?: boolean;
        /** The OpenAI API key to use. */
        apiKey?: string;
        configuration?: ClientOptions;
        deploymentName?: string;
        openAIApiVersion?: string;
      },
    configuration?: ClientOptions & LegacyOpenAIInput
  ) {
    // Copy so we can normalize alias fields without mutating the caller's
    // object.
    const newFields = { ...fields };
    if (Object.entries(newFields).length) {
      // don't rewrite the fields if they are already set
      newFields.azureOpenAIApiDeploymentName =
        newFields.azureOpenAIApiDeploymentName ?? newFields.deploymentName;
      newFields.azureOpenAIApiKey =
        newFields.azureOpenAIApiKey ?? newFields.apiKey;
      newFields.azureOpenAIApiVersion =
        newFields.azureOpenAIApiVersion ?? newFields.openAIApiVersion;
    }
    super(newFields, configuration);
  }

  /**
   * Issue one embeddings request through the retry-aware caller,
   * building (and caching) the Azure client on the first call.
   *
   * @param request - The raw OpenAI embedding request payload.
   * @returns The embeddings API response; OpenAI client errors are
   *   rewrapped via `wrapOpenAIClientError` before rethrowing.
   */
  protected async embeddingWithRetry(
    request: OpenAIClient.EmbeddingCreateParams
  ) {
    // Lazily create the client once; subsequent calls reuse it.
    if (!this.client) {
      const openAIEndpointConfig: OpenAIEndpointConfig = {
        azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
        azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
        azureOpenAIApiKey: this.azureOpenAIApiKey,
        azureOpenAIBasePath: this.azureOpenAIBasePath,
        azureADTokenProvider: this.azureADTokenProvider,
        baseURL: this.clientConfig.baseURL,
      };

      const endpoint = getEndpoint(openAIEndpointConfig);

      // maxRetries is 0 here because retries are handled by this.caller
      // below, not by the underlying SDK.
      const params = {
        ...this.clientConfig,
        baseURL: endpoint,
        timeout: this.timeout,
        maxRetries: 0,
      };

      // Only fall back to the API key when no Azure AD token provider is
      // configured (the two auth mechanisms are mutually exclusive).
      if (!this.azureADTokenProvider) {
        params.apiKey = openAIEndpointConfig.azureOpenAIApiKey;
      }

      // The SDK treats an explicit undefined baseURL differently from an
      // absent one, so drop the key entirely when unset.
      if (!params.baseURL) {
        delete params.baseURL;
      }

      // Append a library identifier to the User-Agent for telemetry.
      params.defaultHeaders = {
        ...params.defaultHeaders,
        "User-Agent": params.defaultHeaders?.["User-Agent"]
          ? `${params.defaultHeaders["User-Agent"]}: langchainjs-azure-openai-v2`
          : `langchainjs-azure-openai-v2`,
      };

      this.client = new AzureOpenAIClient({
        apiVersion: this.azureOpenAIApiVersion,
        azureADTokenProvider: this.azureADTokenProvider,
        deployment: this.azureOpenAIApiDeploymentName,
        ...params,
      });
    }

    // Per-request options: when using key auth, Azure expects the key in
    // an "api-key" header and the API version as a query parameter.
    const requestOptions: OpenAICoreRequestOptions = {};
    if (this.azureOpenAIApiKey) {
      requestOptions.headers = {
        "api-key": this.azureOpenAIApiKey,
        ...requestOptions.headers,
      };
      requestOptions.query = {
        "api-version": this.azureOpenAIApiVersion,
        ...requestOptions.query,
      };
    }

    return this.caller.call(async () => {
      try {
        const res = await this.client.embeddings.create(
          request,
          requestOptions
        );
        return res;
      } catch (e) {
        // Normalize SDK errors into LangChain's error shape.
        const error = wrapOpenAIClientError(e);
        throw error;
      }
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-openai
lc_public_repos/langchainjs/libs/langchain-openai/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { "^.+\\.tsx?$": ["@swc/jest"], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], testTimeout: 20_000, passWithNoTests: true, };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node"); class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { constructor(config, context) { // Make `instanceof Float32Array` return true in tests // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 super(config, context); this.global.Float32Array = Float32Array; } } module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/README.md
# LangChain google-gauth

This package contains resources to access Google AI/ML models
and other Google services.

Authorization to these services uses either an API Key or service account
credentials that are either stored on the local file system or provided
through the Google Cloud Platform environment it is running on.

If you are running this on a platform where the credentials cannot
be provided this way, consider using the @langchain/google-webauth
package *instead*. You do not need to use both packages. See the
section on **Authorization** below.

## Installation

```bash
$ yarn add @langchain/google-gauth
```

## Authorization

Authorization is done either through the use of an API Key, if it is supported
for the service you're using, or a Google Cloud Service Account.

To handle service accounts, this package uses the `google-auth-library`
package, and you may wish to consult the documentation for that library
about how it does so. But in short, classes in this package will use
credentials from the first of the following that applies:

1. An API Key that is passed to the constructor using the `apiKey` attribute
2. Credentials that are passed to the constructor using the `authInfo` attribute
3. An API Key that is set in the environment variable `API_KEY`
4. The Service Account credentials that are saved in a file. The path
   to this file is set in the `GOOGLE_APPLICATION_CREDENTIALS` environment
   variable.
5. If you are running on a Google Cloud Platform resource, or if you have
   logged in using `gcloud auth application-default login`, then the default
   credentials.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//, /@langchain\/google-common/], entrypoints: { index: "index", utils: "utils", types: "types", }, tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/package.json
{ "name": "@langchain/google-gauth", "version": "0.1.3", "description": "Google auth based authentication support for Google services", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-google-gauth/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/google-gauth", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard": "yarn test:standard:unit && yarn test:standard:int", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/google-common": 
"~0.1.3", "google-auth-library": "^8.9.0" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", "dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0", "zod": "^3.22.4" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./utils": { "types": { "import": "./utils.d.ts", "require": "./utils.d.cts", "default": "./utils.d.ts" }, "import": "./utils.js", "require": "./utils.cjs" }, "./types": { "types": { "import": "./types.d.ts", "require": "./types.d.cts", "default": "./types.d.ts" }, "import": "./types.js", "require": "./types.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts", "utils.cjs", "utils.js", "utils.d.ts", "utils.d.cts", "types.cjs", "types.js", "types.d.ts", "types.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-gauth/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-google-gauth
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/auth.ts
import { Readable } from "stream";
import {
  AbstractStream,
  ensureAuthOptionScopes,
  GoogleAbstractedClient,
  GoogleAbstractedClientOps,
  GoogleConnectionParams,
  JsonStream,
  SseJsonStream,
  SseStream,
} from "@langchain/google-common";
import { GoogleAuth, GoogleAuthOptions } from "google-auth-library";

/**
 * Adapts a Node `Readable` into one of the platform-neutral
 * `AbstractStream` buffers from @langchain/google-common: bytes from the
 * readable are decoded as UTF-8 and appended to the wrapped stream, which
 * is closed when the readable ends.
 */
export class NodeAbstractStream implements AbstractStream {
  private baseStream: AbstractStream;

  constructor(baseStream: AbstractStream, data: Readable) {
    this.baseStream = baseStream;
    const decoder = new TextDecoder("utf-8");
    data.on("data", (data) => {
      // stream: true lets the decoder hold back incomplete multibyte
      // sequences that are split across chunk boundaries.
      const text = decoder.decode(data, { stream: true });
      this.appendBuffer(text);
    });
    data.on("end", () => {
      // Flush any bytes the decoder was still buffering.
      const rest = decoder.decode();
      this.appendBuffer(rest);
      this.closeBuffer();
    });
  }

  /** Delegate to the wrapped stream's buffer. */
  appendBuffer(data: string): void {
    return this.baseStream.appendBuffer(data);
  }

  /** Delegate to the wrapped stream's buffer. */
  closeBuffer(): void {
    return this.baseStream.closeBuffer();
  }

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  nextChunk(): Promise<any> {
    return this.baseStream.nextChunk();
  }

  get streamDone(): boolean {
    return this.baseStream.streamDone;
  }
}

/** Node adapter for a JSON-array response stream. */
export class NodeJsonStream extends NodeAbstractStream {
  constructor(data: Readable) {
    super(new JsonStream(), data);
  }
}

/** Node adapter for a raw server-sent-events stream. */
export class NodeSseStream extends NodeAbstractStream {
  constructor(data: Readable) {
    super(new SseStream(), data);
  }
}

/** Node adapter for an SSE stream whose event payloads are JSON. */
export class NodeSseJsonStream extends NodeAbstractStream {
  constructor(data: Readable) {
    super(new SseJsonStream(), data);
  }
}

/**
 * GoogleAbstractedClient backed by `google-auth-library`, used for all
 * HTTP calls made by this package.
 */
export class GAuthClient implements GoogleAbstractedClient {
  gauth: GoogleAuth;

  constructor(fields?: GoogleConnectionParams<GoogleAuthOptions>) {
    // Fill in default OAuth scopes for the target platform if the caller
    // did not provide any.
    const options = ensureAuthOptionScopes<GoogleAuthOptions>(
      fields?.authOptions,
      "scopes",
      fields?.platformType
    );
    this.gauth = new GoogleAuth(options);
  }

  get clientType(): string {
    return "gauth";
  }

  async getProjectId(): Promise<string> {
    return this.gauth.getProjectId();
  }

  /**
   * Perform an authenticated request. Non-stream responses are returned
   * as-is; stream responses have their `data` readable wrapped in the
   * matching Node stream adapter based on the Content-Type header.
   */
  async request(opts: GoogleAbstractedClientOps): Promise<unknown> {
    try {
      const ret = await this.gauth.request(opts);
      // Content-Type may carry parameters (e.g. "; charset=utf-8");
      // keep only the media type.
      const [contentType] = ret?.headers?.["content-type"]?.split(/;/) ?? [""];
      if (opts.responseType !== "stream") {
        return ret;
      } else if (contentType === "text/event-stream") {
        return {
          ...ret,
          data: new NodeSseJsonStream(ret.data),
        };
      } else {
        return {
          ...ret,
          data: new NodeJsonStream(ret.data),
        };
      }
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (xx: any) {
      // NOTE(review): this logs the full request opts, which may include
      // auth headers or request bodies — confirm this does not leak
      // credentials into logs.
      console.error("call to gauth.request", JSON.stringify(xx, null, 2));
      console.error(
        "call to gauth.request opts=",
        JSON.stringify(opts, null, 2)
      );
      console.error("call to gauth.request message:", xx?.message);
      throw xx;
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-google-gauth
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/types.ts
export * from "@langchain/google-common/types";
0
lc_public_repos/langchainjs/libs/langchain-google-gauth
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/llms.ts
import { GoogleAbstractedClient, GoogleBaseLLM, GoogleBaseLLMInput, } from "@langchain/google-common"; import { GoogleAuthOptions } from "google-auth-library"; import { GAuthClient } from "./auth.js"; /** * Input to LLM class. */ export interface GoogleLLMInput extends GoogleBaseLLMInput<GoogleAuthOptions> {} /** * Integration with a Google LLM. */ export class GoogleLLM extends GoogleBaseLLM<GoogleAuthOptions> implements GoogleLLMInput { // Used for tracing, replace with the same name as your class static lc_name() { return "GoogleLLM"; } lc_serializable = true; constructor(fields?: GoogleLLMInput) { super(fields); } buildAbstractedClient( fields: GoogleBaseLLMInput<GoogleAuthOptions> | undefined ): GoogleAbstractedClient { return new GAuthClient(fields); } }
0
lc_public_repos/langchainjs/libs/langchain-google-gauth
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/index.ts
export * from "./chat_models.js"; export * from "./llms.js"; export * from "./embeddings.js"; export * from "./media.js";
0
lc_public_repos/langchainjs/libs/langchain-google-gauth
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/chat_models.ts
import { ChatGoogleBase, ChatGoogleBaseInput, GoogleAbstractedClient, GoogleBaseLLMInput, } from "@langchain/google-common"; import { GoogleAuthOptions } from "google-auth-library"; import { GAuthClient } from "./auth.js"; /** * Input to chat model class. */ export interface ChatGoogleInput extends ChatGoogleBaseInput<GoogleAuthOptions> {} /** * Integration with a Google chat model. */ export class ChatGoogle extends ChatGoogleBase<GoogleAuthOptions> implements ChatGoogleInput { // Used for tracing, replace with the same name as your class static lc_name() { return "ChatGoogle"; } constructor(fields?: ChatGoogleInput) { super(fields); } buildAbstractedClient( fields: GoogleBaseLLMInput<GoogleAuthOptions> | undefined ): GoogleAbstractedClient { return new GAuthClient(fields); } }
0
lc_public_repos/langchainjs/libs/langchain-google-gauth
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/embeddings.ts
import { GoogleAbstractedClient, GoogleConnectionParams, BaseGoogleEmbeddings, BaseGoogleEmbeddingsParams, } from "@langchain/google-common"; import { GoogleAuthOptions } from "google-auth-library"; import { GAuthClient } from "./auth.js"; /** * Input to LLM class. */ export interface GoogleEmbeddingsInput extends BaseGoogleEmbeddingsParams<GoogleAuthOptions> {} /** * Integration with an Google embeddings model. */ export class GoogleEmbeddings extends BaseGoogleEmbeddings<GoogleAuthOptions> implements GoogleEmbeddingsInput { // Used for tracing, replace with the same name as your class static lc_name() { return "GoogleEmbeddings"; } lc_serializable = true; constructor(fields: GoogleEmbeddingsInput) { super(fields); } buildAbstractedClient( fields?: GoogleConnectionParams<GoogleAuthOptions> ): GoogleAbstractedClient { return new GAuthClient(fields); } }
0
lc_public_repos/langchainjs/libs/langchain-google-gauth
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/media.ts
import { GoogleAbstractedClient } from "@langchain/google-common"; import { BlobStoreGoogleCloudStorageBase, BlobStoreGoogleCloudStorageBaseParams, BlobStoreAIStudioFileBase, BlobStoreAIStudioFileBaseParams, } from "@langchain/google-common/experimental/media"; import { GoogleAuthOptions } from "google-auth-library"; import { GAuthClient } from "./auth.js"; export interface BlobStoreGoogleCloudStorageParams extends BlobStoreGoogleCloudStorageBaseParams<GoogleAuthOptions> {} export class BlobStoreGoogleCloudStorage extends BlobStoreGoogleCloudStorageBase<GoogleAuthOptions> { buildClient( fields?: BlobStoreGoogleCloudStorageParams ): GoogleAbstractedClient { return new GAuthClient(fields); } } export interface BlobStoreAIStudioFileParams extends BlobStoreAIStudioFileBaseParams<GoogleAuthOptions> {} export class BlobStoreAIStudioFile extends BlobStoreAIStudioFileBase<GoogleAuthOptions> { buildAbstractedClient( fields?: BlobStoreAIStudioFileParams ): GoogleAbstractedClient { return new GAuthClient(fields); } }
0
lc_public_repos/langchainjs/libs/langchain-google-gauth
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/utils.ts
export * from "@langchain/google-common/utils";
0
lc_public_repos/langchainjs/libs/langchain-google-gauth/src
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/tests/auth.test.ts
import { expect, test, describe } from "@jest/globals"; import { Readable } from "stream"; import { NodeJsonStream } from "../auth.js"; describe("NodeJsonStream", () => { test("stream", async () => { const data = ["[", '{"i": 1}', '{"i', '": 2}', "]"]; const source = new Readable({ read() { if (data.length > 0) { this.push(Buffer.from(data.shift() || "")); } else { this.push(null); } }, }); const stream = new NodeJsonStream(source); expect(await stream.nextChunk()).toEqual({ i: 1 }); expect(await stream.nextChunk()).toEqual({ i: 2 }); expect(await stream.nextChunk()).toBeNull(); expect(stream.streamDone).toEqual(true); }); test("stream multibyte", async () => { const data = [ "[", '{"i": 1, "msg":"hello👋"}', '{"i": 2,', '"msg":"こん', Buffer.from([0xe3]), // 1st byte of "に" Buffer.from([0x81, 0xab]), // 2-3rd bytes of "に" "ちは", Buffer.from([0xf0, 0x9f]), // first half bytes of "👋" Buffer.from([0x91, 0x8b]), // second half bytes of "👋" '"}', "]", ]; const source = new Readable({ read() { if (data.length > 0) { const next = data.shift(); this.push(typeof next === "string" ? Buffer.from(next) : next); } else { this.push(null); } }, }); const stream = new NodeJsonStream(source); expect(await stream.nextChunk()).toEqual({ i: 1, msg: "hello👋" }); expect(await stream.nextChunk()).toEqual({ i: 2, msg: "こんにちは👋" }); expect(await stream.nextChunk()).toBeNull(); expect(stream.streamDone).toEqual(true); }); });
0
lc_public_repos/langchainjs/libs/langchain-google-gauth/src
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts
// Integration tests for ChatGoogle. These hit the live Google API and
// require application-default credentials (see package README).
import { test } from "@jest/globals";
import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { ChatPromptValue } from "@langchain/core/prompt_values";
import {
  AIMessage,
  AIMessageChunk,
  BaseMessage,
  BaseMessageChunk,
  BaseMessageLike,
  HumanMessage,
  HumanMessageChunk,
  MessageContentComplex,
  SystemMessage,
  ToolMessage,
} from "@langchain/core/messages";
import {
  BackedBlobStore,
  MediaBlob,
  MediaManager,
  ReadThroughBlobStore,
  SimpleWebBlobStore,
} from "@langchain/google-common/experimental/utils/media_core";
import { GoogleCloudStorageUri } from "@langchain/google-common/experimental/media";
import { InMemoryStore } from "@langchain/core/stores";
import { GeminiTool } from "../types.js";
import { ChatGoogle } from "../chat_models.js";
import { BlobStoreGoogleCloudStorage } from "../media.js";

describe("GAuth Chat", () => {
  test("invoke", async () => {
    const model = new ChatGoogle();
    try {
      const res = await model.invoke("What is 1 + 1?");
      expect(res).toBeDefined();
      expect(res._getType()).toEqual("ai");
      const aiMessage = res as AIMessageChunk;
      expect(aiMessage.content).toBeDefined();
      expect(typeof aiMessage.content).toBe("string");
      const text = aiMessage.content as string;
      // NOTE(review): the "+" here is an unescaped regex quantifier and
      // the whole leading group is optional, so this effectively only
      // requires a "2" somewhere — confirm whether a literal "1 \+ 1"
      // was intended.
      expect(text).toMatch(/(1 + 1 (equals|is|=) )?2.? ?/);
      /*
      expect(aiMessage.content.length).toBeGreaterThan(0);
      expect(aiMessage.content[0]).toBeDefined();
      const content = aiMessage.content[0] as MessageContentComplex;
      expect(content).toHaveProperty("type");
      expect(content.type).toEqual("text");
      const textContent = content as MessageContentText;
      expect(textContent.text).toBeDefined();
      expect(textContent.text).toEqual("2");
      */
    } catch (e) {
      console.error(e);
      throw e;
    }
  });

  test("generate", async () => {
    const model = new ChatGoogle();
    try {
      // Prime the model with a strict H/T reply format via system prompt
      // plus one example exchange.
      const messages: BaseMessage[] = [
        new SystemMessage(
          "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
        ),
        new HumanMessage("Flip it"),
        new AIMessage("T"),
        new HumanMessage("Flip the coin again"),
      ];
      const res = await model.predictMessages(messages);
      expect(res).toBeDefined();
      expect(res._getType()).toEqual("ai");
      const aiMessage = res as AIMessageChunk;
      expect(aiMessage.content).toBeDefined();
      expect(typeof aiMessage.content).toBe("string");
      const text = aiMessage.content as string;
      expect(["H", "T"]).toContainEqual(text);
      /*
      expect(aiMessage.content.length).toBeGreaterThan(0);
      expect(aiMessage.content[0]).toBeDefined();
      const content = aiMessage.content[0] as MessageContentComplex;
      expect(content).toHaveProperty("type");
      expect(content.type).toEqual("text");
      const textContent = content as MessageContentText;
      expect(textContent.text).toBeDefined();
      expect(["H", "T"]).toContainEqual(textContent.text);
      */
    } catch (e) {
      console.error(e);
      throw e;
    }
  });

  test("stream", async () => {
    const model = new ChatGoogle();
    try {
      const input: BaseLanguageModelInput = new ChatPromptValue([
        new SystemMessage(
          "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
        ),
        new HumanMessage("Flip it"),
        new AIMessage("T"),
        new HumanMessage("Flip the coin again"),
      ]);
      const res = await model.stream(input);
      // Collect every streamed chunk before asserting.
      const resArray: BaseMessageChunk[] = [];
      for await (const chunk of res) {
        resArray.push(chunk);
      }
      expect(resArray).toBeDefined();
      expect(resArray.length).toBeGreaterThanOrEqual(1);
      const lastChunk = resArray[resArray.length - 1];
      expect(lastChunk).toBeDefined();
      expect(lastChunk._getType()).toEqual("ai");
      const aiChunk = lastChunk as AIMessageChunk;
      console.log(aiChunk);
      console.log(JSON.stringify(resArray, null, 2));
    } catch (e) {
      console.error(e);
      throw e;
    }
  });

  test("function", async () => {
    // Declare one Gemini function tool and verify the model produces a
    // matching tool call rather than text content.
    const tools: GeminiTool[] = [
      {
        functionDeclarations: [
          {
            name: "test",
            description:
              "Run a test with a specific name and get if it passed or failed",
            parameters: {
              type: "object",
              properties: {
                testName: {
                  type: "string",
                  description: "The name of the test that should be run.",
                },
              },
              required: ["testName"],
            },
          },
        ],
      },
    ];
    const model = new ChatGoogle().bind({ tools });
    const result = await model.invoke("Run a test on the cobalt project");
    expect(result).toHaveProperty("content");
    expect(result.content).toBe("");
    const args = result?.lc_kwargs?.additional_kwargs;
    expect(args).toBeDefined();
    expect(args).toHaveProperty("tool_calls");
    expect(Array.isArray(args.tool_calls)).toBeTruthy();
    expect(args.tool_calls).toHaveLength(1);
    const call = args.tool_calls[0];
    expect(call).toHaveProperty("type");
    expect(call.type).toBe("function");
    expect(call).toHaveProperty("function");
    const func = call.function;
    expect(func).toBeDefined();
    expect(func).toHaveProperty("name");
    expect(func.name).toBe("test");
    expect(func).toHaveProperty("arguments");
    expect(typeof func.arguments).toBe("string");
    // Arguments may be pretty-printed; strip newlines before comparing.
    expect(func.arguments.replaceAll("\n", "")).toBe('{"testName":"cobalt"}');
  });

  test("function reply", async () => {
    const tools: GeminiTool[] = [
      {
        functionDeclarations: [
          {
            name: "test",
            description:
              "Run a test with a specific name and get if it passed or failed",
            parameters: {
              type: "object",
              properties: {
                testName: {
                  type: "string",
                  description: "The name of the test that should be run.",
                },
              },
              required: ["testName"],
            },
          },
        ],
      },
    ];
    const model = new ChatGoogle().bind({ tools });
    const toolResult = {
      testPassed: true,
    };
    // Replay a full tool-call round trip: request, model tool call, and
    // the tool's result, then stream the model's follow-up.
    const messages: BaseMessageLike[] = [
      new HumanMessage("Run a test on the cobalt project."),
      new AIMessage("", {
        tool_calls: [
          {
            id: "test",
            type: "function",
            function: {
              name: "test",
              arguments: '{"testName":"cobalt"}',
            },
          },
        ],
      }),
      new ToolMessage(JSON.stringify(toolResult), "test"),
    ];
    const res = await model.stream(messages);
    const resArray: BaseMessageChunk[] = [];
    for await (const chunk of res) {
      resArray.push(chunk);
    }
    console.log(JSON.stringify(resArray, null, 2));
  });

  test("withStructuredOutput", async () => {
    const tool = {
      name: "get_weather",
      description:
        "Get the weather of a specific location and return the temperature in Celsius.",
      parameters: {
        type: "object",
        properties: {
          location: {
            type: "string",
            description: "The name of city to get the weather for.",
          },
        },
        required: ["location"],
      },
    };
    const model = new ChatGoogle().withStructuredOutput(tool);
    const result = await model.invoke("What is the weather in Paris?");
    expect(result).toHaveProperty("location");
  });

  test("media - fileData", async () => {
    // In-memory store subclass that exposes a size accessor for debugging.
    class MemStore extends InMemoryStore<MediaBlob> {
      get length() {
        return Object.keys(this.store).length;
      }
    }
    const aliasMemory = new MemStore();
    const aliasStore = new BackedBlobStore({
      backingStore: aliasMemory,
      defaultFetchOptions: {
        actionIfBlobMissing: undefined,
      },
    });
    const backingStore = new BlobStoreGoogleCloudStorage({
      uriPrefix: new GoogleCloudStorageUri("gs://test-langchainjs/mediatest/"),
      defaultStoreOptions: {
        actionIfInvalid: "prefixPath",
      },
    });
    // Read-through cache: aliases in memory, blobs persisted to GCS.
    const blobStore = new ReadThroughBlobStore({
      baseStore: aliasStore,
      backingStore,
    });
    const resolver = new SimpleWebBlobStore();
    const mediaManager = new MediaManager({
      store: blobStore,
      resolvers: [resolver],
    });
    const model = new ChatGoogle({
      modelName: "gemini-1.5-flash",
      apiConfig: {
        mediaManager,
      },
    });
    const message: MessageContentComplex[] = [
      {
        type: "text",
        text: "What is in this image?",
      },
      {
        type: "media",
        fileUri: "https://js.langchain.com/v0.2/img/brand/wordmark.png",
      },
    ];
    const messages: BaseMessage[] = [
      new HumanMessageChunk({ content: message }),
    ];
    try {
      const res = await model.invoke(messages);
      console.log(res);
      expect(res).toBeDefined();
      expect(res._getType()).toEqual("ai");
      const aiMessage = res as AIMessageChunk;
      expect(aiMessage.content).toBeDefined();
      expect(typeof aiMessage.content).toBe("string");
      const text = aiMessage.content as string;
      expect(text).toMatch(/LangChain/);
    } catch (e) {
      console.error(e);
      throw e;
    }
  });
});
0
lc_public_repos/langchainjs/libs/langchain-google-gauth/src
lc_public_repos/langchainjs/libs/langchain-google-gauth/src/tests/media.int.test.ts
import fs from "fs/promises";
import { test } from "@jest/globals";
import { GoogleCloudStorageUri } from "@langchain/google-common/experimental/media";
import { MediaBlob } from "@langchain/google-common/experimental/utils/media_core";
import {
  BlobStoreGoogleCloudStorage,
  BlobStoreGoogleCloudStorageParams,
} from "../media.js";

// Integration tests for the Google Cloud Storage blob store. They talk to the
// live bucket gs://test-langchainjs/ and therefore require Google application
// default credentials with read/write access to that bucket.
//
// NOTE(review): the "save" tests write to timestamped paths
// (test-<Date.now()>-nm / -wm), while the "get" tests read the fixed paths
// text/test-nm, text/test-wm and image/test-nm — those fixed objects must
// already exist in the bucket or the fetch tests will fail. Confirm the
// bucket is pre-seeded before relying on these in CI.
describe("GAuth GCS store", () => {
  // Store a plain-text blob with no caller metadata; verify the stored blob
  // round-trips path, content, mimetype and size, and that GCS added its own
  // object info (kind === "storage#object") without a nested metadata entry.
  test("save text no-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri = `gs://test-langchainjs/text/test-${Date.now()}-nm`;
    const content = "This is a test";
    const blob = await MediaBlob.fromBlob(
      new Blob([content], { type: "text/plain" }),
      {
        path: uri,
      }
    );
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const storedBlob = await blobStore.store(blob);
    // console.log(storedBlob);
    expect(storedBlob?.path).toEqual(uri);
    expect(await storedBlob?.asString()).toEqual(content);
    expect(storedBlob?.mimetype).toEqual("text/plain");
    // No caller metadata was supplied, so there must be no nested
    // "metadata" property on the GCS object info.
    expect(storedBlob?.metadata).not.toHaveProperty("metadata");
    expect(storedBlob?.size).toEqual(content.length);
    expect(storedBlob?.metadata?.kind).toEqual("storage#object");
  });

  // Same as above, but with caller-supplied metadata; verify the custom
  // key/value pairs come back under metadata.metadata (GCS nests custom
  // metadata inside its own object info).
  test("save text with-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri = `gs://test-langchainjs/text/test-${Date.now()}-wm`;
    const content = "This is a test";
    const blob = await MediaBlob.fromBlob(
      new Blob([content], { type: "text/plain" }),
      {
        path: uri,
        metadata: {
          alpha: "one",
          bravo: "two",
        },
      }
    );
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const storedBlob = await blobStore.store(blob);
    // console.log(storedBlob);
    expect(storedBlob?.path).toEqual(uri);
    expect(await storedBlob?.asString()).toEqual(content);
    expect(storedBlob?.mimetype).toEqual("text/plain");
    expect(storedBlob?.metadata).toHaveProperty("metadata");
    expect(storedBlob?.metadata?.metadata?.alpha).toEqual("one");
    expect(storedBlob?.metadata?.metadata?.bravo).toEqual("two");
    expect(storedBlob?.size).toEqual(content.length);
    expect(storedBlob?.metadata?.kind).toEqual("storage#object");
  });

  // Store a small binary (PNG) blob read from a local fixture; verify path,
  // byte size (176 is the size of the checked-in blue-square.png fixture),
  // and mimetype survive the round trip.
  test("save image no-metadata", async () => {
    const filename = `src/tests/data/blue-square.png`;
    const dataBuffer = await fs.readFile(filename);
    const data = new Blob([dataBuffer], { type: "image/png" });
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri = `gs://test-langchainjs/image/test-${Date.now()}-nm`;
    const blob = await MediaBlob.fromBlob(data, {
      path: uri,
    });
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const storedBlob = await blobStore.store(blob);
    // console.log(storedBlob);
    expect(storedBlob?.path).toEqual(uri);
    expect(storedBlob?.size).toEqual(176);
    expect(storedBlob?.mimetype).toEqual("image/png");
    expect(storedBlob?.metadata?.kind).toEqual("storage#object");
  });

  // Fetch a pre-existing text object (no custom metadata) by fixed URI and
  // verify its content, mimetype, size, and GCS object info.
  test("get text no-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri: string = "gs://test-langchainjs/text/test-nm";
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const blob = await blobStore.fetch(uri);
    // console.log(blob);
    expect(blob?.path).toEqual(uri);
    expect(await blob?.asString()).toEqual("This is a test");
    expect(blob?.mimetype).toEqual("text/plain");
    expect(blob?.metadata).not.toHaveProperty("metadata");
    // 14 === "This is a test".length
    expect(blob?.size).toEqual(14);
    expect(blob?.metadata?.kind).toEqual("storage#object");
  });

  // Fetch a pre-existing text object that was stored with custom metadata
  // and verify the nested metadata key/value pairs are returned.
  test("get text with-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri: string = "gs://test-langchainjs/text/test-wm";
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const blob = await blobStore.fetch(uri);
    // console.log(blob);
    expect(blob?.path).toEqual(uri);
    expect(await blob?.asString()).toEqual("This is a test");
    expect(blob?.mimetype).toEqual("text/plain");
    expect(blob?.metadata).toHaveProperty("metadata");
    expect(blob?.metadata?.metadata?.alpha).toEqual("one");
    expect(blob?.metadata?.metadata?.bravo).toEqual("two");
    expect(blob?.size).toEqual(14);
    expect(blob?.metadata?.kind).toEqual("storage#object");
  });

  // Fetch a pre-existing binary (PNG) object and verify size and mimetype.
  test("get image no-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri: string = "gs://test-langchainjs/image/test-nm";
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const blob = await blobStore.fetch(uri);
    // console.log(blob);
    expect(blob?.path).toEqual(uri);
    expect(blob?.size).toEqual(176);
    expect(blob?.mimetype).toEqual("image/png");
    expect(blob?.metadata?.kind).toEqual("storage#object");
  });
});
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { "^.+\\.tsx?$": ["@swc/jest"], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], testTimeout: 20_000, passWithNoTests: true, collectCoverageFrom: ["src/**/*.ts"], };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node");

/**
 * Jest node environment whose sandbox shares the host realm's
 * `Float32Array` constructor, so `instanceof Float32Array` checks on values
 * created outside the sandbox still succeed inside tests.
 *
 * Works around https://github.com/xenova/transformers.js/issues/57 and
 * https://github.com/jestjs/jest/issues/2549
 */
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(...args) {
    super(...args);
    // Overwrite the sandbox's Float32Array with the host realm's copy.
    this.global.Float32Array = Float32Array;
  }
}

module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/README.md
# @langchain/exa

This package contains the LangChain.js integrations for Exa through their SDK.

## Installation

```bash npm2yarn
npm install @langchain/exa @langchain/core
```

## Development

To develop the exa package, you'll need to follow these instructions:

### Install dependencies

```bash
yarn install
```

### Build the package

```bash
yarn build
```

Or from the repo root:

```bash
yarn build --filter=@langchain/exa
```

### Run tests

Test files should live within a `tests/` directory in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`:

```bash
$ yarn test
$ yarn test:int
```

### Lint & Format

Run the linter & formatter to ensure your code is up to standard:

```bash
yarn lint && yarn format
```

### Adding new entrypoints

If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/.eslintrc.cjs
// ESLint configuration for @langchain/exa: Airbnb base + typescript-eslint,
// with Prettier handling formatting. Lints only TypeScript sources; build
// output and plain JS are ignored.
module.exports = {
  extends: [
    "airbnb-base",
    "eslint:recommended",
    "prettier",
    "plugin:@typescript-eslint/recommended",
  ],
  parserOptions: {
    ecmaVersion: 12,
    parser: "@typescript-eslint/parser",
    // Type-aware linting (needed for rules like no-floating-promises).
    project: "./tsconfig.json",
    sourceType: "module",
  },
  plugins: ["@typescript-eslint", "no-instanceof"],
  // Skip config files, build artifacts, and emitted declarations.
  ignorePatterns: [
    ".eslintrc.cjs",
    "scripts",
    "node_modules",
    "dist",
    "dist-cjs",
    "*.js",
    "*.cjs",
    "*.d.ts",
  ],
  rules: {
    // Library code must not read process.env directly.
    "no-process-env": 2,
    // instanceof is unreliable across realms/bundles; use duck typing.
    "no-instanceof/no-instanceof": 2,
    "@typescript-eslint/explicit-module-boundary-types": 0,
    "@typescript-eslint/no-empty-function": 0,
    "@typescript-eslint/no-shadow": 0,
    "@typescript-eslint/no-empty-interface": 0,
    "@typescript-eslint/no-use-before-define": ["error", "nofunc"],
    "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
    // Promises must be awaited or explicitly voided.
    "@typescript-eslint/no-floating-promises": "error",
    "@typescript-eslint/no-misused-promises": "error",
    camelcase: 0,
    "class-methods-use-this": 0,
    // ESM requires explicit file extensions on relative imports.
    "import/extensions": [2, "ignorePackages"],
    "import/no-extraneous-dependencies": [
      "error",
      { devDependencies: ["**/*.test.ts"] },
    ],
    "import/no-unresolved": 0,
    "import/prefer-default-export": 0,
    "keyword-spacing": "error",
    "max-classes-per-file": 0,
    "max-len": 0,
    "no-await-in-loop": 0,
    "no-bitwise": 0,
    "no-console": 0,
    "no-restricted-syntax": 0,
    "no-shadow": 0,
    "no-continue": 0,
    "no-void": 0,
    "no-underscore-dangle": 0,
    "no-use-before-define": 0,
    "no-useless-constructor": 0,
    "no-return-await": 0,
    "consistent-return": 0,
    "no-else-return": 0,
    "func-names": 0,
    "no-lonely-if": 0,
    "prefer-rest-params": 0,
    "new-cap": ["error", { properties: false, capIsNew: false }],
  },
  overrides: [
    {
      // Tests may declare unused helpers/fixtures without warnings.
      files: ['**/*.test.ts'],
      rules: {
        '@typescript-eslint/no-unused-vars': 'off'
      }
    }
  ]
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals:[ /node\:/, /@langchain\/core\//, ], entrypoints: { index: "index", }, requiresOptionalDependency: [], tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/package.json
{ "name": "@langchain/exa", "version": "0.1.0", "description": "Exa integration for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-exa/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/exa", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "exa-js": "^1.0.12" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@types/uuid": "^9", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", "dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": 
"^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-exa/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-exa
lc_public_repos/langchainjs/libs/langchain-exa/src/tools.ts
import { CallbackManagerForToolRun } from "@langchain/core/callbacks/manager";
import { Tool, type ToolParams } from "@langchain/core/tools";
import Exa, { ContentsOptions, RegularSearchOptions } from "exa-js";

/**
 * Options for the ExaSearchResults tool.
 *
 * Constructor fields shared by `ExaSearchResults` and
 * `ExaFindSimilarResults`: the Exa SDK client to call, plus optional search
 * arguments forwarded verbatim to every request.
 *
 * NOTE(review): the "Retriever" in the name appears historical — this type
 * is the field shape for the *tools* below — but it is exported, so the
 * name is kept for backward compatibility.
 */
export type ExaSearchRetrieverFields<
  T extends ContentsOptions = { text: true }
> = ToolParams & {
  client: Exa.default;
  searchArgs?: RegularSearchOptions & T;
};

/**
 * Exa search tool integration.
 *
 * Setup:
 * Install `@langchain/exa` and `exa-js`. You'll also need an API key.
 *
 * ```bash
 * npm install @langchain/exa exa-js
 * ```
 *
 * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_exa.ExaSearchResults.html#constructor)
 *
 * <details open>
 * <summary><strong>Instantiate</strong></summary>
 *
 * ```typescript
 * import { ExaSearchResults } from "@langchain/exa";
 * import Exa from "exa-js";
 *
 * const client = new Exa(process.env.EXASEARCH_API_KEY);
 *
 * const tool = new ExaSearchResults({
 *   client,
 *   searchArgs: {
 *     numResults: 2,
 *   },
 * });
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 *
 * <summary><strong>Invocation</strong></summary>
 *
 * ```typescript
 * await tool.invoke("what is the current weather in sf?");
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 *
 * <summary><strong>Invocation with tool call</strong></summary>
 *
 * ```typescript
 * // This is usually generated by a model, but we'll create a tool call directly for demo purposes.
 * const modelGeneratedToolCall = {
 *   args: {
 *     input: "what is the current weather in sf?",
 *   },
 *   id: "tool_call_id",
 *   name: tool.name,
 *   type: "tool_call",
 * };
 * await tool.invoke(modelGeneratedToolCall);
 * ```
 *
 * ```text
 * ToolMessage {
 *   "content": "...",
 *   "name": "exa_search_results_json",
 *   "additional_kwargs": {},
 *   "response_metadata": {},
 *   "tool_call_id": "tool_call_id"
 * }
 * ```
 * </details>
 */
export class ExaSearchResults<
  T extends ContentsOptions = { text: true }
> extends Tool {
  static lc_name(): string {
    return "ExaSearchResults";
  }

  // Model-facing description of when and how to use this tool.
  description =
    "A wrapper around Exa Search. Input should be an Exa-optimized query. Output is a JSON array of the query results";

  name = "exa_search_results_json";

  // Exa SDK client used to execute searches.
  private client: Exa.default;

  // Extra options forwarded verbatim to searchAndContents on every call.
  searchArgs?: RegularSearchOptions & T;

  constructor(fields: ExaSearchRetrieverFields<T>) {
    super(fields);
    this.client = fields.client;
    this.searchArgs = fields.searchArgs;
  }

  /**
   * Run a search-and-contents request against Exa and return the raw
   * response serialized as a JSON string.
   */
  protected async _call(
    input: string,
    _runManager?: CallbackManagerForToolRun
  ): Promise<string> {
    return JSON.stringify(
      await this.client.searchAndContents<T>(input, this.searchArgs)
    );
  }
}

/**
 * Exa "find similar" tool integration. The input is a URL; the output is a
 * JSON string containing pages Exa considers similar to that URL (with
 * contents). Constructed with the same fields as `ExaSearchResults`.
 */
export class ExaFindSimilarResults<
  T extends ContentsOptions = { text: true }
> extends Tool {
  static lc_name(): string {
    return "ExaFindSimilarResults";
  }

  // Model-facing description of when and how to use this tool.
  description =
    "A wrapper around Exa Find Similar. Input should be an Exa-optimized query. Output is a JSON array of the query results";

  name = "exa_find_similar_results_json";

  // Exa SDK client used to execute find-similar requests.
  private client: Exa.default;

  // Extra options forwarded verbatim to findSimilarAndContents.
  searchArgs?: RegularSearchOptions & T;

  constructor(fields: ExaSearchRetrieverFields<T>) {
    super(fields);
    this.client = fields.client;
    this.searchArgs = fields.searchArgs;
  }

  /**
   * Run a find-similar-and-contents request against Exa for the given URL
   * and return the raw response serialized as a JSON string.
   */
  protected async _call(
    url: string,
    _runManager?: CallbackManagerForToolRun
  ): Promise<string> {
    return JSON.stringify(
      await this.client.findSimilarAndContents<T>(url, this.searchArgs)
    );
  }
}
0
lc_public_repos/langchainjs/libs/langchain-exa
lc_public_repos/langchainjs/libs/langchain-exa/src/retrievers.ts
import Exa, {
  ContentsOptions,
  RegularSearchOptions,
  SearchResponse,
  SearchResult,
} from "exa-js";
import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers";
import { Document } from "@langchain/core/documents";

/**
 * Constructor fields for `ExaRetriever`: extends `BaseRetrieverInput` with
 * the Exa SDK client to use and optional search arguments forwarded to
 * every query.
 */
export interface ExaRetrieverFields<T extends ContentsOptions = { text: true }>
  extends BaseRetrieverInput {
  client: Exa.default;
  searchArgs?: RegularSearchOptions & T;
}

/**
 * Build the metadata object for a Document from an Exa search result:
 * a shallow copy of the result with the `text` field removed, since the
 * text is surfaced as the Document's pageContent instead.
 */
export function _getMetadata<T extends ContentsOptions = { text: true }>(
  result: SearchResult<T>
): Record<string, unknown> {
  // Copy first so the caller's result object is never mutated, then strip
  // the page body via rest-destructuring.
  const copied: Record<string, unknown> = { ...result };
  const { text: _omitted, ...metadata } = copied;
  return metadata;
}

/**
 * Retriever backed by the Exa search API: every query is sent through
 * `searchAndContents` and each hit becomes one Document.
 *
 * @example
 * ```typescript
 * const retriever = new ExaRetriever({
 *   client: new Exa(
 *     process.env.EXA_API_KEY,
 *     process.env.EXA_BASE_URL,
 *   ),
 * });
 * const docs = await retriever.getRelevantDocuments("hello");
 * ```
 */
export class ExaRetriever<
  T extends ContentsOptions = { text: true }
> extends BaseRetriever {
  static lc_name() {
    return "ExaRetriever";
  }

  lc_namespace = ["langchain", "retrievers", "exa"];

  // Exa SDK client used to execute searches.
  private client: Exa.default;

  // Extra options forwarded verbatim to searchAndContents on every query.
  searchArgs?: RegularSearchOptions & T;

  constructor(fields: ExaRetrieverFields<T>) {
    super(fields);
    this.client = fields.client;
    this.searchArgs = fields.searchArgs;
  }

  async _getRelevantDocuments(query: string): Promise<Document[]> {
    const response: SearchResponse<T> = await this.client.searchAndContents<T>(
      query,
      this.searchArgs
    );

    // One Document per hit: prefer the full page text, fall back to joined
    // highlight snippets, and use a stub string when neither is present.
    return response.results.map((result) => {
      let pageContent;
      if ("text" in result) {
        pageContent = result.text;
      } else if ("highlights" in result) {
        pageContent = result.highlights.join("\n\n");
      } else {
        pageContent = "No results found.";
      }
      return new Document({
        pageContent,
        metadata: _getMetadata<T>(result),
      });
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-exa
lc_public_repos/langchainjs/libs/langchain-exa/src/index.ts
// Package entrypoint: re-export the public retriever and tool classes.
export * from "./retrievers.js";
export * from "./tools.js";
0
lc_public_repos/langchainjs/libs/langchain-exa/src
lc_public_repos/langchainjs/libs/langchain-exa/src/tests/retrievers.test.ts
import { it, expect } from "@jest/globals";
import { SearchResult, TextContentsOptions } from "exa-js";
import { _getMetadata } from "../retrievers.js";

// Unit test for the metadata mapper used by ExaRetriever.
it("should exclude the text field from metadata", () => {
  const dummyResult: SearchResult<{ text: TextContentsOptions }> = {
    title: "title",
    url: "https://example.com",
    publishedDate: "01/01/2024",
    author: "me myself and i",
    score: 100,
    id: "very unique ID",
    text: "string!",
  };

  const metadata = _getMetadata(dummyResult);

  // The page body must be stripped (it becomes Document.pageContent)...
  expect("text" in metadata).toBe(false);
  // ...while every other field must survive untouched.
  expect(metadata).toEqual({
    title: "title",
    url: "https://example.com",
    publishedDate: "01/01/2024",
    author: "me myself and i",
    score: 100,
    id: "very unique ID",
  });
  // The input result object itself must not be mutated.
  expect(dummyResult.text).toEqual("string!");
});
0
lc_public_repos/langchainjs/libs/langchain-exa/src
lc_public_repos/langchainjs/libs/langchain-exa/src/tests/tools.int.test.ts
import { test, expect } from "@jest/globals"; import Exa from "exa-js"; import { ExaFindSimilarResults, ExaSearchResults } from "../tools.js"; test("ExaSearchResults can perform a search given a string query", async () => { const exaTool = new ExaSearchResults<{ text: true }>({ // @ts-expect-error type errors client: new Exa(), }); const toolData = await exaTool.invoke( "What does the AI company LangChain do?" ); const parsedData = JSON.parse(toolData); expect("results" in parsedData).toBeTruthy(); // console.log("results:", parsedData.results); expect(parsedData.results.length).toBeGreaterThan(0); }); test("ExaFindSimilarResults can perform a simalitaty search with a provided URL", async () => { const exaTool = new ExaFindSimilarResults<{ text: true }>({ // @ts-expect-error type errors client: new Exa(), }); const toolData = await exaTool.invoke("https://langchain.com"); const parsedData = JSON.parse(toolData); expect("results" in parsedData).toBeTruthy(); // console.log("results:", parsedData.results); expect(parsedData.results.length).toBeGreaterThan(0); });
0
lc_public_repos/langchainjs/libs/langchain-exa/src
lc_public_repos/langchainjs/libs/langchain-exa/src/tests/retrievers.int.test.ts
import { test, expect } from "@jest/globals";
import Exa from "exa-js";
import { ExaRetriever } from "../retrievers.js";

// Integration test: hits the live Exa API over the network.
// NOTE(review): `new Exa()` is called with no arguments — presumably the SDK
// reads the API key from the environment; confirm before running in CI.
test("ExaRetriever can retrieve some data", async () => {
  const exaRetriever = new ExaRetriever<{ text: true }>({
    // @ts-expect-error type errors
    client: new Exa(),
  });

  const results = await exaRetriever.getRelevantDocuments(
    "What does the AI company LangChain do?"
  );
  // console.log("results:", JSON.stringify(results, null, 2));
  expect(results.length).toBeGreaterThan(0);
  // verify metadata fields are populated
  expect(results[0].metadata.url.length).toBeGreaterThan(1);
  expect(results[0].metadata.id.length).toBeGreaterThan(1);
});
0
lc_public_repos/langchainjs/libs/langchain-exa
lc_public_repos/langchainjs/libs/langchain-exa/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
  // ts-jest's native-ESM preset — the package is ESM ("type": "module") and
  // tests are launched with NODE_OPTIONS=--experimental-vm-modules.
  preset: "ts-jest/presets/default-esm",
  // Custom environment that shares the host realm's Float32Array
  // constructor with the sandbox (see jest.env.cjs).
  testEnvironment: "./jest.env.cjs",
  // Never pick up compiled output or generated docs as test modules.
  modulePathIgnorePatterns: ["dist/", "docs/"],
  // Strip the ".js" suffix from relative ESM imports so Jest resolves the
  // ".ts" sources (the codebase imports "./x.js" per ESM convention).
  moduleNameMapper: {
    "^(\\.{1,2}/.*)\\.js$": "$1",
  },
  // Transpile TypeScript with SWC — faster than ts-jest's type-checking
  // transform; tsc handles type errors separately.
  transform: {
    "^.+\\.tsx?$": ["@swc/jest"],
  },
  transformIgnorePatterns: [
    "/node_modules/",
    "\\.pnp\\.[^\\/]+$",
    "./scripts/jest-setup-after-env.js",
  ],
  // Load .env before tests so integration tests can read API keys.
  setupFiles: ["dotenv/config"],
  testTimeout: 20_000,
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node");

/**
 * Jest node environment whose sandbox shares the host realm's
 * `Float32Array` constructor, so `instanceof Float32Array` checks on values
 * created outside the sandbox still succeed inside tests.
 *
 * Works around https://github.com/xenova/transformers.js/issues/57 and
 * https://github.com/jestjs/jest/issues/2549
 */
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(...args) {
    super(...args);
    // Overwrite the sandbox's Float32Array with the host realm's copy.
    this.global.Float32Array = Float32Array;
  }
}

module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/README.md
# LangChain google-common This package contains common resources to access Google AI/ML models and other Google services in an auth-independent way. AI/ML models are supported using the same interface no matter if you are using the Google AI Studio-based version of the model or the Google Cloud Vertex AI version of the model. ## Installation This is **not** a stand-alone package since it does not contain code to do authorization. Instead, you should install *one* of the following packages: * @langchain/google-gauth * @langchain/google-webauth See those packages for details about installation. This package does **not** depend on any Google library. Instead, it relies on REST calls to Google endpoints. This is deliberate to reduce (sometimes conflicting) dependencies and make it usable on platforms that do not include file storage. ## Google services supported * Gemini model through LLM and Chat classes (both through Google AI Studio and Google Cloud Vertex AI). Including: * Function/Tool support ## TODO Tasks and services still to be implemented: * PaLM Vertex AI support and backwards compatibility * PaLM MakerSuite support and backwards compatibility * Semantic Retrieval / AQA model * PaLM embeddings * Gemini embeddings * Multimodal embeddings * Vertex AI Search * Vertex AI Model Garden * Online prediction endpoints * Gemma * Google managed models * Claude * AI Studio Tuned Models * MakerSuite / Google Drive Hub * Google Cloud Vector Store
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/.eslintrc.cjs
// ESLint configuration for @langchain/google-common: Airbnb base +
// typescript-eslint, with Prettier handling formatting. Lints only
// TypeScript sources; build output and plain JS are ignored.
module.exports = {
  extends: [
    "airbnb-base",
    "eslint:recommended",
    "prettier",
    "plugin:@typescript-eslint/recommended",
  ],
  parserOptions: {
    ecmaVersion: 12,
    parser: "@typescript-eslint/parser",
    // Type-aware linting (needed for rules like no-floating-promises).
    project: "./tsconfig.json",
    sourceType: "module",
  },
  plugins: ["@typescript-eslint", "no-instanceof"],
  // Skip config files, build artifacts, and emitted declarations.
  ignorePatterns: [
    ".eslintrc.cjs",
    "scripts",
    "node_modules",
    "dist",
    "dist-cjs",
    "*.js",
    "*.cjs",
    "*.d.ts",
  ],
  rules: {
    // Library code must not read process.env directly.
    "no-process-env": 2,
    // instanceof is unreliable across realms/bundles; use duck typing.
    "no-instanceof/no-instanceof": 2,
    "@typescript-eslint/explicit-module-boundary-types": 0,
    "@typescript-eslint/no-empty-function": 0,
    "@typescript-eslint/no-shadow": 0,
    "@typescript-eslint/no-empty-interface": 0,
    "@typescript-eslint/no-use-before-define": ["error", "nofunc"],
    "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
    // Promises must be awaited or explicitly voided.
    "@typescript-eslint/no-floating-promises": "error",
    "@typescript-eslint/no-misused-promises": "error",
    camelcase: 0,
    "class-methods-use-this": 0,
    // ESM requires explicit file extensions on relative imports.
    "import/extensions": [2, "ignorePackages"],
    "import/no-extraneous-dependencies": [
      "error",
      { devDependencies: ["**/*.test.ts"] },
    ],
    "import/no-unresolved": 0,
    "import/prefer-default-export": 0,
    "keyword-spacing": "error",
    "max-classes-per-file": 0,
    "max-len": 0,
    "no-await-in-loop": 0,
    "no-bitwise": 0,
    "no-console": 0,
    "no-restricted-syntax": 0,
    "no-shadow": 0,
    "no-continue": 0,
    "no-void": 0,
    "no-underscore-dangle": 0,
    "no-use-before-define": 0,
    "no-useless-constructor": 0,
    "no-return-await": 0,
    "consistent-return": 0,
    "no-else-return": 0,
    "func-names": 0,
    "no-lonely-if": 0,
    "prefer-rest-params": 0,
    "new-cap": ["error", { properties: false, capIsNew: false }],
  },
  overrides: [
    {
      // Tests may declare unused helpers/fixtures without warnings.
      files: ['**/*.test.ts'],
      rules: {
        '@typescript-eslint/no-unused-vars': 'off'
      }
    }
  ]
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/langchain.config.js
import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";

/**
 * Resolve a path relative to the directory containing this config file
 * (rather than relative to the process working directory).
 *
 * @param {string} relativePath
 * @returns {string}
 */
function abs(relativePath) {
  const here = dirname(fileURLToPath(import.meta.url));
  return resolve(here, relativePath);
}

// Build configuration consumed by @langchain/scripts (lc_build).
export const config = {
  // Module specifiers treated as external/internal by the bundler.
  internals: [/node\:/, /@langchain\/core\//],
  // Package entrypoints: export name -> source module (under src/).
  entrypoints: {
    index: "index",
    utils: "utils/index",
    types: "types",
    "experimental/media": "experimental/media",
    "experimental/utils/media_core": "experimental/utils/media_core",
  },
  tsConfigPath: resolve("./tsconfig.json"),
  cjsSource: "./dist-cjs",
  cjsDestination: "./dist",
  abs,
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/package.json
{ "name": "@langchain/google-common", "version": "0.1.3", "description": "Core types and classes for Google services.", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-google-common/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/google-common", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "uuid": "^10.0.0", "zod-to-json-schema": "^3.22.4" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", 
"dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0", "zod": "^3.22.4" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./utils": { "types": { "import": "./utils.d.ts", "require": "./utils.d.cts", "default": "./utils.d.ts" }, "import": "./utils.js", "require": "./utils.cjs" }, "./types": { "types": { "import": "./types.d.ts", "require": "./types.d.cts", "default": "./types.d.ts" }, "import": "./types.js", "require": "./types.cjs" }, "./experimental/media": { "types": { "import": "./experimental/media.d.ts", "require": "./experimental/media.d.cts", "default": "./experimental/media.d.ts" }, "import": "./experimental/media.js", "require": "./experimental/media.cjs" }, "./experimental/utils/media_core": { "types": { "import": "./experimental/utils/media_core.d.ts", "require": "./experimental/utils/media_core.d.cts", "default": "./experimental/utils/media_core.d.ts" }, "import": "./experimental/utils/media_core.js", "require": "./experimental/utils/media_core.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts", "utils.cjs", "utils.js", "utils.d.ts", "utils.d.cts", "types.cjs", "types.js", "types.d.ts", "types.d.cts", "experimental/media.cjs", "experimental/media.js", "experimental/media.d.ts", "experimental/media.d.cts", "experimental/utils/media_core.cjs", "experimental/utils/media_core.js", "experimental/utils/media_core.d.ts", "experimental/utils/media_core.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-common/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-google-common
lc_public_repos/langchainjs/libs/langchain-google-common/src/connection.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import {
  AsyncCaller,
  AsyncCallerCallOptions,
} from "@langchain/core/utils/async_caller";
import { getRuntimeEnvironment } from "@langchain/core/utils/env";
import { BaseRunManager } from "@langchain/core/callbacks/manager";
import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
import type {
  GoogleAIBaseLLMInput,
  GoogleConnectionParams,
  GooglePlatformType,
  GoogleResponse,
  GoogleLLMResponse,
  GoogleAIModelRequestParams,
  GoogleRawResponse,
  GoogleAIAPI,
  VertexModelFamily,
  GoogleAIAPIConfig,
  AnthropicAPIConfig,
  GeminiAPIConfig,
} from "./types.js";
import {
  GoogleAbstractedClient,
  GoogleAbstractedClientOps,
  GoogleAbstractedClientOpsMethod,
} from "./auth.js";
import {
  getGeminiAPI,
  modelToFamily,
  modelToPublisher,
} from "./utils/index.js";
import { getAnthropicAPI } from "./utils/anthropic.js";

/**
 * Base class for a single HTTP connection to a Google service.
 * Subclasses define the URL and method; this class assembles headers,
 * builds the request options, and issues the request through the
 * abstracted client (retry/backoff handled by the AsyncCaller).
 */
export abstract class GoogleConnection<
  CallOptions extends AsyncCallerCallOptions,
  ResponseType extends GoogleResponse
> {
  // Handles retries and concurrency for outgoing calls.
  caller: AsyncCaller;

  // Transport + auth abstraction (API key, GAuth, etc.).
  client: GoogleAbstractedClient;

  // When true, responses are requested as a stream instead of JSON.
  streaming: boolean;

  constructor(
    caller: AsyncCaller,
    client: GoogleAbstractedClient,
    streaming?: boolean
  ) {
    this.caller = caller;
    this.client = client;
    this.streaming = streaming ?? false;
  }

  /** Full URL for the request. Implemented by subclasses. */
  abstract buildUrl(): Promise<string>;

  /** HTTP method for the request. Implemented by subclasses. */
  abstract buildMethod(): GoogleAbstractedClientOpsMethod;

  /**
   * Headers identifying the calling library, built from _getClientInfo().
   */
  async _clientInfoHeaders(): Promise<Record<string, string>> {
    const { userAgent, clientLibraryVersion } = await this._getClientInfo();
    return {
      "User-Agent": userAgent,
      "Client-Info": clientLibraryVersion,
    };
  }

  /**
   * Compute the user-agent / client-info strings from the runtime
   * environment and the module name (the concrete class name by default).
   */
  async _getClientInfo(): Promise<{
    userAgent: string;
    clientLibraryVersion: string;
  }> {
    const env = await getRuntimeEnvironment();
    const langchain = env?.library ?? "langchain-js";
    // TODO: Add an API for getting the current LangChain version
    const langchainVersion = "0";
    const moduleName = await this._moduleName();
    let clientLibraryVersion = `${langchain}/${langchainVersion}`;
    if (moduleName && moduleName.length) {
      clientLibraryVersion = `${clientLibraryVersion}-${moduleName}`;
    }
    // NOTE: userAgent carries the full "library/version-module" string,
    // while clientLibraryVersion is just "version-module".
    return {
      userAgent: clientLibraryVersion,
      clientLibraryVersion: `${langchainVersion}-${moduleName}`,
    };
  }

  /** Name used in client-info headers; defaults to the concrete class name. */
  async _moduleName(): Promise<string> {
    return this.constructor.name;
  }

  /** Extra headers merged into each request. Subclasses may override. */
  async additionalHeaders(): Promise<Record<string, string>> {
    return {};
  }

  /**
   * Assemble the client request options: URL, method, merged headers
   * (request-specific headers win over additional headers, which win
   * over client-info headers), body (POST only), and response type
   * ("stream" when streaming, otherwise "json").
   */
  async _buildOpts(
    data: unknown | undefined,
    _options: CallOptions,
    requestHeaders: Record<string, string> = {}
  ): Promise<GoogleAbstractedClientOps> {
    const url = await this.buildUrl();
    const method = this.buildMethod();
    const infoHeaders = (await this._clientInfoHeaders()) ?? {};
    const additionalHeaders = (await this.additionalHeaders()) ?? {};
    const headers = {
      ...infoHeaders,
      ...additionalHeaders,
      ...requestHeaders,
    };

    const opts: GoogleAbstractedClientOps = {
      url,
      method,
      headers,
    };
    if (data && method === "POST") {
      opts.data = data;
    }
    if (this.streaming) {
      opts.responseType = "stream";
    } else {
      opts.responseType = "json";
    }
    return opts;
  }

  /**
   * Build options and issue the request through the caller, forwarding
   * any abort signal from the call options.
   */
  async _request(
    data: unknown | undefined,
    options: CallOptions,
    requestHeaders: Record<string, string> = {}
  ): Promise<ResponseType> {
    const opts = await this._buildOpts(data, options, requestHeaders);
    const callResponse = await this.caller.callWithOptions(
      { signal: options?.signal },
      async () => this.client.request(opts)
    );
    // Widen to unknown first so the cast to ResponseType is explicit.
    const response: unknown = callResponse;
    return <ResponseType>response;
  }
}

/**
 * A connection that knows which Google platform it targets (GCP / AI Studio)
 * and derives endpoint and location from that. Provides "computed" getters
 * so subclasses can override defaults while explicit fields still win.
 */
export abstract class GoogleHostConnection<
    CallOptions extends AsyncCallerCallOptions,
    ResponseType extends GoogleResponse,
    AuthOptions
  >
  extends GoogleConnection<CallOptions, ResponseType>
  implements GoogleConnectionParams<AuthOptions>
{
  // This does not default to a value intentionally.
  // Use the "platform" getter if you need this.
  platformType: GooglePlatformType | undefined;

  _endpoint: string | undefined;

  _location: string | undefined;

  apiVersion = "v1";

  constructor(
    fields: GoogleConnectionParams<AuthOptions> | undefined,
    caller: AsyncCaller,
    client: GoogleAbstractedClient,
    streaming?: boolean
  ) {
    super(caller, client, streaming);
    this.caller = caller;

    this.platformType = fields?.platformType;
    this._endpoint = fields?.endpoint;
    this._location = fields?.location;
    this.apiVersion = fields?.apiVersion ?? this.apiVersion;
    this.client = client;
  }

  /** Explicitly-configured platform, falling back to the computed default. */
  get platform(): GooglePlatformType {
    return this.platformType ?? this.computedPlatformType;
  }

  get computedPlatformType(): GooglePlatformType {
    return "gcp";
  }

  /** Explicitly-configured location, falling back to the computed default. */
  get location(): string {
    return this._location ?? this.computedLocation;
  }

  get computedLocation(): string {
    return "us-central1";
  }

  /** Explicitly-configured endpoint, falling back to the computed default. */
  get endpoint(): string {
    return this._endpoint ?? this.computedEndpoint;
  }

  get computedEndpoint(): string {
    return `${this.location}-aiplatform.googleapis.com`;
  }

  buildMethod(): GoogleAbstractedClientOpsMethod {
    return "POST";
  }
}

/**
 * A host connection whose responses are returned as raw blobs
 * (e.g. media downloads) rather than parsed JSON or streams.
 */
export abstract class GoogleRawConnection<
  CallOptions extends AsyncCallerCallOptions,
  AuthOptions
> extends GoogleHostConnection<CallOptions, GoogleRawResponse, AuthOptions> {
  async _buildOpts(
    data: unknown | undefined,
    _options: CallOptions,
    requestHeaders: Record<string, string> = {}
  ): Promise<GoogleAbstractedClientOps> {
    const opts = await super._buildOpts(data, _options, requestHeaders);
    // Override the default json/stream response type with a raw blob.
    opts.responseType = "blob";
    return opts;
  }
}

/**
 * Connection for a model-based Google AI call. Resolves which API
 * implementation to use ("google"/Gemini or "anthropic"/Claude on Vertex),
 * builds model-specific URLs for both Vertex AI and the Generative
 * Language (AI Studio) endpoints, and emits custom callback events
 * around each request.
 */
export abstract class GoogleAIConnection<
    CallOptions extends AsyncCallerCallOptions,
    InputType,
    AuthOptions,
    ResponseType extends GoogleResponse
  >
  extends GoogleHostConnection<CallOptions, ResponseType, AuthOptions>
  implements GoogleAIBaseLLMInput<AuthOptions>
{
  model: string;

  modelName: string;

  client: GoogleAbstractedClient;

  _apiName?: string;

  apiConfig?: GoogleAIAPIConfig;

  constructor(
    fields: GoogleAIBaseLLMInput<AuthOptions> | undefined,
    caller: AsyncCaller,
    client: GoogleAbstractedClient,
    streaming?: boolean
  ) {
    super(fields, caller, client, streaming);
    this.client = client;
    // "model" is preferred; "modelName" is the deprecated alias.
    // Both fields are kept in sync afterwards.
    this.modelName = fields?.model ?? fields?.modelName ?? this.model;
    this.model = this.modelName;
    this._apiName = fields?.apiName;
    this.apiConfig = {
      safetyHandler: fields?.safetyHandler, // For backwards compatibility
      ...fields?.apiConfig,
    };
  }

  /** Model family ("gemini", "claude", ...) derived from the model name. */
  get modelFamily(): VertexModelFamily {
    return modelToFamily(this.model);
  }

  /** Vertex publisher segment derived from the model name. */
  get modelPublisher(): string {
    return modelToPublisher(this.model);
  }

  get computedAPIName(): string {
    // At least at the moment, model publishers and APIs map the same
    return this.modelPublisher;
  }

  /** Explicitly-configured API name, falling back to the computed one. */
  get apiName(): string {
    return this._apiName ?? this.computedAPIName;
  }

  /** The API implementation used to format requests and parse responses. */
  get api(): GoogleAIAPI {
    switch (this.apiName) {
      case "google":
        return getGeminiAPI(this.apiConfig as GeminiAPIConfig);
      case "anthropic":
        return getAnthropicAPI(this.apiConfig as AnthropicAPIConfig);
      default:
        throw new Error(`Unknown API: ${this.apiName}`);
    }
  }

  get computedPlatformType(): GooglePlatformType {
    // An API-key client implies AI Studio; anything else implies GCP.
    if (this.client.clientType === "apiKey") {
      return "gai";
    } else {
      return "gcp";
    }
  }

  get computedLocation(): string {
    switch (this.apiName) {
      case "google":
        return super.computedLocation;
      case "anthropic":
        // Claude-on-Vertex models are served from this region.
        return "us-east5";
      default:
        throw new Error(
          `Unknown apiName: ${this.apiName}. Can't get location.`
        );
    }
  }

  /** The method name appended to the model URL (e.g. "generateContent"). */
  abstract buildUrlMethod(): Promise<string>;

  /** URL for the Generative Language (AI Studio) endpoint. */
  async buildUrlGenerativeLanguage(): Promise<string> {
    const method = await this.buildUrlMethod();
    const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.model}:${method}`;
    return url;
  }

  /** URL for the Vertex AI endpoint (requires a resolvable project ID). */
  async buildUrlVertex(): Promise<string> {
    const projectId = await this.client.getProjectId();
    const method = await this.buildUrlMethod();
    const publisher = this.modelPublisher;
    const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/${publisher}/models/${this.model}:${method}`;
    return url;
  }

  async buildUrl(): Promise<string> {
    switch (this.platform) {
      case "gai":
        return this.buildUrlGenerativeLanguage();
      default:
        return this.buildUrlVertex();
    }
  }

  /** Convert LangChain input into the wire format for the selected API. */
  abstract formatData(
    input: InputType,
    parameters: GoogleAIModelRequestParams
  ): Promise<unknown>;

  /**
   * Format the input, emit a "google-request-<Class>" custom event,
   * perform the request, then emit "google-response-<Class>".
   * Event payloads are only built when a run manager is present
   * (optional chaining short-circuits argument evaluation).
   */
  async request(
    input: InputType,
    parameters: GoogleAIModelRequestParams,
    options: CallOptions,
    runManager?: BaseRunManager
  ): Promise<ResponseType> {
    const moduleName = this.constructor.name;
    const streamingParameters: GoogleAIModelRequestParams = {
      ...parameters,
      streaming: this.streaming,
    };
    const data = await this.formatData(input, streamingParameters);

    await runManager?.handleCustomEvent(`google-request-${moduleName}`, {
      data,
      parameters: streamingParameters,
      options,
      connection: {
        // Spread copies own enumerable fields only; getters live on the
        // prototype, so the useful computed values are added explicitly.
        ...this,
        url: await this.buildUrl(),
        urlMethod: await this.buildUrlMethod(),
        modelFamily: this.modelFamily,
        modelPublisher: this.modelPublisher,
        computedPlatformType: this.computedPlatformType,
      },
    });

    const response = await this._request(data, options);

    await runManager?.handleCustomEvent(`google-response-${moduleName}`, {
      response,
    });

    return response;
  }
}

/**
 * LLM-oriented connection that picks the URL method based on the model
 * family (Gemini vs Claude) and streaming mode, and delegates request
 * formatting to the selected API implementation.
 */
export abstract class AbstractGoogleLLMConnection<
  MessageType,
  AuthOptions
> extends GoogleAIConnection<
  BaseLanguageModelCallOptions,
  MessageType,
  AuthOptions,
  GoogleLLMResponse
> {
  async buildUrlMethodGemini(): Promise<string> {
    return this.streaming ? "streamGenerateContent" : "generateContent";
  }

  async buildUrlMethodClaude(): Promise<string> {
    return this.streaming ? "streamRawPredict" : "rawPredict";
  }

  async buildUrlMethod(): Promise<string> {
    switch (this.modelFamily) {
      case "gemini":
        return this.buildUrlMethodGemini();
      case "claude":
        return this.buildUrlMethodClaude();
      default:
        throw new Error(`Unknown model family: ${this.modelFamily}`);
    }
  }

  async formatData(
    input: MessageType,
    parameters: GoogleAIModelRequestParams
  ): Promise<unknown> {
    return this.api.formatData(input, parameters);
  }
}

/**
 * Parsed pieces of a custom event name of the form
 * "google-<subEvent>-<module>" (e.g. "google-request-ChatConnection").
 */
export interface GoogleCustomEventInfo {
  subEvent: string;
  module: string;
}

/**
 * Callback handler that routes the custom events emitted by
 * GoogleAIConnection.request() to request/response/chunk-specific
 * abstract methods, based on the event name.
 */
export abstract class GoogleRequestCallbackHandler extends BaseCallbackHandler {
  /**
   * Split an event name on "-" into its sub-event ("request", "response",
   * "chunk") and originating module name.
   */
  customEventInfo(eventName: string): GoogleCustomEventInfo {
    const names = eventName.split("-");
    return {
      subEvent: names[1],
      module: names[2],
    };
  }

  abstract handleCustomRequestEvent(
    eventName: string,
    eventInfo: GoogleCustomEventInfo,
    data: any,
    runId: string,
    tags?: string[],
    metadata?: Record<string, any>
  ): any;

  abstract handleCustomResponseEvent(
    eventName: string,
    eventInfo: GoogleCustomEventInfo,
    data: any,
    runId: string,
    tags?: string[],
    metadata?: Record<string, any>
  ): any;

  abstract handleCustomChunkEvent(
    eventName: string,
    eventInfo: GoogleCustomEventInfo,
    data: any,
    runId: string,
    tags?: string[],
    metadata?: Record<string, any>
  ): any;

  /**
   * Dispatch a custom event to the matching handler. Unknown sub-events
   * are logged to console.error; empty event names are ignored.
   */
  handleCustomEvent(
    eventName: string,
    data: any,
    runId: string,
    tags?: string[],
    metadata?: Record<string, any>
  ): any {
    if (!eventName) {
      return undefined;
    }
    const eventInfo = this.customEventInfo(eventName);
    switch (eventInfo.subEvent) {
      case "request":
        return this.handleCustomRequestEvent(
          eventName,
          eventInfo,
          data,
          runId,
          tags,
          metadata
        );
      case "response":
        return this.handleCustomResponseEvent(
          eventName,
          eventInfo,
          data,
          runId,
          tags,
          metadata
        );
      case "chunk":
        return this.handleCustomChunkEvent(
          eventName,
          eventInfo,
          data,
          runId,
          tags,
          metadata
        );
      default:
        console.error(
          `Unexpected eventInfo for ${eventName} ${JSON.stringify(
            eventInfo,
            null,
            1
          )}`
        );
    }
  }
}

/**
 * Callback handler that logs every request/response/chunk event to the
 * console. Useful for debugging what is sent to and received from Google.
 */
export class GoogleRequestLogger extends GoogleRequestCallbackHandler {
  name: string = "GoogleRequestLogger";

  /** Log one event: "<eventName> [tags] <pretty-printed data>". */
  log(eventName: string, data: any, tags?: string[]): undefined {
    const tagStr = tags ? `[${tags}]` : "[]";
    console.log(`${eventName} ${tagStr} ${JSON.stringify(data, null, 1)}`);
  }

  handleCustomRequestEvent(
    eventName: string,
    _eventInfo: GoogleCustomEventInfo,
    data: any,
    _runId: string,
    tags?: string[],
    _metadata?: Record<string, any>
  ): any {
    this.log(eventName, data, tags);
  }

  handleCustomResponseEvent(
    eventName: string,
    _eventInfo: GoogleCustomEventInfo,
    data: any,
    _runId: string,
    tags?: string[],
    _metadata?: Record<string, any>
  ): any {
    this.log(eventName, data, tags);
  }

  handleCustomChunkEvent(
    eventName: string,
    _eventInfo: GoogleCustomEventInfo,
    data: any,
    _runId: string,
    tags?: string[],
    _metadata?: Record<string, any>
  ): any {
    this.log(eventName, data, tags);
  }
}

/**
 * Callback handler that records the most recent request and response
 * payloads (and all chunks) in memory, typically for tests and debugging.
 */
export class GoogleRequestRecorder extends GoogleRequestCallbackHandler {
  name = "GoogleRequestRecorder";

  // Last request payload seen (overwritten on each request event).
  request: any = {};

  // Last response payload seen (overwritten on each response event).
  response: any = {};

  // All chunk payloads seen, in arrival order.
  chunk: any[] = [];

  handleCustomRequestEvent(
    _eventName: string,
    _eventInfo: GoogleCustomEventInfo,
    data: any,
    _runId: string,
    _tags?: string[],
    _metadata?: Record<string, any>
  ): any {
    this.request = data;
  }

  handleCustomResponseEvent(
    _eventName: string,
    _eventInfo: GoogleCustomEventInfo,
    data: any,
    _runId: string,
    _tags?: string[],
    _metadata?: Record<string, any>
  ): any {
    this.response = data;
  }

  handleCustomChunkEvent(
    _eventName: string,
    _eventInfo: GoogleCustomEventInfo,
    data: any,
    _runId: string,
    _tags?: string[],
    _metadata?: Record<string, any>
  ): any {
    this.chunk.push(data);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-google-common
lc_public_repos/langchainjs/libs/langchain-google-common/src/auth.ts
import { ReadableJsonStream } from "./utils/stream.js"; import { GooglePlatformType } from "./types.js"; export type GoogleAbstractedClientOpsMethod = "GET" | "POST" | "DELETE"; export type GoogleAbstractedClientOpsResponseType = "json" | "stream" | "blob"; export type GoogleAbstractedClientOps = { url?: string; method?: GoogleAbstractedClientOpsMethod; headers?: Record<string, string>; data?: unknown; responseType?: GoogleAbstractedClientOpsResponseType; }; export interface GoogleAbstractedClient { request: (opts: GoogleAbstractedClientOps) => unknown; getProjectId: () => Promise<string>; get clientType(): string; } export abstract class GoogleAbstractedFetchClient implements GoogleAbstractedClient { abstract get clientType(): string; abstract getProjectId(): Promise<string>; abstract request(opts: GoogleAbstractedClientOps): unknown; async _buildData(res: Response, opts: GoogleAbstractedClientOps) { switch (opts.responseType) { case "json": return res.json(); case "stream": return new ReadableJsonStream(res.body); default: return res.blob(); } } async _request( url: string | undefined, opts: GoogleAbstractedClientOps, additionalHeaders: Record<string, string> ) { if (url == null) throw new Error("Missing URL"); const fetchOptions: { method?: string; headers: Record<string, string>; body?: string; } = { method: opts.method, headers: { "Content-Type": "application/json", ...(opts.headers ?? {}), ...(additionalHeaders ?? 
{}), }, }; if (opts.data !== undefined) { if (typeof opts.data === "string") { fetchOptions.body = opts.data; } else { fetchOptions.body = JSON.stringify(opts.data); } } const res = await fetch(url, fetchOptions); if (!res.ok) { const resText = await res.text(); const error = new Error( `Google request failed with status code ${res.status}: ${resText}` ); /* eslint-disable @typescript-eslint/no-explicit-any */ (error as any).response = res; (error as any).details = { url, opts, fetchOptions, result: res, }; /* eslint-enable @typescript-eslint/no-explicit-any */ throw error; } const data = await this._buildData(res, opts); return { data, config: {}, status: res.status, statusText: res.statusText, headers: res.headers, request: { responseURL: res.url }, }; } } export class ApiKeyGoogleAuth extends GoogleAbstractedFetchClient { apiKey: string; constructor(apiKey: string) { super(); this.apiKey = apiKey; } get clientType(): string { return "apiKey"; } getProjectId(): Promise<string> { throw new Error("APIs that require a project ID cannot use an API key"); // Perhaps we could implement this if needed: // https://cloud.google.com/docs/authentication/api-keys#get-info } request(opts: GoogleAbstractedClientOps): unknown { const authHeader = { "X-Goog-Api-Key": this.apiKey, }; return this._request(opts.url, opts, authHeader); } } export function aiPlatformScope(platform: GooglePlatformType): string[] { switch (platform) { case "gai": return ["https://www.googleapis.com/auth/generative-language"]; default: return ["https://www.googleapis.com/auth/cloud-platform"]; } } export function ensureAuthOptionScopes<AuthOptions>( authOption: AuthOptions | undefined, scopeProperty: string, scopesOrPlatform: string[] | GooglePlatformType | undefined ): AuthOptions { // If the property is already set, return it if (authOption && Object.hasOwn(authOption, scopeProperty)) { return authOption; } // Otherwise add it const scopes: string[] = Array.isArray(scopesOrPlatform) ? 
(scopesOrPlatform as string[]) : aiPlatformScope(scopesOrPlatform ?? "gcp"); return { [scopeProperty]: scopes, ...(authOption ?? {}), } as AuthOptions; }
0
lc_public_repos/langchainjs/libs/langchain-google-common
lc_public_repos/langchainjs/libs/langchain-google-common/src/types.ts
import type { BaseLLMParams } from "@langchain/core/language_models/llms";
import type {
  BaseChatModelCallOptions,
  BindToolsInput,
} from "@langchain/core/language_models/chat_models";
import {
  BaseMessage,
  BaseMessageChunk,
  MessageContent,
} from "@langchain/core/messages";
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
import type { JsonStream } from "./utils/stream.js";
import { MediaManager } from "./experimental/utils/media_core.js";
import {
  AnthropicResponseData,
  AnthropicAPIConfig,
} from "./types-anthropic.js";

export * from "./types-anthropic.js";

/**
 * Parameters needed to setup the client connection.
 * AuthOptions are something like GoogleAuthOptions (from google-auth-library)
 * or WebGoogleAuthOptions.
 */
export interface GoogleClientParams<AuthOptions> {
  authOptions?: AuthOptions;

  /** Some APIs allow an API key instead */
  apiKey?: string;
}

/**
 * What platform is this running on?
 * gai - Google AI Studio / MakerSuite / Generative AI platform
 * gcp - Google Cloud Platform
 */
export type GooglePlatformType = "gai" | "gcp";

/** Connection details layered on top of the client/auth parameters. */
export interface GoogleConnectionParams<AuthOptions>
  extends GoogleClientParams<AuthOptions> {
  /** Hostname for the API call (if this is running on GCP) */
  endpoint?: string;

  /** Region where the LLM is stored (if this is running on GCP) */
  location?: string;

  /** The version of the API functions. Part of the path. */
  apiVersion?: string;

  /**
   * What platform to run the service on.
   * If not specified, the class should determine this from other
   * means. Either way, the platform actually used will be in
   * the "platform" getter.
   */
  platformType?: GooglePlatformType;
}

/**
 * Harm categories usable in safety settings. Each category is exposed under
 * several alias keys (PascalCase, CONSTANT_CASE, raw API value) that all
 * resolve to the same wire value.
 */
export const GoogleAISafetyCategory = {
  Harassment: "HARM_CATEGORY_HARASSMENT",
  HARASSMENT: "HARM_CATEGORY_HARASSMENT",
  HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT",

  HateSpeech: "HARM_CATEGORY_HATE_SPEECH",
  HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH",
  HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH",

  SexuallyExplicit: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
  SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
  HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT",

  Dangerous: "HARM_CATEGORY_DANGEROUS",
  DANGEROUS: "HARM_CATEGORY_DANGEROUS",
  HARM_CATEGORY_DANGEROUS: "HARM_CATEGORY_DANGEROUS",

  CivicIntegrity: "HARM_CATEGORY_CIVIC_INTEGRITY",
  CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY",
  HARM_CATEGORY_CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY",
} as const;

export type GoogleAISafetyCategory =
  (typeof GoogleAISafetyCategory)[keyof typeof GoogleAISafetyCategory];

/**
 * Blocking thresholds for safety settings, from blocking nothing
 * ("BLOCK_NONE"/"OFF") up to blocking low-severity content and above.
 * Aliases map to the raw API values.
 */
export const GoogleAISafetyThreshold = {
  None: "BLOCK_NONE",
  NONE: "BLOCK_NONE",
  BLOCK_NONE: "BLOCK_NONE",

  Few: "BLOCK_ONLY_HIGH",
  FEW: "BLOCK_ONLY_HIGH",
  BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH",

  Some: "BLOCK_MEDIUM_AND_ABOVE",
  SOME: "BLOCK_MEDIUM_AND_ABOVE",
  BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE",

  Most: "BLOCK_LOW_AND_ABOVE",
  MOST: "BLOCK_LOW_AND_ABOVE",
  BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE",

  Off: "OFF",
  OFF: "OFF",
  BLOCK_OFF: "OFF",
} as const;

export type GoogleAISafetyThreshold =
  (typeof GoogleAISafetyThreshold)[keyof typeof GoogleAISafetyThreshold];

/** How a safety threshold is evaluated: by severity or by probability. */
export const GoogleAISafetyMethod = {
  Severity: "SEVERITY",
  Probability: "PROBABILITY",
} as const;

export type GoogleAISafetyMethod =
  (typeof GoogleAISafetyMethod)[keyof typeof GoogleAISafetyMethod];

/** One safety rule: which harm category to filter, at what threshold. */
export interface GoogleAISafetySetting {
  category: GoogleAISafetyCategory | string;
  threshold: GoogleAISafetyThreshold | string;
  method?: GoogleAISafetyMethod | string; // Just for Vertex AI?
}

export type GoogleAIResponseMimeType = "text/plain" | "application/json";

/** Model selection and sampling parameters shared by the Google AI models. */
export interface GoogleAIModelParams {
  /** Model to use */
  model?: string;
  /**
   * Model to use
   * Alias for `model`
   */
  modelName?: string;

  /** Sampling temperature to use */
  temperature?: number;

  /**
   * Maximum number of tokens to generate in the completion.
   */
  maxOutputTokens?: number;

  /**
   * Top-p changes how the model selects tokens for output.
   *
   * Tokens are selected from most probable to least until the sum
   * of their probabilities equals the top-p value.
   *
   * For example, if tokens A, B, and C have a probability of
   * .3, .2, and .1 and the top-p value is .5, then the model will
   * select either A or B as the next token (using temperature).
   */
  topP?: number;

  /**
   * Top-k changes how the model selects tokens for output.
   *
   * A top-k of 1 means the selected token is the most probable among
   * all tokens in the model’s vocabulary (also called greedy decoding),
   * while a top-k of 3 means that the next token is selected from
   * among the 3 most probable tokens (using temperature).
   */
  topK?: number;

  stopSequences?: string[];

  safetySettings?: GoogleAISafetySetting[];

  convertSystemMessageToHumanContent?: boolean;

  /**
   * Available for `gemini-1.5-pro`.
   * The output format of the generated candidate text.
   * Supported MIME types:
   *  - `text/plain`: Text output.
   *  - `application/json`: JSON response in the candidates.
   *
   * @default "text/plain"
   */
  responseMimeType?: GoogleAIResponseMimeType;

  /**
   * Whether or not to stream.
   * @default false
   */
  streaming?: boolean;
}

/** Any tool shape accepted when binding tools to a model. */
export type GoogleAIToolType = BindToolsInput | GeminiTool;

/**
 * The params which can be passed to the API at request time.
 */
export interface GoogleAIModelRequestParams extends GoogleAIModelParams {
  tools?: GoogleAIToolType[];
  /**
   * Force the model to use tools in a specific way.
   *
   * | Mode     | Description                                                                                                                                             |
   * |----------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
   * | "auto"   | The default model behavior. The model decides whether to predict a function call or a natural language response.                                        |
   * | "any"    | The model must predict only function calls. To limit the model to a subset of functions, define the allowed function names in `allowed_function_names`. |
   * | "none"   | The model must not predict function calls. This behavior is equivalent to a model request without any associated function declarations.                 |
   * | string   | The string value must be one of the function names. This will force the model to predict the specified function call.                                   |
   *
   * The tool configuration's "any" mode ("forced function calling") is supported for Gemini 1.5 Pro models only.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  tool_choice?: string | "auto" | "any" | "none" | Record<string, any>;
  /**
   * Allowed functions to call when the mode is "any".
   * If empty, any one of the provided functions are called.
   */
  allowed_function_names?: string[];
}

/**
 * Everything needed to construct a Google LLM: base LLM params plus
 * connection, model, safety, and API-implementation configuration.
 */
export interface GoogleAIBaseLLMInput<AuthOptions>
  extends BaseLLMParams,
    GoogleConnectionParams<AuthOptions>,
    GoogleAIModelParams,
    GoogleAISafetyParams,
    GoogleAIAPIParams {}

/** Per-invocation call options for the chat models. */
export interface GoogleAIBaseLanguageModelCallOptions
  extends BaseChatModelCallOptions,
    GoogleAIModelRequestParams,
    GoogleAISafetyParams {
  /**
   * Whether or not to include usage data, like token counts
   * in the streamed response chunks.
   * @default true
   */
  streamUsage?: boolean;
}

/**
 * Input to LLM class.
 */
export interface GoogleBaseLLMInput<AuthOptions>
  extends GoogleAIBaseLLMInput<AuthOptions> {}

/** Generic response wrapper; `data` holds the response payload. */
export interface GoogleResponse {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  data: any;
}

/** A response whose body is kept as a binary Blob rather than parsed. */
export interface GoogleRawResponse extends GoogleResponse {
  data: Blob;
}

/** A plain-text part of a Gemini message. */
export interface GeminiPartText {
  text: string;
}

/**
 * Inline media carried directly in the message.
 * NOTE(review): `data` is presumably base64-encoded — confirm against the
 * Gemini API reference.
 */
export interface GeminiPartInlineData {
  inlineData: {
    mimeType: string;
    data: string;
  };
}

/** A part referencing media by URI instead of inlining it. */
export interface GeminiPartFileData {
  fileData: {
    mimeType: string;
    fileUri: string;
  };
}

// AI Studio only?
/** A function/tool call emitted by the model. */
export interface GeminiPartFunctionCall {
  functionCall: {
    name: string;
    args?: object;
  };
}

// AI Studio Only?
/** The result of executing a function call, sent back to the model. */
export interface GeminiPartFunctionResponse {
  functionResponse: {
    name: string;
    response: object;
  };
}

/** Union of all part shapes that can appear in a Gemini message. */
export type GeminiPart =
  | GeminiPartText
  | GeminiPartInlineData
  | GeminiPartFileData
  | GeminiPartFunctionCall
  | GeminiPartFunctionResponse;

/** Wire-format safety setting (raw strings, unlike GoogleAISafetySetting). */
export interface GeminiSafetySetting {
  category: string;
  threshold: string;
}

/** A safety rating attached to a candidate; extra keys are passed through. */
export type GeminiSafetyRating = {
  category: string;
  probability: string;
} & Record<string, unknown>;

// The "system" content appears to only be valid in the systemInstruction
export type GeminiRole = "system" | "user" | "model" | "function";

/** One conversational turn: an ordered list of parts plus the speaker role. */
export interface GeminiContent {
  parts: GeminiPart[];
  role: GeminiRole; // Vertex AI requires the role
}

/** A tool definition: currently just function declarations. */
export interface GeminiTool {
  functionDeclarations?: GeminiFunctionDeclaration[];
}

/** Declares one callable function by name, description, and schema. */
export interface GeminiFunctionDeclaration {
  name: string;
  description: string;
  parameters?: GeminiFunctionSchema;
}

/** An OpenAPI-style schema subset describing function parameters. */
export interface GeminiFunctionSchema {
  type: GeminiFunctionSchemaType;
  format?: string;
  description?: string;
  nullable?: boolean;
  enum?: string[];
  properties?: Record<string, GeminiFunctionSchema>;
  required?: string[];
  items?: GeminiFunctionSchema;
}

export type GeminiFunctionSchemaType =
  | "string"
  | "number"
  | "integer"
  | "boolean"
  | "array"
  | "object";

/** Generation controls sent as the request's `generationConfig`. */
export interface GeminiGenerationConfig {
  stopSequences?: string[];
  candidateCount?: number;
  maxOutputTokens?: number;
  temperature?: number;
  topP?: number;
  topK?: number;
  responseMimeType?: GoogleAIResponseMimeType;
}

/** The request body for the Gemini generateContent endpoints. */
export interface GeminiRequest {
  contents?: GeminiContent[];
  systemInstruction?: GeminiContent;
  tools?: GeminiTool[];
  toolConfig?: {
    functionCallingConfig: {
      mode: "auto" | "any" | "none";
      allowedFunctionNames?: string[];
    };
  };
  safetySettings?: GeminiSafetySetting[];
  generationConfig?: GeminiGenerationConfig;
}

/** One candidate completion returned by the model. */
interface GeminiResponseCandidate {
  content: {
    parts: GeminiPart[];
    role: string;
  };
  finishReason: string;
  index: number;
  tokenCount?: number;
  safetyRatings: GeminiSafetyRating[];
}

/** Feedback about the prompt itself, e.g. why it was blocked. */
interface GeminiResponsePromptFeedback {
  blockReason?: string;
  safetyRatings: GeminiSafetyRating[];
}

/** The parsed body of a generateContent response. */
export interface GenerateContentResponseData {
  candidates: GeminiResponseCandidate[];
  promptFeedback: GeminiResponsePromptFeedback;
  usageMetadata: Record<string, unknown>;
}

export type GoogleLLMModelFamily = null | "palm" | "gemini";

/** Vertex AI additionally serves Anthropic's Claude models. */
export type VertexModelFamily = GoogleLLMModelFamily | "claude";

/** A JsonStream (streaming) or one/many parsed response bodies. */
export type GoogleLLMResponseData =
  | JsonStream
  | GenerateContentResponseData
  | GenerateContentResponseData[];

/** An LLM response, which may also carry Anthropic-format data. */
export interface GoogleLLMResponse extends GoogleResponse {
  data: GoogleLLMResponseData | AnthropicResponseData;
}

export interface GoogleAISafetyHandler {
  /**
   * A function that will take a response and return the, possibly modified,
   * response or throw an exception if there are safety issues.
   *
   * @throws GoogleAISafetyError
   */
  handle(response: GoogleLLMResponse): GoogleLLMResponse;
}

/** Mixin carrying an optional safety handler. */
export interface GoogleAISafetyParams {
  safetyHandler?: GoogleAISafetyHandler;
}

/** A JSON Schema restricted to the type vocabulary Gemini understands. */
export type GeminiJsonSchema = Record<string, unknown> & {
  properties?: Record<string, GeminiJsonSchema>;
  type: GeminiFunctionSchemaType;
};

/**
 * A GeminiJsonSchema that may still carry fields outside Gemini's subset
 * (e.g. `additionalProperties`), before being cleaned up.
 */
export interface GeminiJsonSchemaDirty extends GeminiJsonSchema {
  items?: GeminiJsonSchemaDirty;
  properties?: Record<string, GeminiJsonSchemaDirty>;
  additionalProperties?: boolean;
}

/**
 * The conversion functions an API implementation provides: translating
 * LangChain messages to the wire format and parsing responses back into
 * LangChain outputs.
 */
export type GoogleAIAPI = {
  messageContentToParts?: (content: MessageContent) => Promise<GeminiPart[]>;

  baseMessageToContent?: (
    message: BaseMessage,
    prevMessage: BaseMessage | undefined,
    useSystemInstruction: boolean
  ) => Promise<GeminiContent[]>;

  responseToString: (response: GoogleLLMResponse) => string;

  responseToChatGeneration: (
    response: GoogleLLMResponse
  ) => ChatGenerationChunk | null;

  chunkToString: (chunk: BaseMessageChunk) => string;

  responseToBaseMessage: (response: GoogleLLMResponse) => BaseMessage;

  responseToChatResult: (response: GoogleLLMResponse) => ChatResult;

  formatData: (
    input: unknown,
    parameters: GoogleAIModelRequestParams
  ) => Promise<unknown>;
};

/** Configuration specific to the Gemini API implementation. */
export interface GeminiAPIConfig {
  safetyHandler?: GoogleAISafetyHandler;
  mediaManager?: MediaManager;
  useSystemInstruction?: boolean;
}

export type GoogleAIAPIConfig = GeminiAPIConfig | AnthropicAPIConfig;

/** Selects which API implementation to use, and its configuration. */
export interface GoogleAIAPIParams {
  apiName?: string;
  apiConfig?: GoogleAIAPIConfig;
}