index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-redis/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-redis/langchain.config.js
import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";

/**
 * Resolve a path relative to this config file's directory (not the
 * process working directory).
 * @param {string} relativePath
 * @returns {string} absolute path
 */
function abs(relativePath) {
  return resolve(dirname(fileURLToPath(import.meta.url)), relativePath);
}

/** Build configuration consumed by the @langchain/scripts tooling. */
export const config = {
  // Module specifiers treated as internal (not bundled/re-exported).
  internals: [/node\:/, /@langchain\/core\//],
  entrypoints: {
    index: "index",
  },
  requiresOptionalDependency: [],
  // Anchor to this file via abs(); a bare resolve() would depend on the
  // caller's cwd and break when the build runs from the repo root.
  tsConfigPath: abs("./tsconfig.json"),
  cjsSource: "./dist-cjs",
  cjsDestination: "./dist",
  abs,
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-redis/package.json
{ "name": "@langchain/redis", "version": "0.1.0", "description": "Sample integration for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-redis/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/redis", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "redis": "^4.6.13" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@faker-js/faker": "^8.4.0", "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@types/uuid": "^9", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", "dotenv": 
"^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0", "uuid": "^10.0.0" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-redis/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-redis/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-redis/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-redis
lc_public_repos/langchainjs/libs/langchain-redis/src/chat_histories.ts
import {
  RedisClientOptions,
  RedisClientType,
  RedisModules,
  RedisFunctions,
  RedisScripts,
} from "redis";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
  BaseMessage,
  mapChatMessagesToStoredMessages,
  mapStoredMessagesToChatMessages,
} from "@langchain/core/messages";
import { pool } from "./connections.js";

/**
 * Type for the input to the `RedisChatMessageHistory` constructor.
 */
export type RedisChatMessageHistoryInput = {
  // Redis list key under which the session's messages are stored.
  sessionId: string;
  // Optional TTL (seconds) refreshed on every addMessage call.
  sessionTTL?: number;
  // Client options; used to obtain a pooled client when no `client` is given.
  config?: RedisClientOptions;
  // Typing issues with createClient output: https://github.com/redis/node-redis/issues/1865
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  client?: any;
};

/**
 * Class for storing chat message history using Redis. Extends the
 * `BaseListChatMessageHistory` class.
 * @example
 * ```typescript
 * const chatHistory = new RedisChatMessageHistory({
 *   sessionId: new Date().toISOString(),
 *   sessionTTL: 300,
 *   config: { url: "redis://localhost:6379" },
 * });
 *
 * const chain = new ConversationChain({
 *   llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0 }),
 *   memory: { chatHistory },
 * });
 *
 * const response = await chain.invoke({
 *   input: "What did I just say my name was?",
 * });
 * console.log({ response });
 * ```
 */
export class RedisChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "redis"];

  // Maps nested config fields to the environment variables that may hold them,
  // so serialization can redact the secrets.
  get lc_secrets() {
    return {
      "config.url": "REDIS_URL",
      "config.username": "REDIS_USERNAME",
      "config.password": "REDIS_PASSWORD",
    };
  }

  public client: RedisClientType<RedisModules, RedisFunctions, RedisScripts>;

  private sessionId: string;

  private sessionTTL?: number;

  constructor(fields: RedisChatMessageHistoryInput) {
    super(fields);
    const { sessionId, sessionTTL, config, client } = fields;
    // Prefer an explicitly supplied client; otherwise reuse a pooled one
    // keyed by `config` (see ./connections.js).
    this.client = (client ??
      pool.getClient(config)) as RedisClientType<
      RedisModules,
      RedisFunctions,
      RedisScripts
    >;
    this.sessionId = sessionId;
    this.sessionTTL = sessionTTL;
  }

  /**
   * Ensures the Redis client is ready to perform operations. If the client
   * is not ready, it attempts to connect to the Redis database.
   * @returns Promise resolving to true when the client is ready.
   */
  async ensureReadiness() {
    if (!this.client.isReady) {
      await this.client.connect();
    }
    return true;
  }

  /**
   * Retrieves all chat messages from the Redis database for the current
   * session.
   * @returns Promise resolving to an array of `BaseMessage` instances.
   */
  async getMessages(): Promise<BaseMessage[]> {
    await this.ensureReadiness();
    const rawStoredMessages = await this.client.lRange(this.sessionId, 0, -1);

    // addMessage uses lPush (prepend), so the list is newest-first;
    // reverse to restore chronological order before deserializing.
    const orderedMessages = rawStoredMessages
      .reverse()
      .map((message) => JSON.parse(message));

    return mapStoredMessagesToChatMessages(orderedMessages);
  }

  /**
   * Adds a new chat message to the Redis database for the current session.
   * @param message The `BaseMessage` instance to add.
   * @returns Promise resolving when the message has been added.
   */
  async addMessage(message: BaseMessage): Promise<void> {
    await this.ensureReadiness();
    const messageToAdd = mapChatMessagesToStoredMessages([message]);
    await this.client.lPush(this.sessionId, JSON.stringify(messageToAdd[0]));
    // Refresh the session expiry on every write so active sessions persist.
    if (this.sessionTTL) {
      await this.client.expire(this.sessionId, this.sessionTTL);
    }
  }

  /**
   * Deletes all chat messages from the Redis database for the current
   * session.
   * @returns Promise resolving when the messages have been deleted.
   */
  async clear(): Promise<void> {
    await this.ensureReadiness();
    await this.client.del(this.sessionId);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-redis
lc_public_repos/langchainjs/libs/langchain-redis/src/vectorstores.ts
import type {
  createCluster,
  createClient,
  RediSearchSchema,
  SearchOptions,
} from "redis";
import { SchemaFieldTypes, VectorAlgorithms } from "redis";
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import { VectorStore } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";

// Adapted from internal redis types which aren't exported
/**
 * Type for creating a schema vector field. It includes the algorithm,
 * distance metric, and initial capacity.
 */
export type CreateSchemaVectorField<
  T extends VectorAlgorithms,
  A extends Record<string, unknown>
> = {
  ALGORITHM: T;
  DISTANCE_METRIC: "L2" | "IP" | "COSINE";
  INITIAL_CAP?: number;
} & A;

/**
 * Type for creating a flat schema vector field. It extends
 * CreateSchemaVectorField with a block size property.
 */
export type CreateSchemaFlatVectorField = CreateSchemaVectorField<
  VectorAlgorithms.FLAT,
  {
    BLOCK_SIZE?: number;
  }
>;

/**
 * Type for creating a HNSW schema vector field. It extends
 * CreateSchemaVectorField with M, EF_CONSTRUCTION, and EF_RUNTIME
 * properties.
 */
export type CreateSchemaHNSWVectorField = CreateSchemaVectorField<
  VectorAlgorithms.HNSW,
  {
    M?: number;
    EF_CONSTRUCTION?: number;
    EF_RUNTIME?: number;
  }
>;

// Options bag accepted by FT.CREATE (4th parameter of client.ft.create).
type CreateIndexOptions = NonNullable<
  Parameters<ReturnType<typeof createClient>["ft"]["create"]>[3]
>;

export type RedisSearchLanguages = `${NonNullable<
  CreateIndexOptions["LANGUAGE"]
>}`;

export type RedisVectorStoreIndexOptions = Omit<
  CreateIndexOptions,
  "LANGUAGE"
> & { LANGUAGE?: RedisSearchLanguages };

/**
 * Interface for the configuration of the RedisVectorStore. It includes
 * the Redis client, index name, index options, key prefix, content key,
 * metadata key, vector key, and filter.
 */
export interface RedisVectorStoreConfig {
  redisClient:
    | ReturnType<typeof createClient>
    | ReturnType<typeof createCluster>;
  indexName: string;
  indexOptions?: CreateSchemaFlatVectorField | CreateSchemaHNSWVectorField;
  createIndexOptions?: Omit<RedisVectorStoreIndexOptions, "PREFIX">; // PREFIX must be set with keyPrefix
  keyPrefix?: string;
  contentKey?: string;
  metadataKey?: string;
  vectorKey?: string;
  filter?: RedisVectorStoreFilterType;
}

/**
 * Interface for the options when adding documents to the
 * RedisVectorStore. It includes keys and batch size.
 */
export interface RedisAddOptions {
  keys?: string[];
  batchSize?: number;
}

/**
 * Type for the filter used in the RedisVectorStore. It is an array of
 * strings.
 * If a string is passed instead of an array the value is used directly, this
 * allows custom filters to be passed.
 */
export type RedisVectorStoreFilterType = string[] | string;

/**
 * Class representing a RedisVectorStore. It extends the VectorStore class
 * and includes methods for adding documents and vectors, performing
 * similarity searches, managing the index, and more.
 */
export class RedisVectorStore extends VectorStore {
  declare FilterType: RedisVectorStoreFilterType;

  private redisClient:
    | ReturnType<typeof createClient>
    | ReturnType<typeof createCluster>;

  indexName: string;

  indexOptions: CreateSchemaFlatVectorField | CreateSchemaHNSWVectorField;

  createIndexOptions: CreateIndexOptions;

  // Prefix prepended to every generated document key (default `doc:<index>:`).
  keyPrefix: string;

  contentKey: string;

  metadataKey: string;

  vectorKey: string;

  filter?: RedisVectorStoreFilterType;

  _vectorstoreType(): string {
    return "redis";
  }

  constructor(
    embeddings: EmbeddingsInterface,
    _dbConfig: RedisVectorStoreConfig
  ) {
    super(embeddings, _dbConfig);

    this.redisClient = _dbConfig.redisClient;
    this.indexName = _dbConfig.indexName;
    this.indexOptions = _dbConfig.indexOptions ?? {
      ALGORITHM: VectorAlgorithms.HNSW,
      DISTANCE_METRIC: "COSINE",
    };
    this.keyPrefix = _dbConfig.keyPrefix ?? `doc:${this.indexName}:`;
    this.contentKey = _dbConfig.contentKey ?? "content";
    this.metadataKey = _dbConfig.metadataKey ?? "metadata";
    this.vectorKey = _dbConfig.vectorKey ?? "content_vector";
    this.filter = _dbConfig.filter;
    // PREFIX is forced from keyPrefix; callers may not set it directly
    // (see RedisVectorStoreConfig.createIndexOptions).
    this.createIndexOptions = {
      ON: "HASH",
      PREFIX: this.keyPrefix,
      ...(_dbConfig.createIndexOptions as CreateIndexOptions),
    };
  }

  /**
   * Method for adding documents to the RedisVectorStore. It first converts
   * the documents to texts and then adds them as vectors.
   * @param documents The documents to add.
   * @param options Optional parameters for adding the documents.
   * @returns A promise that resolves when the documents have been added.
   */
  async addDocuments(documents: Document[], options?: RedisAddOptions) {
    const texts = documents.map(({ pageContent }) => pageContent);
    return this.addVectors(
      await this.embeddings.embedDocuments(texts),
      documents,
      options
    );
  }

  /**
   * Method for adding vectors to the RedisVectorStore. It checks if the
   * index exists and creates it if it doesn't, then adds the vectors in
   * batches.
   * @param vectors The vectors to add.
   * @param documents The documents associated with the vectors.
   * @param keys Optional keys for the vectors.
   * @param batchSize The size of the batches in which to add the vectors. Defaults to 1000.
   * @returns A promise that resolves when the vectors have been added.
   */
  async addVectors(
    vectors: number[][],
    documents: Document[],
    { keys, batchSize = 1000 }: RedisAddOptions = {}
  ) {
    if (!vectors.length || !vectors[0].length) {
      throw new Error("No vectors provided");
    }
    // check if the index exists and create it if it doesn't
    await this.createIndex(vectors[0].length);

    const info = await this.redisClient.ft.info(this.indexName);
    // Generated keys continue after the current document count.
    const lastKeyCount = parseInt(info.numDocs, 10) || 0;
    const multi = this.redisClient.multi();

    // NOTE(review): these async map callbacks are never awaited and all share
    // one `multi`; the mid-batch `await multi.exec()` below can interleave
    // with the final exec(). Verify the intended batching semantics.
    vectors.map(async (vector, idx) => {
      const key =
        keys && keys.length
          ? keys[idx]
          : `${this.keyPrefix}${idx + lastKeyCount}`;

      const metadata =
        documents[idx] && documents[idx].metadata
          ? documents[idx].metadata
          : {};

      multi.hSet(key, {
        [this.vectorKey]: this.getFloat32Buffer(vector),
        [this.contentKey]: documents[idx].pageContent,
        [this.metadataKey]: this.escapeSpecialChars(JSON.stringify(metadata)),
      });

      // write batch
      if (idx % batchSize === 0) {
        await multi.exec();
      }
    });

    // insert final batch
    await multi.exec();
  }

  /**
   * Method for performing a similarity search in the RedisVectorStore. It
   * returns the documents and their scores.
   * @param query The query vector.
   * @param k The number of nearest neighbors to return.
   * @param filter Optional filter to apply to the search.
   * @returns A promise that resolves to an array of documents and their scores.
   */
  async similaritySearchVectorWithScore(
    query: number[],
    k: number,
    filter?: RedisVectorStoreFilterType
  ): Promise<[Document, number][]> {
    if (filter && this.filter) {
      throw new Error("cannot provide both `filter` and `this.filter`");
    }

    const _filter = filter ?? this.filter;
    const results = await this.redisClient.ft.search(
      this.indexName,
      ...this.buildQuery(query, k, _filter)
    );
    const result: [Document, number][] = [];

    if (results.total) {
      for (const res of results.documents) {
        if (res.value) {
          const document = res.value;
          if (document.vector_score) {
            result.push([
              new Document({
                pageContent: (document[this.contentKey] ?? "") as string,
                // NOTE(review): reads the literal `metadata` field rather than
                // `document[this.metadataKey]`; a custom metadataKey is not
                // honored here — confirm whether this is intentional.
                metadata: JSON.parse(
                  this.unEscapeSpecialChars(
                    (document.metadata ?? "{}") as string
                  )
                ),
              }),
              Number(document.vector_score),
            ]);
          }
        }
      }
    }

    return result;
  }

  /**
   * Static method for creating a new instance of RedisVectorStore from
   * texts. It creates documents from the texts and metadata, then adds them
   * to the RedisVectorStore.
   * @param texts The texts to add.
   * @param metadatas The metadata associated with the texts. A single object
   * is shared by all texts; an array is matched to texts by position.
   * @param embeddings The embeddings to use.
   * @param dbConfig The configuration for the RedisVectorStore.
   * @param docsOptions The document options to use.
   * @returns A promise that resolves to a new instance of RedisVectorStore.
   */
  static fromTexts(
    texts: string[],
    metadatas: object[] | object,
    embeddings: EmbeddingsInterface,
    dbConfig: RedisVectorStoreConfig,
    docsOptions?: RedisAddOptions
  ): Promise<RedisVectorStore> {
    const docs: Document[] = [];
    for (let i = 0; i < texts.length; i += 1) {
      const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas;
      const newDoc = new Document({
        pageContent: texts[i],
        metadata,
      });
      docs.push(newDoc);
    }
    return RedisVectorStore.fromDocuments(
      docs,
      embeddings,
      dbConfig,
      docsOptions
    );
  }

  /**
   * Static method for creating a new instance of RedisVectorStore from
   * documents. It adds the documents to the RedisVectorStore.
   * @param docs The documents to add.
   * @param embeddings The embeddings to use.
   * @param dbConfig The configuration for the RedisVectorStore.
   * @param docsOptions The document options to use.
   * @returns A promise that resolves to a new instance of RedisVectorStore.
   */
  static async fromDocuments(
    docs: Document[],
    embeddings: EmbeddingsInterface,
    dbConfig: RedisVectorStoreConfig,
    docsOptions?: RedisAddOptions
  ): Promise<RedisVectorStore> {
    const instance = new this(embeddings, dbConfig);
    await instance.addDocuments(docs, docsOptions);
    return instance;
  }

  /**
   * Method for checking if an index exists in the RedisVectorStore.
   * @returns A promise that resolves to a boolean indicating whether the index exists.
   */
  async checkIndexExists() {
    try {
      await this.redisClient.ft.info(this.indexName);
    } catch (err) {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      if ((err as any)?.message.includes("unknown command")) {
        // FT.INFO unknown ⇒ the server has no RediSearch module at all.
        throw new Error(
          "Failed to run FT.INFO command. Please ensure that you are running a RediSearch-capable Redis instance: https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/redis#setup"
        );
      }
      // index doesn't exist
      return false;
    }

    return true;
  }

  /**
   * Method for creating an index in the RedisVectorStore. If the index
   * already exists, it does nothing.
   * @param dimensions The dimensions of the index
   * @returns A promise that resolves when the index has been created.
   */
  async createIndex(dimensions = 1536): Promise<void> {
    if (await this.checkIndexExists()) {
      return;
    }

    const schema: RediSearchSchema = {
      [this.vectorKey]: {
        type: SchemaFieldTypes.VECTOR,
        TYPE: "FLOAT32",
        DIM: dimensions,
        ...this.indexOptions,
      },
      [this.contentKey]: SchemaFieldTypes.TEXT,
      [this.metadataKey]: SchemaFieldTypes.TEXT,
    };

    await this.redisClient.ft.create(
      this.indexName,
      schema,
      this.createIndexOptions
    );
  }

  /**
   * Method for dropping an index from the RedisVectorStore.
   * @param deleteDocuments Optional boolean indicating whether to drop the associated documents.
   * @returns A promise that resolves to a boolean indicating whether the index was dropped.
   */
  async dropIndex(deleteDocuments?: boolean): Promise<boolean> {
    try {
      const options = deleteDocuments ? { DD: deleteDocuments } : undefined;
      await this.redisClient.ft.dropIndex(this.indexName, options);

      return true;
    } catch (err) {
      // Best-effort: any failure (e.g. missing index) reports false.
      return false;
    }
  }

  /**
   * Deletes vectors from the vector store.
   * @param params The parameters for deleting vectors.
   * @returns A promise that resolves when the vectors have been deleted.
   */
  async delete(params: { deleteAll: boolean }): Promise<void> {
    if (params.deleteAll) {
      await this.dropIndex(true);
    } else {
      throw new Error(`Invalid parameters passed to "delete".`);
    }
  }

  // Builds the FT.SEARCH query string and options for a KNN search,
  // optionally restricted by a metadata filter.
  private buildQuery(
    query: number[],
    k: number,
    filter?: RedisVectorStoreFilterType
  ): [string, SearchOptions] {
    const vectorScoreField = "vector_score";

    let hybridFields = "*";
    // if a filter is set, modify the hybrid query
    if (filter && filter.length) {
      // `filter` is a list of strings, then it's applied using the OR operator in the metadata key
      // for example: filter = ['foo', 'bar'] => this will filter all metadata containing either 'foo' OR 'bar'
      hybridFields = `@${this.metadataKey}:(${this.prepareFilter(filter)})`;
    }

    const baseQuery = `${hybridFields} => [KNN ${k} @${this.vectorKey} $vector AS ${vectorScoreField}]`;
    const returnFields = [this.metadataKey, this.contentKey, vectorScoreField];

    const options: SearchOptions = {
      PARAMS: {
        vector: this.getFloat32Buffer(query),
      },
      RETURN: returnFields,
      SORTBY: vectorScoreField,
      DIALECT: 2,
      LIMIT: {
        from: 0,
        size: k,
      },
    };

    return [baseQuery, options];
  }

  // Joins array filters with `|` (OR) after escaping; strings pass through
  // verbatim so callers can supply raw RediSearch filter syntax.
  private prepareFilter(filter: RedisVectorStoreFilterType) {
    if (Array.isArray(filter)) {
      return filter.map(this.escapeSpecialChars).join("|");
    }
    return filter;
  }

  /**
   * Escapes all '-', ':', and '"' characters.
   * RediSearch considers these all as special characters, so we need
   * to escape them
   * @see https://redis.io/docs/stack/search/reference/query_syntax
   *
   * @param str
   * @returns
   */
  private escapeSpecialChars(str: string) {
    return str
      .replaceAll("-", "\\-")
      .replaceAll(":", "\\:")
      .replaceAll(`"`, `\\"`);
  }

  /**
   * Unescapes all '-', ':', and '"' characters, returning the original string
   *
   * @param str
   * @returns
   */
  private unEscapeSpecialChars(str: string) {
    return str
      .replaceAll("\\-", "-")
      .replaceAll("\\:", ":")
      .replaceAll(`\\"`, `"`);
  }

  /**
   * Converts the vector to the buffer Redis needs to
   * correctly store an embedding
   *
   * @param vector
   * @returns Buffer
   */
  private getFloat32Buffer(vector: number[]) {
    return Buffer.from(new Float32Array(vector).buffer);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-redis
lc_public_repos/langchainjs/libs/langchain-redis/src/caches.ts
import type { createCluster, createClient } from "redis";
import {
  BaseCache,
  deserializeStoredGeneration,
  getCacheKey,
  serializeGeneration,
} from "@langchain/core/caches";
import { Generation } from "@langchain/core/outputs";

/**
 * Represents the type of the Redis client used to interact with the Redis
 * database.
 */
type RedisClientType =
  | ReturnType<typeof createClient>
  | ReturnType<typeof createCluster>;

/**
 * @deprecated Import from "@langchain/community/caches/ioredis" instead.
 * LLM cache backed by Redis. Each cached generation is stored as its own
 * key derived from the prompt, LLM key, and its position in the result
 * list; `lookup` walks those keys in order until the first miss.
 *
 * @example
 * ```typescript
 * const model = new ChatOpenAI({
 *   cache: new RedisCache(new Redis(), { ttl: 60 }),
 * });
 *
 * // Invoke the model to perform an action
 * const response = await model.invoke("Do something random!");
 * console.log(response);
 * ```
 */
export class RedisCache extends BaseCache {
  private redisClient: RedisClientType;

  constructor(redisClient: RedisClientType) {
    super();
    this.redisClient = redisClient;
  }

  /**
   * Retrieves data from the cache. Builds per-generation cache keys from
   * `prompt` and `llmKey` and reads them sequentially from Redis.
   *
   * @param prompt The prompt used to construct the cache key.
   * @param llmKey The LLM key used to construct the cache key.
   * @returns An array of Generations if found, null otherwise.
   */
  public async lookup(prompt: string, llmKey: string) {
    const generations: Generation[] = [];
    // Generations live under keys indexed 0, 1, 2, …; the first missing
    // (or empty) entry marks the end of the cached result list.
    for (let index = 0; ; index += 1) {
      const cacheKey = getCacheKey(prompt, llmKey, String(index));
      const raw = await this.redisClient.get(cacheKey);
      if (!raw) {
        break;
      }
      generations.push(deserializeStoredGeneration(JSON.parse(raw)));
    }
    return generations.length > 0 ? generations : null;
  }

  /**
   * Updates the cache with new data. Stores each generation in `value`
   * under its own indexed key derived from `prompt` and `llmKey`.
   * @param prompt The prompt used to construct the cache key.
   * @param llmKey The LLM key used to construct the cache key.
   * @param value The value to be stored in the cache.
   */
  public async update(prompt: string, llmKey: string, value: Generation[]) {
    // Writes happen in order so key N always corresponds to generation N.
    for (const [index, generation] of value.entries()) {
      const cacheKey = getCacheKey(prompt, llmKey, String(index));
      await this.redisClient.set(
        cacheKey,
        JSON.stringify(serializeGeneration(generation))
      );
    }
  }
}
0
lc_public_repos/langchainjs/libs/langchain-redis
lc_public_repos/langchainjs/libs/langchain-redis/src/connections.ts
import { createClient, RedisClientOptions } from "redis";

// A minimalistic connection pool to avoid creating multiple connections
class RedisConnectionPool {
  // Maps a stable serialization of the client options to a shared client.
  clients = new Map();

  /**
   * Returns a client for the given options, creating it on first use.
   * @param config Options forwarded to `createClient`.
   */
  getClient(config: RedisClientOptions = {}) {
    // Key by value, not object identity: the `= {}` default (and any
    // structurally-equal options object) is a fresh object on every call,
    // so a Map keyed on the object itself would never hit the cache and
    // would leak a new client per call.
    // NOTE: options containing functions/sockets serialize lossily and may
    // collide; such callers should create their own client.
    const key = JSON.stringify(config);
    if (!this.clients.has(key)) {
      this.clients.set(key, createClient(config));
    }
    return this.clients.get(key);
  }
}

export const pool = new RedisConnectionPool();
0
lc_public_repos/langchainjs/libs/langchain-redis
lc_public_repos/langchainjs/libs/langchain-redis/src/index.ts
// Public entrypoint for the @langchain/redis package: re-exports the chat
// message history, vector store, and LLM cache integrations.
export * from "./chat_histories.js";
export * from "./vectorstores.js";
export * from "./caches.js";
0
lc_public_repos/langchainjs/libs/langchain-redis/src
lc_public_repos/langchainjs/libs/langchain-redis/src/tests/vectorstores.test.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
import { jest, test, expect, describe } from "@jest/globals";
import { FakeEmbeddings } from "@langchain/core/utils/testing";

import { RedisVectorStore } from "../vectorstores.js";

// Minimal mock of the node-redis client surface used by RedisVectorStore:
// ft.* index commands, hSet, and a multi() whose hSet is shared with the
// top-level mock so call counts can be asserted in one place.
const createRedisClientMockup = () => {
  const hSetMock = jest.fn();

  return {
    ft: {
      info: jest.fn<any>().mockResolvedValue({
        numDocs: 0,
      }),
      create: jest.fn(),
      search: jest.fn<any>().mockResolvedValue({
        total: 0,
        documents: [],
      }),
      dropIndex: jest.fn(),
    },
    hSet: hSetMock,
    multi: jest.fn<any>().mockImplementation(() => ({
      exec: jest.fn(),
      hSet: hSetMock,
    })),
  };
};

test("RedisVectorStore with external keys", async () => {
  const client = createRedisClientMockup();
  const embeddings = new FakeEmbeddings();
  const store = new RedisVectorStore(embeddings, {
    redisClient: client as any,
    indexName: "documents",
  });

  expect(store).toBeDefined();

  await store.addDocuments(
    [
      {
        pageContent: "hello",
        metadata: {
          a: 1,
          b: { nested: [1, { a: 4 }] },
        },
      },
    ],
    { keys: ["id1"] }
  );

  expect(client.hSet).toHaveBeenCalledTimes(1);
  // Metadata is stored as escaped JSON ('-', ':', '"' are RediSearch
  // special characters); the expected literal below is that escaped form.
  expect(client.hSet).toHaveBeenCalledWith("id1", {
    content_vector: Buffer.from(new Float32Array([0.1, 0.2, 0.3, 0.4]).buffer),
    content: "hello",
    metadata: `{\\"a\\"\\:1,\\"b\\"\\:{\\"nested\\"\\:[1,{\\"a\\"\\:4}]}}`,
  });

  const results = await store.similaritySearch("goodbye", 1);

  expect(results).toHaveLength(0);
});

test("RedisVectorStore with generated keys", async () => {
  const client = createRedisClientMockup();
  const embeddings = new FakeEmbeddings();
  const store = new RedisVectorStore(embeddings, {
    redisClient: client as any,
    indexName: "documents",
  });

  expect(store).toBeDefined();

  // No keys supplied: the store generates them from keyPrefix + count.
  await store.addDocuments([{ pageContent: "hello", metadata: { a: 1 } }]);

  expect(client.hSet).toHaveBeenCalledTimes(1);

  const results = await store.similaritySearch("goodbye", 1);

  expect(results).toHaveLength(0);
});

test("RedisVectorStore with filters", async () => {
  const client = createRedisClientMockup();
  const embeddings = new FakeEmbeddings();
  const store = new RedisVectorStore(embeddings, {
    redisClient: client as any,
    indexName: "documents",
  });

  expect(store).toBeDefined();

  // Array filters are OR-joined into the metadata clause of the query.
  await store.similaritySearch("hello", 1, ["a", "b", "c"]);

  expect(client.ft.search).toHaveBeenCalledWith(
    "documents",
    "@metadata:(a|b|c) => [KNN 1 @content_vector $vector AS vector_score]",
    {
      PARAMS: {
        vector: Buffer.from(new Float32Array([0.1, 0.2, 0.3, 0.4]).buffer),
      },
      RETURN: ["metadata", "content", "vector_score"],
      SORTBY: "vector_score",
      DIALECT: 2,
      LIMIT: {
        from: 0,
        size: 1,
      },
    }
  );
});

test("RedisVectorStore with raw filter", async () => {
  const client = createRedisClientMockup();
  const embeddings = new FakeEmbeddings();
  const store = new RedisVectorStore(embeddings, {
    redisClient: client as any,
    indexName: "documents",
  });

  expect(store).toBeDefined();

  // A string filter is passed through verbatim (no escaping/joining).
  await store.similaritySearch("hello", 1, "a b c");

  expect(client.ft.search).toHaveBeenCalledWith(
    "documents",
    "@metadata:(a b c) => [KNN 1 @content_vector $vector AS vector_score]",
    {
      PARAMS: {
        vector: Buffer.from(new Float32Array([0.1, 0.2, 0.3, 0.4]).buffer),
      },
      RETURN: ["metadata", "content", "vector_score"],
      SORTBY: "vector_score",
      DIALECT: 2,
      LIMIT: {
        from: 0,
        size: 1,
      },
    }
  );
});

describe("RedisVectorStore dropIndex", () => {
  const client = createRedisClientMockup();
  const embeddings = new FakeEmbeddings();
  const store = new RedisVectorStore(embeddings, {
    redisClient: client as any,
    indexName: "documents",
  });

  test("without deleteDocuments param provided", async () => {
    await store.dropIndex();

    expect(client.ft.dropIndex).toHaveBeenCalledWith("documents", undefined);
  });

  test("with deleteDocuments as false", async () => {
    await store.dropIndex(false);

    // Falsy deleteDocuments must NOT emit the DD option.
    expect(client.ft.dropIndex).toHaveBeenCalledWith("documents", undefined);
  });

  test("with deleteDocument as true", async () => {
    await store.dropIndex(true);

    expect(client.ft.dropIndex).toHaveBeenCalledWith("documents", {
      DD: true,
    });
  });

  test("through delete convenience method", async () => {
    await store.delete({ deleteAll: true });

    expect(client.ft.dropIndex).toHaveBeenCalledWith("documents", {
      DD: true,
    });
  });
});

describe("RedisVectorStore createIndex when index does not exist", () => {
  test("calls ft.create with default create options", async () => {
    const client = createRedisClientMockup();
    const embeddings = new FakeEmbeddings();
    const store = new RedisVectorStore(embeddings, {
      redisClient: client as any,
      indexName: "documents",
    });

    // Force the "index missing" path so ft.create is reached.
    store.checkIndexExists = jest.fn<any>().mockResolvedValue(false);

    await store.createIndex();

    expect(client.ft.create).toHaveBeenCalledWith(
      "documents",
      expect.any(Object),
      {
        ON: "HASH",
        PREFIX: "doc:documents:",
      }
    );
  });

  test("calls ft.create with custom options", async () => {
    const client = createRedisClientMockup();
    const embeddings = new FakeEmbeddings();
    const store = new RedisVectorStore(embeddings, {
      redisClient: client as any,
      indexName: "documents",
      createIndexOptions: {
        ON: "JSON",
        FILTER: '@indexName == "documents"',
        SCORE: 0.5,
        MAXTEXTFIELDS: true,
        TEMPORARY: 1000,
        NOOFFSETS: true,
        NOHL: true,
        NOFIELDS: true,
        NOFREQS: true,
        SKIPINITIALSCAN: true,
        STOPWORDS: ["a", "b"],
        LANGUAGE: "German",
      },
    });

    store.checkIndexExists = jest.fn<any>().mockResolvedValue(false);

    await store.createIndex();

    // User options are forwarded; PREFIX is still derived from keyPrefix.
    expect(client.ft.create).toHaveBeenCalledWith(
      "documents",
      expect.any(Object),
      {
        ON: "JSON",
        PREFIX: "doc:documents:",
        FILTER: '@indexName == "documents"',
        SCORE: 0.5,
        MAXTEXTFIELDS: true,
        TEMPORARY: 1000,
        NOOFFSETS: true,
        NOHL: true,
        NOFIELDS: true,
        NOFREQS: true,
        SKIPINITIALSCAN: true,
        STOPWORDS: ["a", "b"],
        LANGUAGE: "German",
      }
    );
  });
});
0
lc_public_repos/langchainjs/libs/langchain-redis/src
lc_public_repos/langchainjs/libs/langchain-redis/src/tests/vectorstores.int.test.ts
/* eslint-disable no-process-env */
/* eslint-disable no-promise-executor-return */
import { RedisClientType, createClient } from "redis";
import { v4 as uuidv4 } from "uuid";
import { test, expect } from "@jest/globals";
import { faker } from "@faker-js/faker";
import { Document } from "@langchain/core/documents";
import { SyntheticEmbeddings } from "@langchain/core/utils/testing";

import { RedisVectorStore } from "../vectorstores.js";

// Integration tests against a real Redis instance (REDIS_URL). A single store
// with index "test-index" and key prefix "test:" is shared by all tests and
// wiped in afterAll.
// NOTE(review): `describe`/`beforeAll`/`afterAll` rely on Jest's injected
// globals (they are not imported above) — confirm injectGlobals is enabled.
describe("RedisVectorStore", () => {
  let vectorStore: RedisVectorStore;
  let client: RedisClientType;

  beforeAll(async () => {
    client = createClient({ url: process.env.REDIS_URL });
    await client.connect();

    vectorStore = new RedisVectorStore(new SyntheticEmbeddings(), {
      redisClient: client as RedisClientType,
      indexName: "test-index",
      keyPrefix: "test:",
    });
  });

  // Remove the index and its documents, then close the connection.
  afterAll(async () => {
    await vectorStore.delete({ deleteAll: true });
    await client.quit();
  });

  test("auto-generated ids", async () => {
    const pageContent = faker.lorem.sentence(5);

    await vectorStore.addDocuments([{ pageContent, metadata: { foo: "bar" } }]);

    const results = await vectorStore.similaritySearch(pageContent, 1);

    expect(results).toEqual([
      new Document({ metadata: { foo: "bar" }, pageContent }),
    ]);
  });

  test("user-provided keys", async () => {
    const documentKey = `test:${uuidv4()}`;

    const pageContent = faker.lorem.sentence(5);

    await vectorStore.addDocuments([{ pageContent, metadata: {} }], {
      keys: [documentKey],
    });

    const results = await vectorStore.similaritySearch(pageContent, 1);

    expect(results).toEqual([new Document({ metadata: {}, pageContent })]);
  });

  // Three documents share the same content but differ in metadata; only the
  // one whose metadata matches the uuid filter should come back.
  test("metadata filtering", async () => {
    await vectorStore.dropIndex();
    const pageContent = faker.lorem.sentence(5);
    // Unique value so the filter cannot accidentally match leftover docs.
    const uuid = uuidv4();

    await vectorStore.addDocuments([
      { pageContent, metadata: { foo: "bar" } },
      { pageContent, metadata: { foo: uuid } },
      { pageContent, metadata: { foo: "qux" } },
    ]);

    // If the filter wasn't working, we'd get all 3 documents back
    const results = await vectorStore.similaritySearch(pageContent, 3, [
      `${uuid}`,
    ]);

    expect(results).toEqual([
      new Document({ metadata: { foo: uuid }, pageContent }),
    ]);
  });
});
0
lc_public_repos/langchainjs/libs/langchain-redis/src
lc_public_repos/langchainjs/libs/langchain-redis/src/tests/chat_histories.int.test.ts
/* eslint-disable no-promise-executor-return */
import { test, expect } from "@jest/globals";
import { createClient } from "redis";
import { HumanMessage, AIMessage } from "@langchain/core/messages";

import { RedisChatMessageHistory } from "../chat_histories.js";

// Flush the local Redis database once the whole suite is done so sessions
// created here do not leak into other test runs.
// NOTE(review): `afterAll` relies on Jest's injected globals (it is not
// imported above) — confirm injectGlobals is enabled in the Jest config.
afterAll(async () => {
  // Connects to the default localhost Redis instance.
  const client = createClient();
  await client.connect();
  await client.flushDb();
  await client.disconnect();
});

/**
 * To run this integration test, you need to have a Redis server running locally.
 *
 * `docker run -p 6379:6379 -p 8001:8001 redis/redis-stack:latest`
 */
test("Test Redis history store", async () => {
  const chatHistory = new RedisChatMessageHistory({
    // ISO timestamp keeps each run's session id unique.
    sessionId: new Date().toISOString(),
  });

  const blankResult = await chatHistory.getMessages();
  expect(blankResult).toStrictEqual([]);

  await chatHistory.addUserMessage("Who is the best vocalist?");
  await chatHistory.addAIChatMessage("Ozzy Osbourne");

  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];

  const resultWithHistory = await chatHistory.getMessages();
  expect(resultWithHistory).toEqual(expectedMessages);
});

// clear() should remove all stored messages for the session.
test("Test clear Redis history store", async () => {
  const chatHistory = new RedisChatMessageHistory({
    sessionId: new Date().toISOString(),
  });

  await chatHistory.addUserMessage("Who is the best vocalist?");
  await chatHistory.addAIChatMessage("Ozzy Osbourne");

  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];

  const resultWithHistory = await chatHistory.getMessages();
  expect(resultWithHistory).toEqual(expectedMessages);

  await chatHistory.clear();

  const blankResult = await chatHistory.getMessages();
  expect(blankResult).toStrictEqual([]);
});

// With sessionTTL of 5 (seconds, per the 6s wait below), the stored history
// should be gone after expiry.
test("Test Redis history with a TTL", async () => {
  const chatHistory = new RedisChatMessageHistory({
    sessionId: new Date().toISOString(),
    sessionTTL: 5,
  });

  const blankResult = await chatHistory.getMessages();
  expect(blankResult).toStrictEqual([]);

  await chatHistory.addUserMessage("Who is the best vocalist?");
  await chatHistory.addAIChatMessage("Ozzy Osbourne");

  const expectedMessages = [
    new HumanMessage("Who is the best vocalist?"),
    new AIMessage("Ozzy Osbourne"),
  ];

  const resultWithHistory = await chatHistory.getMessages();
  expect(resultWithHistory).toEqual(expectedMessages);

  // Wait past the TTL so the session key expires.
  await new Promise((resolve) => setTimeout(resolve, 6000));

  const expiredResult = await chatHistory.getMessages();
  expect(expiredResult).toStrictEqual([]);
});
0
lc_public_repos/langchainjs/libs/langchain-redis
lc_public_repos/langchainjs/libs/langchain-redis/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": [ "ES2021", "ES2022.Object", "DOM" ], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": [ "src/**/*" ], "exclude": [ "node_modules", "dist", "docs" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/LICENSE
The MIT License Copyright (c) Harrison Chase Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { '^.+\\.tsx?$': ['@swc/jest'], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], setupFilesAfterEnv: ["./scripts/jest-setup-after-env.js"], testTimeout: 20_000, passWithNoTests: true };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/babel.config.cjs
// babel.config.js module.exports = { presets: [["@babel/preset-env", { targets: { node: true } }]], };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node"); class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { constructor(config, context) { // Make `instanceof Float32Array` return true in tests // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 super(config, context); this.global.Float32Array = Float32Array; } } module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/README.md
> [!IMPORTANT] > This package is now deprecated in favor of the new Azure integration in the OpenAI SDK. Please use the package [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) instead. > You can find the migration guide [here](https://js.langchain.com/docs/integrations/llms/azure#migration-from-azure-openai-sdk). # @langchain/azure-openai This package contains the Azure SDK for OpenAI LangChain.js integrations. It provides Azure OpenAI support through the [Azure SDK for OpenAI](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai) library. ## Installation ```bash npm2yarn npm install @langchain/azure-openai @langchain/core ``` This package, along with the main LangChain package, depends on [`@langchain/core`](https://npmjs.com/package/@langchain/core/). If you are using this package with other LangChain packages, you should make sure that all of the packages depend on the same instance of @langchain/core. You can do so by adding appropriate fields to your project's `package.json` like this: ```json { "name": "your-project", "version": "0.0.0", "dependencies": { "@langchain/azure-openai": "^0.0.4", "@langchain/core": "^0.3.0" }, "resolutions": { "@langchain/core": "^0.3.0" }, "overrides": { "@langchain/core": "^0.3.0" }, "pnpm": { "overrides": { "@langchain/core": "^0.3.0" } } } ``` The field you need depends on the package manager you're using, but we recommend adding a field for the common `yarn`, `npm`, and `pnpm` to maximize compatibility. ## Chat Models This package contains the `AzureChatOpenAI` class, which is the recommended way to interface with deployed models on Azure OpenAI. To use, install the requirements, and configure your environment. 
```bash export AZURE_OPENAI_API_ENDPOINT=<your_endpoint> export AZURE_OPENAI_API_KEY=<your_key> export AZURE_OPENAI_API_DEPLOYMENT_NAME=<your_deployment_name> ``` Then initialize the model and make the calls: ```typescript import { AzureChatOpenAI } from "@langchain/azure-openai"; const model = new AzureChatOpenAI({ // Note that the following are optional, and will default to the values below // if not provided. azureOpenAIEndpoint: process.env.AZURE_OPENAI_API_ENDPOINT, azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, }); const response = await model.invoke(new HumanMessage("Hello world!")); ``` ### Streaming ```typescript import { AzureChatOpenAI } from "@langchain/azure-openai"; const model = new AzureChatOpenAI({ // Note that the following are optional, and will default to the values below // if not provided. azureOpenAIEndpoint: process.env.AZURE_OPENAI_API_ENDPOINT, azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, }); const response = await model.stream(new HumanMessage("Hello world!")); ``` ## Embeddings This package also supports embeddings with Azure OpenAI. ```typescript import { AzureOpenAIEmbeddings } from "@langchain/azure-openai"; const embeddings = new AzureOpenAIEmbeddings({ // Note that the following are optional, and will default to the values below // if not provided. 
  azureOpenAIEndpoint: process.env.AZURE_OPENAI_API_ENDPOINT,
  azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
  azureOpenAIApiDeploymentName:
    process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME,
});

const res = await embeddings.embedQuery("Hello world");
```

## Using Azure managed identity

If you're using [Azure Managed Identity](https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity), you can also pass the credentials directly to the constructor:

```typescript
import { DefaultAzureCredential } from "@azure/identity";
import { AzureOpenAI } from "@langchain/azure-openai";

const credentials = new DefaultAzureCredential();

const model = new AzureOpenAI({
  credentials,
  azureOpenAIEndpoint: process.env.AZURE_OPENAI_API_ENDPOINT,
  azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
});
```

## Compatibility with OpenAI API

This library provides compatibility with the OpenAI API. You can use an API key from OpenAI's developer portal like in the example below:

```typescript
import { AzureOpenAI, OpenAIKeyCredential } from "@langchain/azure-openai";

const model = new AzureOpenAI({
  modelName: "gpt-3.5-turbo",
  credentials: new OpenAIKeyCredential("<your_openai_api_key>"),
});
```

## Development

To develop the Azure OpenAI package, you'll need to follow these instructions:

### Install dependencies

```bash
yarn install
```

### Build the package

```bash
yarn build
```

Or from the repo root:

```bash
yarn build --filter=@langchain/azure-openai
```

### Run tests

Test files should live within a `tests/` folder in the `src/` folder.
Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`: ```bash $ yarn test $ yarn test:int ``` ### Lint & Format Run the linter & formatter to ensure your code is up to standard: ```bash yarn lint && yarn format ``` ### Adding new entrypoints If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to `scripts/create-entrypoints.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof", "eslint-plugin-jest"], ignorePatterns: [ "src/utils/@cfworker", "src/utils/fast-json-patch", "src/utils/js-sha1", ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], 'jest/no-focused-tests': 'error', }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//], entrypoints: { index: "index", }, tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/package.json
{ "name": "@langchain/azure-openai", "version": "0.0.11", "description": "Azure SDK for OpenAI integrations for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/azure-openai", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "@azure/core-auth": "^1.5.0", "@azure/openai": "1.0.0-beta.11", "js-tiktoken": "^1.0.12", "zod": "^3.22.3", "zod-to-json-schema": "3.20.3" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@azure/identity": "^4.2.1", "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@langchain/standard-tests": "0.0.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "dpdm": "^3.12.0", "eslint": "^8.33.0", 
"eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-jest": "^27.6.0", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rimraf": "^5.0.1", "typescript": "~5.1.6" }, "publishConfig": { "access": "public" }, "keywords": [ "llm", "ai", "gpt3", "chain", "prompt", "prompt engineering", "chatgpt", "machine learning", "ml", "openai", "embeddings", "vectorstores", "azure", "azure SDK" ], "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": [ "node_modules", "dist", "docs", "**/tests" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-azure-openai/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-azure-openai
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/types.ts
import type {
  OpenAIClientOptions,
  AzureExtensionsOptions,
  ChatRequestMessage,
} from "@azure/openai";
import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import type { TiktokenModel } from "js-tiktoken/lite";
import type { EmbeddingsParams } from "@langchain/core/embeddings";
import type { KeyCredential, TokenCredential } from "@azure/core-auth";

// reexport this type from the included package so we can easily override and extend it if needed in the future
// also makes it easier for folks to import this type without digging around into the dependent packages
export type { TiktokenModel };

/**
 * Authentication and endpoint configuration for Azure OpenAI models.
 */
export declare interface AzureOpenAIInput {
  /**
   * API key for the (non-Azure) OpenAI API. Consumers fall back to the
   * OPENAI_API_KEY environment variable when this is not provided.
   */
  openAIApiKey?: string;

  /**
   * API key to use when making requests to Azure OpenAI.
   * Alias for `apiKey`
   */
  azureOpenAIApiKey?: string;

  /**
   * API key to use when making requests to Azure OpenAI.
   */
  apiKey?: string;

  /**
   * Endpoint to use when making requests to Azure OpenAI
   */
  azureOpenAIEndpoint?: string;

  /**
   * Azure OpenAI API deployment name to use for completions when making requests to Azure OpenAI.
   * This is the name of the deployment you created in the Azure portal.
   * e.g. "my-openai-deployment"
   * this will be used in the endpoint URL: https://{InstanceName}.openai.azure.com/openai/deployments/my-openai-deployment/
   */
  azureOpenAIApiDeploymentName?: string;

  /** @deprecated Use "azureOpenAIApiDeploymentName" instead. */
  azureOpenAIEmbeddingsApiDeploymentName?: string;

  /**
   * API version to use when making requests to Azure OpenAI.
   */
  azureOpenAIApiVersion?: string;

  /**
   * Azure `KeyCredential` or `TokenCredential` (e.g. a managed identity)
   * used to authenticate the client instead of a raw API key string.
   */
  credentials?: KeyCredential | TokenCredential;
}

/**
 * Generation parameters shared by the completion and chat models.
 */
export declare interface OpenAIBaseInput {
  /**
   * Maximum number of tokens to generate in the completion. -1 returns as many
   * tokens as possible given the prompt and the model's maximum context size.
   */
  maxTokens?: number;

  /**
   * The sampling temperature to use that controls the apparent creativity of generated completions.
   * Higher values will make output more random while lower values will make results more focused
   * and deterministic.
   * It is not recommended to modify temperature and top_p for the same completions request as the
   * interaction of these two settings is difficult to predict.
   */
  temperature: number;

  /**
   * An alternative to sampling with temperature called nucleus sampling. This value causes the
   * model to consider the results of tokens with the provided probability mass. As an example, a
   * value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
   * considered.
   * It is not recommended to modify temperature and top_p for the same completions request as the
   * interaction of these two settings is difficult to predict.
   */
  topP: number;

  /**
   * A map between GPT token IDs and bias scores that influences the probability of specific tokens
   * appearing in a completions response. Token IDs are computed via external tokenizer tools, while
   * bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to
   * a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias
   * score varies by model.
   */
  logitBias?: Record<string, number>;

  /**
   * An identifier for the caller or end user of the operation. This may be used for tracking
   * or rate-limiting purposes.
   */
  user?: string;

  /**
   * The number of completions choices that should be generated per provided prompt as part of an
   * overall completions response.
   * Because this setting can generate many completions, it may quickly consume your token quota.
   * Use carefully and ensure reasonable settings for max_tokens and stop.
   */
  n: number;

  /**
   * A value that influences the probability of generated tokens appearing based on their existing
   * presence in generated text.
   * Positive values will make tokens less likely to appear when they already exist and increase the
   * model's likelihood to output new topics.
   */
  presencePenalty: number;

  /**
   * A value that influences the probability of generated tokens appearing based on their cumulative
   * frequency in generated text.
   * Positive values will make tokens less likely to appear as their frequency increases and
   * decrease the likelihood of the model repeating the same statements verbatim.
   */
  frequencyPenalty: number;

  /** A collection of textual sequences that will end completions generation. */
  stop?: string[];

  /** A collection of textual sequences that will end completions generation. */
  stopSequences?: string[];

  /** Whether to stream the results or not. Enabling disables tokenUsage reporting */
  streaming: boolean;

  /**
   * Model name to use
   * Alias for `model`
   */
  modelName: string;

  /** Model name to use */
  model?: string;

  /** Holds any additional parameters that are valid to pass to {@link
   * https://platform.openai.com/docs/api-reference/completions/create |
   * `openai.createCompletion`} that are not explicitly specified on this class.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  modelKwargs?: Record<string, any>;

  /**
   * Timeout to use when making requests to OpenAI.
   */
  timeout?: number;
}

/**
 * Parameters specific to the (non-chat) completions model.
 */
export declare interface OpenAIInput extends OpenAIBaseInput {
  /**
   * A value that controls the emission of log probabilities for the provided number of most likely
   * tokens within a completions response.
   */
  logprobs?: number;

  /**
   * A value specifying whether completions responses should include input prompts as prefixes to
   * their generated output.
   */
  echo?: boolean;

  /**
   * A value that controls how many completions will be internally generated prior to response
   * formulation.
   * When used together with n, best_of controls the number of candidate completions and must be
   * greater than n.
   * Because this setting can generate many completions, it may quickly consume your token quota.
   * Use carefully and ensure reasonable settings for max_tokens and stop.
   */
  bestOf?: number;

  /** Batch size to use when passing multiple documents to generate */
  batchSize: number;
}

export interface OpenAICallOptions extends BaseLanguageModelCallOptions {
  /**
   * Additional client options passed through to the underlying
   * `@azure/openai` client request (see `OpenAIClientOptions`).
   */
  options?: OpenAIClientOptions;
}

/**
 * Parameters specific to the chat model.
 */
export interface OpenAIChatInput extends OpenAIBaseInput {
  /** ChatGPT messages to pass as a prefix to the prompt */
  prefixMessages?: ChatRequestMessage[];

  /**
   * Azure OpenAI extension options (e.g. "On Your Data") forwarded to the
   * underlying `@azure/openai` client.
   */
  azureExtensionOptions?: AzureExtensionsOptions;
}

export interface OpenAIChatCallOptions extends OpenAICallOptions {
  // NOTE(review): appears to select which prompt of a multi-prompt request
  // this call reports against — confirm with the chat model implementation.
  promptIndex?: number;
}

/**
 * Parameters for the Azure OpenAI embeddings model.
 */
export interface AzureOpenAIEmbeddingsParams extends EmbeddingsParams {
  /**
   * An identifier for the caller or end user of the operation. This may be used for tracking
   * or rate-limiting purposes.
   */
  user?: string;

  /**
   * The model name to provide as part of this embeddings request.
   * Not applicable to Azure OpenAI, where deployment information should be included in the Azure
   * resource URI that's connected to.
   * Alias for `model`
   */
  modelName?: string;

  /**
   * The model name to provide as part of this embeddings request.
   * Not applicable to Azure OpenAI, where deployment information should be included in the Azure
   * resource URI that's connected to.
   */
  model?: string;

  /**
   * The maximum number of documents to embed in a single request. This is
   * limited by the OpenAI API to a maximum of 2048.
   */
  batchSize?: number;

  /**
   * Whether to strip new lines from the input text. This is recommended by
   * OpenAI for older models, but may not be suitable for all use cases.
   * See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
   */
  stripNewLines?: boolean;

  /**
   * Timeout to use when making requests to OpenAI.
   */
  timeout?: number;
}
0
lc_public_repos/langchainjs/libs/langchain-azure-openai
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/llms.ts
import type { TiktokenModel } from "js-tiktoken/lite"; import { type OpenAIClientOptions as AzureOpenAIClientOptions, OpenAIClient as AzureOpenAIClient, AzureKeyCredential, Completions, Choice, OpenAIKeyCredential, } from "@azure/openai"; import { calculateMaxTokens } from "@langchain/core/language_models/base"; import { BaseLLM, type BaseLLMParams, } from "@langchain/core/language_models/llms"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { chunkArray } from "@langchain/core/utils/chunk_array"; import { GenerationChunk, type LLMResult } from "@langchain/core/outputs"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { KeyCredential, TokenCredential, isTokenCredential, } from "@azure/core-auth"; import { AzureOpenAIInput, OpenAICallOptions, OpenAIInput } from "./types.js"; import { USER_AGENT_PREFIX } from "./constants.js"; /** * Interface for tracking token usage in OpenAI calls. */ export interface TokenUsage { completionTokens?: number; promptTokens?: number; totalTokens?: number; } /** @deprecated Import from "@langchain/openai" instead. 
*/ export class AzureOpenAI< CallOptions extends OpenAICallOptions = OpenAICallOptions > extends BaseLLM<CallOptions> implements OpenAIInput, AzureOpenAIInput { static lc_name() { return "AzureOpenAI"; } get callKeys() { return [...super.callKeys, "options"]; } lc_serializable = true; get lc_secrets(): { [key: string]: string } | undefined { return { apiKey: "AZURE_OPENAI_API_KEY", openAIApiKey: "OPENAI_API_KEY", azureOpenAIApiKey: "AZURE_OPENAI_API_KEY", azureOpenAIEndpoint: "AZURE_OPENAI_API_ENDPOINT", azureOpenAIApiDeploymentName: "AZURE_OPENAI_API_DEPLOYMENT_NAME", }; } get lc_aliases(): Record<string, string> { return { modelName: "model", openAIApiKey: "openai_api_key", azureOpenAIApiKey: "azure_openai_api_key", azureOpenAIEndpoint: "azure_openai_api_endpoint", azureOpenAIApiDeploymentName: "azure_openai_api_deployment_name", }; } temperature = 0.7; maxTokens = 256; topP = 1; frequencyPenalty = 0; presencePenalty = 0; n = 1; bestOf?: number; logitBias?: Record<string, number>; modelName = "gpt-3.5-turbo-instruct"; model = "gpt-3.5-turbo-instruct"; modelKwargs?: OpenAIInput["modelKwargs"]; batchSize = 20; timeout?: number; stop?: string[]; stopSequences?: string[]; user?: string; streaming = false; azureOpenAIApiKey?: string; apiKey?: string; azureOpenAIEndpoint?: string; azureOpenAIApiDeploymentName?: string; logprobs?: number; echo?: boolean; private client: AzureOpenAIClient; constructor( fields?: Partial<OpenAIInput> & Partial<AzureOpenAIInput> & BaseLLMParams & { configuration?: AzureOpenAIClientOptions; } ) { super(fields ?? {}); this.azureOpenAIEndpoint = fields?.azureOpenAIEndpoint ?? getEnvironmentVariable("AZURE_OPENAI_API_ENDPOINT"); this.azureOpenAIApiDeploymentName = fields?.azureOpenAIApiDeploymentName ?? getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME"); const openAiApiKey = fields?.apiKey ?? fields?.openAIApiKey ?? getEnvironmentVariable("OPENAI_API_KEY"); this.azureOpenAIApiKey = fields?.apiKey ?? fields?.azureOpenAIApiKey ?? 
getEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? openAiApiKey; this.apiKey = this.azureOpenAIApiKey; const azureCredential = fields?.credentials ?? (this.apiKey === openAiApiKey ? new OpenAIKeyCredential(this.apiKey ?? "") : new AzureKeyCredential(this.apiKey ?? "")); // eslint-disable-next-line no-instanceof/no-instanceof const isOpenAIApiKey = azureCredential instanceof OpenAIKeyCredential; if (!this.apiKey && !fields?.credentials) { throw new Error("Azure OpenAI API key not found"); } if (!this.azureOpenAIEndpoint && !isOpenAIApiKey) { throw new Error("Azure OpenAI Endpoint not found"); } if (!this.azureOpenAIApiDeploymentName && !isOpenAIApiKey) { throw new Error("Azure OpenAI Deployment name not found"); } this.maxTokens = fields?.maxTokens ?? this.maxTokens; this.temperature = fields?.temperature ?? this.temperature; this.topP = fields?.topP ?? this.topP; this.logitBias = fields?.logitBias; this.user = fields?.user; this.n = fields?.n ?? this.n; this.logprobs = fields?.logprobs; this.echo = fields?.echo; this.stop = fields?.stopSequences ?? fields?.stop; this.stopSequences = this.stop; this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty; this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty; this.bestOf = fields?.bestOf ?? this.bestOf; this.modelName = fields?.model ?? fields?.modelName ?? this.model; this.model = this.modelName; this.modelKwargs = fields?.modelKwargs ?? {}; this.streaming = fields?.streaming ?? false; this.batchSize = fields?.batchSize ?? this.batchSize; if (this.streaming && this.bestOf && this.bestOf > 1) { throw new Error("Cannot stream results when bestOf > 1"); } const options = { userAgentOptions: { userAgentPrefix: USER_AGENT_PREFIX }, }; if (isOpenAIApiKey) { this.client = new AzureOpenAIClient( azureCredential as OpenAIKeyCredential ); } else if (isTokenCredential(azureCredential)) { this.client = new AzureOpenAIClient( this.azureOpenAIEndpoint ?? 
"", azureCredential as TokenCredential, options ); } else { this.client = new AzureOpenAIClient( this.azureOpenAIEndpoint ?? "", azureCredential as KeyCredential, options ); } } async *_streamResponseChunks( input: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): AsyncGenerator<GenerationChunk> { const deploymentName = this.azureOpenAIApiDeploymentName || this.model; const stream = await this.caller.call(() => this.client.streamCompletions(deploymentName, [input], { maxTokens: this.maxTokens, temperature: this.temperature, topP: this.topP, logitBias: this.logitBias, user: this.user, n: this.n, logprobs: this.logprobs, echo: this.echo, stop: this.stopSequences, presencePenalty: this.presencePenalty, frequencyPenalty: this.frequencyPenalty, bestOf: this.bestOf, requestOptions: { timeout: options?.timeout ?? this.timeout, }, abortSignal: options?.signal ?? undefined, ...this.modelKwargs, }) ); for await (const data of stream) { const choice = data?.choices[0]; if (!choice) { continue; } const chunk = new GenerationChunk({ text: choice.text, generationInfo: { finishReason: choice.finishReason, }, }); yield chunk; // eslint-disable-next-line no-void void runManager?.handleLLMNewToken(chunk.text ?? 
""); } if (options.signal?.aborted) { throw new Error("AbortError"); } } async _generate( prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<LLMResult> { const deploymentName = this.azureOpenAIApiDeploymentName || this.model; if (this.maxTokens === -1) { if (prompts.length !== 1) { throw new Error( "max_tokens set to -1 not supported for multiple inputs" ); } this.maxTokens = await calculateMaxTokens({ prompt: prompts[0], // Cast here to allow for other models that may not fit the union modelName: this.model as TiktokenModel, }); } const subPrompts = chunkArray(prompts, this.batchSize); if (this.streaming) { const choices: Choice[] = []; for (let i = 0; i < subPrompts.length; i += 1) { let response: Omit<Completions, "choices" | "usage"> | undefined; const stream = await this.caller.call(() => this.client.streamCompletions(deploymentName, subPrompts[i], { maxTokens: this.maxTokens, temperature: this.temperature, topP: this.topP, logitBias: this.logitBias, user: this.user, n: this.n, logprobs: this.logprobs, echo: this.echo, stop: this.stopSequences, presencePenalty: this.presencePenalty, frequencyPenalty: this.frequencyPenalty, bestOf: this.bestOf, requestOptions: { timeout: options?.timeout ?? this.timeout, }, abortSignal: options?.signal ?? 
undefined, ...this.modelKwargs, }) ); for await (const message of stream) { if (!response) { response = { id: message.id, created: message.created, promptFilterResults: message.promptFilterResults, }; } // on all messages, update choice for (const part of message.choices) { if (!choices[part.index]) { choices[part.index] = part; } else { const choice = choices[part.index]; choice.text += part.text; choice.finishReason = part.finishReason; choice.logprobs = part.logprobs; } void runManager?.handleLLMNewToken(part.text, { prompt: Math.floor(part.index / this.n), completion: part.index % this.n, }); } } if (options.signal?.aborted) { throw new Error("AbortError"); } } const generations = chunkArray(choices, this.n).map((promptChoices) => promptChoices.map((choice) => ({ text: choice.text ?? "", generationInfo: { finishReason: choice.finishReason, logprobs: choice.logprobs, }, })) ); return { generations, llmOutput: { tokenUsage: { completionTokens: undefined, promptTokens: undefined, totalTokens: undefined, }, }, }; } else { const tokenUsage: TokenUsage = {}; const subPrompts = chunkArray(prompts, this.batchSize); const choices: Choice[] = []; for (let i = 0; i < subPrompts.length; i += 1) { const data = await this.caller.call(() => this.client.getCompletions(deploymentName, prompts, { maxTokens: this.maxTokens, temperature: this.temperature, topP: this.topP, logitBias: this.logitBias, user: this.user, n: this.n, logprobs: this.logprobs, echo: this.echo, stop: this.stopSequences, presencePenalty: this.presencePenalty, frequencyPenalty: this.frequencyPenalty, bestOf: this.bestOf, requestOptions: { timeout: options?.timeout ?? this.timeout, }, abortSignal: options?.signal ?? undefined, ...this.modelKwargs, }) ); choices.push(...data.choices); tokenUsage.completionTokens = (tokenUsage.completionTokens ?? 0) + data.usage.completionTokens; tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + data.usage.promptTokens; tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 
0) + data.usage.totalTokens; } const generations = chunkArray(choices, this.n).map((promptChoices) => promptChoices.map((choice) => { void runManager?.handleLLMNewToken(choice.text, { prompt: Math.floor(choice.index / this.n), completion: choice.index % this.n, }); return { text: choice.text ?? "", generationInfo: { finishReason: choice.finishReason, logprobs: choice.logprobs, }, }; }) ); return { generations, llmOutput: { tokenUsage: { completionTokens: tokenUsage.completionTokens, promptTokens: tokenUsage.promptTokens, totalTokens: tokenUsage.totalTokens, }, }, }; } } _llmType() { return "azure_openai"; } }
0
lc_public_repos/langchainjs/libs/langchain-azure-openai
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/index.ts
export * from "./llms.js"; export * from "./chat_models.js"; export * from "./embeddings.js"; export * from "./types.js"; export * from "./utils/openai-format-fndef.js";
0
lc_public_repos/langchainjs/libs/langchain-azure-openai
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/chat_models.ts
import { type OpenAIClientOptions as AzureOpenAIClientOptions, OpenAIClient as AzureOpenAIClient, AzureExtensionsOptions, ChatRequestMessage, ChatResponseMessage, AzureKeyCredential, ChatCompletions, EventStream, ChatCompletionsToolDefinition, ChatCompletionsNamedToolSelection, ChatCompletionsResponseFormat, OpenAIKeyCredential, } from "@azure/openai"; import { BaseChatModel, BaseChatModelParams, } from "@langchain/core/language_models/chat_models"; import { BaseFunctionCallOptions, FunctionCallOption, FunctionDefinition, TokenUsage, } from "@langchain/core/language_models/base"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { AIMessage, AIMessageChunk, BaseMessage, ChatMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, ToolMessage, ToolMessageChunk, } from "@langchain/core/messages"; import { ChatGeneration, ChatGenerationChunk, ChatResult, } from "@langchain/core/outputs"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { KeyCredential, TokenCredential, isTokenCredential, } from "@azure/core-auth"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { AzureOpenAIInput, OpenAIChatInput, OpenAIChatCallOptions, } from "./types.js"; import { FunctionDef, formatFunctionDefinitions, } from "./utils/openai-format-fndef.js"; import { USER_AGENT_PREFIX } from "./constants.js"; function _convertDeltaToMessageChunk( delta: ChatResponseMessage, defaultRole?: string ) { const role = delta.role ?? defaultRole; const content = delta.content ?? 
""; let additional_kwargs; if (delta.functionCall) { additional_kwargs = { function_call: delta.functionCall, }; } else if (delta.toolCalls) { additional_kwargs = { tool_calls: delta.toolCalls, }; } else { additional_kwargs = {}; } if (role === "user") { return new HumanMessageChunk({ content }); } else if (role === "assistant") { return new AIMessageChunk({ content, additional_kwargs }); } else if (role === "system") { return new SystemMessageChunk({ content }); } else if (role === "function") { return new FunctionMessageChunk({ content, additional_kwargs, name: delta.role, }); } else if (role === "tool") { return new ToolMessageChunk({ content, additional_kwargs, tool_call_id: delta.toolCalls[0].id, }); } else { return new ChatMessageChunk({ content, role }); } } function openAIResponseToChatMessage( message: ChatResponseMessage ): BaseMessage { switch (message.role) { case "assistant": return new AIMessage(message.content || "", { function_call: message.functionCall, tool_calls: message.toolCalls, }); default: return new ChatMessage(message.content || "", message.role ?? 
"unknown"); } } interface OpenAILLMOutput { tokenUsage: TokenUsage; } function extractGenericMessageCustomRole(message: ChatMessage) { if ( message.role !== "system" && message.role !== "assistant" && message.role !== "user" && message.role !== "function" && message.role !== "tool" ) { console.warn(`Unknown message role: ${message.role}`); } return message.role; } export function messageToOpenAIRole(message: BaseMessage): string { const type = message._getType(); switch (type) { case "system": return "system"; case "ai": return "assistant"; case "human": return "user"; case "function": return "function"; case "tool": return "tool"; case "generic": { if (!ChatMessage.isInstance(message)) throw new Error("Invalid generic chat message"); return extractGenericMessageCustomRole(message); } default: throw new Error(`Unknown message type: ${type}`); } } export interface ChatOpenAICallOptions extends OpenAIChatCallOptions, BaseFunctionCallOptions { tools?: ChatCompletionsToolDefinition[]; tool_choice?: ChatCompletionsNamedToolSelection; response_format?: ChatCompletionsResponseFormat; seed?: number; } /** @deprecated Import from "@langchain/openai" instead. 
*/ export class AzureChatOpenAI extends BaseChatModel<ChatOpenAICallOptions> implements OpenAIChatInput, AzureOpenAIInput { static lc_name() { return "AzureChatOpenAI"; } get callKeys() { return [ ...super.callKeys, "options", "function_call", "functions", "tools", "tool_choice", "promptIndex", "response_format", "seed", ]; } get lc_secrets(): { [key: string]: string } | undefined { return { openAIApiKey: "OPENAI_API_KEY", azureOpenAIApiKey: "AZURE_OPENAI_API_KEY", azureOpenAIEndpoint: "AZURE_OPENAI_API_ENDPOINT", azureOpenAIApiDeploymentName: "AZURE_OPENAI_API_DEPLOYMENT_NAME", }; } get lc_aliases(): Record<string, string> { return { modelName: "model", openAIApiKey: "openai_api_key", azureOpenAIApiKey: "azure_openai_api_key", azureOpenAIEndpoint: "azure_openai_api_endpoint", azureOpenAIApiDeploymentName: "azure_openai_api_deployment_name", }; } lc_serializable = true; azureExtensionOptions?: AzureExtensionsOptions | undefined; maxTokens?: number | undefined; temperature: number; topP: number; logitBias?: Record<string, number> | undefined; user?: string | undefined; n: number; presencePenalty: number; frequencyPenalty: number; stop?: string[] | undefined; stopSequences?: string[] | undefined; streaming: boolean; modelName: string; model: string; modelKwargs?: OpenAIChatInput["modelKwargs"]; timeout?: number | undefined; azureOpenAIEndpoint?: string; azureOpenAIApiKey?: string; apiKey?: string; azureOpenAIApiDeploymentName?: string; private client: AzureOpenAIClient; constructor( fields?: Partial<OpenAIChatInput> & Partial<AzureOpenAIInput> & BaseChatModelParams & { configuration?: AzureOpenAIClientOptions; } ) { super(fields ?? {}); this.azureOpenAIEndpoint = fields?.azureOpenAIEndpoint ?? getEnvironmentVariable("AZURE_OPENAI_API_ENDPOINT"); this.azureOpenAIApiDeploymentName = (fields?.azureOpenAIEmbeddingsApiDeploymentName || fields?.azureOpenAIApiDeploymentName) ?? 
getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME"); const openAiApiKey = fields?.apiKey ?? fields?.openAIApiKey ?? getEnvironmentVariable("OPENAI_API_KEY"); this.azureOpenAIApiKey = fields?.apiKey ?? fields?.azureOpenAIApiKey ?? getEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? openAiApiKey; this.apiKey = this.azureOpenAIApiKey; const azureCredential = fields?.credentials ?? (this.apiKey === openAiApiKey ? new OpenAIKeyCredential(this.apiKey ?? "") : new AzureKeyCredential(this.apiKey ?? "")); // eslint-disable-next-line no-instanceof/no-instanceof const isOpenAIApiKey = azureCredential instanceof OpenAIKeyCredential; if (!this.apiKey && !fields?.credentials) { throw new Error("Azure OpenAI API key not found"); } if (!this.azureOpenAIEndpoint && !isOpenAIApiKey) { throw new Error("Azure OpenAI Endpoint not found"); } if (!this.azureOpenAIApiDeploymentName && !isOpenAIApiKey) { throw new Error("Azure OpenAI Deployment name not found"); } this.modelName = fields?.model ?? fields?.modelName ?? this.model; this.model = this.modelName; this.modelKwargs = fields?.modelKwargs ?? {}; this.timeout = fields?.timeout; this.temperature = fields?.temperature ?? this.temperature; this.topP = fields?.topP ?? this.topP; this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty; this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty; this.maxTokens = fields?.maxTokens; this.n = fields?.n ?? this.n; this.logitBias = fields?.logitBias; this.stop = fields?.stopSequences ?? fields?.stop; this.stopSequences = this.stop; this.user = fields?.user; this.azureExtensionOptions = fields?.azureExtensionOptions; this.streaming = fields?.streaming ?? false; const options = { userAgentOptions: { userAgentPrefix: USER_AGENT_PREFIX }, }; if (isOpenAIApiKey) { this.client = new AzureOpenAIClient( azureCredential as OpenAIKeyCredential ); } else if (isTokenCredential(azureCredential)) { this.client = new AzureOpenAIClient( this.azureOpenAIEndpoint ?? 
"", azureCredential as TokenCredential, options ); } else { this.client = new AzureOpenAIClient( this.azureOpenAIEndpoint ?? "", azureCredential as KeyCredential, options ); } } private formatMessages(messages: BaseMessage[]): ChatRequestMessage[] { return messages.map( (message: BaseMessage) => ({ role: messageToOpenAIRole(message), content: message.content, name: message.name, toolCalls: message.additional_kwargs.tool_calls, functionCall: message.additional_kwargs.function_call, toolCallId: (message as ToolMessage).tool_call_id, } as ChatRequestMessage) ); } protected async _streamChatCompletionsWithRetry( azureOpenAIMessages: ChatRequestMessage[], options: this["ParsedCallOptions"] ): Promise<EventStream<ChatCompletions>> { return this.caller.call(async () => { const deploymentName = this.azureOpenAIApiDeploymentName || this.model; const res = await this.client.streamChatCompletions( deploymentName, azureOpenAIMessages, { functions: options?.functions, functionCall: options?.function_call, maxTokens: this.maxTokens, temperature: this.temperature, topP: this.topP, logitBias: this.logitBias, user: this.user, n: this.n, stop: this.stopSequences, presencePenalty: this.presencePenalty, frequencyPenalty: this.frequencyPenalty, azureExtensionOptions: this.azureExtensionOptions, requestOptions: { timeout: options?.timeout ?? this.timeout, }, abortSignal: options?.signal ?? 
undefined, tools: options?.tools, toolChoice: options?.tool_choice, responseFormat: options?.response_format, seed: options?.seed, ...this.modelKwargs, } ); return res; }); } async *_streamResponseChunks( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): AsyncGenerator<ChatGenerationChunk> { const azureOpenAIMessages: ChatRequestMessage[] = this.formatMessages(messages); let defaultRole: string | undefined; const streamIterable = await this._streamChatCompletionsWithRetry( azureOpenAIMessages, options ); for await (const data of streamIterable) { const choice = data?.choices[0]; if (!choice) { continue; } const { delta } = choice; if (!delta) { continue; } const chunk = _convertDeltaToMessageChunk(delta, defaultRole); defaultRole = delta.role ?? defaultRole; const newTokenIndices = { prompt: options.promptIndex ?? 0, completion: choice.index ?? 0, }; if (typeof chunk.content !== "string") { console.log( "[WARNING]: Received non-string content from OpenAI. This is currently not supported." ); continue; } const generationChunk = new ChatGenerationChunk({ message: chunk, text: chunk.content, generationInfo: newTokenIndices, }); yield generationChunk; // eslint-disable-next-line no-void void runManager?.handleLLMNewToken( generationChunk.text ?? 
"", newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk } ); } if (options.signal?.aborted) { throw new Error("AbortError"); } } async _generate( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { const deploymentName = this.azureOpenAIApiDeploymentName || this.model; const tokenUsage: TokenUsage = {}; const azureOpenAIMessages: ChatRequestMessage[] = this.formatMessages(messages); if (!this.streaming) { const data = await this.caller.call(() => this.client.getChatCompletions(deploymentName, azureOpenAIMessages, { functions: options?.functions, functionCall: options?.function_call, maxTokens: this.maxTokens, temperature: this.temperature, topP: this.topP, logitBias: this.logitBias, user: this.user, n: this.n, stop: this.stopSequences, presencePenalty: this.presencePenalty, frequencyPenalty: this.frequencyPenalty, azureExtensionOptions: this.azureExtensionOptions, requestOptions: { timeout: options?.timeout ?? this.timeout, }, abortSignal: options?.signal ?? undefined, tools: options?.tools, toolChoice: options?.tool_choice, responseFormat: options?.response_format, seed: options?.seed, ...this.modelKwargs, }) ); const { completionTokens, promptTokens, totalTokens } = data?.usage ?? {}; if (completionTokens) { tokenUsage.completionTokens = (tokenUsage.completionTokens ?? 0) + completionTokens; } if (promptTokens) { tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens; } if (totalTokens) { tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens; } const generations: ChatGeneration[] = []; for (const part of data?.choices ?? []) { const text = part.message?.content ?? ""; const generation: ChatGeneration = { text, message: openAIResponseToChatMessage( part.message ?? { role: "assistant", content: text, toolCalls: [], } ), }; generation.generationInfo = { ...(part.finishReason ? 
{ finish_reason: part.finishReason } : {}), }; generations.push(generation); } return { generations, llmOutput: { tokenUsage }, }; } else { const stream = this._streamResponseChunks(messages, options, runManager); const finalChunks: Record<number, ChatGenerationChunk> = {}; for await (const chunk of stream) { const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0; if (finalChunks[index] === undefined) { finalChunks[index] = chunk; } else { finalChunks[index] = finalChunks[index].concat(chunk); } } const generations = Object.entries(finalChunks) .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10)) .map(([_, value]) => value); const promptTokenUsage = await this.getEstimatedTokenCountFromPrompt( messages, options?.functions, options?.function_call ); const completionTokenUsage = await this.getNumTokensFromGenerations( generations ); tokenUsage.promptTokens = promptTokenUsage; tokenUsage.completionTokens = completionTokenUsage; tokenUsage.totalTokens = promptTokenUsage + completionTokenUsage; return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }; } } /** * Estimate the number of tokens an array of generations have used. */ private async getNumTokensFromGenerations(generations: ChatGeneration[]) { const generationUsages = await Promise.all( generations.map(async (generation) => { if (generation.message.additional_kwargs?.function_call) { return (await this.getNumTokensFromMessages([generation.message])) .countPerMessage[0]; } else { return await this.getNumTokens(generation.message.content); } }) ); return generationUsages.reduce((a, b) => a + b, 0); } _llmType() { return "azure-openai"; } /** * Estimate the number of tokens a prompt will use. 
* Modified from: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts */ private async getEstimatedTokenCountFromPrompt( messages: BaseMessage[], functions?: FunctionDefinition[], function_call?: "none" | "auto" | FunctionCallOption ): Promise<number> { // It appears that if functions are present, the first system message is padded with a trailing newline. This // was inferred by trying lots of combinations of messages and functions and seeing what the token counts were. let tokens = (await this.getNumTokensFromMessages(messages)).totalCount; // If there are functions, add the function definitions as they count towards token usage if (functions && function_call !== "auto") { const promptDefinitions = formatFunctionDefinitions( functions as unknown as FunctionDef[] ); tokens += await this.getNumTokens(promptDefinitions); tokens += 9; // Add nine per completion } // If there's a system message _and_ functions are present, subtract four tokens. I assume this is because // functions typically add a system message, but reuse the first one if it's already there. This offsets // the extra 9 tokens added by the function definitions. if (functions && messages.find((m) => m._getType() === "system")) { tokens -= 4; } // If function_call is 'none', add one token. // If it's a FunctionCall object, add 4 + the number of tokens in the function name. // If it's undefined or 'auto', don't add anything. 
if (function_call === "none") { tokens += 1; } else if (typeof function_call === "object") { tokens += (await this.getNumTokens(function_call.name)) + 4; } return tokens; } async getNumTokensFromMessages(messages: BaseMessage[]) { let totalCount = 0; let tokensPerMessage = 0; let tokensPerName = 0; // From: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb if (this.model === "gpt-3.5-turbo-0301") { tokensPerMessage = 4; tokensPerName = -1; } else { tokensPerMessage = 3; tokensPerName = 1; } const countPerMessage = await Promise.all( messages.map(async (message) => { const textCount = await this.getNumTokens(message.content); const roleCount = await this.getNumTokens(messageToOpenAIRole(message)); const nameCount = message.name !== undefined ? tokensPerName + (await this.getNumTokens(message.name)) : 0; let count = textCount + tokensPerMessage + roleCount + nameCount; // From: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts messageTokenEstimate const openAIMessage = message; if (openAIMessage._getType() === "function") { count -= 2; } if (openAIMessage.additional_kwargs?.function_call) { count += 3; } if (openAIMessage?.additional_kwargs.function_call?.name) { count += await this.getNumTokens( openAIMessage.additional_kwargs.function_call?.name ); } if (openAIMessage.additional_kwargs.function_call?.arguments) { count += await this.getNumTokens( // Remove newlines and spaces JSON.stringify( JSON.parse( openAIMessage.additional_kwargs.function_call?.arguments ) ) ); } totalCount += count; return count; }) ); totalCount += 3; // every reply is primed with <|start|>assistant<|message|> return { totalCount, countPerMessage }; } /** @ignore */ _combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput { return llmOutputs.reduce<{ [key in keyof OpenAILLMOutput]: Required<OpenAILLMOutput[key]>; }>( (acc, llmOutput) => { if (llmOutput && llmOutput.tokenUsage) { 
acc.tokenUsage.completionTokens += llmOutput.tokenUsage.completionTokens ?? 0; acc.tokenUsage.promptTokens += llmOutput.tokenUsage.promptTokens ?? 0; acc.tokenUsage.totalTokens += llmOutput.tokenUsage.totalTokens ?? 0; } return acc; }, { tokenUsage: { completionTokens: 0, promptTokens: 0, totalTokens: 0, }, } ); } }
0
lc_public_repos/langchainjs/libs/langchain-azure-openai
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/embeddings.ts
import { Embeddings } from "@langchain/core/embeddings";
import {
  type OpenAIClientOptions as AzureOpenAIClientOptions,
  OpenAIClient as AzureOpenAIClient,
  AzureKeyCredential,
  OpenAIKeyCredential,
} from "@azure/openai";
import {
  KeyCredential,
  TokenCredential,
  isTokenCredential,
} from "@azure/core-auth";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { chunkArray } from "@langchain/core/utils/chunk_array";
import { AzureOpenAIInput, AzureOpenAIEmbeddingsParams } from "./types.js";
import { USER_AGENT_PREFIX } from "./constants.js";

/**
 * Embeddings client backed by the Azure OpenAI service (via the
 * `@azure/openai` SDK). Supports three authentication styles: a plain
 * OpenAI API key, an Azure key credential, or an Azure token credential.
 *
 * @deprecated Import from "@langchain/openai" instead.
 */
export class AzureOpenAIEmbeddings
  extends Embeddings
  implements AzureOpenAIEmbeddingsParams, AzureOpenAIInput
{
  // `modelName` is the legacy alias; the constructor keeps it and `model`
  // in sync.
  modelName = "text-embedding-ada-002";

  model = "text-embedding-ada-002";

  // Maximum number of texts sent per embeddings request.
  batchSize = 512;

  // When true, newlines are replaced with spaces before embedding.
  stripNewLines = false;

  timeout?: number;

  user?: string;

  // `azureOpenAIApiKey` and `apiKey` are kept equal by the constructor.
  azureOpenAIApiKey?: string;

  apiKey?: string;

  azureOpenAIEndpoint?: string;

  azureOpenAIApiDeploymentName?: string;

  private client: AzureOpenAIClient;

  /**
   * Resolves configuration in priority order: explicit fields, then
   * environment variables. Which client constructor is used depends on the
   * kind of credential that results (OpenAI key vs. Azure key vs. token).
   */
  constructor(
    fields?: Partial<AzureOpenAIEmbeddingsParams> &
      Partial<AzureOpenAIInput> & {
        configuration?: AzureOpenAIClientOptions;
      }
  ) {
    const fieldsWithDefaults = { maxConcurrency: 2, ...fields };
    super(fieldsWithDefaults);

    // Deployment name: embeddings-specific field wins over the generic one,
    // then the corresponding environment variables in the same order.
    this.azureOpenAIApiDeploymentName =
      (fieldsWithDefaults?.azureOpenAIEmbeddingsApiDeploymentName ||
        fieldsWithDefaults?.azureOpenAIApiDeploymentName) ??
      (getEnvironmentVariable("AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME") ||
        getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME"));

    this.azureOpenAIEndpoint =
      fields?.azureOpenAIEndpoint ??
      getEnvironmentVariable("AZURE_OPENAI_API_ENDPOINT");

    // Candidate non-Azure (openai.com) key; used below to decide whether the
    // resolved key should be wrapped as an OpenAIKeyCredential.
    const openAiApiKey =
      fields?.apiKey ??
      fields?.openAIApiKey ??
      getEnvironmentVariable("OPENAI_API_KEY");

    this.azureOpenAIApiKey =
      fields?.apiKey ??
      fields?.azureOpenAIApiKey ??
      getEnvironmentVariable("AZURE_OPENAI_API_KEY") ??
      openAiApiKey;

    this.apiKey = this.azureOpenAIApiKey;

    // Explicit credentials take precedence; otherwise wrap the resolved key.
    // If the key came from the OpenAI (non-Azure) chain, use the OpenAI
    // credential type so the client targets api.openai.com.
    const azureCredential =
      fields?.credentials ??
      (this.apiKey === openAiApiKey
        ? new OpenAIKeyCredential(this.apiKey ?? "")
        : new AzureKeyCredential(this.apiKey ?? ""));

    // eslint-disable-next-line no-instanceof/no-instanceof
    const isOpenAIApiKey = azureCredential instanceof OpenAIKeyCredential;

    // Endpoint and deployment name are only mandatory for Azure-hosted use;
    // an OpenAI key implies the public OpenAI endpoint.
    if (!this.apiKey && !fields?.credentials) {
      throw new Error("Azure OpenAI API key not found");
    }
    if (!this.azureOpenAIEndpoint && !isOpenAIApiKey) {
      throw new Error("Azure OpenAI Endpoint not found");
    }
    if (!this.azureOpenAIApiDeploymentName && !isOpenAIApiKey) {
      throw new Error("Azure OpenAI Deployment name not found");
    }

    this.modelName =
      fieldsWithDefaults?.model ?? fieldsWithDefaults?.modelName ?? this.model;
    this.model = this.modelName;

    // NOTE(review): when a key is present the default batch size drops to 1
    // unless the caller set one explicitly — presumably a request-size
    // limitation; confirm the intent before changing.
    this.batchSize =
      fieldsWithDefaults?.batchSize ?? (this.apiKey ? 1 : this.batchSize);

    this.stripNewLines =
      fieldsWithDefaults?.stripNewLines ?? this.stripNewLines;

    this.timeout = fieldsWithDefaults?.timeout;

    // Tag outgoing requests so the integration is identifiable server-side.
    const options = {
      userAgentOptions: { userAgentPrefix: USER_AGENT_PREFIX },
    };

    if (isOpenAIApiKey) {
      // Public OpenAI endpoint: the SDK derives the URL from the credential.
      this.client = new AzureOpenAIClient(
        azureCredential as OpenAIKeyCredential
      );
    } else if (isTokenCredential(azureCredential)) {
      // Azure AD token-based auth against the configured endpoint.
      this.client = new AzureOpenAIClient(
        this.azureOpenAIEndpoint ?? "",
        azureCredential as TokenCredential,
        options
      );
    } else {
      // Azure API-key auth against the configured endpoint.
      this.client = new AzureOpenAIClient(
        this.azureOpenAIEndpoint ?? "",
        azureCredential as KeyCredential,
        options
      );
    }
  }

  /**
   * Embeds a list of documents, batching requests according to `batchSize`
   * and running batches concurrently (bounded by the caller's concurrency
   * settings inherited from `Embeddings`).
   *
   * @param texts Documents to embed.
   * @returns One embedding vector per input text, in input order.
   */
  async embedDocuments(texts: string[]): Promise<number[][]> {
    const batches = chunkArray(
      this.stripNewLines ? texts.map((t) => t.replace(/\n/g, " ")) : texts,
      this.batchSize
    );
    const batchRequests = batches.map((batch) => this.getEmbeddings(batch));
    const embeddings = await Promise.all(batchRequests);
    return embeddings.flat();
  }

  /**
   * Embeds a single query string.
   *
   * @param document Query text to embed.
   * @returns The embedding vector for the query.
   */
  async embedQuery(document: string): Promise<number[]> {
    const input = [
      this.stripNewLines ? document.replace(/\n/g, " ") : document,
    ];
    const embeddings = await this.getEmbeddings(input);
    return embeddings.flat();
  }

  /**
   * Issues one embeddings request through the retrying caller.
   * Falls back to the model name as deployment name when no deployment is
   * configured (the OpenAI-key path).
   */
  private async getEmbeddings(input: string[]): Promise<number[][]> {
    const deploymentName = this.azureOpenAIApiDeploymentName || this.model;

    const res = await this.caller.call(() =>
      this.client.getEmbeddings(deploymentName, input, {
        user: this.user,
        model: this.model,
        requestOptions: {
          timeout: this.timeout,
        },
      })
    );

    return res.data.map((data) => data.embedding);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-azure-openai
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/constants.ts
// User-agent prefix attached to outgoing Azure OpenAI requests (see the
// client options in this package) so traffic originating from this
// integration can be identified server-side.
export const USER_AGENT_PREFIX = "langchainjs-azure-openai";
0
lc_public_repos/langchainjs/libs/langchain-azure-openai/src
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/tests/chat_models-extended.int.test.ts
import { test, expect, jest } from "@jest/globals";
import { HumanMessage, ToolMessage } from "@langchain/core/messages";
import { InMemoryCache } from "@langchain/core/caches";
import { AzureChatOpenAI } from "../chat_models.js";

// Integration tests for extended AzureChatOpenAI features (JSON mode, seed,
// tool calling, caching). These hit the live service and require Azure
// OpenAI credentials in the environment.

// JSON mode: binding `response_format: json_object` should be accepted.
test("Test ChatOpenAI JSON mode", async () => {
  const chat = new AzureChatOpenAI({
    modelName: "gpt-3.5-turbo-1106",
    maxTokens: 128,
  }).bind({
    response_format: {
      type: "json_object",
    },
  });
  const message = new HumanMessage("Hello!");
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await chat.invoke([["system", "Only return JSON"], message]);
  // console.log(JSON.stringify(res));
});

// A fixed `seed` should make two identical calls return identical results.
test("Test ChatOpenAI seed", async () => {
  const chat = new AzureChatOpenAI({
    modelName: "gpt-3.5-turbo-1106",
    maxTokens: 128,
    temperature: 1,
  }).bind({
    seed: 123454930394983,
  });
  const message = new HumanMessage("Say something random!");
  const res = await chat.invoke([message]);
  // console.log(JSON.stringify(res));
  const res2 = await chat.invoke([message]);
  expect(res).toEqual(res2);
});

// A multi-city weather prompt should yield more than one tool call.
test("Test ChatOpenAI tool calling", async () => {
  const chat = new AzureChatOpenAI({
    modelName: "gpt-3.5-turbo-1106",
    maxTokens: 128,
  }).bind({
    tools: [
      {
        type: "function",
        function: {
          name: "get_current_weather",
          description: "Get the current weather in a given location",
          parameters: {
            type: "object",
            properties: {
              location: {
                type: "string",
                description: "The city and state, e.g. San Francisco, CA",
              },
              unit: { type: "string", enum: ["celsius", "fahrenheit"] },
            },
            required: ["location"],
          },
        },
      },
    ],
    tool_choice: "auto",
  });
  const res = await chat.invoke([
    ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"],
  ]);
  // console.log(JSON.stringify(res));
  expect(res.additional_kwargs.tool_calls?.length).toBeGreaterThan(1);
});

// Full tool-calling round trip: model call -> local tool execution ->
// ToolMessage results fed back for a final answer.
test("Test ChatOpenAI tool calling with ToolMessages", async () => {
  // Stubbed tool: returns canned weather data per city.
  function getCurrentWeather(location: string) {
    if (location.toLowerCase().includes("tokyo")) {
      return JSON.stringify({ location, temperature: "10", unit: "celsius" });
    } else if (location.toLowerCase().includes("san francisco")) {
      return JSON.stringify({
        location,
        temperature: "72",
        unit: "fahrenheit",
      });
    } else {
      return JSON.stringify({ location, temperature: "22", unit: "celsius" });
    }
  }
  const chat = new AzureChatOpenAI({
    modelName: "gpt-3.5-turbo-1106",
    maxTokens: 128,
  }).bind({
    tools: [
      {
        type: "function",
        function: {
          name: "get_current_weather",
          description: "Get the current weather in a given location",
          parameters: {
            type: "object",
            properties: {
              location: {
                type: "string",
                description: "The city and state, e.g. San Francisco, CA",
              },
              unit: { type: "string", enum: ["celsius", "fahrenheit"] },
            },
            required: ["location"],
          },
        },
      },
    ],
    tool_choice: "auto",
  });
  const res = await chat.invoke([
    ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"],
  ]);
  // console.log(JSON.stringify(res));
  expect(res.additional_kwargs.tool_calls?.length).toBeGreaterThan(1);
  // Execute each requested tool call locally and wrap the result in a
  // ToolMessage keyed by the tool_call_id.
  // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
  const toolMessages = res.additional_kwargs.tool_calls!.map(
    (toolCall) =>
      new ToolMessage({
        tool_call_id: toolCall.id,
        name: toolCall.function.name,
        content: getCurrentWeather(
          JSON.parse(toolCall.function.arguments).location
        ),
      })
  );
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const finalResponse = await chat.invoke([
    ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"],
    res,
    ...toolMessages,
  ]);
  // console.log(finalResponse);
});

// Tool calls should also accumulate correctly across streamed chunks.
test("Test ChatOpenAI tool calling with streaming", async () => {
  const chat = new AzureChatOpenAI({
    modelName: "gpt-3.5-turbo-1106",
    maxTokens: 256,
  }).bind({
    tools: [
      {
        type: "function",
        function: {
          name: "get_current_weather",
          description: "Get the current weather in a given location",
          parameters: {
            type: "object",
            properties: {
              location: {
                type: "string",
                description: "The city and state, e.g. San Francisco, CA",
              },
              unit: { type: "string", enum: ["celsius", "fahrenheit"] },
            },
            required: ["location"],
          },
        },
      },
    ],
    tool_choice: "auto",
  });
  const stream = await chat.stream([
    ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"],
  ]);
  let finalChunk;
  const chunks = [];
  for await (const chunk of stream) {
    // console.log(chunk.additional_kwargs.tool_calls);
    chunks.push(chunk);
    // Merge chunks so the aggregated message carries the full tool calls.
    if (!finalChunk) {
      finalChunk = chunk;
    } else {
      finalChunk = finalChunk.concat(chunk);
    }
  }
  expect(chunks.length).toBeGreaterThan(1);
  // console.log(finalChunk?.additional_kwargs.tool_calls);
  expect(finalChunk?.additional_kwargs.tool_calls?.length).toBeGreaterThan(1);
});

// With a cache configured, the second identical call should be served from
// the cache: two lookups, but only one update.
test("ChatOpenAI in JSON mode can cache generations", async () => {
  const memoryCache = new InMemoryCache();
  const lookupSpy = jest.spyOn(memoryCache, "lookup");
  const updateSpy = jest.spyOn(memoryCache, "update");
  const chat = new AzureChatOpenAI({
    modelName: "gpt-3.5-turbo-1106",
    temperature: 1,
    cache: memoryCache,
  }).bind({
    response_format: {
      type: "json_object",
    },
  });
  const message = new HumanMessage(
    "Respond with a JSON object containing arbitrary fields."
  );
  const res = await chat.invoke([message]);
  // console.log(res);
  const res2 = await chat.invoke([message]);
  // console.log(res2);
  expect(res).toEqual(res2);
  expect(lookupSpy).toHaveBeenCalledTimes(2);
  expect(updateSpy).toHaveBeenCalledTimes(1);
  lookupSpy.mockRestore();
  updateSpy.mockRestore();
});
0
lc_public_repos/langchainjs/libs/langchain-azure-openai/src
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/tests/llms.int.test.ts
import { test, expect } from "@jest/globals";
import { LLMResult } from "@langchain/core/outputs";
import { StringPromptValue } from "@langchain/core/prompt_values";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { NewTokenIndices } from "@langchain/core/callbacks/base";
import { ClientSecretCredential } from "@azure/identity";
import { TokenCredential } from "@azure/core-auth";
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { OpenAIKeyCredential } from "@azure/openai";
import { AzureOpenAI } from "../llms.js";

// Integration tests for the AzureOpenAI completion LLM. These hit the live
// service and require valid credentials in the environment.

// Basic smoke test: a minimal invoke should not throw.
test("Test OpenAI", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Print hello world");
  // console.log({ res });
});

// Stop sequences via the legacy positional `call` argument.
test("Test OpenAI with stop", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.call("Print hello world", ["world"]);
  // console.log({ res });
});

// Stop sequences via the call-options object.
test("Test OpenAI with stop in object", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Print hello world", { stop: ["world"] });
  // console.log({ res });
});

// A 10ms timeout should abort the request and reject.
test("Test OpenAI with timeout in call options", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
  });
  await expect(() =>
    model.invoke("Print hello world", {
      timeout: 10,
    })
  ).rejects.toThrow();
}, 5000);

// Same timeout behavior through the node adapter path.
test("Test OpenAI with timeout in call options and node adapter", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
  });
  await expect(() =>
    model.invoke("Print hello world", {
      timeout: 10,
    })
  ).rejects.toThrow();
}, 5000);

// Aborting the signal immediately after starting should reject the call.
test("Test OpenAI with signal in call options", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
  });
  const controller = new AbortController();
  await expect(() => {
    const ret = model.invoke("Print hello world", {
      signal: controller.signal,
    });
    controller.abort();
    return ret;
  }).rejects.toThrow();
}, 5000);

// Same abort behavior through the node adapter path.
test("Test OpenAI with signal in call options and node adapter", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
  });
  const controller = new AbortController();
  await expect(() => {
    const ret = model.invoke("Print hello world", {
      signal: controller.signal,
    });
    controller.abort();
    return ret;
  }).rejects.toThrow();
}, 5000);

// Concurrent invokes should be serialized when maxConcurrency is 1.
test("Test OpenAI with concurrency == 1", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
    maxConcurrency: 1,
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await Promise.all([
    model.invoke("Print hello world"),
    model.invoke("Print hello world"),
  ]);
  // console.log({ res });
});

// maxTokens: -1 exercises the "fill to context length" behavior.
test("Test OpenAI with maxTokens -1", async () => {
  const model = new AzureOpenAI({
    maxTokens: -1,
    modelName: "gpt-3.5-turbo-instruct",
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.call("Print hello world", ["world"]);
  // console.log({ res });
});

// Instruct models should be handled by the completions class and return text.
test("Test OpenAI with instruct model returns OpenAI", async () => {
  const model = new AzureOpenAI({ modelName: "gpt-3.5-turbo-instruct" });
  expect(model).toBeInstanceOf(AzureOpenAI);
  const res = await model.invoke("Print hello world");
  // console.log({ res });
  expect(typeof res).toBe("string");
});

// Same check for a version-suffixed instruct model name.
test("Test OpenAI with versioned instruct model returns OpenAI", async () => {
  const model = new AzureOpenAI({
    modelName: "gpt-3.5-turbo-instruct-0914",
  });
  expect(model).toBeInstanceOf(AzureOpenAI);
  const res = await model.invoke("Print hello world");
  // console.log({ res });
  expect(typeof res).toBe("string");
});

// Token usage should be reported through the handleLLMEnd callback.
test("Test ChatOpenAI tokenUsage", async () => {
  let tokenUsage = {
    completionTokens: 0,
    promptTokens: 0,
    totalTokens: 0,
  };
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
    callbackManager: CallbackManager.fromHandlers({
      async handleLLMEnd(output: LLMResult) {
        tokenUsage = output.llmOutput?.tokenUsage;
      },
    }),
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Hello");
  // console.log({ res });
  expect(tokenUsage.promptTokens).toBe(1);
});

// Streaming: tokens delivered via handleLLMNewToken must concatenate to the
// final result.
test("Test OpenAI in streaming mode", async () => {
  let nrNewTokens = 0;
  let streamedCompletion = "";
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
    streaming: true,
    callbacks: CallbackManager.fromHandlers({
      async handleLLMNewToken(token: string) {
        nrNewTokens += 1;
        streamedCompletion += token;
      },
    }),
  });
  const res = await model.invoke("Print hello world");
  // console.log({ res });
  expect(nrNewTokens > 0).toBe(true);
  expect(res).toBe(streamedCompletion);
});

// Streaming with two prompts and n=2 completions each: token indices must
// route each token to the right (prompt, completion) slot.
test("Test OpenAI in streaming mode with multiple prompts", async () => {
  let nrNewTokens = 0;
  const completions = [
    ["", ""],
    ["", ""],
  ];
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
    streaming: true,
    n: 2,
    callbacks: CallbackManager.fromHandlers({
      async handleLLMNewToken(token: string, idx: NewTokenIndices) {
        nrNewTokens += 1;
        completions[idx.prompt][idx.completion] += token;
      },
    }),
  });
  const res = await model.generate(["Print hello world", "print hello sea"]);
  // console.log(
  //   res.generations,
  //   res.generations.map((g) => g[0].generationInfo)
  // );
  expect(nrNewTokens > 0).toBe(true);
  expect(res.generations.length).toBe(2);
  expect(res.generations.map((g) => g.map((gg) => gg.text))).toEqual(
    completions
  );
});

// Same routing check for a chat-style model with n=1.
test("Test OpenAIChat in streaming mode with multiple prompts", async () => {
  let nrNewTokens = 0;
  const completions = [[""], [""]];
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo",
    streaming: true,
    n: 1,
    callbacks: CallbackManager.fromHandlers({
      async handleLLMNewToken(token: string, idx: NewTokenIndices) {
        nrNewTokens += 1;
        completions[idx.prompt][idx.completion] += token;
      },
    }),
  });
  const res = await model.generate(["Print hello world", "print hello sea"]);
  // console.log(
  //   res.generations,
  //   res.generations.map((g) => g[0].generationInfo)
  // );
  expect(nrNewTokens > 0).toBe(true);
  expect(res.generations.length).toBe(2);
  expect(res.generations.map((g) => g.map((gg) => gg.text))).toEqual(
    completions
  );
});

// generatePrompt should accept prompt-value objects.
test("Test OpenAI prompt value", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
  });
  const res = await model.generatePrompt([
    new StringPromptValue("Print hello world"),
  ]);
  expect(res.generations.length).toBe(1);
  for (const generation of res.generations) {
    expect(generation.length).toBe(1);
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    for (const g of generation) {
      // console.log(g.text);
    }
  }
  // console.log({ res });
});

// `stream` should yield more than one chunk for a normal completion.
test("Test OpenAI stream method", async () => {
  const model = new AzureOpenAI({
    maxTokens: 50,
    modelName: "gpt-3.5-turbo-instruct",
  });
  const stream = await model.stream("Print hello world.");
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  expect(chunks.length).toBeGreaterThan(1);
});

// Aborting mid-stream via AbortSignal.timeout should reject the iteration.
test("Test OpenAI stream method with abort", async () => {
  await expect(async () => {
    const model = new AzureOpenAI({
      maxTokens: 250,
      modelName: "gpt-3.5-turbo-instruct",
    });
    const stream = await model.stream(
      "How is your day going? Be extremely verbose.",
      {
        signal: AbortSignal.timeout(1000),
      }
    );
    // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
    // @ts-expect-error unused var
    for await (const chunk of stream) {
      // console.log(chunk);
    }
  }).rejects.toThrow();
});

// Breaking out of the async iterator early must not throw.
test("Test OpenAI stream method with early break", async () => {
  const model = new AzureOpenAI({
    maxTokens: 50,
    modelName: "gpt-3.5-turbo-instruct",
  });
  const stream = await model.stream(
    "How is your day going? Be extremely verbose."
  );
  let i = 0;
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  for await (const chunk of stream) {
    // console.log(chunk);
    i += 1;
    if (i > 5) {
      break;
    }
  }
});

// Azure AD (service principal) token credentials.
test("Test OpenAI with Token credentials ", async () => {
  const tenantId: string = getEnvironmentVariable("AZURE_TENANT_ID") ?? "";
  const clientId: string = getEnvironmentVariable("AZURE_CLIENT_ID") ?? "";
  const clientSecret: string =
    getEnvironmentVariable("AZURE_CLIENT_SECRET") ?? "";
  const credentials: TokenCredential = new ClientSecretCredential(
    tenantId,
    clientId,
    clientSecret
  );
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "gpt-3.5-turbo-instruct",
    credentials,
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Print hello world");
  // console.log({ res });
});

// Azure API-key auth with explicit endpoint and deployment name.
test("Test Azure OpenAI with key credentials ", async () => {
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "davinci-002",
    azureOpenAIApiKey: getEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? "",
    azureOpenAIEndpoint:
      getEnvironmentVariable("AZURE_OPENAI_API_ENDPOINT") ?? "",
    azureOpenAIApiDeploymentName:
      getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME") ?? "",
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Print hello world");
  // console.log({ res });
});

// A plain OpenAI (non-Azure) key routed through OpenAIKeyCredential;
// endpoint and deployment name are intentionally blank in this mode.
test("Test OpenAI with OpenAI API key credentials ", async () => {
  const openAiKey: string = getEnvironmentVariable("OPENAI_API_KEY") ?? "";
  const credentials = new OpenAIKeyCredential(openAiKey);
  const model = new AzureOpenAI({
    maxTokens: 5,
    modelName: "davinci-002",
    credentials,
    azureOpenAIEndpoint: "",
    azureOpenAIApiDeploymentName: "",
  });
  // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
  // @ts-expect-error unused var
  const res = await model.invoke("Print hello world");
  // console.log({ res });
});
0
lc_public_repos/langchainjs/libs/langchain-azure-openai/src
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/tests/chat_models-vision.int.test.ts
import { test } from "@jest/globals"; import { HumanMessage } from "@langchain/core/messages"; import * as fs from "node:fs/promises"; import { fileURLToPath } from "node:url"; import * as path from "node:path"; import { AzureChatOpenAI } from "../chat_models.js"; test("Test ChatOpenAI with a file", async () => { const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); const imageData = await fs.readFile(path.join(__dirname, "/data/hotdog.jpg")); const chat = new AzureChatOpenAI({ modelName: "gpt-4-vision-preview", maxTokens: 1024, }); const message = new HumanMessage({ content: [ { type: "text", text: "What's in this image?", }, { type: "image_url", image_url: { url: `data:image/jpeg;base64,${imageData.toString("base64")}`, }, }, ], }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([message]); // console.log({ res }); }); test("Test ChatOpenAI with a URL", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-4-vision-preview", maxTokens: 1024, }); const message = new HumanMessage({ content: [ { type: "text", text: "What does this image say?", }, { type: "image_url", image_url: "https://www.freecodecamp.org/news/content/images/2023/05/Screenshot-2023-05-29-at-5.40.38-PM.png", }, ], }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([message]); // console.log({ res }); });
0
lc_public_repos/langchainjs/libs/langchain-azure-openai/src
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/tests/chat_models.int.test.ts
import { test, jest, expect } from "@jest/globals"; import { BaseMessage, ChatMessage, HumanMessage, SystemMessage, } from "@langchain/core/messages"; import { ChatGeneration, LLMResult } from "@langchain/core/outputs"; import { ChatPromptValue } from "@langchain/core/prompt_values"; import { PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { InMemoryCache } from "@langchain/core/caches"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { OpenAIKeyCredential } from "@azure/openai"; import { AzureChatOpenAI } from "../chat_models.js"; test("Test ChatOpenAI", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, }); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.call([message]); // console.log({ res }); }); test("Test ChatOpenAI with SystemChatMessage", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, }); const system_message = new SystemMessage("You are to chat with a user."); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.call([system_message, message]); // console.log({ res }); }); test("Test ChatOpenAI Generate", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message], [message]]); expect(res.generations.length).toBe(2); for (const generation of res.generations) { expect(generation.length).toBe(2); for (const message of generation) { // console.log(message.text); expect(typeof 
message.text).toBe("string"); } } // console.log({ res }); }); test("Test ChatOpenAI Generate throws when one of the calls fails", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); await expect(() => chat.generate([[message], [message]], { signal: AbortSignal.timeout(10), }) ).rejects.toThrow(); }); test("Test ChatOpenAI tokenUsage", async () => { let tokenUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, }; const model = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { // console.log(output); tokenUsage = output.llmOutput?.tokenUsage; }, }), }); const message = new HumanMessage("Hello"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke([message]); // console.log({ res }); expect(tokenUsage.promptTokens).toBeGreaterThan(0); }); test("Test ChatOpenAI tokenUsage with a batch", async () => { let tokenUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, }; const model = new AzureChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo", callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { tokenUsage = output.llmOutput?.tokenUsage; }, }), }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.generate([ [new HumanMessage("Hello")], [new HumanMessage("Hi")], ]); // console.log(res); expect(tokenUsage.promptTokens).toBeGreaterThan(0); }); test("Test ChatOpenAI in streaming mode", async () => { let nrNewTokens = 0; let streamedCompletion = ""; const model = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxTokens: 10, callbacks: [ { async handleLLMNewToken(token: string) { nrNewTokens += 1; streamedCompletion += token; }, }, ], }); const message = new 
HumanMessage("Hello!"); const result = await model.invoke([message]); expect(nrNewTokens > 0).toBe(true); expect(result.content).toBe(streamedCompletion); }, 10000); test("Test ChatOpenAI in streaming mode with n > 1 and multiple prompts", async () => { let nrNewTokens = 0; const streamedCompletions = [ ["", ""], ["", ""], ]; const model = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxTokens: 10, n: 2, callbacks: [ { async handleLLMNewToken(token: string, idx: NewTokenIndices) { nrNewTokens += 1; streamedCompletions[idx.prompt][idx.completion] += token; }, }, ], }); const message1 = new HumanMessage("Hello!"); const message2 = new HumanMessage("Bye!"); const result = await model.generate([[message1], [message2]]); expect(nrNewTokens > 0).toBe(true); expect(result.generations.map((g) => g.map((gg) => gg.text))).toEqual( streamedCompletions ); }, 10000); test("Test ChatOpenAI prompt value", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, }); const message = new HumanMessage("Hello!"); const res = await chat.generatePrompt([new ChatPromptValue([message])]); expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(2); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for (const g of generation) { // console.log(g.text); } } // console.log({ res }); }); test("OpenAI Chat, docs, prompt templates", async () => { const chat = new AzureChatOpenAI({ temperature: 0, maxTokens: 10 }); const systemPrompt = PromptTemplate.fromTemplate( "You are a helpful assistant that translates {input_language} to {output_language}." 
); const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(systemPrompt), HumanMessagePromptTemplate.fromTemplate("{text}"), ]); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", output_language: "French", text: "I love programming.", }), ]); // console.log(responseA.generations); }, 5000); test("Test OpenAI with stop", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.call( [new HumanMessage("Print hello world")], ["world"] ); // console.log({ res }); }); test("Test OpenAI with stop in object", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke([new HumanMessage("Print hello world")], { stop: ["world"], }); // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); await expect(() => model.invoke([new HumanMessage("Print hello world")], { timeout: 10 }) ).rejects.toThrow(); }, 5000); test("Test OpenAI with timeout in call options and node adapter", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); await expect(() => model.invoke([new HumanMessage("Print hello world")], { timeout: 10 }) ).rejects.toThrow(); }, 5000); test("Test OpenAI with signal in call options", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); const controller = new AbortController(); await expect(() => { const ret = model.invoke([new HumanMessage("Print hello world")], { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test OpenAI with signal in call options and node 
adapter", async () => { const model = new AzureChatOpenAI({ maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); const controller = new AbortController(); await expect(() => { const ret = model.invoke([new HumanMessage("Print hello world")], { signal: controller.signal, }); controller.abort(); return ret; }).rejects.toThrow(); }, 5000); test("Test OpenAI with specific roles in ChatMessage", async () => { const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, }); const system_message = new ChatMessage( "You are to chat with a user.", "system" ); const user_message = new ChatMessage("Hello!", "user"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.call([system_message, user_message]); // console.log({ res }); }); test("Test ChatOpenAI stream method", async () => { const model = new AzureChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo", }); const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); }); test("Test ChatOpenAI stream method with abort", async () => { await expect(async () => { const model = new AzureChatOpenAI({ maxTokens: 100, modelName: "gpt-3.5-turbo", }); const stream = await model.stream( "How is your day going? Be extremely verbose.", { signal: AbortSignal.timeout(500), } ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Test ChatOpenAI stream method with early break", async () => { const model = new AzureChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo", }); const stream = await model.stream( "How is your day going? Be extremely verbose." 
); let i = 0; // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); i += 1; if (i > 10) { break; } } }); test("Test ChatOpenAI stream method, timeout error thrown from SDK", async () => { await expect(async () => { const model = new AzureChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo", timeout: 1, }); const stream = await model.stream( "How is your day going? Be extremely verbose." ); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(chunk); } }).rejects.toThrow(); }); test("Function calling with streaming", async () => { let finalResult: BaseMessage | undefined; const modelForFunctionCalling = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0, callbacks: [ { handleLLMEnd(output: LLMResult) { finalResult = (output.generations[0][0] as ChatGeneration).message; }, }, ], }); const stream = await modelForFunctionCalling.stream( "What is the weather in New York?", { functions: [ { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, ], function_call: { name: "get_current_weather", }, } ); const chunks = []; let streamedOutput; for await (const chunk of stream) { chunks.push(chunk); if (!streamedOutput) { streamedOutput = chunk; } else if (chunk) { streamedOutput = streamedOutput.concat(chunk); } } expect(finalResult).toEqual(streamedOutput); expect(chunks.length).toBeGreaterThan(1); expect(finalResult?.additional_kwargs?.function_call?.name).toBe( "get_current_weather" ); // console.log( // JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? 
"") // .location // ); }); test("ChatOpenAI can cache generations", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10, n: 2, cache: memoryCache, }); const message = new HumanMessage("Hello"); const res = await chat.generate([[message], [message]]); expect(res.generations.length).toBe(2); expect(lookupSpy).toHaveBeenCalledTimes(2); expect(updateSpy).toHaveBeenCalledTimes(2); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); test("ChatOpenAI can write and read cached generations", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 100, n: 1, cache: memoryCache, }); const generateUncachedSpy = jest.spyOn(chat, "_generateUncached"); const messages = [ [ new HumanMessage("what color is the sky?"), new HumanMessage("what color is the ocean?"), ], [new HumanMessage("hello")], ]; const response1 = await chat.generate(messages); expect(generateUncachedSpy).toHaveBeenCalledTimes(1); generateUncachedSpy.mockRestore(); const response2 = await chat.generate(messages); expect(generateUncachedSpy).toHaveBeenCalledTimes(0); // Request should be cached, no need to generate. 
generateUncachedSpy.mockRestore(); expect(response1.generations.length).toBe(2); expect(response2.generations).toEqual(response1.generations); expect(lookupSpy).toHaveBeenCalledTimes(4); expect(updateSpy).toHaveBeenCalledTimes(2); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); test("ChatOpenAI should not reuse cache if function call args have changed", async () => { const memoryCache = new InMemoryCache(); const lookupSpy = jest.spyOn(memoryCache, "lookup"); const updateSpy = jest.spyOn(memoryCache, "update"); const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 100, n: 1, cache: memoryCache, }); const generateUncachedSpy = jest.spyOn(chat, "_generateUncached"); const messages = [ [ new HumanMessage("what color is the sky?"), new HumanMessage("what color is the ocean?"), ], [new HumanMessage("hello")], ]; const response1 = await chat.generate(messages); expect(generateUncachedSpy).toHaveBeenCalledTimes(1); generateUncachedSpy.mockRestore(); const response2 = await chat.generate(messages, { functions: [ { name: "extractor", description: "Extract fields from the input", parameters: { type: "object", properties: { tone: { type: "string", description: "the tone of the input", }, }, required: ["tone"], }, }, ], function_call: { name: "extractor", }, }); expect(generateUncachedSpy).toHaveBeenCalledTimes(0); // Request should not be cached since it's being called with different function call args expect(response1.generations.length).toBe(2); expect( (response2.generations[0][0] as ChatGeneration).message.additional_kwargs .function_call?.name ?? 
"" ).toEqual("extractor"); const response3 = await chat.generate(messages, { functions: [ { name: "extractor", description: "Extract fields from the input", parameters: { type: "object", properties: { tone: { type: "string", description: "the tone of the input", }, }, required: ["tone"], }, }, ], function_call: { name: "extractor", }, }); expect(response2.generations).toEqual(response3.generations); expect(lookupSpy).toHaveBeenCalledTimes(6); expect(updateSpy).toHaveBeenCalledTimes(4); lookupSpy.mockRestore(); updateSpy.mockRestore(); }); function createSampleMessages(): BaseMessage[] { // same example as in https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb return [ createSystemChatMessage( "You are a helpful, pattern-following assistant that translates corporate jargon into plain English." ), createSystemChatMessage( "New synergies will help drive top-line growth.", "example_user" ), createSystemChatMessage( "Things working well together will increase revenue.", "example_assistant" ), createSystemChatMessage( "Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.", "example_user" ), createSystemChatMessage( "Let's talk later when we're less busy about how to do better.", "example_assistant" ), new HumanMessage( "This late pivot means we don't have time to boil the ocean for the client deliverable." 
), ]; } function createSystemChatMessage(text: string, name?: string) { const msg = new SystemMessage(text); msg.name = name; return msg; } test("getNumTokensFromMessages gpt-3.5-turbo-0301 model for sample input", async () => { const messages: BaseMessage[] = createSampleMessages(); const chat = new AzureChatOpenAI({ azureOpenAIApiKey: "dummy", modelName: "gpt-3.5-turbo-0301", }); const { totalCount } = await chat.getNumTokensFromMessages(messages); expect(totalCount).toBe(127); }); test("getNumTokensFromMessages gpt-4-0314 model for sample input", async () => { const messages: BaseMessage[] = createSampleMessages(); const chat = new AzureChatOpenAI({ azureOpenAIApiKey: "dummy", modelName: "gpt-4-0314", }); const { totalCount } = await chat.getNumTokensFromMessages(messages); expect(totalCount).toBe(129); }); test("Test ChatOpenAI token usage reporting for streaming function calls", async () => { let streamingTokenUsed = -1; let nonStreamingTokenUsed = -1; const humanMessage = "What a beautiful day!"; const extractionFunctionSchema = { name: "extractor", description: "Extracts fields from the input.", parameters: { type: "object", properties: { tone: { type: "string", enum: ["positive", "negative"], description: "The overall tone of the input", }, word_count: { type: "number", description: "The number of words in the input", }, chat_response: { type: "string", description: "A response to the human's input", }, }, required: ["tone", "word_count", "chat_response"], }, }; const streamingModel = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; // console.log("streaming usage", output.llmOutput?.estimatedTokenUsage); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }).bind({ functions: [extractionFunctionSchema], function_call: { name: "extractor" }, 
}); const nonStreamingModel = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: false, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; // console.log("non-streaming usage", output.llmOutput?.tokenUsage); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }).bind({ functions: [extractionFunctionSchema], function_call: { name: "extractor" }, }); const [nonStreamingResult, streamingResult] = await Promise.all([ nonStreamingModel.invoke([new HumanMessage(humanMessage)]), streamingModel.invoke([new HumanMessage(humanMessage)]), ]); if ( nonStreamingResult.additional_kwargs.function_call?.arguments && streamingResult.additional_kwargs.function_call?.arguments ) { const nonStreamingArguments = JSON.stringify( JSON.parse(nonStreamingResult.additional_kwargs.function_call.arguments) ); const streamingArguments = JSON.stringify( JSON.parse(streamingResult.additional_kwargs.function_call.arguments) ); if (nonStreamingArguments === streamingArguments) { expect(streamingTokenUsed).toEqual(nonStreamingTokenUsed); } } expect(streamingTokenUsed).toBeGreaterThan(-1); }); test("Test ChatOpenAI token usage reporting for streaming calls", async () => { let streamingTokenUsed = -1; let nonStreamingTokenUsed = -1; const systemPrompt = "You are a helpful assistant"; const question = "What is the color of the night sky?"; const streamingModel = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: true, maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; // console.log("streaming usage", output.llmOutput?.estimatedTokenUsage); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }); const nonStreamingModel = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", streaming: false, 
maxRetries: 10, maxConcurrency: 10, temperature: 0, topP: 0, callbacks: [ { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; // console.log("non-streaming usage", output.llmOutput?.estimated); }, handleLLMError: async (_err) => { // console.error(err); }, }, ], }); const [nonStreamingResult, streamingResult] = await Promise.all([ nonStreamingModel.generate([ [new SystemMessage(systemPrompt), new HumanMessage(question)], ]), streamingModel.generate([ [new SystemMessage(systemPrompt), new HumanMessage(question)], ]), ]); expect(streamingTokenUsed).toBeGreaterThan(-1); if ( nonStreamingResult.generations[0][0].text === streamingResult.generations[0][0].text ) { expect(streamingTokenUsed).toEqual(nonStreamingTokenUsed); } }); test("Test Azure ChatOpenAI with key credentials ", async () => { const model = new AzureChatOpenAI({ maxTokens: 5, modelName: "davinci-002", azureOpenAIApiKey: getEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? "", azureOpenAIEndpoint: getEnvironmentVariable("AZURE_OPENAI_API_ENDPOINT") ?? "", azureOpenAIApiDeploymentName: getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME") ?? "", }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Print hello world"); // console.log({ res }); }); test("Test ChatOpenAI with OpenAI API key credentials", async () => { const openAiKey: string = getEnvironmentVariable("OPENAI_API_KEY") ?? ""; const credentials = new OpenAIKeyCredential(openAiKey); const chat = new AzureChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 5, credentials, azureOpenAIEndpoint: "", azureOpenAIApiDeploymentName: "", }); const message = new HumanMessage("Hello!"); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await chat.invoke([["system", "Say hi"], message]); // console.log(res); });
0
lc_public_repos/langchainjs/libs/langchain-azure-openai/src
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/tests/embeddings.int.test.ts
import { test, expect } from "@jest/globals"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { ClientSecretCredential, TokenCredential } from "@azure/identity"; import { OpenAIKeyCredential } from "@azure/openai"; import { AzureOpenAIEmbeddings } from "../embeddings.js"; test("Test OpenAIEmbeddings.embedQuery", async () => { const embeddings = new AzureOpenAIEmbeddings(); const res = await embeddings.embedQuery("Hello world"); expect(typeof res[0]).toBe("number"); }); test("Test OpenAIEmbeddings.embedDocuments", async () => { const embeddings = new AzureOpenAIEmbeddings(); const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); expect(res).toHaveLength(2); expect(typeof res[0][0]).toBe("number"); expect(typeof res[1][0]).toBe("number"); }); test("Test OpenAIEmbeddings.embedDocuments batching", async () => { const embeddings = new AzureOpenAIEmbeddings({ batchSize: 16, }); const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); expect(res).toHaveLength(2); expect(typeof res[0][0]).toBe("number"); expect(typeof res[1][0]).toBe("number"); }); test("Test OpenAIEmbeddings concurrency", async () => { const embeddings = new AzureOpenAIEmbeddings({ batchSize: 1, maxConcurrency: 2, }); const res = await embeddings.embedDocuments([ "Hello world", "Bye bye", "Hello world", "Bye bye", "Hello world", "Bye bye", ]); expect(res).toHaveLength(6); expect(res.find((embedding) => typeof embedding[0] !== "number")).toBe( undefined ); }); test("Test timeout error thrown from SDK", async () => { await expect(async () => { const model = new AzureOpenAIEmbeddings({ timeout: 1, }); await model.embedDocuments([ "Hello world", "Bye bye", "Hello world", "Bye bye", "Hello world", "Bye bye", ]); }).rejects.toThrow(); }); test("Test OpenAIEmbeddings.embedQuery with TokenCredentials", async () => { const tenantId: string = getEnvironmentVariable("AZURE_TENANT_ID") ?? 
""; const clientId: string = getEnvironmentVariable("AZURE_CLIENT_ID") ?? ""; const clientSecret: string = getEnvironmentVariable("AZURE_CLIENT_SECRET") ?? ""; const credentials: TokenCredential = new ClientSecretCredential( tenantId, clientId, clientSecret ); const embeddings = new AzureOpenAIEmbeddings({ credentials }); const res = await embeddings.embedQuery("Hello world"); expect(typeof res[0]).toBe("number"); }); test("Test OpenAIEmbeddings.embedQuery with key credentials ", async () => { const embeddings = new AzureOpenAIEmbeddings({ modelName: "text-embedding-ada-002", azureOpenAIApiKey: getEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? "", azureOpenAIEndpoint: getEnvironmentVariable("AZURE_OPENAI_API_ENDPOINT") ?? "", azureOpenAIApiDeploymentName: getEnvironmentVariable("AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME") ?? "", }); const res = await embeddings.embedQuery("Hello world"); expect(typeof res[0]).toBe("number"); }); test("Test OpenAIEmbeddings.embedQuery with OpenAI API key credentials", async () => { const openAiKey: string = getEnvironmentVariable("OPENAI_API_KEY") ?? ""; const credentials = new OpenAIKeyCredential(openAiKey); const embeddings = new AzureOpenAIEmbeddings({ credentials, azureOpenAIEndpoint: "", azureOpenAIApiDeploymentName: "", }); const res = await embeddings.embedQuery("Hello world"); expect(typeof res[0]).toBe("number"); });
0
lc_public_repos/langchainjs/libs/langchain-azure-openai/src
lc_public_repos/langchainjs/libs/langchain-azure-openai/src/utils/openai-format-fndef.ts
/** * Formatting function definitions for calculating openai function defination token usage. * * https://github.com/hmarr/openai-chat-tokens/blob/main/src/functions.ts * (c) 2023 Harry Marr * MIT license */ import { FunctionDefinition } from "@azure/openai"; export interface FunctionDef extends Omit<FunctionDefinition, "parameters"> { name: string; description?: string; parameters: ObjectProp; } interface ObjectProp { type: "object"; properties?: { [key: string]: Prop; }; required?: string[]; } interface AnyOfProp { anyOf: Prop[]; } type Prop = { description?: string; } & ( | AnyOfProp | ObjectProp | { type: "string"; enum?: string[]; } | { type: "number" | "integer"; minimum?: number; maximum?: number; enum?: number[]; } | { type: "boolean" } | { type: "null" } | { type: "array"; items?: Prop; } ); function isAnyOfProp(prop: Prop): prop is AnyOfProp { return ( (prop as AnyOfProp).anyOf !== undefined && Array.isArray((prop as AnyOfProp).anyOf) ); } // When OpenAI use functions in the prompt, they format them as TypeScript definitions rather than OpenAPI JSON schemas. // This function converts the JSON schemas into TypeScript definitions. export function formatFunctionDefinitions(functions: FunctionDef[]) { const lines = ["namespace functions {", ""]; for (const f of functions) { if (f.description) { lines.push(`// ${f.description}`); } if (Object.keys(f.parameters.properties ?? {}).length > 0) { lines.push(`type ${f.name} = (_: {`); lines.push(formatObjectProperties(f.parameters, 0)); lines.push("}) => any;"); } else { lines.push(`type ${f.name} = () => any;`); } lines.push(""); } lines.push("} // namespace functions"); return lines.join("\n"); } // Format just the properties of an object (not including the surrounding braces) function formatObjectProperties(obj: ObjectProp, indent: number): string { const lines: string[] = []; for (const [name, param] of Object.entries(obj.properties ?? 
{})) { if (param.description && indent < 2) { lines.push(`// ${param.description}`); } if (obj.required?.includes(name)) { lines.push(`${name}: ${formatType(param, indent)},`); } else { lines.push(`${name}?: ${formatType(param, indent)},`); } } return lines.map((line) => " ".repeat(indent) + line).join("\n"); } // Format a single property type function formatType(param: Prop, indent: number): string { if (isAnyOfProp(param)) { return param.anyOf.map((v) => formatType(v, indent)).join(" | "); } switch (param.type) { case "string": if (param.enum) { return param.enum.map((v) => `"${v}"`).join(" | "); } return "string"; case "number": if (param.enum) { return param.enum.map((v) => `${v}`).join(" | "); } return "number"; case "integer": if (param.enum) { return param.enum.map((v) => `${v}`).join(" | "); } return "number"; case "boolean": return "boolean"; case "null": return "null"; case "object": return ["{", formatObjectProperties(param, indent + 2), "}"].join("\n"); case "array": if (param.items) { return `${formatType(param.items, indent)}[]`; } return "any[]"; default: return ""; } }
0
lc_public_repos/langchainjs/libs/langchain-azure-openai
lc_public_repos/langchainjs/libs/langchain-azure-openai/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { "^.+\\.tsx?$": ["@swc/jest"], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], testTimeout: 20_000, passWithNoTests: true, };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node"); class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { constructor(config, context) { // Make `instanceof Float32Array` return true in tests // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 super(config, context); this.global.Float32Array = Float32Array; } } module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/README.md
# @langchain/nomic This package contains the LangChain.js integrations for Nomic via the @nomic-ai/atlas package. ## Installation ```bash npm2yarn npm install @langchain/nomic @langchain/core ``` ## Embeddings This package adds support for Nomic embeddings. Currently, they offer two embeddings models: - `nomic-embed-text-v1` - `nomic-embed-text-v1.5` `nomic-embed-text-v1.5` allows for you to customize the number of dimensions returned. It defaults to the largest possible number of dimensions (768), or you can select 64, 128, 256, or 512. Now set the necessary environment variable (or pass it in via the constructor): ```bash export NOMIC_API_KEY= ``` ```typescript import { NomicEmbeddings } from "@langchain/nomic"; const nomicEmbeddings = new NomicEmbeddings({ apiKey: process.env.NOMIC_API_KEY, // Default value. modelName: "nomic-embed-text-v1", // Default value. }); const docs = [ "hello world", "nomic embeddings!", "super special langchain integration package", "what color is the sky?", ]; const embeddings = await nomicEmbeddings.embedDocuments(docs); ``` ## Development To develop the `@langchain/nomic` package, you'll need to follow these instructions: ### Install dependencies ```bash yarn install ``` ### Build the package ```bash yarn build ``` Or from the repo root: ```bash yarn build --filter=@langchain/nomic ``` ### Run tests Test files should live within a `tests/` file in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`: ```bash $ yarn test $ yarn test:int ``` ### Lint & Format Run the linter & formatter to ensure your code is up to standard: ```bash yarn lint && yarn format ``` ### Adding new entrypoints If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/langchain.config.js
import { dirname, resolve } from "node:path";
import { fileURLToPath } from "node:url";

// Directory containing this config file (the package root).
const packageRoot = dirname(fileURLToPath(import.meta.url));

/**
 * Resolve a path relative to this package's root directory.
 *
 * @param {string} relativePath - path relative to the package root
 * @returns {string} the corresponding absolute path
 */
const abs = (relativePath) => resolve(packageRoot, relativePath);

// Build configuration consumed by the package build tooling (`yarn lc_build`).
export const config = {
  // Module specifiers treated as internal to the build.
  internals: [/node\:/, /@langchain\/core\//],
  // Entrypoint name -> source module (relative to src/, extensionless).
  entrypoints: {
    index: "index",
  },
  tsConfigPath: resolve("./tsconfig.json"),
  cjsSource: "./dist-cjs",
  cjsDestination: "./dist",
  abs,
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/package.json
{ "name": "@langchain/nomic", "version": "0.1.0", "description": "Nomic integration for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-nomic/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/nomic", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "@nomic-ai/atlas": "^0.8.0" }, "peerDependencies": { "@langchain/core": ">=0.3.0 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:^", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@types/uuid": "^9", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", 
"dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-nomic/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-nomic
lc_public_repos/langchainjs/libs/langchain-nomic/src/index.ts
/**
 * Entrypoint for the `@langchain/nomic` package.
 * Re-exports the Nomic embeddings integration.
 */
export * from "./embeddings.js";
0
lc_public_repos/langchainjs/libs/langchain-nomic
lc_public_repos/langchainjs/libs/langchain-nomic/src/embeddings.ts
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { Embeddings, type EmbeddingsParams } from "@langchain/core/embeddings";
import { chunkArray } from "@langchain/core/utils/chunk_array";
import { AtlasUser } from "@nomic-ai/atlas";

export type EmbeddingTaskType =
  | "search_query"
  | "search_document"
  | "clustering"
  | "classification";

/**
 * Interface for NomicEmbeddings parameters. Extends EmbeddingsParams and
 * defines additional parameters specific to the NomicEmbeddings class.
 */
export interface NomicEmbeddingsParams extends EmbeddingsParams {
  /**
   * The API key to use.
   * @default {process.env.NOMIC_API_KEY}
   */
  apiKey?: string;
  /**
   * The name of the model to use.
   * Alias for `model`
   * @default {"nomic-embed-text-v1"}
   */
  modelName?: string;
  /**
   * The name of the model to use.
   * @default {"nomic-embed-text-v1"}
   */
  model?: string;
  /**
   * The task your embeddings should be specialized for:
   * search_query, search_document, clustering, classification.
   * @default {"search_document"}
   */
  taskType?: EmbeddingTaskType;
  /**
   * The maximum number of documents to embed in a single request.
   * @default {400}
   */
  batchSize?: number;
  /**
   * Whether to strip new lines from the input text. This is recommended,
   * but may not be suitable for all use cases.
   * @default {true}
   */
  stripNewLines?: boolean;
  /**
   * The output size of the embedding model. Applicable only to models
   * that support variable dimensionality and defaults to the models
   * largest embedding size.
   */
  dimensionality?: number;
}

/** Shape of a response from the Nomic text-embedding endpoint. */
export interface NomicEmbeddingsResult {
  embeddings: Array<number[]>;
  usage: {
    total_tokens: number;
  };
}

/**
 * Class for generating embeddings using the Nomic API.
 */
export class NomicEmbeddings
  extends Embeddings
  implements NomicEmbeddingsParams
{
  modelName = "nomic-embed-text-v1";

  model = "nomic-embed-text-v1";

  taskType: EmbeddingTaskType = "search_document";

  batchSize = 400;

  stripNewLines = true;

  client: AtlasUser;

  dimensionality?: number;

  get lc_secrets(): { [key: string]: string } | undefined {
    return {
      // FIX: this must map this class's `apiKey` constructor field to its
      // environment variable so the secret is redacted on serialization.
      // It previously read `promptLayerApiKey`, a copy-paste error from
      // another integration.
      apiKey: "NOMIC_API_KEY",
    };
  }

  constructor(fields?: Partial<NomicEmbeddingsParams>) {
    super(fields ?? {});
    const apiKey = fields?.apiKey ?? getEnvironmentVariable("NOMIC_API_KEY");
    if (!apiKey) {
      throw new Error("NOMIC_API_KEY is required.");
    }
    this.client = new AtlasUser({ apiKey });
    // `model` takes precedence over the legacy `modelName` alias; the two
    // fields are then kept in sync.
    this.modelName = fields?.model ?? fields?.modelName ?? this.model;
    this.model = this.modelName;
    this.taskType = fields?.taskType ?? this.taskType;
    this.batchSize = fields?.batchSize ?? this.batchSize;
    this.stripNewLines = fields?.stripNewLines ?? this.stripNewLines;
    this.dimensionality = fields?.dimensionality;
  }

  /**
   * Method to generate embeddings for an array of documents. Splits the
   * documents into batches and makes requests to the Nomic API to generate
   * embeddings.
   * @param {Array<string>} texts Array of documents to generate embeddings for.
   * @returns {Promise<number[][]>} Promise that resolves to a 2D array of embeddings for each document.
   */
  async embedDocuments(texts: string[]): Promise<number[][]> {
    const batches = chunkArray(
      this.stripNewLines ? texts.map((t) => t.replace(/\n/g, " ")) : texts,
      this.batchSize
    );
    // Batches are embedded in parallel; results come back in batch order,
    // so flattening preserves the input document order.
    const batchRequests = batches.map((batch) =>
      this.embeddingWithRetry(batch)
    );
    const batchResponses = await Promise.all(batchRequests);
    return batchResponses.flatMap(({ embeddings }) => embeddings);
  }

  /**
   * Method to generate an embedding for a single document. Calls the
   * embeddingWithRetry method with the document as the input.
   * @param {string} text Document to generate an embedding for.
   * @returns {Promise<number[]>} Promise that resolves to an embedding for the document.
   */
  async embedQuery(text: string): Promise<number[]> {
    const { embeddings } = await this.embeddingWithRetry(
      this.stripNewLines ? text.replace(/\n/g, " ") : text
    );
    return embeddings[0];
  }

  /**
   * Private method to make a request to the Nomic API to generate
   * embeddings. Handles the retry logic and returns the response from the
   * API.
   * @param {string | Array<string>} input Text to send to the Nomic API.
   * @returns {Promise<NomicEmbeddingsResult>} Promise that resolves to the response from the API.
   */
  private async embeddingWithRetry(
    input: string | Array<string>
  ): Promise<NomicEmbeddingsResult> {
    return this.caller.call(async () => {
      const result = await this.client.apiCall(`/v1/embedding/text`, "POST", {
        model: this.model,
        texts: Array.isArray(input) ? input : [input],
        task_type: this.taskType,
        dimensionality: this.dimensionality,
      });
      return result;
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-nomic/src
lc_public_repos/langchainjs/libs/langchain-nomic/src/tests/embeddings.int.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { NomicEmbeddings } from "../embeddings.js"; test("NomicEmbeddings can embed docs", async () => { const nomicEmbeddings = new NomicEmbeddings(); const docs = [ "hello world", "nomic embeddings!", "super special langchain integration package", "what color is the sky?", ]; const embeddings = await nomicEmbeddings.embedDocuments(docs); expect(embeddings).toHaveLength(4); expect(embeddings[0]).toHaveLength(768); }); test("NomicEmbeddings can embed more docs than the default batch size", async () => { const nomicEmbeddings = new NomicEmbeddings({ maxRetries: 0, }); // Batch size is 400. 800 docs should be 2 batches. const docs = Array.from({ length: 800 }, () => "hello world"); const embeddings = await nomicEmbeddings.embedDocuments(docs); expect(embeddings).toHaveLength(800); expect(embeddings[0]).toHaveLength(768); }); test("NomicEmbeddings can embed query", async () => { const nomicEmbeddings = new NomicEmbeddings(); const query = "hello world"; const embeddings = await nomicEmbeddings.embedQuery(query); expect(embeddings).toHaveLength(768); }); test("NomicEmbeddings can embed with non-default model", async () => { const nomicEmbeddings = new NomicEmbeddings({ modelName: "nomic-embed-text-v1.5", }); const query = "hello world"; const embeddings = await nomicEmbeddings.embedQuery(query); expect(embeddings).toHaveLength(768); }); test("NomicEmbeddings can embed with non-default num of dimensions", async () => { const nomicEmbeddings = new NomicEmbeddings({ modelName: "nomic-embed-text-v1.5", dimensionality: 256, }); const query = "hello world"; const embeddings = await nomicEmbeddings.embedQuery(query); // Different num of dimensions from default expect(embeddings).toHaveLength(256); });
0
lc_public_repos/langchainjs/libs/langchain-nomic
lc_public_repos/langchainjs/libs/langchain-nomic/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
  preset: "ts-jest/presets/default-esm",
  // Custom environment that patches Float32Array (see jest.env.cjs).
  testEnvironment: "./jest.env.cjs",
  modulePathIgnorePatterns: ["dist/", "docs/"],
  // Map relative ESM imports ending in ".js" back to their extensionless
  // form so the TypeScript sources resolve during tests.
  moduleNameMapper: {
    "^(\\.{1,2}/.*)\\.js$": "$1",
  },
  // Transpile TS/TSX test files with SWC.
  transform: {
    "^.+\\.tsx?$": ["@swc/jest"],
  },
  transformIgnorePatterns: [
    "/node_modules/",
    "\\.pnp\\.[^\\/]+$",
    "./scripts/jest-setup-after-env.js",
  ],
  // Load environment variables from .env before tests run.
  setupFiles: ["dotenv/config"],
  testTimeout: 20_000,
  passWithNoTests: true,
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node");

/**
 * Jest node environment that shares the host realm's `Float32Array`
 * constructor with the sandboxed test realm.
 */
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(config, context) {
    // Make `instanceof Float32Array` return true in tests
    // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
    super(config, context);
    this.global.Float32Array = Float32Array;
  }
}

module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/README.md
# @langchain/groq

This package contains the LangChain.js integrations for Groq via the `groq-sdk` package.

## Installation

```bash npm2yarn
npm install @langchain/groq @langchain/core
```

## Chat models

This package adds support for Groq chat model inference.

Set the necessary environment variable (or pass it in via the constructor):

```bash
export GROQ_API_KEY=
```

```typescript
import { ChatGroq } from "@langchain/groq";
import { HumanMessage } from "@langchain/core/messages";

const model = new ChatGroq({
  apiKey: process.env.GROQ_API_KEY, // Default value.
});

const message = new HumanMessage("What color is the sky?");

const res = await model.invoke([message]);
```

## Development

To develop the `@langchain/groq` package, you'll need to follow these instructions:

### Install dependencies

```bash
yarn install
```

### Build the package

```bash
yarn build
```

Or from the repo root:

```bash
yarn build --filter=@langchain/groq
```

### Run tests

Test files should live within a `tests/` folder in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`:

```bash
$ yarn test
$ yarn test:int
```

### Lint & Format

Run the linter & formatter to ensure your code is up to standard:

```bash
yarn lint && yarn format
```

### Adding new entrypoints

If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//], entrypoints: { index: "index", }, tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/package.json
{ "name": "@langchain/groq", "version": "0.1.2", "description": "Groq integration for LangChain.js", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-groq/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/groq", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:standard": "yarn test:standard:unit && yarn test:standard:int", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/openai": "~0.3.0", "groq-sdk": "^0.5.0", "zod": "^3.22.4", 
"zod-to-json-schema": "^3.22.5" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:^", "@langchain/scripts": ">=0.1.0 <0.2.0", "@langchain/standard-tests": "0.0.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@types/uuid": "^9", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", "dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-groq/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-groq
lc_public_repos/langchainjs/libs/langchain-groq/src/index.ts
/**
 * Entrypoint for the `@langchain/groq` package.
 * Re-exports the Groq chat model integration.
 */
export * from "./chat_models.js";
0
lc_public_repos/langchainjs/libs/langchain-groq
lc_public_repos/langchainjs/libs/langchain-groq/src/chat_models.ts
import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { BaseChatModel, BaseChatModelCallOptions, BindToolsInput, LangSmithParams, type BaseChatModelParams, } from "@langchain/core/language_models/chat_models"; import * as ChatCompletionsAPI from "groq-sdk/resources/chat/completions"; import * as CompletionsAPI from "groq-sdk/resources/completions"; import { AIMessage, AIMessageChunk, BaseMessage, ChatMessage, ChatMessageChunk, HumanMessageChunk, SystemMessageChunk, ToolMessage, OpenAIToolCall, isAIMessage, BaseMessageChunk, UsageMetadata, } from "@langchain/core/messages"; import { ChatGeneration, ChatGenerationChunk, ChatResult, } from "@langchain/core/outputs"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { type OpenAICoreRequestOptions, type OpenAIClient, } from "@langchain/openai"; import { isZodSchema } from "@langchain/core/utils/types"; import Groq from "groq-sdk"; import { ChatCompletion, ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, CompletionCreateParams, } from "groq-sdk/resources/chat/completions"; import { Runnable, RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables"; import { BaseLanguageModelInput, FunctionDefinition, StructuredOutputMethodOptions, } from "@langchain/core/language_models/base"; import { BaseLLMOutputParser, JsonOutputParser, StructuredOutputParser, } from "@langchain/core/output_parsers"; import { JsonOutputKeyToolsParser, parseToolCall, makeInvalidToolCall, convertLangChainToolCallToOpenAI, } from "@langchain/core/output_parsers/openai_tools"; import { convertToOpenAITool } from "@langchain/core/utils/function_calling"; import { ToolCallChunk } from "@langchain/core/messages/tool"; type ChatGroqToolType = BindToolsInput | 
OpenAIClient.ChatCompletionTool; export interface ChatGroqCallOptions extends BaseChatModelCallOptions { headers?: Record<string, string>; tools?: ChatGroqToolType[]; tool_choice?: OpenAIClient.ChatCompletionToolChoiceOption | "any" | string; response_format?: CompletionCreateParams.ResponseFormat; } export interface ChatGroqInput extends BaseChatModelParams { /** * The Groq API key to use for requests. * @default process.env.GROQ_API_KEY */ apiKey?: string; /** * The name of the model to use. * Alias for `model` * @default "mixtral-8x7b-32768" */ modelName?: string; /** * The name of the model to use. * @default "mixtral-8x7b-32768" */ model?: string; /** * Up to 4 sequences where the API will stop generating further tokens. The * returned text will not contain the stop sequence. * Alias for `stopSequences` */ stop?: string | null | Array<string>; /** * Up to 4 sequences where the API will stop generating further tokens. The * returned text will not contain the stop sequence. */ stopSequences?: Array<string>; /** * Whether or not to stream responses. */ streaming?: boolean; /** * The temperature to use for sampling. * @default 0.7 */ temperature?: number; /** * The maximum number of tokens that the model can process in a single response. * This limits ensures computational efficiency and resource management. 
*/ maxTokens?: number; } type GroqRoleEnum = "system" | "assistant" | "user" | "function"; interface TokenUsage { completionTokens?: number; promptTokens?: number; totalTokens?: number; } export function messageToGroqRole(message: BaseMessage): GroqRoleEnum { const type = message._getType(); switch (type) { case "system": return "system"; case "ai": return "assistant"; case "human": return "user"; case "function": return "function"; case "tool": // Not yet supported as a type return "tool" as GroqRoleEnum; default: throw new Error(`Unknown message type: ${type}`); } } function convertMessagesToGroqParams( messages: BaseMessage[] ): Array<ChatCompletionsAPI.ChatCompletionMessage> { return messages.map((message): ChatCompletionsAPI.ChatCompletionMessage => { if (typeof message.content !== "string") { throw new Error("Non string message content not supported"); } // eslint-disable-next-line @typescript-eslint/no-explicit-any const completionParam: Record<string, any> = { role: messageToGroqRole(message), content: message.content, name: message.name, function_call: message.additional_kwargs.function_call, tool_calls: message.additional_kwargs.tool_calls, tool_call_id: (message as ToolMessage).tool_call_id, }; if (isAIMessage(message) && !!message.tool_calls?.length) { completionParam.tool_calls = message.tool_calls.map( convertLangChainToolCallToOpenAI ); } else { if (message.additional_kwargs.tool_calls != null) { completionParam.tool_calls = message.additional_kwargs.tool_calls; } if ((message as ToolMessage).tool_call_id != null) { completionParam.tool_call_id = (message as ToolMessage).tool_call_id; } } return completionParam as ChatCompletionsAPI.ChatCompletionMessage; }); } function groqResponseToChatMessage( message: ChatCompletionsAPI.ChatCompletionMessage, usageMetadata?: UsageMetadata ): BaseMessage { const rawToolCalls: OpenAIToolCall[] | undefined = message.tool_calls as | OpenAIToolCall[] | undefined; switch (message.role) { case "assistant": { const 
toolCalls = []; const invalidToolCalls = []; for (const rawToolCall of rawToolCalls ?? []) { try { toolCalls.push(parseToolCall(rawToolCall, { returnId: true })); // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (e: any) { invalidToolCalls.push(makeInvalidToolCall(rawToolCall, e.message)); } } return new AIMessage({ content: message.content || "", additional_kwargs: { tool_calls: rawToolCalls }, tool_calls: toolCalls, invalid_tool_calls: invalidToolCalls, usage_metadata: usageMetadata, }); } default: return new ChatMessage(message.content || "", message.role ?? "unknown"); } } function _convertDeltaToolCallToToolCallChunk( toolCalls?: ChatCompletionsAPI.ChatCompletionChunk.Choice.Delta.ToolCall[], index?: number ): ToolCallChunk[] | undefined { if (!toolCalls?.length) return undefined; return toolCalls.map((tc) => ({ id: tc.id, name: tc.function?.name, args: tc.function?.arguments, type: "tool_call_chunk", index, })); } function _convertDeltaToMessageChunk( // eslint-disable-next-line @typescript-eslint/no-explicit-any delta: Record<string, any>, index: number, xGroq?: ChatCompletionsAPI.ChatCompletionChunk.XGroq ): { message: BaseMessageChunk; toolCallData?: { id: string; name: string; index: number; type: "tool_call_chunk"; }[]; } { const { role } = delta; const content = delta.content ?? 
""; let additional_kwargs; if (delta.function_call) { additional_kwargs = { function_call: delta.function_call, }; } else if (delta.tool_calls) { additional_kwargs = { tool_calls: delta.tool_calls, }; } else { additional_kwargs = {}; } let usageMetadata: UsageMetadata | undefined; let groqMessageId: string | undefined; if (xGroq?.usage) { usageMetadata = { input_tokens: xGroq.usage.prompt_tokens, output_tokens: xGroq.usage.completion_tokens, total_tokens: xGroq.usage.total_tokens, }; groqMessageId = xGroq.id; } if (role === "user") { return { message: new HumanMessageChunk({ content }), }; } else if (role === "assistant") { const toolCallChunks = _convertDeltaToolCallToToolCallChunk( delta.tool_calls, index ); return { message: new AIMessageChunk({ content, additional_kwargs, tool_call_chunks: toolCallChunks ? toolCallChunks.map((tc) => ({ type: tc.type, args: tc.args, index: tc.index, })) : undefined, usage_metadata: usageMetadata, id: groqMessageId, }), toolCallData: toolCallChunks ? toolCallChunks.map((tc) => ({ id: tc.id ?? "", name: tc.name ?? "", index: tc.index ?? index, type: "tool_call_chunk", })) : undefined, }; } else if (role === "system") { return { message: new SystemMessageChunk({ content }), }; } else { return { message: new ChatMessageChunk({ content, role }), }; } } /** * Groq chat model integration. * * The Groq API is compatible to the OpenAI API with some limitations. View the * full API ref at: * @link {https://docs.api.groq.com/md/openai.oas.html} * * Setup: * Install `@langchain/groq` and set an environment variable named `GROQ_API_KEY`. * * ```bash * npm install @langchain/groq * export GROQ_API_KEY="your-api-key" * ``` * * ## [Constructor args](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html#constructor) * * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_groq.ChatGroqCallOptions.html) * * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc. * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below: * * ```typescript * // When calling `.bind`, call options should be passed via the first argument * const llmWithArgsBound = llm.bind({ * stop: ["\n"], * tools: [...], * }); * * // When calling `.bindTools`, call options should be passed via the second argument * const llmWithTools = llm.bindTools( * [...], * { * tool_choice: "auto", * } * ); * ``` * * ## Examples * * <details open> * <summary><strong>Instantiate</strong></summary> * * ```typescript * import { ChatGroq } from '@langchain/groq'; * * const llm = new ChatGroq({ * model: "mixtral-8x7b-32768", * temperature: 0, * // other params... * }); * ``` * </details> * * <br /> * * <details> * <summary><strong>Invoking</strong></summary> * * ```typescript * const input = `Translate "I love programming" into French.`; * * // Models also accept a list of chat messages or a formatted prompt * const result = await llm.invoke(input); * console.log(result); * ``` * * ```txt * AIMessage { * "content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! 
Let me know if you have any other questions.", * "additional_kwargs": {}, * "response_metadata": { * "tokenUsage": { * "completionTokens": 82, * "promptTokens": 20, * "totalTokens": 102 * }, * "finish_reason": "stop" * }, * "tool_calls": [], * "invalid_tool_calls": [] * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Streaming Chunks</strong></summary> * * ```typescript * for await (const chunk of await llm.stream(input)) { * console.log(chunk); * } * ``` * * ```txt * AIMessageChunk { * "content": "", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * AIMessageChunk { * "content": "The", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * AIMessageChunk { * "content": " French", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * AIMessageChunk { * "content": " translation", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * AIMessageChunk { * "content": " of", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * AIMessageChunk { * "content": " \"", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * AIMessageChunk { * "content": "I", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * AIMessageChunk { * "content": " love", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * 
"tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * ... * AIMessageChunk { * "content": ".", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": null * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * AIMessageChunk { * "content": "", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": "stop" * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Aggregate Streamed Chunks</strong></summary> * * ```typescript * import { AIMessageChunk } from '@langchain/core/messages'; * import { concat } from '@langchain/core/utils/stream'; * * const stream = await llm.stream(input); * let full: AIMessageChunk | undefined; * for await (const chunk of stream) { * full = !full ? chunk : concat(full, chunk); * } * console.log(full); * ``` * * ```txt * AIMessageChunk { * "content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.", * "additional_kwargs": {}, * "response_metadata": { * "finishReason": "stop" * }, * "tool_calls": [], * "tool_call_chunks": [], * "invalid_tool_calls": [] * } * ``` * </details> * * <br /> * * <details> * <summary><strong>Bind tools</strong></summary> * * ```typescript * import { z } from 'zod'; * * const llmForToolCalling = new ChatGroq({ * model: "llama3-groq-70b-8192-tool-use-preview", * temperature: 0, * // other params... * }); * * const GetWeather = { * name: "GetWeather", * description: "Get the current weather in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. 
San Francisco, CA") * }), * } * * const GetPopulation = { * name: "GetPopulation", * description: "Get the current population in a given location", * schema: z.object({ * location: z.string().describe("The city and state, e.g. San Francisco, CA") * }), * } * * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]); * const aiMsg = await llmWithTools.invoke( * "Which city is hotter today and which is bigger: LA or NY?" * ); * console.log(aiMsg.tool_calls); * ``` * * ```txt * [ * { * name: 'GetWeather', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call', * id: 'call_cd34' * }, * { * name: 'GetWeather', * args: { location: 'New York, NY' }, * type: 'tool_call', * id: 'call_68rf' * }, * { * name: 'GetPopulation', * args: { location: 'Los Angeles, CA' }, * type: 'tool_call', * id: 'call_f81z' * }, * { * name: 'GetPopulation', * args: { location: 'New York, NY' }, * type: 'tool_call', * id: 'call_8byt' * } * ] * ``` * </details> * * <br /> * * <details> * <summary><strong>Structured Output</strong></summary> * * ```typescript * import { z } from 'zod'; * * const Joke = z.object({ * setup: z.string().describe("The setup of the joke"), * punchline: z.string().describe("The punchline to the joke"), * rating: z.number().optional().describe("How funny the joke is, from 1 to 10") * }).describe('Joke to tell user.'); * * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: "Joke" }); * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); * console.log(jokeResult); * ``` * * ```txt * { * setup: "Why don't cats play poker in the wild?", * punchline: 'Because there are too many cheetahs.' 
* } * ``` * </details> * * <br /> */ export class ChatGroq extends BaseChatModel< ChatGroqCallOptions, AIMessageChunk > { lc_namespace = ["langchain", "chat_models", "groq"]; client: Groq; modelName = "mixtral-8x7b-32768"; model = "mixtral-8x7b-32768"; temperature = 0.7; stop?: string[]; stopSequences?: string[]; maxTokens?: number; streaming = false; apiKey?: string; static lc_name() { return "ChatGroq"; } _llmType() { return "groq"; } get lc_secrets(): { [key: string]: string } | undefined { return { apiKey: "GROQ_API_KEY", }; } lc_serializable = true; constructor(fields?: ChatGroqInput) { super(fields ?? {}); const apiKey = fields?.apiKey || getEnvironmentVariable("GROQ_API_KEY"); if (!apiKey) { throw new Error( `Groq API key not found. Please set the GROQ_API_KEY environment variable or provide the key into "apiKey"` ); } this.client = new Groq({ apiKey, dangerouslyAllowBrowser: true, }); this.apiKey = apiKey; this.temperature = fields?.temperature ?? this.temperature; this.modelName = fields?.model ?? fields?.modelName ?? this.model; this.model = this.modelName; this.streaming = fields?.streaming ?? this.streaming; this.stop = fields?.stopSequences ?? (typeof fields?.stop === "string" ? [fields.stop] : fields?.stop) ?? []; this.stopSequences = this.stop; this.maxTokens = fields?.maxTokens; } getLsParams(options: this["ParsedCallOptions"]): LangSmithParams { const params = this.invocationParams(options); return { ls_provider: "groq", ls_model_name: this.model, ls_model_type: "chat", ls_temperature: params.temperature ?? this.temperature, ls_max_tokens: params.max_tokens ?? 
this.maxTokens, ls_stop: options.stop, }; } async completionWithRetry( request: ChatCompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions ): Promise<AsyncIterable<ChatCompletionsAPI.ChatCompletionChunk>>; async completionWithRetry( request: ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions ): Promise<ChatCompletion>; async completionWithRetry( request: ChatCompletionCreateParams, options?: OpenAICoreRequestOptions ): Promise< AsyncIterable<ChatCompletionsAPI.ChatCompletionChunk> | ChatCompletion > { return this.caller.call(async () => this.client.chat.completions.create(request, options) ); } invocationParams( options: this["ParsedCallOptions"] ): ChatCompletionCreateParams { const params = super.invocationParams(options); if (options.tool_choice !== undefined) { params.tool_choice = options.tool_choice; } if (options.tools !== undefined) { params.tools = options.tools; } if (options.response_format !== undefined) { params.response_format = options.response_format; } return { ...params, stop: options.stop ?? 
this.stopSequences, model: this.model, temperature: this.temperature, max_tokens: this.maxTokens, }; } override bindTools( tools: ChatGroqToolType[], kwargs?: Partial<ChatGroqCallOptions> ): Runnable<BaseLanguageModelInput, AIMessageChunk, ChatGroqCallOptions> { return this.bind({ tools: tools.map((tool) => convertToOpenAITool(tool)), ...kwargs, }); } override async *_streamResponseChunks( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): AsyncGenerator<ChatGenerationChunk> { const params = this.invocationParams(options); const messagesMapped = convertMessagesToGroqParams(messages); const response = await this.completionWithRetry( { ...params, messages: messagesMapped, stream: true, }, { signal: options?.signal, headers: options?.headers, } ); let role = ""; const toolCall: { id: string; name: string; index: number; type: "tool_call_chunk"; }[] = []; // eslint-disable-next-line @typescript-eslint/no-explicit-any let responseMetadata: Record<string, any> | undefined; for await (const data of response) { responseMetadata = data; const choice = data?.choices[0]; if (!choice) { continue; } // The `role` field is populated in the first delta of the response // but is not present in subsequent deltas. Extract it when available. if (choice.delta?.role) { role = choice.delta.role; } const { message, toolCallData } = _convertDeltaToMessageChunk( { ...choice.delta, role, } ?? {}, choice.index, data.x_groq ); if (toolCallData) { // First, ensure the ID is not already present in toolCall const newToolCallData = toolCallData.filter((tc) => toolCall.every((t) => t.id !== tc.id) ); toolCall.push(...newToolCallData); // Yield here, ensuring the ID and name fields are only yielded once. yield new ChatGenerationChunk({ message: new AIMessageChunk({ content: "", tool_call_chunks: newToolCallData, }), text: "", }); } const chunk = new ChatGenerationChunk({ message, text: choice.delta.content ?? 
"", generationInfo: { finishReason: choice.finish_reason, }, }); yield chunk; void runManager?.handleLLMNewToken(chunk.text ?? ""); } if (responseMetadata) { if ("choices" in responseMetadata) { delete responseMetadata.choices; } yield new ChatGenerationChunk({ message: new AIMessageChunk({ content: "", response_metadata: responseMetadata, }), text: "", }); } if (options.signal?.aborted) { throw new Error("AbortError"); } } override async _generate( messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { if (this.streaming) { const tokenUsage: TokenUsage = {}; const stream = this._streamResponseChunks(messages, options, runManager); const finalChunks: Record<number, ChatGenerationChunk> = {}; for await (const chunk of stream) { const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0; if (finalChunks[index] === undefined) { finalChunks[index] = chunk; } else { finalChunks[index] = finalChunks[index].concat(chunk); } } const generations = Object.entries(finalChunks) .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10)) .map(([_, value]) => value); return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }; } else { return this._generateNonStreaming(messages, options, runManager); } } async _generateNonStreaming( messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun ): Promise<ChatResult> { const tokenUsage: TokenUsage = {}; const params = this.invocationParams(options); const messagesMapped = convertMessagesToGroqParams(messages); const data = await this.completionWithRetry( { ...params, stream: false, messages: messagesMapped, }, { signal: options?.signal, headers: options?.headers, } ); if ("usage" in data && data.usage) { const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage as CompletionsAPI.CompletionUsage; if (completionTokens) { 
tokenUsage.completionTokens = (tokenUsage.completionTokens ?? 0) + completionTokens; } if (promptTokens) { tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens; } if (totalTokens) { tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens; } } const generations: ChatGeneration[] = []; if ("choices" in data && data.choices) { for (const part of (data as ChatCompletion).choices) { const text = part.message?.content ?? ""; let usageMetadata: UsageMetadata | undefined; if (tokenUsage.totalTokens !== undefined) { usageMetadata = { input_tokens: tokenUsage.promptTokens ?? 0, output_tokens: tokenUsage.completionTokens ?? 0, total_tokens: tokenUsage.totalTokens, }; } const generation: ChatGeneration = { text, message: groqResponseToChatMessage( part.message ?? { role: "assistant" }, usageMetadata ), }; generation.generationInfo = { ...(part.finish_reason ? { finish_reason: part.finish_reason } : {}), ...(part.logprobs ? { logprobs: part.logprobs } : {}), }; generations.push(generation); } } return { generations, llmOutput: { tokenUsage }, }; } withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: StructuredOutputMethodOptions<false> ): Runnable<BaseLanguageModelInput, RunOutput>; withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: StructuredOutputMethodOptions<true> ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>; withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record<string, any> = Record<string, any> >( 
outputSchema: | z.ZodType<RunOutput> // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record<string, any>, config?: StructuredOutputMethodOptions<boolean> ): | Runnable<BaseLanguageModelInput, RunOutput> | Runnable< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput } > { // eslint-disable-next-line @typescript-eslint/no-explicit-any const schema: z.ZodType<RunOutput> | Record<string, any> = outputSchema; const name = config?.name; const method = config?.method; const includeRaw = config?.includeRaw; let functionName = name ?? "extract"; let outputParser: BaseLLMOutputParser<RunOutput>; let llm: Runnable<BaseLanguageModelInput>; if (method === "jsonMode") { llm = this.bind({ response_format: { type: "json_object" }, }); if (isZodSchema(schema)) { outputParser = StructuredOutputParser.fromZodSchema(schema); } else { outputParser = new JsonOutputParser<RunOutput>(); } } else { if (isZodSchema(schema)) { const asJsonSchema = zodToJsonSchema(schema); llm = this.bind({ tools: [ { type: "function" as const, function: { name: functionName, description: asJsonSchema.description, parameters: asJsonSchema, }, }, ], tool_choice: { type: "function" as const, function: { name: functionName, }, }, }); outputParser = new JsonOutputKeyToolsParser({ returnSingle: true, keyName: functionName, zodSchema: schema, }); } else { let openAIFunctionDefinition: FunctionDefinition; if ( typeof schema.name === "string" && typeof schema.parameters === "object" && schema.parameters != null ) { openAIFunctionDefinition = schema as FunctionDefinition; functionName = schema.name; } else { functionName = schema.title ?? functionName; openAIFunctionDefinition = { name: functionName, description: schema.description ?? 
"", parameters: schema, }; } llm = this.bind({ tools: [ { type: "function" as const, function: openAIFunctionDefinition, }, ], tool_choice: { type: "function" as const, function: { name: functionName, }, }, }); outputParser = new JsonOutputKeyToolsParser<RunOutput>({ returnSingle: true, keyName: functionName, }); } } if (!includeRaw) { return llm.pipe(outputParser).withConfig({ runName: "ChatGroqStructuredOutput", }); } const parserAssign = RunnablePassthrough.assign({ // eslint-disable-next-line @typescript-eslint/no-explicit-any parsed: (input: any, config) => outputParser.invoke(input.raw, config), }); const parserNone = RunnablePassthrough.assign({ parsed: () => null, }); const parsedWithFallback = parserAssign.withFallbacks({ fallbacks: [parserNone], }); return RunnableSequence.from< BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput } >([ { raw: llm, }, parsedWithFallback, ]).withConfig({ runName: "ChatGroqStructuredOutput", }); } }
0
lc_public_repos/langchainjs/libs/langchain-groq/src
lc_public_repos/langchainjs/libs/langchain-groq/src/tests/agent.int.test.ts
// import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents"; // import { ChatPromptTemplate } from "@langchain/core/prompts"; // import { DynamicStructuredTool } from "@langchain/core/tools"; // import { z } from "zod"; // import { ChatGroq } from "../chat_models.js"; // TODO: This test breaks CI build due to dependencies. Figure out a way around it. test.skip("Model is compatible with OpenAI tools agent and Agent Executor", async () => { // const llm = new ChatGroq({ // temperature: 0, // modelName: "mixtral-8x7b-32768", // }); // const prompt = ChatPromptTemplate.fromMessages([ // [ // "system", // "You are an agent capable of retrieving current weather information.", // ], // ["human", "{input}"], // ["placeholder", "{agent_scratchpad}"], // ]); // const currentWeatherTool = new DynamicStructuredTool({ // name: "get_current_weather", // description: "Get the current weather in a given location", // schema: z.object({ // location: z // .string() // .describe("The city and state, e.g. San Francisco, CA"), // }), // func: async () => Promise.resolve("28 °C"), // }); // const agent = await createOpenAIToolsAgent({ // llm, // tools: [currentWeatherTool], // prompt, // }); // const agentExecutor = new AgentExecutor({ // agent, // tools: [currentWeatherTool], // }); // const input = "What's the weather like in Paris?"; // const { output } = await agentExecutor.invoke({ input }); // console.log(output); // expect(output).toBeDefined(); // expect(output).toContain("The current temperature in Paris is 28 °C"); });
0
lc_public_repos/langchainjs/libs/langchain-groq/src
lc_public_repos/langchainjs/libs/langchain-groq/src/tests/chat_models.standard.int.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { ChatModelIntegrationTests } from "@langchain/standard-tests"; import { AIMessageChunk } from "@langchain/core/messages"; import { ChatGroq, ChatGroqCallOptions } from "../chat_models.js"; class ChatGroqStandardIntegrationTests extends ChatModelIntegrationTests< ChatGroqCallOptions, AIMessageChunk > { constructor() { if (!process.env.GROQ_API_KEY) { throw new Error( "Can not run Groq integration tests because GROQ_API_KEY is not set" ); } super({ Cls: ChatGroq, chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: { model: "llama-3.1-70b-versatile", maxRetries: 1, }, }); } async testToolMessageHistoriesListContent() { this.skipTestMessage( "testToolMessageHistoriesListContent", "ChatGroq", "Complex message types not properly implemented" ); } async testCacheComplexMessageTypes() { this.skipTestMessage( "testCacheComplexMessageTypes", "ChatGroq", "Complex message types not properly implemented" ); } async testStreamTokensWithToolCalls() { this.skipTestMessage( "testStreamTokensWithToolCalls", "ChatGroq", "API does not consistently call tools. TODO: re-write with better prompting for tool call." ); } async testWithStructuredOutputIncludeRaw() { this.skipTestMessage( "testWithStructuredOutputIncludeRaw", "ChatGroq", "API does not consistently call tools. TODO: re-write with better prompting for tool call." ); } } const testClass = new ChatGroqStandardIntegrationTests(); test("ChatGroqStandardIntegrationTests", async () => { const testResults = await testClass.runTests(); expect(testResults).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-groq/src
lc_public_repos/langchainjs/libs/langchain-groq/src/tests/chat_models.standard.test.ts
/* eslint-disable no-process-env */ import { test, expect } from "@jest/globals"; import { ChatModelUnitTests } from "@langchain/standard-tests"; import { AIMessageChunk } from "@langchain/core/messages"; import { ChatGroq, ChatGroqCallOptions } from "../chat_models.js"; class ChatGroqStandardUnitTests extends ChatModelUnitTests< ChatGroqCallOptions, AIMessageChunk > { constructor() { super({ Cls: ChatGroq, chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: {}, }); // This must be set so method like `.bindTools` or `.withStructuredOutput` // which we call after instantiating the model will work. // (constructor will throw if API key is not set) process.env.GROQ_API_KEY = "test"; } testChatModelInitApiKey() { // Unset the API key env var here so this test can properly check // the API key class arg. process.env.GROQ_API_KEY = ""; super.testChatModelInitApiKey(); // Re-set the API key env var here so other tests can run properly. process.env.GROQ_API_KEY = "test"; } } const testClass = new ChatGroqStandardUnitTests(); test("ChatGroqStandardUnitTests", () => { const testResults = testClass.runTests(); expect(testResults).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-groq/src
lc_public_repos/langchainjs/libs/langchain-groq/src/tests/chat_models.int.test.ts
import { test } from "@jest/globals"; import { AIMessage, AIMessageChunk, HumanMessage, ToolMessage, } from "@langchain/core/messages"; import { tool } from "@langchain/core/tools"; import { z } from "zod"; import { concat } from "@langchain/core/utils/stream"; import { ChatGroq } from "../chat_models.js"; test("invoke", async () => { const chat = new ChatGroq({ maxRetries: 0, }); const message = new HumanMessage("What color is the sky?"); const res = await chat.invoke([message]); // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); test("invoke with stop sequence", async () => { const chat = new ChatGroq({ maxRetries: 0, }); const message = new HumanMessage("Count to ten."); const res = await chat.bind({ stop: ["5", "five"] }).invoke([message]); // console.log({ res }); expect((res.content as string).toLowerCase()).not.toContain("6"); expect((res.content as string).toLowerCase()).not.toContain("six"); }); test("invoke should respect passed headers", async () => { const chat = new ChatGroq({ maxRetries: 0, }); const message = new HumanMessage("Count to ten."); await expect(async () => { await chat.invoke([message], { headers: { Authorization: "badbadbad" }, }); }).rejects.toThrowError(); }); test("stream should respect passed headers", async () => { const chat = new ChatGroq({ maxRetries: 0, }); const message = new HumanMessage("Count to ten."); await expect(async () => { await chat.stream([message], { headers: { Authorization: "badbadbad" }, }); }).rejects.toThrowError(); }); test("generate", async () => { const chat = new ChatGroq(); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message]]); // console.log(JSON.stringify(res, null, 2)); expect(res.generations[0][0].text.length).toBeGreaterThan(10); }); test("streaming", async () => { const chat = new ChatGroq(); const message = new HumanMessage("What color is the sky?"); const stream = await chat.stream([message]); let iters = 0; let finalRes = ""; for await 
(const chunk of stream) { iters += 1; finalRes += chunk.content; } // console.log({ finalRes, iters }); expect(iters).toBeGreaterThan(1); }); test("invoke with bound tools", async () => { const chat = new ChatGroq({ maxRetries: 0, modelName: "mixtral-8x7b-32768", }); const message = new HumanMessage("What is the current weather in Hawaii?"); const res = await chat .bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }) .invoke([message]); // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toEqual(1); expect( JSON.parse( res.additional_kwargs?.tool_calls?.[0].function.arguments ?? "{}" ) ).toEqual(res.tool_calls?.[0].args); }); test("stream with bound tools, yielding a single chunk", async () => { const chat = new ChatGroq({ maxRetries: 0, }); const message = new HumanMessage("What is the current weather in Hawaii?"); const stream = await chat .bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. 
San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }) .stream([message]); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var for await (const chunk of stream) { // console.log(JSON.stringify(chunk)); } }); test("Few shotting with tool calls", async () => { const chat = new ChatGroq({ modelName: "mixtral-8x7b-32768", temperature: 0, }).bind({ tools: [ { type: "function", function: { name: "get_current_weather", description: "Get the current weather in a given location", parameters: { type: "object", properties: { location: { type: "string", description: "The city and state, e.g. San Francisco, CA", }, unit: { type: "string", enum: ["celsius", "fahrenheit"] }, }, required: ["location"], }, }, }, ], tool_choice: "auto", }); const res = await chat.invoke([ new HumanMessage("What is the weather in SF?"), new AIMessage({ content: "", tool_calls: [ { id: "12345", name: "get_current_weather", args: { location: "SF", }, }, ], }), new ToolMessage({ tool_call_id: "12345", content: "It is currently 24 degrees with hail in SF.", }), new AIMessage("It is currently 24 degrees in SF with hail in SF."), new HumanMessage("What did you say the weather was?"), ]); // console.log(res); expect(res.content).toContain("24"); }); test("Groq can stream tool calls", async () => { const model = new ChatGroq({ model: "llama-3.1-70b-versatile", temperature: 0, }); const weatherTool = tool((_) => "The temperature is 24 degrees with hail.", { name: "get_current_weather", schema: z.object({ location: z .string() .describe("The location to get the current weather for."), }), description: "Get the current weather in a given location.", }); const modelWithTools = model.bindTools([weatherTool]); const stream = await modelWithTools.stream( "What is the weather in San Francisco?" 
); let finalMessage: AIMessageChunk | undefined; for await (const chunk of stream) { finalMessage = !finalMessage ? chunk : concat(finalMessage, chunk); } expect(finalMessage).toBeDefined(); if (!finalMessage) return; expect(finalMessage.tool_calls?.[0]).toBeDefined(); if (!finalMessage.tool_calls?.[0]) return; expect(finalMessage.tool_calls?.[0].name).toBe("get_current_weather"); expect(finalMessage.tool_calls?.[0].args).toHaveProperty("location"); expect(finalMessage.tool_calls?.[0].id).toBeDefined(); });
0
lc_public_repos/langchainjs/libs/langchain-groq/src
lc_public_repos/langchainjs/libs/langchain-groq/src/tests/chat_models.test.ts
/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";
import { ChatGroq } from "../chat_models.js";

// The API key must serialize as a secret *reference* (["GROQ_API_KEY"]),
// never as its raw value, so dumps are safe to persist and share.
test("Serialization", () => {
  const model = new ChatGroq({
    apiKey: "foo",
  });
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","groq","ChatGroq"],"kwargs":{"api_key":{"lc":1,"type":"secret","id":["GROQ_API_KEY"]}}}`
  );
});

// Same secret-reference expectation when the key is picked up from the
// environment instead of being passed to the constructor.
test("Serialization with no params", () => {
  process.env.GROQ_API_KEY = "foo";
  const model = new ChatGroq();
  expect(JSON.stringify(model)).toEqual(
    `{"lc":1,"type":"constructor","id":["langchain","chat_models","groq","ChatGroq"],"kwargs":{"api_key":{"lc":1,"type":"secret","id":["GROQ_API_KEY"]}}}`
  );
});
0
lc_public_repos/langchainjs/libs/langchain-groq/src
lc_public_repos/langchainjs/libs/langchain-groq/src/tests/chat_models_structured_output.int.test.ts
import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { AIMessage } from "@langchain/core/messages"; import { ChatGroq } from "../chat_models.js"; test("withStructuredOutput zod schema function calling", async () => { const model = new ChatGroq({ temperature: 0, modelName: "mixtral-8x7b-32768", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are VERY bad at math and must always use a calculator."], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput zod schema JSON mode", async () => { const model = new ChatGroq({ temperature: 0, modelName: "mixtral-8x7b-32768", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", method: "jsonMode", } ); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! 
What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema function calling", async () => { const model = new ChatGroq({ temperature: 0, modelName: "mixtral-8x7b-32768", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( zodToJsonSchema(calculatorSchema), { name: "calculator", } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", `You are VERY bad at math and must always use a calculator.`], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput OpenAI function definition function calling", async () => { const model = new ChatGroq({ temperature: 0, modelName: "mixtral-8x7b-32768", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput({ name: "calculator", parameters: zodToJsonSchema(calculatorSchema), }); const prompt = ChatPromptTemplate.fromMessages([ "system", `You are VERY bad at math and must always use a calculator.`, "human", "Please help me!! 
What is 2 + 2?", ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema JSON mode", async () => { const model = new ChatGroq({ temperature: 0, modelName: "mixtral-8x7b-32768", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( zodToJsonSchema(calculatorSchema), { name: "calculator", method: "jsonMode", } ); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput JSON schema", async () => { const model = new ChatGroq({ temperature: 0, modelName: "mixtral-8x7b-32768", }); const jsonSchema = { title: "calculator", description: "A simple calculator", type: "object", properties: { operation: { type: "string", enum: ["add", "subtract", "multiply", "divide"], }, number1: { type: "number" }, number2: { type: "number" }, }, }; const modelWithStructuredOutput = model.withStructuredOutput(jsonSchema); const prompt = ChatPromptTemplate.fromMessages([ [ "system", `You are VERY bad at math and must always use a calculator. 
Respond with a JSON object containing three keys: 'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', 'number1': the first number to operate on, 'number2': the second number to operate on. `, ], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); }); test("withStructuredOutput includeRaw true", async () => { const model = new ChatGroq({ temperature: 0, modelName: "mixtral-8x7b-32768", }); const calculatorSchema = z.object({ operation: z.enum(["add", "subtract", "multiply", "divide"]), number1: z.number(), number2: z.number(), }); const modelWithStructuredOutput = model.withStructuredOutput( calculatorSchema, { name: "calculator", includeRaw: true, } ); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are VERY bad at math and must always use a calculator."], ["human", "Please help me!! What is 2 + 2?"], ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); // console.log(result); expect("parsed" in result).toBe(true); // Need to make TS happy :) if (!("parsed" in result)) { throw new Error("parsed not in result"); } const { parsed } = result; expect("operation" in parsed).toBe(true); expect("number1" in parsed).toBe(true); expect("number2" in parsed).toBe(true); expect("raw" in result).toBe(true); // Need to make TS happy :) if (!("raw" in result)) { throw new Error("raw not in result"); } const { raw } = result as { raw: AIMessage }; expect(raw.additional_kwargs.tool_calls?.length).toBeGreaterThan(0); expect(raw.additional_kwargs.tool_calls?.[0].function.name).toBe( "calculator" ); expect( "operation" in JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? 
"") ).toBe(true); expect( "number1" in JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? "") ).toBe(true); expect( "number2" in JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? "") ).toBe(true); });
0
lc_public_repos/langchainjs/libs/langchain-groq
lc_public_repos/langchainjs/libs/langchain-groq/scripts/jest-setup-after-env.js
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { afterAll, jest } from "@jest/globals"; afterAll(awaitAllCallbacks); // Allow console.log to be disabled in tests if (process.env.DISABLE_CONSOLE_LOGS === "true") { console.log = jest.fn(); }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": ["ES2021", "ES2022.Object", "DOM"], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "docs"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/LICENSE
The MIT License Copyright (c) 2023 LangChain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
  preset: "ts-jest/presets/default-esm",
  // Custom node environment (jest.env.cjs) that patches Float32Array.
  testEnvironment: "./jest.env.cjs",
  modulePathIgnorePatterns: ["dist/", "docs/"],
  // Strip the ".js" suffix from relative ESM imports so the matching
  // TypeScript source files resolve under ts-jest.
  moduleNameMapper: {
    "^(\\.{1,2}/.*)\\.js$": "$1",
  },
  transform: {
    "^.+\\.tsx?$": ["@swc/jest"],
  },
  transformIgnorePatterns: [
    "/node_modules/",
    "\\.pnp\\.[^\\/]+$",
    "./scripts/jest-setup-after-env.js",
  ],
  // Load environment variables from a .env file before tests run.
  setupFiles: ["dotenv/config"],
  testTimeout: 20_000,
  passWithNoTests: true,
};
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node");

/**
 * Node test environment that shares the host realm's Float32Array with the
 * test sandbox, so cross-realm `instanceof Float32Array` checks succeed.
 */
class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
  constructor(config, context) {
    // Make `instanceof Float32Array` return true in tests
    // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
    super(config, context);
    this.global.Float32Array = Float32Array;
  }
}

module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/README.md
# LangChain google-webauth This package contains resources to access Google AI/ML models and other Google services. Authorization to these services uses either an API Key or service account credentials that are included in an environment variable. If you are running this on the Google Cloud Platform, or in a way where service account credentials can be stored on a file system, consider using the @langchain/google-gauth package *instead*. You do not need to use both packages. See the section on **Authorization** below. ## Installation ```bash $ yarn add @langchain/google-webauth ``` ## Authorization Authorization is either done through the use of an API Key, if it is supported for the service you're using, or a Google Cloud Service Account. To handle service accounts, this package uses the `web-auth-library` package, and you may wish to consult the documentation for that library about how it does so. But in short, classes in this package will use credentials from the first of the following that applies: 1. An API Key that is passed to the constructor using the `apiKey` attribute 2. Credentials that are passed to the constructor using the `authInfo` attribute 3. An API Key that is set in the environment variable `API_KEY` 4. The Service Account credentials that are saved directly into the `GOOGLE_WEB_CREDENTIALS` 5. The Service Account credentials that are saved directly into the `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` (deprecated)
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof"], ignorePatterns: [ ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/langchain.config.js
import { resolve, dirname } from "node:path"; import { fileURLToPath } from "node:url"; /** * @param {string} relativePath * @returns {string} */ function abs(relativePath) { return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); } export const config = { internals: [/node\:/, /@langchain\/core\//, /web-auth-library\/google/, /@langchain\/google-common/], entrypoints: { index: "index", utils: "utils", types: "types", }, tsConfigPath: resolve("./tsconfig.json"), cjsSource: "./dist-cjs", cjsDestination: "./dist", abs, }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/package.json
{ "name": "@langchain/google-webauth", "version": "0.1.3", "description": "Web-based authentication support for Google services", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-google-webauth/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/google-webauth", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "clean": "rm -rf .turbo dist/", "prepack": "yarn build", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/google-common": "~0.1.3", "web-auth-library": "^1.0.3" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", "@typescript-eslint/eslint-plugin": "^6.12.0", 
"@typescript-eslint/parser": "^6.12.0", "dotenv": "^16.3.1", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0", "zod": "^3.23.8" }, "publishConfig": { "access": "public" }, "exports": { ".": { "types": { "import": "./index.d.ts", "require": "./index.d.cts", "default": "./index.d.ts" }, "import": "./index.js", "require": "./index.cjs" }, "./utils": { "types": { "import": "./utils.d.ts", "require": "./utils.d.cts", "default": "./utils.d.ts" }, "import": "./utils.js", "require": "./utils.cjs" }, "./types": { "types": { "import": "./types.d.ts", "require": "./types.d.cts", "default": "./types.d.ts" }, "import": "./types.js", "require": "./types.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "index.cjs", "index.js", "index.d.ts", "index.d.cts", "utils.cjs", "utils.js", "utils.d.ts", "utils.d.cts", "types.cjs", "types.js", "types.d.ts", "types.d.cts" ] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": ["node_modules", "dist", "docs", "**/tests"] }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs/libs
lc_public_repos/langchainjs/libs/langchain-google-webauth/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/libs/langchain-google-webauth
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/auth.ts
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import {
  ensureAuthOptionScopes,
  GoogleAbstractedClientOps,
  GoogleAbstractedFetchClient,
  GoogleBaseLLMInput,
} from "@langchain/google-common";
import {
  getAccessToken,
  getCredentials,
  Credentials,
} from "web-auth-library/google";

/**
 * Auth options for web environments: credentials are supplied directly
 * (as a JSON string or a parsed Credentials object) rather than being
 * read from a file system.
 */
export type WebGoogleAuthOptions = {
  credentials: string | Credentials;
  scope?: string | string[];
  accessToken?: string;
};

/**
 * Google API client that authenticates via `web-auth-library`, making it
 * usable in environments without file-system access to credentials.
 */
export class WebGoogleAuth extends GoogleAbstractedFetchClient {
  options: WebGoogleAuthOptions;

  constructor(fields: GoogleBaseLLMInput<WebGoogleAuthOptions> | undefined) {
    super();
    const authOptions = fields?.authOptions;
    const accessToken = authOptions?.accessToken;
    // Explicitly-passed credentials win; otherwise fall back to the env
    // vars (GOOGLE_VERTEX_AI_WEB_CREDENTIALS is the deprecated name).
    const credentials =
      authOptions?.credentials ??
      getEnvironmentVariable("GOOGLE_WEB_CREDENTIALS") ??
      getEnvironmentVariable("GOOGLE_VERTEX_AI_WEB_CREDENTIALS");
    if (credentials === undefined) {
      throw new Error(
        `Credentials not found. Please set the GOOGLE_WEB_CREDENTIALS environment variable or pass credentials into "authOptions.credentials".`
      );
    }
    this.options = ensureAuthOptionScopes<WebGoogleAuthOptions>(
      { ...authOptions, accessToken, credentials },
      "scope",
      fields?.platformType
    );
  }

  get clientType(): string {
    return "webauth";
  }

  /** The project id is taken from the service-account credentials. */
  async getProjectId(): Promise<string> {
    return getCredentials(this.options.credentials).project_id;
  }

  /** Issue a request, minting an access token first if none was supplied. */
  async request(opts: GoogleAbstractedClientOps): Promise<unknown> {
    const token =
      this.options.accessToken !== undefined
        ? this.options.accessToken
        : await getAccessToken(this.options);
    return this._request(opts?.url, opts, {
      Authorization: `Bearer ${token}`,
    });
  }
}
0
lc_public_repos/langchainjs/libs/langchain-google-webauth
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/types.ts
export * from "@langchain/google-common/types";
0
lc_public_repos/langchainjs/libs/langchain-google-webauth
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/llms.ts
import {
  GoogleAbstractedClient,
  GoogleBaseLLM,
  GoogleBaseLLMInput,
} from "@langchain/google-common";
import { WebGoogleAuth, WebGoogleAuthOptions } from "./auth.js";

/**
 * Input to the Google LLM class, using web-based authentication options.
 */
export interface GoogleLLMInput
  extends GoogleBaseLLMInput<WebGoogleAuthOptions> {}

/**
 * Google LLM integration backed by web-auth-library credentials.
 */
export class GoogleLLM
  extends GoogleBaseLLM<WebGoogleAuthOptions>
  implements GoogleLLMInput
{
  // Stable identifier used for tracing/serialization.
  static lc_name() {
    return "GoogleLLM";
  }

  // Enables LangChain serialization for instances of this class.
  lc_serializable = true;

  constructor(fields?: GoogleLLMInput) {
    super(fields);
  }

  // Supplies the authenticated client the base class uses for API calls;
  // here it is backed by WebGoogleAuth (see ./auth.js).
  buildAbstractedClient(
    fields: GoogleBaseLLMInput<WebGoogleAuthOptions> | undefined
  ): GoogleAbstractedClient {
    return new WebGoogleAuth(fields);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-google-webauth
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/index.ts
// Public package entrypoint: chat, completion, and embeddings integrations.
export * from "./chat_models.js";
export * from "./llms.js";
export * from "./embeddings.js";
0
lc_public_repos/langchainjs/libs/langchain-google-webauth
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/chat_models.ts
import {
  ChatGoogleBase,
  ChatGoogleBaseInput,
  GoogleAbstractedClient,
  GoogleBaseLLMInput,
} from "@langchain/google-common";
import { WebGoogleAuth, WebGoogleAuthOptions } from "./auth.js";

/**
 * Input to the chat model class, using web-based authentication options.
 */
export interface ChatGoogleInput
  extends ChatGoogleBaseInput<WebGoogleAuthOptions> {}

/**
 * Google chat model integration backed by web-auth-library credentials.
 */
export class ChatGoogle
  extends ChatGoogleBase<WebGoogleAuthOptions>
  implements ChatGoogleInput
{
  // Stable identifier used for tracing/serialization.
  static lc_name() {
    return "ChatGoogle";
  }

  constructor(fields?: ChatGoogleInput) {
    super(fields);
  }

  // Supplies the authenticated client the base class uses for API calls;
  // here it is backed by WebGoogleAuth (see ./auth.js).
  buildAbstractedClient(
    fields: GoogleBaseLLMInput<WebGoogleAuthOptions> | undefined
  ): GoogleAbstractedClient {
    return new WebGoogleAuth(fields);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-google-webauth
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/embeddings.ts
import {
  GoogleAbstractedClient,
  GoogleConnectionParams,
  BaseGoogleEmbeddings,
  BaseGoogleEmbeddingsParams,
} from "@langchain/google-common";
import { WebGoogleAuth, WebGoogleAuthOptions } from "./auth.js";

/**
 * Input to the embeddings class, using web-based authentication options.
 */
export interface GoogleEmbeddingsInput
  extends BaseGoogleEmbeddingsParams<WebGoogleAuthOptions> {}

/**
 * Google embeddings integration backed by web-auth-library credentials.
 */
export class GoogleEmbeddings
  extends BaseGoogleEmbeddings<WebGoogleAuthOptions>
  implements GoogleEmbeddingsInput
{
  // Stable identifier used for tracing/serialization.
  static lc_name() {
    return "GoogleEmbeddings";
  }

  // Enables LangChain serialization for instances of this class.
  lc_serializable = true;

  // Note: unlike the LLM and chat classes, fields are required here.
  constructor(fields: GoogleEmbeddingsInput) {
    super(fields);
  }

  // Supplies the authenticated client the base class uses for API calls.
  buildAbstractedClient(
    fields?: GoogleConnectionParams<WebGoogleAuthOptions>
  ): GoogleAbstractedClient {
    return new WebGoogleAuth(fields);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-google-webauth
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/media.ts
import {
  GoogleAbstractedClient,
  GoogleBaseLLMInput,
} from "@langchain/google-common";
import {
  BlobStoreAIStudioFileBase,
  BlobStoreAIStudioFileBaseParams,
  BlobStoreGoogleCloudStorageBase,
  BlobStoreGoogleCloudStorageBaseParams,
} from "@langchain/google-common/experimental/media";
import { WebGoogleAuth, WebGoogleAuthOptions } from "./auth.js";

/**
 * Configuration for {@link BlobStoreGoogleCloudStorage}: the shared GCS blob
 * store options, parameterized with web-based authentication options.
 */
export interface BlobStoreGoogleCloudStorageParams
  extends BlobStoreGoogleCloudStorageBaseParams<WebGoogleAuthOptions> {}

/**
 * Google Cloud Storage blob store.
 *
 * Store/fetch behavior lives in the base class; this subclass only supplies
 * the web-compatible authentication client (`WebGoogleAuth`).
 */
export class BlobStoreGoogleCloudStorage extends BlobStoreGoogleCloudStorageBase<WebGoogleAuthOptions> {
  // Hook called by the base class to obtain the transport/auth client.
  // NOTE(review): the base class here names this hook `buildClient`, unlike
  // `buildAbstractedClient` used elsewhere in this package — confirm against
  // the base class declarations before renaming.
  buildClient(
    fields?: GoogleBaseLLMInput<WebGoogleAuthOptions>
  ): GoogleAbstractedClient {
    return new WebGoogleAuth(fields);
  }
}

/**
 * Configuration for {@link BlobStoreAIStudioFile}: the shared AI Studio file
 * store options, parameterized with web-based authentication options.
 */
export interface BlobStoreAIStudioFileParams
  extends BlobStoreAIStudioFileBaseParams<WebGoogleAuthOptions> {}

/**
 * AI Studio file blob store.
 *
 * Store/fetch behavior lives in the base class; this subclass only supplies
 * the web-compatible authentication client (`WebGoogleAuth`).
 */
export class BlobStoreAIStudioFile extends BlobStoreAIStudioFileBase<WebGoogleAuthOptions> {
  // Hook called by the base class to obtain the transport/auth client.
  buildAbstractedClient(
    fields?: BlobStoreAIStudioFileParams
  ): GoogleAbstractedClient {
    return new WebGoogleAuth(fields);
  }
}
0
lc_public_repos/langchainjs/libs/langchain-google-webauth
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/utils.ts
// Re-export the shared Google utility helpers so consumers of this package
// can import them directly without depending on @langchain/google-common.
export * from "@langchain/google-common/utils";
0
lc_public_repos/langchainjs/libs/langchain-google-webauth/src
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts
/* eslint-disable import/no-extraneous-dependencies */
// Integration tests for ChatGoogle with web-based auth.
// NOTE(review): these tests call live Google APIs — presumably credentials
// (API key or similar) must be configured in the environment; confirm before
// running.
import { StructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import { test } from "@jest/globals";
import {
  AIMessage,
  AIMessageChunk,
  BaseMessage,
  BaseMessageChunk,
  HumanMessage,
  HumanMessageChunk,
  MessageContentComplex,
  SystemMessage,
  ToolMessage,
} from "@langchain/core/messages";
import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { ChatPromptValue } from "@langchain/core/prompt_values";
import {
  MediaManager,
  SimpleWebBlobStore,
} from "@langchain/google-common/experimental/utils/media_core";
import { ChatGoogle } from "../chat_models.js";
import { BlobStoreAIStudioFile } from "../media.js";

// Minimal structured tool used by the tool-calling tests below.
class WeatherTool extends StructuredTool {
  schema = z.object({
    locations: z
      .array(z.object({ name: z.string() }))
      .describe("The name of cities to get the weather for."),
  });

  description =
    "Get the weather of a specific location and return the temperature in Celsius.";

  name = "get_weather";

  // Returns a canned answer; no real weather lookup is performed.
  async _call(input: z.infer<typeof this.schema>) {
    console.log(`WeatherTool called with input: ${input}`);
    return `The weather in ${JSON.stringify(input.locations)} is 25°C`;
  }
}

describe("Google APIKey Chat", () => {
  // Basic single-turn invoke: only checks that an AI message with non-empty
  // content comes back; exact content checks are left commented out.
  test("invoke", async () => {
    const model = new ChatGoogle();
    try {
      const res = await model.invoke("What is 1 + 1?");
      console.log(res);
      expect(res).toBeDefined();
      expect(res._getType()).toEqual("ai");
      const aiMessage = res as AIMessageChunk;
      console.log(aiMessage);
      expect(aiMessage.content).toBeDefined();
      expect(aiMessage.content.length).toBeGreaterThan(0);
      expect(aiMessage.content[0]).toBeDefined();
      // const content = aiMessage.content[0] as MessageContentComplex;
      // expect(content).toHaveProperty("type");
      // expect(content.type).toEqual("text");
      // const textContent = content as MessageContentText;
      // expect(textContent.text).toBeDefined();
      // expect(textContent.text).toEqual("2");
    } catch (e) {
      // Log before rethrowing so the raw API error shows up in test output.
      console.error(e);
      throw e;
    }
  });

  // Multi-turn generation via the (legacy) predictMessages API, seeding the
  // conversation with a system prompt and a prior AI turn.
  test("generate", async () => {
    const model = new ChatGoogle();
    try {
      const messages: BaseMessage[] = [
        new SystemMessage(
          "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
        ),
        new HumanMessage("Flip it"),
        new AIMessage("T"),
        new HumanMessage("Flip the coin again"),
      ];
      const res = await model.predictMessages(messages);
      expect(res).toBeDefined();
      expect(res._getType()).toEqual("ai");
      const aiMessage = res as AIMessageChunk;
      expect(aiMessage.content).toBeDefined();
      expect(aiMessage.content.length).toBeGreaterThan(0);
      expect(aiMessage.content[0]).toBeDefined();
      console.log(aiMessage);
      // const content = aiMessage.content[0] as MessageContentComplex;
      // expect(content).toHaveProperty("type");
      // expect(content.type).toEqual("text");
      // const textContent = content as MessageContentText;
      // expect(textContent.text).toBeDefined();
      // expect(["H", "T"]).toContainEqual(textContent.text);
    } catch (e) {
      console.error(e);
      throw e;
    }
  });

  // Streaming: collects every chunk and asserts at least one AI chunk
  // arrives; content is only logged, not asserted.
  test("stream", async () => {
    const model = new ChatGoogle();
    try {
      const input: BaseLanguageModelInput = new ChatPromptValue([
        new SystemMessage(
          "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
        ),
        new HumanMessage("Flip it"),
        new AIMessage("T"),
        new HumanMessage("Flip the coin again"),
      ]);
      const res = await model.stream(input);
      const resArray: BaseMessageChunk[] = [];
      for await (const chunk of res) {
        resArray.push(chunk);
      }
      expect(resArray).toBeDefined();
      expect(resArray.length).toBeGreaterThanOrEqual(1);
      const lastChunk = resArray[resArray.length - 1];
      expect(lastChunk).toBeDefined();
      expect(lastChunk._getType()).toEqual("ai");
      const aiChunk = lastChunk as AIMessageChunk;
      console.log(aiChunk);
      console.log(JSON.stringify(resArray, null, 2));
    } catch (e) {
      console.error(e);
      throw e;
    }
  });

  // Skipped: verifies a bound tool produces exactly one tool call whose
  // parsed args match the raw function-call arguments in additional_kwargs.
  test.skip("Tool call", async () => {
    const chat = new ChatGoogle().bindTools([new WeatherTool()]);
    const res = await chat.invoke("What is the weather in SF and LA");
    console.log(res);
    expect(res.tool_calls?.length).toEqual(1);
    expect(res.tool_calls?.[0].args).toEqual(
      JSON.parse(res.additional_kwargs.tool_calls?.[0].function.arguments ?? "")
    );
  });

  // Skipped: few-shot conversation containing a hand-written tool call and
  // tool result, then asks the model to recall the reported temperature.
  // NOTE(review): the hand-written call uses the name "get_current_weather"
  // while the bound WeatherTool is named "get_weather" — confirm this
  // mismatch is intentional before un-skipping.
  test.skip("Few shotting with tool calls", async () => {
    const chat = new ChatGoogle().bindTools([new WeatherTool()]);
    const res = await chat.invoke("What is the weather in SF");
    console.log(res);
    const res2 = await chat.invoke([
      new HumanMessage("What is the weather in SF?"),
      new AIMessage({
        content: "",
        tool_calls: [
          {
            id: "12345",
            name: "get_current_weather",
            args: {
              location: "SF",
            },
          },
        ],
      }),
      new ToolMessage({
        tool_call_id: "12345",
        content: "It is currently 24 degrees with hail in SF.",
      }),
      new AIMessage("It is currently 24 degrees in SF with hail in SF."),
      new HumanMessage("What did you say the weather was?"),
    ]);
    console.log(res2);
    expect(res2.content).toContain("24");
  });

  // Skipped: structured output via a JSON-schema tool definition; expects
  // the parsed result to expose the schema's "location" property.
  test.skip("withStructuredOutput", async () => {
    const tool = {
      name: "get_weather",
      description:
        "Get the weather of a specific location and return the temperature in Celsius.",
      parameters: {
        type: "object",
        properties: {
          location: {
            type: "string",
            description: "The name of city to get the weather for.",
          },
        },
        required: ["location"],
      },
    };
    const model = new ChatGoogle().withStructuredOutput(tool);
    const result = await model.invoke("What is the weather in Paris?");
    expect(result).toHaveProperty("location");
  });

  // Sends a remote image by URI through the media manager (backed by the
  // AI Studio file store) and expects the model to describe it.
  test("media - fileData", async () => {
    const canonicalStore = new BlobStoreAIStudioFile({});
    const resolver = new SimpleWebBlobStore();
    const mediaManager = new MediaManager({
      store: canonicalStore,
      resolvers: [resolver],
    });
    const model = new ChatGoogle({
      modelName: "gemini-1.5-flash",
      apiVersion: "v1beta",
      apiConfig: {
        mediaManager,
      },
    });
    const message: MessageContentComplex[] = [
      {
        type: "text",
        text: "What is in this image?",
      },
      {
        type: "media",
        fileUri: "https://js.langchain.com/v0.2/img/brand/wordmark.png",
      },
    ];
    const messages: BaseMessage[] = [
      new HumanMessageChunk({ content: message }),
    ];
    try {
      const res = await model.invoke(messages);
      // console.log(res);
      expect(res).toBeDefined();
      expect(res._getType()).toEqual("ai");
      const aiMessage = res as AIMessageChunk;
      expect(aiMessage.content).toBeDefined();
      expect(typeof aiMessage.content).toBe("string");
      const text = aiMessage.content as string;
      // The image is the LangChain wordmark, so the description should
      // mention it.
      expect(text).toMatch(/LangChain/);
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (e: any) {
      console.error(e);
      console.error(JSON.stringify(e.details, null, 1));
      throw e;
    }
  });
});
0
lc_public_repos/langchainjs/libs/langchain-google-webauth/src
lc_public_repos/langchainjs/libs/langchain-google-webauth/src/tests/media.int.test.ts
// Integration tests for the GCS and AI Studio blob stores.
// NOTE(review): the GCS tests read and write a live bucket
// ("gs://test-langchainjs/") and the "get *" tests assume objects written by
// earlier runs already exist — confirm bucket access before running.
import fs from "fs/promises";
import { test } from "@jest/globals";
import { GoogleCloudStorageUri } from "@langchain/google-common/experimental/media";
import { MediaBlob } from "@langchain/google-common/experimental/utils/media_core";
import {
  BlobStoreAIStudioFile,
  BlobStoreGoogleCloudStorage,
  BlobStoreGoogleCloudStorageParams,
} from "../media.js";

describe("Google Webauth GCS store", () => {
  // Round-trips a plain-text blob with no custom metadata; the timestamped
  // path keeps runs from colliding.
  test("save text no-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri = `gs://test-langchainjs/text/test-${Date.now()}-nm`;
    const content = "This is a test";
    const blob = await MediaBlob.fromBlob(
      new Blob([content], { type: "text/plain" }),
      {
        path: uri,
      }
    );
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const storedBlob = await blobStore.store(blob);
    // console.log(storedBlob);
    expect(storedBlob?.path).toEqual(uri);
    expect(await storedBlob?.asString()).toEqual(content);
    expect(storedBlob?.mimetype).toEqual("text/plain");
    expect(storedBlob?.metadata).not.toHaveProperty("metadata");
    expect(storedBlob?.size).toEqual(content.length);
    expect(storedBlob?.metadata?.kind).toEqual("storage#object");
  });

  // Same round-trip, but with custom metadata attached; expects the custom
  // keys to come back under metadata.metadata.
  test("save text with-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri = `gs://test-langchainjs/text/test-${Date.now()}-wm`;
    const content = "This is a test";
    const blob = await MediaBlob.fromBlob(
      new Blob([content], { type: "text/plain" }),
      {
        path: uri,
        metadata: {
          alpha: "one",
          bravo: "two",
        },
      }
    );
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const storedBlob = await blobStore.store(blob);
    // console.log(storedBlob);
    expect(storedBlob?.path).toEqual(uri);
    expect(await storedBlob?.asString()).toEqual(content);
    expect(storedBlob?.mimetype).toEqual("text/plain");
    expect(storedBlob?.metadata).toHaveProperty("metadata");
    expect(storedBlob?.metadata?.metadata?.alpha).toEqual("one");
    expect(storedBlob?.metadata?.metadata?.bravo).toEqual("two");
    expect(storedBlob?.size).toEqual(content.length);
    expect(storedBlob?.metadata?.kind).toEqual("storage#object");
  });

  // Stores a small PNG fixture (176 bytes) and checks size/mimetype.
  test("save image no-metadata", async () => {
    const filename = `src/tests/data/blue-square.png`;
    const dataBuffer = await fs.readFile(filename);
    const data = new Blob([dataBuffer], { type: "image/png" });
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri = `gs://test-langchainjs/image/test-${Date.now()}-nm`;
    const blob = await MediaBlob.fromBlob(data, {
      path: uri,
    });
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const storedBlob = await blobStore.store(blob);
    // console.log(storedBlob);
    expect(storedBlob?.path).toEqual(uri);
    expect(storedBlob?.size).toEqual(176);
    expect(storedBlob?.mimetype).toEqual("image/png");
    expect(storedBlob?.metadata?.kind).toEqual("storage#object");
  });

  // Fetches a fixed (non-timestamped) object assumed to pre-exist in the
  // bucket with no custom metadata.
  test("get text no-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri: string = "gs://test-langchainjs/text/test-nm";
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const blob = await blobStore.fetch(uri);
    // console.log(blob);
    expect(blob?.path).toEqual(uri);
    expect(await blob?.asString()).toEqual("This is a test");
    expect(blob?.mimetype).toEqual("text/plain");
    expect(blob?.metadata).not.toHaveProperty("metadata");
    expect(blob?.size).toEqual(14);
    expect(blob?.metadata?.kind).toEqual("storage#object");
  });

  // Fetches a pre-existing object that was stored with custom metadata.
  test("get text with-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri: string = "gs://test-langchainjs/text/test-wm";
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const blob = await blobStore.fetch(uri);
    // console.log(blob);
    expect(blob?.path).toEqual(uri);
    expect(await blob?.asString()).toEqual("This is a test");
    expect(blob?.mimetype).toEqual("text/plain");
    expect(blob?.metadata).toHaveProperty("metadata");
    expect(blob?.metadata?.metadata?.alpha).toEqual("one");
    expect(blob?.metadata?.metadata?.bravo).toEqual("two");
    expect(blob?.size).toEqual(14);
    expect(blob?.metadata?.kind).toEqual("storage#object");
  });

  // Fetches a pre-existing image object and checks size/mimetype.
  test("get image no-metadata", async () => {
    const uriPrefix = new GoogleCloudStorageUri("gs://test-langchainjs/");
    const uri: string = "gs://test-langchainjs/image/test-nm";
    const config: BlobStoreGoogleCloudStorageParams = {
      uriPrefix,
    };
    const blobStore = new BlobStoreGoogleCloudStorage(config);
    const blob = await blobStore.fetch(uri);
    // console.log(blob);
    expect(blob?.path).toEqual(uri);
    expect(blob?.size).toEqual(176);
    expect(blob?.mimetype).toEqual("image/png");
    expect(blob?.metadata?.kind).toEqual("storage#object");
  });
});

describe("Google APIKey AIStudioBlobStore", () => {
  // Uploads the PNG fixture to the AI Studio file store; the returned blob
  // carries only metadata (sizeBytes/state), not the data itself.
  test("save image no metadata", async () => {
    const filename = `src/tests/data/blue-square.png`;
    const dataBuffer = await fs.readFile(filename);
    const data = new Blob([dataBuffer], { type: "image/png" });
    const blob = await MediaBlob.fromBlob(data, {
      path: filename,
    });
    const blobStore = new BlobStoreAIStudioFile();
    const storedBlob = await blobStore.store(blob);
    console.log(storedBlob);
    // The blob itself is expected to have no data right now,
    // but this will hopefully change in the future.
    expect(storedBlob?.size).toEqual(0);
    expect(storedBlob?.dataType).toEqual("image/png");
    expect(storedBlob?.metadata?.sizeBytes).toEqual("176");
    expect(storedBlob?.metadata?.state).toEqual("ACTIVE");
  });

  // With the default retry behavior the store waits until the uploaded
  // video reaches the ACTIVE state and exposes its video metadata.
  test("save video with retry", async () => {
    const filename = `src/tests/data/rainbow.mp4`;
    const dataBuffer = await fs.readFile(filename);
    const data = new Blob([dataBuffer], { type: "video/mp4" });
    const blob = await MediaBlob.fromBlob(data, {
      path: filename,
    });
    const blobStore = new BlobStoreAIStudioFile();
    const storedBlob = await blobStore.store(blob);
    console.log(storedBlob);
    // The blob itself is expected to have no data right now,
    // but this will hopefully change in the future.
    expect(storedBlob?.size).toEqual(0);
    expect(storedBlob?.dataType).toEqual("video/mp4");
    expect(storedBlob?.metadata?.sizeBytes).toEqual("1020253");
    expect(storedBlob?.metadata?.state).toEqual("ACTIVE");
    expect(storedBlob?.metadata?.videoMetadata?.videoDuration).toEqual("8s");
  });

  // With retries disabled (retryTime: -1) the store returns immediately,
  // so the video is still PROCESSING and has no video metadata yet.
  test("save video no retry", async () => {
    const filename = `src/tests/data/rainbow.mp4`;
    const dataBuffer = await fs.readFile(filename);
    const data = new Blob([dataBuffer], { type: "video/mp4" });
    const blob = await MediaBlob.fromBlob(data, {
      path: filename,
    });
    const blobStore = new BlobStoreAIStudioFile({
      retryTime: -1,
    });
    const storedBlob = await blobStore.store(blob);
    console.log(storedBlob);
    // The blob itself is expected to have no data right now,
    // but this will hopefully change in the future.
    expect(storedBlob?.size).toEqual(0);
    expect(storedBlob?.dataType).toEqual("video/mp4");
    expect(storedBlob?.metadata?.sizeBytes).toEqual("1020253");
    expect(storedBlob?.metadata?.state).toEqual("PROCESSING");
    expect(storedBlob?.metadata?.videoMetadata).toBeUndefined();
  });
});